aboutsummaryrefslogtreecommitdiffhomepage
path: root/vendor/github.com/hashicorp
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/hashicorp')
-rw-r--r--vendor/github.com/hashicorp/errwrap/README.md2
-rw-r--r--vendor/github.com/hashicorp/errwrap/go.mod1
-rw-r--r--vendor/github.com/hashicorp/go-getter/.travis.yml15
-rw-r--r--vendor/github.com/hashicorp/go-getter/README.md81
-rw-r--r--vendor/github.com/hashicorp/go-getter/appveyor.yml2
-rw-r--r--vendor/github.com/hashicorp/go-getter/checksum.go314
-rw-r--r--vendor/github.com/hashicorp/go-getter/client.go140
-rw-r--r--vendor/github.com/hashicorp/go-getter/client_option.go46
-rw-r--r--vendor/github.com/hashicorp/go-getter/client_option_progress.go38
-rw-r--r--vendor/github.com/hashicorp/go-getter/common.go14
-rw-r--r--vendor/github.com/hashicorp/go-getter/copy_dir.go6
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_tar.go32
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_testing.go35
-rw-r--r--vendor/github.com/hashicorp/go-getter/decompress_zip.go2
-rw-r--r--vendor/github.com/hashicorp/go-getter/detect.go2
-rw-r--r--vendor/github.com/hashicorp/go-getter/detect_gcs.go43
-rw-r--r--vendor/github.com/hashicorp/go-getter/detect_git.go26
-rw-r--r--vendor/github.com/hashicorp/go-getter/detect_github.go26
-rw-r--r--vendor/github.com/hashicorp/go-getter/detect_ssh.go49
-rw-r--r--vendor/github.com/hashicorp/go-getter/get.go18
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_base.go20
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_file.go6
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_file_copy.go29
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_file_unix.go4
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_file_windows.go24
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_gcs.go172
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_git.go81
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_hg.go16
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_http.go118
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_mock.go2
-rw-r--r--vendor/github.com/hashicorp/go-getter/get_s3.go17
-rw-r--r--vendor/github.com/hashicorp/go-getter/go.mod22
-rw-r--r--vendor/github.com/hashicorp/go-getter/go.sum182
-rw-r--r--vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go15
-rw-r--r--vendor/github.com/hashicorp/go-getter/source.go23
-rw-r--r--vendor/github.com/hashicorp/go-hclog/.gitignore1
-rw-r--r--vendor/github.com/hashicorp/go-hclog/README.md14
-rw-r--r--vendor/github.com/hashicorp/go-hclog/go.mod7
-rw-r--r--vendor/github.com/hashicorp/go-hclog/go.sum6
-rw-r--r--vendor/github.com/hashicorp/go-hclog/int.go174
-rw-r--r--vendor/github.com/hashicorp/go-hclog/log.go29
-rw-r--r--vendor/github.com/hashicorp/go-hclog/nulllogger.go47
-rw-r--r--vendor/github.com/hashicorp/go-multierror/.travis.yml12
-rw-r--r--vendor/github.com/hashicorp/go-multierror/Makefile31
-rw-r--r--vendor/github.com/hashicorp/go-multierror/README.md6
-rw-r--r--vendor/github.com/hashicorp/go-multierror/append.go8
-rw-r--r--vendor/github.com/hashicorp/go-multierror/format.go8
-rw-r--r--vendor/github.com/hashicorp/go-multierror/go.mod3
-rw-r--r--vendor/github.com/hashicorp/go-multierror/go.sum4
-rw-r--r--vendor/github.com/hashicorp/go-multierror/multierror.go4
-rw-r--r--vendor/github.com/hashicorp/go-multierror/sort.go16
-rw-r--r--vendor/github.com/hashicorp/go-plugin/README.md20
-rw-r--r--vendor/github.com/hashicorp/go-plugin/client.go523
-rw-r--r--vendor/github.com/hashicorp/go-plugin/go.mod17
-rw-r--r--vendor/github.com/hashicorp/go-plugin/go.sum31
-rw-r--r--vendor/github.com/hashicorp/go-plugin/grpc_broker.go38
-rw-r--r--vendor/github.com/hashicorp/go-plugin/grpc_client.go24
-rw-r--r--vendor/github.com/hashicorp/go-plugin/grpc_controller.go23
-rw-r--r--vendor/github.com/hashicorp/go-plugin/grpc_server.go24
-rw-r--r--vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go3
-rw-r--r--vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go (renamed from vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go)97
-rw-r--r--vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto (renamed from vendor/github.com/hashicorp/go-plugin/grpc_broker.proto)1
-rw-r--r--vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go143
-rw-r--r--vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto11
-rw-r--r--vendor/github.com/hashicorp/go-plugin/log_entry.go4
-rw-r--r--vendor/github.com/hashicorp/go-plugin/mtls.go73
-rw-r--r--vendor/github.com/hashicorp/go-plugin/server.go165
-rw-r--r--vendor/github.com/hashicorp/go-plugin/testing.go36
-rw-r--r--vendor/github.com/hashicorp/go-safetemp/go.mod1
-rw-r--r--vendor/github.com/hashicorp/go-uuid/uuid.go24
-rw-r--r--vendor/github.com/hashicorp/go-version/version.go35
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md184
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go262
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go215
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go42
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go66
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go44
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go50
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go84
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go209
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go43
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md67
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go11
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go196
-rw-r--r--vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go129
-rw-r--r--vendor/github.com/hashicorp/hcl2/gohcl/doc.go4
-rw-r--r--vendor/github.com/hashicorp/hcl2/gohcl/encode.go191
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go42
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go143
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go23
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go456
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go62
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go28
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go18
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go2
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go388
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go79
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go24
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go6234
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl63
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md149
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go47
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go118
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go92
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go158
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go6
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go44
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/parser.go31
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go8
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/spec.md22
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/json/structure.go65
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/merged.go14
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/ops.go141
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/pos.go13
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go18
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/spec.md102
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go117
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/traversal.go61
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go5
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/public.go5
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/spec.go571
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcldec/variables.go12
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcled/doc.go4
-rw-r--r--vendor/github.com/hashicorp/hcl2/hcled/navigation.go34
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast.go121
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go48
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go74
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go153
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go201
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/doc.go11
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/format.go492
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/generate.go250
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go23
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/node.go236
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/parser.go594
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/public.go44
-rw-r--r--vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go122
-rw-r--r--vendor/github.com/hashicorp/hil/convert.go19
-rw-r--r--vendor/github.com/hashicorp/hil/go.mod6
-rw-r--r--vendor/github.com/hashicorp/hil/go.sum4
-rw-r--r--vendor/github.com/hashicorp/logutils/go.mod1
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/LICENSE353
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go138
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go21
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go130
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go322
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go325
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go35
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go11
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go9
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go9
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go64
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go106
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go50
-rw-r--r--vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/count_attr.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/doc.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/input_variable.go41
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/instance_key.go123
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/local_value.go48
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/module.go75
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/module_call.go81
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/module_instance.go415
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/output_value.go75
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/parse_ref.go338
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/parse_target.go318
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/path_attr.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/provider_config.go297
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/referenceable.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/resource.go270
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/resource_phase.go105
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go33
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/self.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/targetable.go26
-rw-r--r--vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/diagnostic.go295
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/diff.go1192
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/format.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/object_id.go123
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/plan.go302
-rw-r--r--vendor/github.com/hashicorp/terraform/command/format/state.go286
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go97
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go424
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go276
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go109
-rw-r--r--vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go214
-rw-r--r--vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go185
-rw-r--r--vendor/github.com/hashicorp/terraform/config/module/storage.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/config/resource_mode_string.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/backend.go55
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/compat_shim.go116
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/config.go205
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/config_build.go179
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go125
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/doc.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/getter.go150
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/inode.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/loader.go150
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go97
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go504
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go76
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go45
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configload/testing.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go274
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go117
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/doc.go (renamed from vendor/github.com/hashicorp/terraform/config/configschema/doc.go)0
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go42
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go (renamed from vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go)13
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go28
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/schema.go (renamed from vendor/github.com/hashicorp/terraform/config/configschema/schema.go)23
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go173
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/depends_on.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/doc.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/module.go404
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/module_call.go188
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/module_merge.go247
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/module_merge_body.go143
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/named_values.go364
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/parser.go100
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/parser_config.go247
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go142
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/parser_values.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/provider.go144
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/provisioner.go150
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go25
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/resource.go486
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/synth_body.go118
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/util.go63
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go45
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go39
-rw-r--r--vendor/github.com/hashicorp/terraform/configs/version_constraint.go64
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/dag.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/dag/walk.go94
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/plugin/doc.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go1338
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go147
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go131
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/state.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go163
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing.go298
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go334
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go113
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/backend.go138
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go192
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/provider.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource.go270
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go98
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/schema.go316
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/set.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/shims.go115
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/testing.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go123
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go144
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go78
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go125
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/doc.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go363
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/getter.go210
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/inode.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go21
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go56
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go558
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go36
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/testing.go73
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go83
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go138
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh16
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go3455
-rw-r--r--vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto351
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go187
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go145
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go43
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/data.go33
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/doc.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/eval.go477
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go129
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/collection.go1511
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go87
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go285
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go70
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go140
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go345
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/number.go155
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/funcs/string.go280
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/functions.go147
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/references.go81
-rw-r--r--vendor/github.com/hashicorp/terraform/lang/scope.go34
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/action.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/action_string.go49
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/changes.go308
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/changes_src.go190
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/changes_state.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/changes_sync.go144
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/doc.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/dynamic_value.go96
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go437
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/doc.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go104
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go390
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go267
-rw-r--r--vendor/github.com/hashicorp/terraform/plans/plan.go92
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/client.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/convert/schema.go154
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/error.go34
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/get.go607
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go34
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go42
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/version.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go562
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go178
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/plugin.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/resource_provider.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/serve.go87
-rw-r--r--vendor/github.com/hashicorp/terraform/plugin/ui_input.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/providers/addressed_types.go47
-rw-r--r--vendor/github.com/hashicorp/terraform/providers/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/providers/provider.go351
-rw-r--r--vendor/github.com/hashicorp/terraform/providers/resolver.go112
-rw-r--r--vendor/github.com/hashicorp/terraform/provisioners/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/provisioners/factory.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/provisioners/provisioner.go82
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/client.go140
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/errors.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go60
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/provider.go36
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/provider_list.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go96
-rw-r--r--vendor/github.com/hashicorp/terraform/states/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/states/eachmode_string.go35
-rw-r--r--vendor/github.com/hashicorp/terraform/states/instance_generation.go24
-rw-r--r--vendor/github.com/hashicorp/terraform/states/instance_object.go120
-rw-r--r--vendor/github.com/hashicorp/terraform/states/instance_object_src.go113
-rw-r--r--vendor/github.com/hashicorp/terraform/states/module.go285
-rw-r--r--vendor/github.com/hashicorp/terraform/states/objectstatus_string.go33
-rw-r--r--vendor/github.com/hashicorp/terraform/states/output_value.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/states/resource.go239
-rw-r--r--vendor/github.com/hashicorp/terraform/states/state.go229
-rw-r--r--vendor/github.com/hashicorp/terraform/states/state_deepcopy.go218
-rw-r--r--vendor/github.com/hashicorp/terraform/states/state_equal.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/states/state_string.go279
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go62
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/doc.go3
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/file.go62
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/read.go209
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version0.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version1.go174
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go172
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version2.go209
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go145
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version3.go50
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go431
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/version4.go604
-rw-r--r--vendor/github.com/hashicorp/terraform/states/statefile/write.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/states/sync.go537
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context.go793
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_components.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_import.go56
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/context_input.go251
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/debug.go523
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/diff.go573
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_apply.go596
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go41
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context.go122
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go292
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go255
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count.go142
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go95
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_diff.go906
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go65
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go56
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_lang.go61
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_local.go84
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_output.go165
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_provider.go172
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go376
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go79
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_resource.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_state.go552
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go106
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_validate.go591
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go105
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/eval_variable.go219
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go70
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/evaluate.go933
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go299
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph.go97
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder.go40
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go116
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go58
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go108
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go48
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go27
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go99
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go89
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk.go48
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go161
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go2
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook.go112
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook_mock.go287
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/hook_stop.go41
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/interpolate.go86
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go139
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go18
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go42
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go239
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_local.go68
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go88
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go162
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output.go179
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go32
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go29
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go20
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go437
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go50
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go423
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go433
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go358
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go313
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go110
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go89
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go248
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go86
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go239
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go191
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go38
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/path.go15
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/plan.go130
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/provider_mock.go522
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go154
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource.go194
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_address.go175
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provider.go55
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go16
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/schemas.go256
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/semantics.go132
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state.go415
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/state_add.go374
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform.go11
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go54
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go99
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go56
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config.go80
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go49
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go9
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go178
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go185
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go108
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_diff.go220
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go203
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_local.go30
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go152
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go139
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go31
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go167
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_output.go42
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provider.go489
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go23
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_reference.go354
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go17
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go45
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_state.go77
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_targets.go81
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/transform_variable.go22
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go4
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go5
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go12
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go59
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/variables.go409
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/version_required.go97
-rw-r--r--vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go19
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go68
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/contextual.go372
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go14
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go31
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go149
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/error.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/hcl.go10
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go6
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go8
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go7
-rw-r--r--vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go13
-rw-r--r--vendor/github.com/hashicorp/terraform/version/version.go10
-rw-r--r--vendor/github.com/hashicorp/yamux/session.go65
-rw-r--r--vendor/github.com/hashicorp/yamux/stream.go45
-rw-r--r--vendor/github.com/hashicorp/yamux/util.go15
532 files changed, 61956 insertions, 12671 deletions
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
index 1c95f59..444df08 100644
--- a/vendor/github.com/hashicorp/errwrap/README.md
+++ b/vendor/github.com/hashicorp/errwrap/README.md
@@ -48,7 +48,7 @@ func main() {
48 // We can use the Contains helpers to check if an error contains 48 // We can use the Contains helpers to check if an error contains
49 // another error. It is safe to do this with a nil error, or with 49 // another error. It is safe to do this with a nil error, or with
50 // an error that doesn't even use the errwrap package. 50 // an error that doesn't even use the errwrap package.
51 if errwrap.Contains(err, ErrNotExist) { 51 if errwrap.Contains(err, "does not exist") {
52 // Do something 52 // Do something
53 } 53 }
54 if errwrap.ContainsType(err, new(os.PathError)) { 54 if errwrap.ContainsType(err, new(os.PathError)) {
diff --git a/vendor/github.com/hashicorp/errwrap/go.mod b/vendor/github.com/hashicorp/errwrap/go.mod
new file mode 100644
index 0000000..c9b8402
--- /dev/null
+++ b/vendor/github.com/hashicorp/errwrap/go.mod
@@ -0,0 +1 @@
module github.com/hashicorp/errwrap
diff --git a/vendor/github.com/hashicorp/go-getter/.travis.yml b/vendor/github.com/hashicorp/go-getter/.travis.yml
index da804c2..4fe9176 100644
--- a/vendor/github.com/hashicorp/go-getter/.travis.yml
+++ b/vendor/github.com/hashicorp/go-getter/.travis.yml
@@ -9,15 +9,16 @@ addons:
9 9
10language: go 10language: go
11 11
12os:
13 - linux
14 - osx
15
12go: 16go:
13 - 1.8.x 17 - "1.11.x"
14 - 1.9.x 18
15 - master 19before_script:
20 - go build ./cmd/go-getter
16 21
17branches: 22branches:
18 only: 23 only:
19 - master 24 - master
20
21matrix:
22 allow_failures:
23 - go: master
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
index 40ace74..ba4df6f 100644
--- a/vendor/github.com/hashicorp/go-getter/README.md
+++ b/vendor/github.com/hashicorp/go-getter/README.md
@@ -71,6 +71,7 @@ can be augmented at runtime by implementing the `Getter` interface.
71 * Mercurial 71 * Mercurial
72 * HTTP 72 * HTTP
73 * Amazon S3 73 * Amazon S3
74 * Google GCP
74 75
75In addition to the above protocols, go-getter has what are called "detectors." 76In addition to the above protocols, go-getter has what are called "detectors."
76These take a URL and attempt to automatically choose the best protocol for 77These take a URL and attempt to automatically choose the best protocol for
@@ -97,7 +98,7 @@ would download the given HTTP URL using the Git protocol.
97 98
98Forced protocols will also override any detectors. 99Forced protocols will also override any detectors.
99 100
100In the absense of a forced protocol, detectors may be run on the URL, transforming 101In the absence of a forced protocol, detectors may be run on the URL, transforming
101the protocol anyways. The above example would've used the Git protocol either 102the protocol anyways. The above example would've used the Git protocol either
102way since the Git detector would've detected it was a GitHub URL. 103way since the Git detector would've detected it was a GitHub URL.
103 104
@@ -155,20 +156,44 @@ For file downloads of any protocol, go-getter can automatically verify
155a checksum for you. Note that checksumming only works for downloading files, 156a checksum for you. Note that checksumming only works for downloading files,
156not directories, but checksumming will work for any protocol. 157not directories, but checksumming will work for any protocol.
157 158
158To checksum a file, append a `checksum` query parameter to the URL. 159To checksum a file, append a `checksum` query parameter to the URL. go-getter
159The paramter value should be in the format of `type:value`, where 160will parse out this query parameter automatically and use it to verify the
160type is "md5", "sha1", "sha256", or "sha512". The "value" should be 161checksum. The parameter value can be in the format of `type:value` or just
161the actual checksum value. go-getter will parse out this query parameter 162`value`, where type is "md5", "sha1", "sha256", "sha512" or "file" . The
162automatically and use it to verify the checksum. An example URL 163"value" should be the actual checksum value or download URL for "file". When
163is shown below: 164`type` part is omitted, type will be guessed based on the length of the
165checksum string. Examples:
164 166
165``` 167```
166./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21 168./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
167``` 169```
168 170
171```
172./foo.txt?checksum=b7d96c89d09d9e204f5fedc4d5d55b21
173```
174
175```
176./foo.txt?checksum=file:./foo.txt.sha256sum
177```
178
179When checksumming from a file - ex: with `checksum=file:url` - go-getter will
180get the file linked in the URL after `file:` using the same configuration. For
181example, in `file:http://releases.ubuntu.com/cosmic/MD5SUMS` go-getter will
182download a checksum file under the aforementioned url using the http protocol.
183All protocols supported by go-getter can be used. The checksum file will be
184downloaded in a temporary file then parsed. The destination of the temporary
185file can be changed by setting system specific environment variables: `TMPDIR`
186for unix; `TMP`, `TEMP` or `USERPROFILE` on windows. Read godoc of
187[os.TempDir](https://golang.org/pkg/os/#TempDir) for more information on the
188temporary directory selection. Content of files are expected to be BSD or GNU
189style. Once go-getter is done with the checksum file; it is deleted.
190
169The checksum query parameter is never sent to the backend protocol 191The checksum query parameter is never sent to the backend protocol
170implementation. It is used at a higher level by go-getter itself. 192implementation. It is used at a higher level by go-getter itself.
171 193
194If the destination file exists and the checksums match: download
195will be skipped.
196
172### Unarchiving 197### Unarchiving
173 198
174go-getter will automatically unarchive files into a file or directory 199go-getter will automatically unarchive files into a file or directory
@@ -215,11 +240,12 @@ from the URL before going to the final protocol downloader.
215 240
216## Protocol-Specific Options 241## Protocol-Specific Options
217 242
218This section documents the protocol-specific options that can be specified 243This section documents the protocol-specific options that can be specified for
219for go-getter. These options should be appended to the input as normal query 244go-getter. These options should be appended to the input as normal query
220parameters. Depending on the usage of go-getter, applications may provide 245parameters ([HTTP headers](#headers) are an exception to this, however).
221alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io) 246Depending on the usage of go-getter, applications may provide alternate ways of
222provides a nice options block for specifying options rather than in the URL. 247inputting options. For example, [Nomad](https://www.nomadproject.io) provides a
248nice options block for specifying options rather than in the URL.
223 249
224## General (All Protocols) 250## General (All Protocols)
225 251
@@ -250,6 +276,19 @@ None
250 from a private key file on disk, you would run `base64 -w0 <file>`. 276 from a private key file on disk, you would run `base64 -w0 <file>`.
251 277
252 **Note**: Git 2.3+ is required to use this feature. 278 **Note**: Git 2.3+ is required to use this feature.
279
280 * `depth` - The Git clone depth. The provided number specifies the last `n`
281 revisions to clone from the repository.
282
283
284The `git` getter accepts both URL-style SSH addresses like
285`git::ssh://git@example.com/foo/bar`, and "scp-style" addresses like
286`git::git@example.com/foo/bar`. In the latter case, omitting the `git::`
287force prefix is allowed if the username prefix is exactly `git@`.
288
289The "scp-style" addresses _cannot_ be used in conjunction with the `ssh://`
290scheme prefix, because in that case the colon is used to mark an optional
291port number to connect on, rather than to delimit the path from the host.
253 292
254### Mercurial (`hg`) 293### Mercurial (`hg`)
255 294
@@ -263,6 +302,13 @@ To use HTTP basic authentication with go-getter, simply prepend `username:passwo
263hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special 302hostname in the URL such as `https://Aladdin:OpenSesame@www.example.com/index.html`. All special
264characters, including the username and password, must be URL encoded. 303characters, including the username and password, must be URL encoded.
265 304
305#### Headers
306
307Optional request headers can be added by supplying them in a custom
308[`HttpGetter`](https://godoc.org/github.com/hashicorp/go-getter#HttpGetter)
309(_not_ as query parameters like most other options). These headers will be sent
310out on every request the getter in question makes.
311
266### S3 (`s3`) 312### S3 (`s3`)
267 313
268S3 takes various access configurations in the URL. Note that it will also 314S3 takes various access configurations in the URL. Note that it will also
@@ -299,3 +345,14 @@ Some examples for these addressing schemes:
299- bucket.s3-eu-west-1.amazonaws.com/foo/bar 345- bucket.s3-eu-west-1.amazonaws.com/foo/bar
300- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY&region=us-east-2" 346- "s3::http://127.0.0.1:9000/test-bucket/hello.txt?aws_access_key_id=KEYID&aws_access_key_secret=SECRETKEY&region=us-east-2"
301 347
348### GCS (`gcs`)
349
350#### GCS Authentication
351
352In order to access to GCS, authentication credentials should be provided. More information can be found [here](https://cloud.google.com/docs/authentication/getting-started)
353
354#### GCS Bucket Examples
355
356- gcs::https://www.googleapis.com/storage/v1/bucket
357- gcs::https://www.googleapis.com/storage/v1/bucket/foo.zip
358- www.googleapis.com/storage/v1/bucket/foo
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
index ec48d45..1e8718e 100644
--- a/vendor/github.com/hashicorp/go-getter/appveyor.yml
+++ b/vendor/github.com/hashicorp/go-getter/appveyor.yml
@@ -13,4 +13,4 @@ install:
13 13
14 go get -d -v -t ./... 14 go get -d -v -t ./...
15build_script: 15build_script:
16- cmd: go test -v ./... 16- cmd: go test ./...
diff --git a/vendor/github.com/hashicorp/go-getter/checksum.go b/vendor/github.com/hashicorp/go-getter/checksum.go
new file mode 100644
index 0000000..bea7ed1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/checksum.go
@@ -0,0 +1,314 @@
1package getter
2
3import (
4 "bufio"
5 "bytes"
6 "crypto/md5"
7 "crypto/sha1"
8 "crypto/sha256"
9 "crypto/sha512"
10 "encoding/hex"
11 "fmt"
12 "hash"
13 "io"
14 "net/url"
15 "os"
16 "path/filepath"
17 "strings"
18
19 urlhelper "github.com/hashicorp/go-getter/helper/url"
20)
21
22// fileChecksum helps verifying the checksum for a file.
23type fileChecksum struct {
24 Type string
25 Hash hash.Hash
26 Value []byte
27 Filename string
28}
29
30// A ChecksumError is returned when a checksum differs
31type ChecksumError struct {
32 Hash hash.Hash
33 Actual []byte
34 Expected []byte
35 File string
36}
37
38func (cerr *ChecksumError) Error() string {
39 if cerr == nil {
40 return "<nil>"
41 }
42 return fmt.Sprintf(
43 "Checksums did not match for %s.\nExpected: %s\nGot: %s\n%T",
44 cerr.File,
45 hex.EncodeToString(cerr.Expected),
46 hex.EncodeToString(cerr.Actual),
47 cerr.Hash, // ex: *sha256.digest
48 )
49}
50
51// checksum is a simple method to compute the checksum of a source file
52// and compare it to the given expected value.
53func (c *fileChecksum) checksum(source string) error {
54 f, err := os.Open(source)
55 if err != nil {
56 return fmt.Errorf("Failed to open file for checksum: %s", err)
57 }
58 defer f.Close()
59
60 c.Hash.Reset()
61 if _, err := io.Copy(c.Hash, f); err != nil {
62 return fmt.Errorf("Failed to hash: %s", err)
63 }
64
65 if actual := c.Hash.Sum(nil); !bytes.Equal(actual, c.Value) {
66 return &ChecksumError{
67 Hash: c.Hash,
68 Actual: actual,
69 Expected: c.Value,
70 File: source,
71 }
72 }
73
74 return nil
75}
76
77// extractChecksum will return a fileChecksum based on the 'checksum'
78// parameter of u.
79// ex:
80// http://hashicorp.com/terraform?checksum=<checksumValue>
81// http://hashicorp.com/terraform?checksum=<checksumType>:<checksumValue>
82// http://hashicorp.com/terraform?checksum=file:<checksum_url>
83// when checksumming from a file, extractChecksum will go get checksum_url
84// in a temporary directory, parse the content of the file then delete it.
85// Content of files are expected to be BSD style or GNU style.
86//
87// BSD-style checksum:
88// MD5 (file1) = <checksum>
89// MD5 (file2) = <checksum>
90//
91// GNU-style:
92// <checksum> file1
93// <checksum> *file2
94//
95// see parseChecksumLine for more detail on checksum file parsing
96func (c *Client) extractChecksum(u *url.URL) (*fileChecksum, error) {
97 q := u.Query()
98 v := q.Get("checksum")
99
100 if v == "" {
101 return nil, nil
102 }
103
104 vs := strings.SplitN(v, ":", 2)
105 switch len(vs) {
106 case 2:
107 break // good
108 default:
109 // here, we try to guess the checksum from it's length
110 // if the type was not passed
111 return newChecksumFromValue(v, filepath.Base(u.EscapedPath()))
112 }
113
114 checksumType, checksumValue := vs[0], vs[1]
115
116 switch checksumType {
117 case "file":
118 return c.checksumFromFile(checksumValue, u)
119 default:
120 return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath()))
121 }
122}
123
124func newChecksum(checksumValue, filename string) (*fileChecksum, error) {
125 c := &fileChecksum{
126 Filename: filename,
127 }
128 var err error
129 c.Value, err = hex.DecodeString(checksumValue)
130 if err != nil {
131 return nil, fmt.Errorf("invalid checksum: %s", err)
132 }
133 return c, nil
134}
135
136func newChecksumFromType(checksumType, checksumValue, filename string) (*fileChecksum, error) {
137 c, err := newChecksum(checksumValue, filename)
138 if err != nil {
139 return nil, err
140 }
141
142 c.Type = strings.ToLower(checksumType)
143 switch c.Type {
144 case "md5":
145 c.Hash = md5.New()
146 case "sha1":
147 c.Hash = sha1.New()
148 case "sha256":
149 c.Hash = sha256.New()
150 case "sha512":
151 c.Hash = sha512.New()
152 default:
153 return nil, fmt.Errorf(
154 "unsupported checksum type: %s", checksumType)
155 }
156
157 return c, nil
158}
159
160func newChecksumFromValue(checksumValue, filename string) (*fileChecksum, error) {
161 c, err := newChecksum(checksumValue, filename)
162 if err != nil {
163 return nil, err
164 }
165
166 switch len(c.Value) {
167 case md5.Size:
168 c.Hash = md5.New()
169 c.Type = "md5"
170 case sha1.Size:
171 c.Hash = sha1.New()
172 c.Type = "sha1"
173 case sha256.Size:
174 c.Hash = sha256.New()
175 c.Type = "sha256"
176 case sha512.Size:
177 c.Hash = sha512.New()
178 c.Type = "sha512"
179 default:
180 return nil, fmt.Errorf("Unknown type for checksum %s", checksumValue)
181 }
182
183 return c, nil
184}
185
186// checksumsFromFile will return all the fileChecksums found in file
187//
188// checksumsFromFile will try to guess the hashing algorithm based on content
189// of checksum file
190//
191// checksumsFromFile will only return checksums for files that match file
192// behind src
193func (c *Client) checksumFromFile(checksumFile string, src *url.URL) (*fileChecksum, error) {
194 checksumFileURL, err := urlhelper.Parse(checksumFile)
195 if err != nil {
196 return nil, err
197 }
198
199 tempfile, err := tmpFile("", filepath.Base(checksumFileURL.Path))
200 if err != nil {
201 return nil, err
202 }
203 defer os.Remove(tempfile)
204
205 c2 := &Client{
206 Ctx: c.Ctx,
207 Getters: c.Getters,
208 Decompressors: c.Decompressors,
209 Detectors: c.Detectors,
210 Pwd: c.Pwd,
211 Dir: false,
212 Src: checksumFile,
213 Dst: tempfile,
214 ProgressListener: c.ProgressListener,
215 }
216 if err = c2.Get(); err != nil {
217 return nil, fmt.Errorf(
218 "Error downloading checksum file: %s", err)
219 }
220
221 filename := filepath.Base(src.Path)
222 absPath, err := filepath.Abs(src.Path)
223 if err != nil {
224 return nil, err
225 }
226 checksumFileDir := filepath.Dir(checksumFileURL.Path)
227 relpath, err := filepath.Rel(checksumFileDir, absPath)
228 switch {
229 case err == nil ||
230 err.Error() == "Rel: can't make "+absPath+" relative to "+checksumFileDir:
231 // ex: on windows C:\gopath\...\content.txt cannot be relative to \
232 // which is okay, may be another expected path will work.
233 break
234 default:
235 return nil, err
236 }
237
238 // possible file identifiers:
239 options := []string{
240 filename, // ubuntu-14.04.1-server-amd64.iso
241 "*" + filename, // *ubuntu-14.04.1-server-amd64.iso Standard checksum
242 "?" + filename, // ?ubuntu-14.04.1-server-amd64.iso shasum -p
243 relpath, // dir/ubuntu-14.04.1-server-amd64.iso
244 "./" + relpath, // ./dir/ubuntu-14.04.1-server-amd64.iso
245 absPath, // fullpath; set if local
246 }
247
248 f, err := os.Open(tempfile)
249 if err != nil {
250 return nil, fmt.Errorf(
251 "Error opening downloaded file: %s", err)
252 }
253 defer f.Close()
254 rd := bufio.NewReader(f)
255 for {
256 line, err := rd.ReadString('\n')
257 if err != nil {
258 if err != io.EOF {
259 return nil, fmt.Errorf(
260 "Error reading checksum file: %s", err)
261 }
262 break
263 }
264 checksum, err := parseChecksumLine(line)
265 if err != nil || checksum == nil {
266 continue
267 }
268 if checksum.Filename == "" {
269 // filename not sure, let's try
270 return checksum, nil
271 }
272 // make sure the checksum is for the right file
273 for _, option := range options {
274 if option != "" && checksum.Filename == option {
275 // any checksum will work so we return the first one
276 return checksum, nil
277 }
278 }
279 }
280 return nil, fmt.Errorf("no checksum found in: %s", checksumFile)
281}
282
283// parseChecksumLine takes a line from a checksum file and returns
284// checksumType, checksumValue and filename parseChecksumLine guesses the style
285// of the checksum BSD vs GNU by splitting the line and by counting the parts.
286// of a line.
287// for BSD type sums parseChecksumLine guesses the hashing algorithm
288// by checking the length of the checksum.
289func parseChecksumLine(line string) (*fileChecksum, error) {
290 parts := strings.Fields(line)
291
292 switch len(parts) {
293 case 4:
294 // BSD-style checksum:
295 // MD5 (file1) = <checksum>
296 // MD5 (file2) = <checksum>
297 if len(parts[1]) <= 2 ||
298 parts[1][0] != '(' || parts[1][len(parts[1])-1] != ')' {
299 return nil, fmt.Errorf(
300 "Unexpected BSD-style-checksum filename format: %s", line)
301 }
302 filename := parts[1][1 : len(parts[1])-1]
303 return newChecksumFromType(parts[0], parts[3], filename)
304 case 2:
305 // GNU-style:
306 // <checksum> file1
307 // <checksum> *file2
308 return newChecksumFromValue(parts[0], parts[1])
309 case 0:
310 return nil, nil // empty line
311 default:
312 return newChecksumFromValue(parts[0], "")
313 }
314}
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
index 300301c..007a78b 100644
--- a/vendor/github.com/hashicorp/go-getter/client.go
+++ b/vendor/github.com/hashicorp/go-getter/client.go
@@ -1,15 +1,8 @@
1package getter 1package getter
2 2
3import ( 3import (
4 "bytes" 4 "context"
5 "crypto/md5"
6 "crypto/sha1"
7 "crypto/sha256"
8 "crypto/sha512"
9 "encoding/hex"
10 "fmt" 5 "fmt"
11 "hash"
12 "io"
13 "io/ioutil" 6 "io/ioutil"
14 "os" 7 "os"
15 "path/filepath" 8 "path/filepath"
@@ -17,7 +10,7 @@ import (
17 "strings" 10 "strings"
18 11
19 urlhelper "github.com/hashicorp/go-getter/helper/url" 12 urlhelper "github.com/hashicorp/go-getter/helper/url"
20 "github.com/hashicorp/go-safetemp" 13 safetemp "github.com/hashicorp/go-safetemp"
21) 14)
22 15
23// Client is a client for downloading things. 16// Client is a client for downloading things.
@@ -26,6 +19,9 @@ import (
26// Using a client directly allows more fine-grained control over how downloading 19// Using a client directly allows more fine-grained control over how downloading
27// is done, as well as customizing the protocols supported. 20// is done, as well as customizing the protocols supported.
28type Client struct { 21type Client struct {
22 // Ctx for cancellation
23 Ctx context.Context
24
29 // Src is the source URL to get. 25 // Src is the source URL to get.
30 // 26 //
31 // Dst is the path to save the downloaded thing as. If Dir is set to 27 // Dst is the path to save the downloaded thing as. If Dir is set to
@@ -62,10 +58,20 @@ type Client struct {
62 // 58 //
63 // WARNING: deprecated. If Mode is set, that will take precedence. 59 // WARNING: deprecated. If Mode is set, that will take precedence.
64 Dir bool 60 Dir bool
61
62 // ProgressListener allows to track file downloads.
63 // By default a no op progress listener is used.
64 ProgressListener ProgressTracker
65
66 Options []ClientOption
65} 67}
66 68
67// Get downloads the configured source to the destination. 69// Get downloads the configured source to the destination.
68func (c *Client) Get() error { 70func (c *Client) Get() error {
71 if err := c.Configure(c.Options...); err != nil {
72 return err
73 }
74
69 // Store this locally since there are cases we swap this 75 // Store this locally since there are cases we swap this
70 mode := c.Mode 76 mode := c.Mode
71 if mode == ClientModeInvalid { 77 if mode == ClientModeInvalid {
@@ -76,18 +82,7 @@ func (c *Client) Get() error {
76 } 82 }
77 } 83 }
78 84
79 // Default decompressor value 85 src, err := Detect(c.Src, c.Pwd, c.Detectors)
80 decompressors := c.Decompressors
81 if decompressors == nil {
82 decompressors = Decompressors
83 }
84
85 // Detect the URL. This is safe if it is already detected.
86 detectors := c.Detectors
87 if detectors == nil {
88 detectors = Detectors
89 }
90 src, err := Detect(c.Src, c.Pwd, detectors)
91 if err != nil { 86 if err != nil {
92 return err 87 return err
93 } 88 }
@@ -119,12 +114,7 @@ func (c *Client) Get() error {
119 force = u.Scheme 114 force = u.Scheme
120 } 115 }
121 116
122 getters := c.Getters 117 g, ok := c.Getters[force]
123 if getters == nil {
124 getters = Getters
125 }
126
127 g, ok := getters[force]
128 if !ok { 118 if !ok {
129 return fmt.Errorf( 119 return fmt.Errorf(
130 "download not supported for scheme '%s'", force) 120 "download not supported for scheme '%s'", force)
@@ -150,7 +140,7 @@ func (c *Client) Get() error {
150 if archiveV == "" { 140 if archiveV == "" {
151 // We don't appear to... but is it part of the filename? 141 // We don't appear to... but is it part of the filename?
152 matchingLen := 0 142 matchingLen := 0
153 for k, _ := range decompressors { 143 for k := range c.Decompressors {
154 if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen { 144 if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen {
155 archiveV = k 145 archiveV = k
156 matchingLen = len(k) 146 matchingLen = len(k)
@@ -163,7 +153,7 @@ func (c *Client) Get() error {
163 // real path. 153 // real path.
164 var decompressDst string 154 var decompressDst string
165 var decompressDir bool 155 var decompressDir bool
166 decompressor := decompressors[archiveV] 156 decompressor := c.Decompressors[archiveV]
167 if decompressor != nil { 157 if decompressor != nil {
168 // Create a temporary directory to store our archive. We delete 158 // Create a temporary directory to store our archive. We delete
169 // this at the end of everything. 159 // this at the end of everything.
@@ -182,44 +172,16 @@ func (c *Client) Get() error {
182 mode = ClientModeFile 172 mode = ClientModeFile
183 } 173 }
184 174
185 // Determine if we have a checksum 175 // Determine checksum if we have one
186 var checksumHash hash.Hash 176 checksum, err := c.extractChecksum(u)
187 var checksumValue []byte 177 if err != nil {
188 if v := q.Get("checksum"); v != "" { 178 return fmt.Errorf("invalid checksum: %s", err)
189 // Delete the query parameter if we have it.
190 q.Del("checksum")
191 u.RawQuery = q.Encode()
192
193 // Determine the checksum hash type
194 checksumType := ""
195 idx := strings.Index(v, ":")
196 if idx > -1 {
197 checksumType = v[:idx]
198 }
199 switch checksumType {
200 case "md5":
201 checksumHash = md5.New()
202 case "sha1":
203 checksumHash = sha1.New()
204 case "sha256":
205 checksumHash = sha256.New()
206 case "sha512":
207 checksumHash = sha512.New()
208 default:
209 return fmt.Errorf(
210 "unsupported checksum type: %s", checksumType)
211 }
212
213 // Get the remainder of the value and parse it into bytes
214 b, err := hex.DecodeString(v[idx+1:])
215 if err != nil {
216 return fmt.Errorf("invalid checksum: %s", err)
217 }
218
219 // Set our value
220 checksumValue = b
221 } 179 }
222 180
181 // Delete the query parameter if we have it.
182 q.Del("checksum")
183 u.RawQuery = q.Encode()
184
223 if mode == ClientModeAny { 185 if mode == ClientModeAny {
224 // Ask the getter which client mode to use 186 // Ask the getter which client mode to use
225 mode, err = g.ClientMode(u) 187 mode, err = g.ClientMode(u)
@@ -248,15 +210,24 @@ func (c *Client) Get() error {
248 // If we're not downloading a directory, then just download the file 210 // If we're not downloading a directory, then just download the file
249 // and return. 211 // and return.
250 if mode == ClientModeFile { 212 if mode == ClientModeFile {
251 err := g.GetFile(dst, u) 213 getFile := true
252 if err != nil { 214 if checksum != nil {
253 return err 215 if err := checksum.checksum(dst); err == nil {
216 // don't get the file if the checksum of dst is correct
217 getFile = false
218 }
254 } 219 }
255 220 if getFile {
256 if checksumHash != nil { 221 err := g.GetFile(dst, u)
257 if err := checksum(dst, checksumHash, checksumValue); err != nil { 222 if err != nil {
258 return err 223 return err
259 } 224 }
225
226 if checksum != nil {
227 if err := checksum.checksum(dst); err != nil {
228 return err
229 }
230 }
260 } 231 }
261 232
262 if decompressor != nil { 233 if decompressor != nil {
@@ -291,7 +262,7 @@ func (c *Client) Get() error {
291 if decompressor == nil { 262 if decompressor == nil {
292 // If we're getting a directory, then this is an error. You cannot 263 // If we're getting a directory, then this is an error. You cannot
293 // checksum a directory. TODO: test 264 // checksum a directory. TODO: test
294 if checksumHash != nil { 265 if checksum != nil {
295 return fmt.Errorf( 266 return fmt.Errorf(
296 "checksum cannot be specified for directory download") 267 "checksum cannot be specified for directory download")
297 } 268 }
@@ -320,30 +291,7 @@ func (c *Client) Get() error {
320 return err 291 return err
321 } 292 }
322 293
323 return copyDir(realDst, subDir, false) 294 return copyDir(c.Ctx, realDst, subDir, false)
324 }
325
326 return nil
327}
328
329// checksum is a simple method to compute the checksum of a source file
330// and compare it to the given expected value.
331func checksum(source string, h hash.Hash, v []byte) error {
332 f, err := os.Open(source)
333 if err != nil {
334 return fmt.Errorf("Failed to open file for checksum: %s", err)
335 }
336 defer f.Close()
337
338 if _, err := io.Copy(h, f); err != nil {
339 return fmt.Errorf("Failed to hash: %s", err)
340 }
341
342 if actual := h.Sum(nil); !bytes.Equal(actual, v) {
343 return fmt.Errorf(
344 "Checksums did not match.\nExpected: %s\nGot: %s",
345 hex.EncodeToString(v),
346 hex.EncodeToString(actual))
347 } 295 }
348 296
349 return nil 297 return nil
diff --git a/vendor/github.com/hashicorp/go-getter/client_option.go b/vendor/github.com/hashicorp/go-getter/client_option.go
new file mode 100644
index 0000000..c1ee413
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client_option.go
@@ -0,0 +1,46 @@
1package getter
2
3import "context"
4
5// A ClientOption allows to configure a client
6type ClientOption func(*Client) error
7
8// Configure configures a client with options.
9func (c *Client) Configure(opts ...ClientOption) error {
10 if c.Ctx == nil {
11 c.Ctx = context.Background()
12 }
13 c.Options = opts
14 for _, opt := range opts {
15 err := opt(c)
16 if err != nil {
17 return err
18 }
19 }
20 // Default decompressor values
21 if c.Decompressors == nil {
22 c.Decompressors = Decompressors
23 }
24 // Default detector values
25 if c.Detectors == nil {
26 c.Detectors = Detectors
27 }
28 // Default getter values
29 if c.Getters == nil {
30 c.Getters = Getters
31 }
32
33 for _, getter := range c.Getters {
34 getter.SetClient(c)
35 }
36 return nil
37}
38
39// WithContext allows to pass a context to operation
40// in order to be able to cancel a download in progress.
41func WithContext(ctx context.Context) func(*Client) error {
42 return func(c *Client) error {
43 c.Ctx = ctx
44 return nil
45 }
46}
diff --git a/vendor/github.com/hashicorp/go-getter/client_option_progress.go b/vendor/github.com/hashicorp/go-getter/client_option_progress.go
new file mode 100644
index 0000000..9b185f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/client_option_progress.go
@@ -0,0 +1,38 @@
1package getter
2
3import (
4 "io"
5)
6
7// WithProgress allows for a user to track
8// the progress of a download.
9// For example by displaying a progress bar with
10// current download.
11// Not all getters have progress support yet.
12func WithProgress(pl ProgressTracker) func(*Client) error {
13 return func(c *Client) error {
14 c.ProgressListener = pl
15 return nil
16 }
17}
18
19// ProgressTracker allows to track the progress of downloads.
20type ProgressTracker interface {
21 // TrackProgress should be called when
22 // a new object is being downloaded.
23 // src is the location the file is
24 // downloaded from.
25 // currentSize is the current size of
26 // the file in case it is a partial
27 // download.
28 // totalSize is the total size in bytes,
29 // size can be zero if the file size
30 // is not known.
31 // stream is the file being downloaded, every
32 // written byte will add up to processed size.
33 //
34 // TrackProgress returns a ReadCloser that wraps the
35 // download in progress ( stream ).
36 // When the download is finished, body shall be closed.
37 TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser)
38}
diff --git a/vendor/github.com/hashicorp/go-getter/common.go b/vendor/github.com/hashicorp/go-getter/common.go
new file mode 100644
index 0000000..d2afd8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/common.go
@@ -0,0 +1,14 @@
1package getter
2
3import (
4 "io/ioutil"
5)
6
7func tmpFile(dir, pattern string) (string, error) {
8 f, err := ioutil.TempFile(dir, pattern)
9 if err != nil {
10 return "", err
11 }
12 f.Close()
13 return f.Name(), nil
14}
diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go
index 2f58e8a..641fe6d 100644
--- a/vendor/github.com/hashicorp/go-getter/copy_dir.go
+++ b/vendor/github.com/hashicorp/go-getter/copy_dir.go
@@ -1,7 +1,7 @@
1package getter 1package getter
2 2
3import ( 3import (
4 "io" 4 "context"
5 "os" 5 "os"
6 "path/filepath" 6 "path/filepath"
7 "strings" 7 "strings"
@@ -11,7 +11,7 @@ import (
11// should already exist. 11// should already exist.
12// 12//
13// If ignoreDot is set to true, then dot-prefixed files/folders are ignored. 13// If ignoreDot is set to true, then dot-prefixed files/folders are ignored.
14func copyDir(dst string, src string, ignoreDot bool) error { 14func copyDir(ctx context.Context, dst string, src string, ignoreDot bool) error {
15 src, err := filepath.EvalSymlinks(src) 15 src, err := filepath.EvalSymlinks(src)
16 if err != nil { 16 if err != nil {
17 return err 17 return err
@@ -66,7 +66,7 @@ func copyDir(dst string, src string, ignoreDot bool) error {
66 } 66 }
67 defer dstF.Close() 67 defer dstF.Close()
68 68
69 if _, err := io.Copy(dstF, srcF); err != nil { 69 if _, err := Copy(ctx, dstF, srcF); err != nil {
70 return err 70 return err
71 } 71 }
72 72
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tar.go b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
index 39cb392..b6986a2 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_tar.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_tar.go
@@ -6,6 +6,7 @@ import (
6 "io" 6 "io"
7 "os" 7 "os"
8 "path/filepath" 8 "path/filepath"
9 "time"
9) 10)
10 11
11// untar is a shared helper for untarring an archive. The reader should provide 12// untar is a shared helper for untarring an archive. The reader should provide
@@ -14,6 +15,7 @@ func untar(input io.Reader, dst, src string, dir bool) error {
14 tarR := tar.NewReader(input) 15 tarR := tar.NewReader(input)
15 done := false 16 done := false
16 dirHdrs := []*tar.Header{} 17 dirHdrs := []*tar.Header{}
18 now := time.Now()
17 for { 19 for {
18 hdr, err := tarR.Next() 20 hdr, err := tarR.Next()
19 if err == io.EOF { 21 if err == io.EOF {
@@ -95,17 +97,37 @@ func untar(input io.Reader, dst, src string, dir bool) error {
95 return err 97 return err
96 } 98 }
97 99
98 // Set the access and modification time 100 // Set the access and modification time if valid, otherwise default to current time
99 if err := os.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { 101 aTime := now
102 mTime := now
103 if hdr.AccessTime.Unix() > 0 {
104 aTime = hdr.AccessTime
105 }
106 if hdr.ModTime.Unix() > 0 {
107 mTime = hdr.ModTime
108 }
109 if err := os.Chtimes(path, aTime, mTime); err != nil {
100 return err 110 return err
101 } 111 }
102 } 112 }
103 113
104 // Adding a file or subdirectory changes the mtime of a directory 114 // Perform a final pass over extracted directories to update metadata
105 // We therefore wait until we've extracted everything and then set the mtime and atime attributes
106 for _, dirHdr := range dirHdrs { 115 for _, dirHdr := range dirHdrs {
107 path := filepath.Join(dst, dirHdr.Name) 116 path := filepath.Join(dst, dirHdr.Name)
108 if err := os.Chtimes(path, dirHdr.AccessTime, dirHdr.ModTime); err != nil { 117 // Chmod the directory since they might be created before we know the mode flags
118 if err := os.Chmod(path, dirHdr.FileInfo().Mode()); err != nil {
119 return err
120 }
121 // Set the mtime/atime attributes since they would have been changed during extraction
122 aTime := now
123 mTime := now
124 if dirHdr.AccessTime.Unix() > 0 {
125 aTime = dirHdr.AccessTime
126 }
127 if dirHdr.ModTime.Unix() > 0 {
128 mTime = dirHdr.ModTime
129 }
130 if err := os.Chtimes(path, aTime, mTime); err != nil {
109 return err 131 return err
110 } 132 }
111 } 133 }
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
index 91cf33d..b2f662a 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_testing.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
@@ -18,16 +18,18 @@ import (
18 18
19// TestDecompressCase is a single test case for testing decompressors 19// TestDecompressCase is a single test case for testing decompressors
20type TestDecompressCase struct { 20type TestDecompressCase struct {
21 Input string // Input is the complete path to the input file 21 Input string // Input is the complete path to the input file
22 Dir bool // Dir is whether or not we're testing directory mode 22 Dir bool // Dir is whether or not we're testing directory mode
23 Err bool // Err is whether we expect an error or not 23 Err bool // Err is whether we expect an error or not
24 DirList []string // DirList is the list of files for Dir mode 24 DirList []string // DirList is the list of files for Dir mode
25 FileMD5 string // FileMD5 is the expected MD5 for a single file 25 FileMD5 string // FileMD5 is the expected MD5 for a single file
26 Mtime *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode) 26 Mtime *time.Time // Mtime is the optionally expected mtime for a single file (or all files if in Dir mode)
27} 27}
28 28
29// TestDecompressor is a helper function for testing generic decompressors. 29// TestDecompressor is a helper function for testing generic decompressors.
30func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) { 30func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
31 t.Helper()
32
31 for _, tc := range cases { 33 for _, tc := range cases {
32 t.Logf("Testing: %s", tc.Input) 34 t.Logf("Testing: %s", tc.Input)
33 35
@@ -72,9 +74,13 @@ func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
72 74
73 if tc.Mtime != nil { 75 if tc.Mtime != nil {
74 actual := fi.ModTime() 76 actual := fi.ModTime()
75 expected := *tc.Mtime 77 if tc.Mtime.Unix() > 0 {
76 if actual != expected { 78 expected := *tc.Mtime
77 t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String()) 79 if actual != expected {
80 t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), dst, actual.String())
81 }
82 } else if actual.Unix() <= 0 {
83 t.Fatalf("err %s: expected mtime to be > 0, got '%s'", actual.String())
78 } 84 }
79 } 85 }
80 86
@@ -103,10 +109,15 @@ func TestDecompressor(t testing.T, d Decompressor, cases []TestDecompressCase) {
103 t.Fatalf("err: %s", err) 109 t.Fatalf("err: %s", err)
104 } 110 }
105 actual := fi.ModTime() 111 actual := fi.ModTime()
106 expected := *tc.Mtime 112 if tc.Mtime.Unix() > 0 {
107 if actual != expected { 113 expected := *tc.Mtime
108 t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String()) 114 if actual != expected {
115 t.Fatalf("err %s: expected mtime '%s' for %s, got '%s'", tc.Input, expected.String(), path, actual.String())
116 }
117 } else if actual.Unix() < 0 {
118 t.Fatalf("err %s: expected mtime to be > 0, got '%s'", actual.String())
109 } 119 }
120
110 } 121 }
111 } 122 }
112 }() 123 }()
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
index b0e70ca..0830f79 100644
--- a/vendor/github.com/hashicorp/go-getter/decompress_zip.go
+++ b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
@@ -9,7 +9,7 @@ import (
9) 9)
10 10
11// ZipDecompressor is an implementation of Decompressor that can 11// ZipDecompressor is an implementation of Decompressor that can
12// decompress tar.gzip files. 12// decompress zip files.
13type ZipDecompressor struct{} 13type ZipDecompressor struct{}
14 14
15func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error { 15func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go
index c369551..5bb750c 100644
--- a/vendor/github.com/hashicorp/go-getter/detect.go
+++ b/vendor/github.com/hashicorp/go-getter/detect.go
@@ -23,8 +23,10 @@ var Detectors []Detector
23func init() { 23func init() {
24 Detectors = []Detector{ 24 Detectors = []Detector{
25 new(GitHubDetector), 25 new(GitHubDetector),
26 new(GitDetector),
26 new(BitBucketDetector), 27 new(BitBucketDetector),
27 new(S3Detector), 28 new(S3Detector),
29 new(GCSDetector),
28 new(FileDetector), 30 new(FileDetector),
29 } 31 }
30} 32}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_gcs.go b/vendor/github.com/hashicorp/go-getter/detect_gcs.go
new file mode 100644
index 0000000..1136373
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_gcs.go
@@ -0,0 +1,43 @@
1package getter
2
3import (
4 "fmt"
5 "net/url"
6 "strings"
7)
8
9// GCSDetector implements Detector to detect GCS URLs and turn
10// them into URLs that the GCSGetter can understand.
11type GCSDetector struct{}
12
13func (d *GCSDetector) Detect(src, _ string) (string, bool, error) {
14 if len(src) == 0 {
15 return "", false, nil
16 }
17
18 if strings.Contains(src, "googleapis.com/") {
19 return d.detectHTTP(src)
20 }
21
22 return "", false, nil
23}
24
25func (d *GCSDetector) detectHTTP(src string) (string, bool, error) {
26
27 parts := strings.Split(src, "/")
28 if len(parts) < 5 {
29 return "", false, fmt.Errorf(
30 "URL is not a valid GCS URL")
31 }
32 version := parts[2]
33 bucket := parts[3]
34 object := strings.Join(parts[4:], "/")
35
36 url, err := url.Parse(fmt.Sprintf("https://www.googleapis.com/storage/%s/%s/%s",
37 version, bucket, object))
38 if err != nil {
39 return "", false, fmt.Errorf("error parsing GCS URL: %s", err)
40 }
41
42 return "gcs::" + url.String(), true, nil
43}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_git.go b/vendor/github.com/hashicorp/go-getter/detect_git.go
new file mode 100644
index 0000000..eeb8a04
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_git.go
@@ -0,0 +1,26 @@
1package getter
2
3// GitDetector implements Detector to detect Git SSH URLs such as
4// git@host.com:dir1/dir2 and converts them to proper URLs.
5type GitDetector struct{}
6
7func (d *GitDetector) Detect(src, _ string) (string, bool, error) {
8 if len(src) == 0 {
9 return "", false, nil
10 }
11
12 u, err := detectSSH(src)
13 if err != nil {
14 return "", true, err
15 }
16 if u == nil {
17 return "", false, nil
18 }
19
20 // We require the username to be "git" to assume that this is a Git URL
21 if u.User.Username() != "git" {
22 return "", false, nil
23 }
24
25 return "git::" + u.String(), true, nil
26}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go
index c084ad9..4bf4daf 100644
--- a/vendor/github.com/hashicorp/go-getter/detect_github.go
+++ b/vendor/github.com/hashicorp/go-getter/detect_github.go
@@ -17,8 +17,6 @@ func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) {
17 17
18 if strings.HasPrefix(src, "github.com/") { 18 if strings.HasPrefix(src, "github.com/") {
19 return d.detectHTTP(src) 19 return d.detectHTTP(src)
20 } else if strings.HasPrefix(src, "git@github.com:") {
21 return d.detectSSH(src)
22 } 20 }
23 21
24 return "", false, nil 22 return "", false, nil
@@ -47,27 +45,3 @@ func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) {
47 45
48 return "git::" + url.String(), true, nil 46 return "git::" + url.String(), true, nil
49} 47}
50
51func (d *GitHubDetector) detectSSH(src string) (string, bool, error) {
52 idx := strings.Index(src, ":")
53 qidx := strings.Index(src, "?")
54 if qidx == -1 {
55 qidx = len(src)
56 }
57
58 var u url.URL
59 u.Scheme = "ssh"
60 u.User = url.User("git")
61 u.Host = "github.com"
62 u.Path = src[idx+1 : qidx]
63 if qidx < len(src) {
64 q, err := url.ParseQuery(src[qidx+1:])
65 if err != nil {
66 return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err)
67 }
68
69 u.RawQuery = q.Encode()
70 }
71
72 return "git::" + u.String(), true, nil
73}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_ssh.go b/vendor/github.com/hashicorp/go-getter/detect_ssh.go
new file mode 100644
index 0000000..c0dbe9d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/detect_ssh.go
@@ -0,0 +1,49 @@
1package getter
2
3import (
4 "fmt"
5 "net/url"
6 "regexp"
7 "strings"
8)
9
10// Note that we do not have an SSH-getter currently so this file serves
11// only to hold the detectSSH helper that is used by other detectors.
12
13// sshPattern matches SCP-like SSH patterns (user@host:path)
14var sshPattern = regexp.MustCompile("^(?:([^@]+)@)?([^:]+):/?(.+)$")
15
16// detectSSH determines if the src string matches an SSH-like URL and
17// converts it into a net.URL compatible string. This returns nil if the
18// string doesn't match the SSH pattern.
19//
20// This function is tested indirectly via detect_git_test.go
21func detectSSH(src string) (*url.URL, error) {
22 matched := sshPattern.FindStringSubmatch(src)
23 if matched == nil {
24 return nil, nil
25 }
26
27 user := matched[1]
28 host := matched[2]
29 path := matched[3]
30 qidx := strings.Index(path, "?")
31 if qidx == -1 {
32 qidx = len(path)
33 }
34
35 var u url.URL
36 u.Scheme = "ssh"
37 u.User = url.User(user)
38 u.Host = host
39 u.Path = path[0:qidx]
40 if qidx < len(path) {
41 q, err := url.ParseQuery(path[qidx+1:])
42 if err != nil {
43 return nil, fmt.Errorf("error parsing GitHub SSH URL: %s", err)
44 }
45 u.RawQuery = q.Encode()
46 }
47
48 return &u, nil
49}
diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go
index e6053d9..c233763 100644
--- a/vendor/github.com/hashicorp/go-getter/get.go
+++ b/vendor/github.com/hashicorp/go-getter/get.go
@@ -41,6 +41,11 @@ type Getter interface {
41 // ClientMode returns the mode based on the given URL. This is used to 41 // ClientMode returns the mode based on the given URL. This is used to
42 // allow clients to let the getters decide which mode to use. 42 // allow clients to let the getters decide which mode to use.
43 ClientMode(*url.URL) (ClientMode, error) 43 ClientMode(*url.URL) (ClientMode, error)
44
45 // SetClient allows a getter to know it's client
46 // in order to access client's Get functions or
47 // progress tracking.
48 SetClient(*Client)
44} 49}
45 50
46// Getters is the mapping of scheme to the Getter implementation that will 51// Getters is the mapping of scheme to the Getter implementation that will
@@ -62,6 +67,7 @@ func init() {
62 Getters = map[string]Getter{ 67 Getters = map[string]Getter{
63 "file": new(FileGetter), 68 "file": new(FileGetter),
64 "git": new(GitGetter), 69 "git": new(GitGetter),
70 "gcs": new(GCSGetter),
65 "hg": new(HgGetter), 71 "hg": new(HgGetter),
66 "s3": new(S3Getter), 72 "s3": new(S3Getter),
67 "http": httpGetter, 73 "http": httpGetter,
@@ -74,12 +80,12 @@ func init() {
74// 80//
75// src is a URL, whereas dst is always just a file path to a folder. This 81// src is a URL, whereas dst is always just a file path to a folder. This
76// folder doesn't need to exist. It will be created if it doesn't exist. 82// folder doesn't need to exist. It will be created if it doesn't exist.
77func Get(dst, src string) error { 83func Get(dst, src string, opts ...ClientOption) error {
78 return (&Client{ 84 return (&Client{
79 Src: src, 85 Src: src,
80 Dst: dst, 86 Dst: dst,
81 Dir: true, 87 Dir: true,
82 Getters: Getters, 88 Options: opts,
83 }).Get() 89 }).Get()
84} 90}
85 91
@@ -89,23 +95,23 @@ func Get(dst, src string) error {
89// dst must be a directory. If src is a file, it will be downloaded 95// dst must be a directory. If src is a file, it will be downloaded
90// into dst with the basename of the URL. If src is a directory or 96// into dst with the basename of the URL. If src is a directory or
91// archive, it will be unpacked directly into dst. 97// archive, it will be unpacked directly into dst.
92func GetAny(dst, src string) error { 98func GetAny(dst, src string, opts ...ClientOption) error {
93 return (&Client{ 99 return (&Client{
94 Src: src, 100 Src: src,
95 Dst: dst, 101 Dst: dst,
96 Mode: ClientModeAny, 102 Mode: ClientModeAny,
97 Getters: Getters, 103 Options: opts,
98 }).Get() 104 }).Get()
99} 105}
100 106
101// GetFile downloads the file specified by src into the path specified by 107// GetFile downloads the file specified by src into the path specified by
102// dst. 108// dst.
103func GetFile(dst, src string) error { 109func GetFile(dst, src string, opts ...ClientOption) error {
104 return (&Client{ 110 return (&Client{
105 Src: src, 111 Src: src,
106 Dst: dst, 112 Dst: dst,
107 Dir: false, 113 Dir: false,
108 Getters: Getters, 114 Options: opts,
109 }).Get() 115 }).Get()
110} 116}
111 117
diff --git a/vendor/github.com/hashicorp/go-getter/get_base.go b/vendor/github.com/hashicorp/go-getter/get_base.go
new file mode 100644
index 0000000..09e9b63
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_base.go
@@ -0,0 +1,20 @@
1package getter
2
3import "context"
4
5// getter is our base getter; it regroups
6// fields all getters have in common.
7type getter struct {
8 client *Client
9}
10
11func (g *getter) SetClient(c *Client) { g.client = c }
12
13// Context tries to returns the Contex from the getter's
14// client. otherwise context.Background() is returned.
15func (g *getter) Context() context.Context {
16 if g == nil || g.client == nil {
17 return context.Background()
18 }
19 return g.client.Ctx
20}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go
index e5d2d61..7866083 100644
--- a/vendor/github.com/hashicorp/go-getter/get_file.go
+++ b/vendor/github.com/hashicorp/go-getter/get_file.go
@@ -8,7 +8,11 @@ import (
8// FileGetter is a Getter implementation that will download a module from 8// FileGetter is a Getter implementation that will download a module from
9// a file scheme. 9// a file scheme.
10type FileGetter struct { 10type FileGetter struct {
11 // Copy, if set to true, will copy data instead of using a symlink 11 getter
12
13 // Copy, if set to true, will copy data instead of using a symlink. If
14 // false, attempts to symlink to speed up the operation and to lower the
15 // disk space usage. If the symlink fails, may attempt to copy on windows.
12 Copy bool 16 Copy bool
13} 17}
14 18
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_copy.go b/vendor/github.com/hashicorp/go-getter/get_file_copy.go
new file mode 100644
index 0000000..d70fb49
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_file_copy.go
@@ -0,0 +1,29 @@
1package getter
2
3import (
4 "context"
5 "io"
6)
7
8// readerFunc is syntactic sugar for read interface.
9type readerFunc func(p []byte) (n int, err error)
10
11func (rf readerFunc) Read(p []byte) (n int, err error) { return rf(p) }
12
13// Copy is a io.Copy cancellable by context
14func Copy(ctx context.Context, dst io.Writer, src io.Reader) (int64, error) {
15 // Copy will call the Reader and Writer interface multiple time, in order
16 // to copy by chunk (avoiding loading the whole file in memory).
17 return io.Copy(dst, readerFunc(func(p []byte) (int, error) {
18
19 select {
20 case <-ctx.Done():
21 // context has been canceled
22 // stop process and propagate "context canceled" error
23 return 0, ctx.Err()
24 default:
25 // otherwise just run default io.Reader implementation
26 return src.Read(p)
27 }
28 }))
29}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
index c89a2d5..c3b28ae 100644
--- a/vendor/github.com/hashicorp/go-getter/get_file_unix.go
+++ b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
@@ -4,7 +4,6 @@ package getter
4 4
5import ( 5import (
6 "fmt" 6 "fmt"
7 "io"
8 "net/url" 7 "net/url"
9 "os" 8 "os"
10 "path/filepath" 9 "path/filepath"
@@ -50,6 +49,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error {
50} 49}
51 50
52func (g *FileGetter) GetFile(dst string, u *url.URL) error { 51func (g *FileGetter) GetFile(dst string, u *url.URL) error {
52 ctx := g.Context()
53 path := u.Path 53 path := u.Path
54 if u.RawPath != "" { 54 if u.RawPath != "" {
55 path = u.RawPath 55 path = u.RawPath
@@ -98,6 +98,6 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
98 } 98 }
99 defer dstF.Close() 99 defer dstF.Close()
100 100
101 _, err = io.Copy(dstF, srcF) 101 _, err = Copy(ctx, dstF, srcF)
102 return err 102 return err
103} 103}
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
index f87ed0a..24f1acb 100644
--- a/vendor/github.com/hashicorp/go-getter/get_file_windows.go
+++ b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
@@ -4,15 +4,16 @@ package getter
4 4
5import ( 5import (
6 "fmt" 6 "fmt"
7 "io"
8 "net/url" 7 "net/url"
9 "os" 8 "os"
10 "os/exec" 9 "os/exec"
11 "path/filepath" 10 "path/filepath"
12 "strings" 11 "strings"
12 "syscall"
13) 13)
14 14
15func (g *FileGetter) Get(dst string, u *url.URL) error { 15func (g *FileGetter) Get(dst string, u *url.URL) error {
16 ctx := g.Context()
16 path := u.Path 17 path := u.Path
17 if u.RawPath != "" { 18 if u.RawPath != "" {
18 path = u.RawPath 19 path = u.RawPath
@@ -51,7 +52,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error {
51 sourcePath := toBackslash(path) 52 sourcePath := toBackslash(path)
52 53
53 // Use mklink to create a junction point 54 // Use mklink to create a junction point
54 output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput() 55 output, err := exec.CommandContext(ctx, "cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
55 if err != nil { 56 if err != nil {
56 return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output) 57 return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
57 } 58 }
@@ -60,6 +61,7 @@ func (g *FileGetter) Get(dst string, u *url.URL) error {
60} 61}
61 62
62func (g *FileGetter) GetFile(dst string, u *url.URL) error { 63func (g *FileGetter) GetFile(dst string, u *url.URL) error {
64 ctx := g.Context()
63 path := u.Path 65 path := u.Path
64 if u.RawPath != "" { 66 if u.RawPath != "" {
65 path = u.RawPath 67 path = u.RawPath
@@ -92,7 +94,21 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
92 94
93 // If we're not copying, just symlink and we're done 95 // If we're not copying, just symlink and we're done
94 if !g.Copy { 96 if !g.Copy {
95 return os.Symlink(path, dst) 97 if err = os.Symlink(path, dst); err == nil {
98 return err
99 }
100 lerr, ok := err.(*os.LinkError)
101 if !ok {
102 return err
103 }
104 switch lerr.Err {
105 case syscall.ERROR_PRIVILEGE_NOT_HELD:
106 // no symlink privilege, let's
107 // fallback to a copy to avoid an error.
108 break
109 default:
110 return err
111 }
96 } 112 }
97 113
98 // Copy 114 // Copy
@@ -108,7 +124,7 @@ func (g *FileGetter) GetFile(dst string, u *url.URL) error {
108 } 124 }
109 defer dstF.Close() 125 defer dstF.Close()
110 126
111 _, err = io.Copy(dstF, srcF) 127 _, err = Copy(ctx, dstF, srcF)
112 return err 128 return err
113} 129}
114 130
diff --git a/vendor/github.com/hashicorp/go-getter/get_gcs.go b/vendor/github.com/hashicorp/go-getter/get_gcs.go
new file mode 100644
index 0000000..6faa70f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/get_gcs.go
@@ -0,0 +1,172 @@
1package getter
2
3import (
4 "context"
5 "fmt"
6 "net/url"
7 "os"
8 "path/filepath"
9 "strings"
10
11 "cloud.google.com/go/storage"
12 "google.golang.org/api/iterator"
13)
14
// GCSGetter is a Getter implementation that will download a module from
// a GCS bucket.
type GCSGetter struct {
	getter // embedded base supplying Context()/client plumbing shared by all getters
}
20
21func (g *GCSGetter) ClientMode(u *url.URL) (ClientMode, error) {
22 ctx := g.Context()
23
24 // Parse URL
25 bucket, object, err := g.parseURL(u)
26 if err != nil {
27 return 0, err
28 }
29
30 client, err := storage.NewClient(ctx)
31 if err != nil {
32 return 0, err
33 }
34 iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object})
35 for {
36 obj, err := iter.Next()
37 if err != nil && err != iterator.Done {
38 return 0, err
39 }
40
41 if err == iterator.Done {
42 break
43 }
44 if strings.HasSuffix(obj.Name, "/") {
45 // A directory matched the prefix search, so this must be a directory
46 return ClientModeDir, nil
47 } else if obj.Name != object {
48 // A file matched the prefix search and doesn't have the same name
49 // as the query, so this must be a directory
50 return ClientModeDir, nil
51 }
52 }
53 // There are no directories or subdirectories, and if a match was returned,
54 // it was exactly equal to the prefix search. So return File mode
55 return ClientModeFile, nil
56}
57
58func (g *GCSGetter) Get(dst string, u *url.URL) error {
59 ctx := g.Context()
60
61 // Parse URL
62 bucket, object, err := g.parseURL(u)
63 if err != nil {
64 return err
65 }
66
67 // Remove destination if it already exists
68 _, err = os.Stat(dst)
69 if err != nil && !os.IsNotExist(err) {
70 return err
71 }
72 if err == nil {
73 // Remove the destination
74 if err := os.RemoveAll(dst); err != nil {
75 return err
76 }
77 }
78
79 // Create all the parent directories
80 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
81 return err
82 }
83
84 client, err := storage.NewClient(ctx)
85 if err != nil {
86 return err
87 }
88
89 // Iterate through all matching objects.
90 iter := client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: object})
91 for {
92 obj, err := iter.Next()
93 if err != nil && err != iterator.Done {
94 return err
95 }
96 if err == iterator.Done {
97 break
98 }
99
100 if !strings.HasSuffix(obj.Name, "/") {
101 // Get the object destination path
102 objDst, err := filepath.Rel(object, obj.Name)
103 if err != nil {
104 return err
105 }
106 objDst = filepath.Join(dst, objDst)
107 // Download the matching object.
108 err = g.getObject(ctx, client, objDst, bucket, obj.Name)
109 if err != nil {
110 return err
111 }
112 }
113 }
114 return nil
115}
116
117func (g *GCSGetter) GetFile(dst string, u *url.URL) error {
118 ctx := g.Context()
119
120 // Parse URL
121 bucket, object, err := g.parseURL(u)
122 if err != nil {
123 return err
124 }
125
126 client, err := storage.NewClient(ctx)
127 if err != nil {
128 return err
129 }
130 return g.getObject(ctx, client, dst, bucket, object)
131}
132
133func (g *GCSGetter) getObject(ctx context.Context, client *storage.Client, dst, bucket, object string) error {
134 rc, err := client.Bucket(bucket).Object(object).NewReader(ctx)
135 if err != nil {
136 return err
137 }
138 defer rc.Close()
139
140 // Create all the parent directories
141 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
142 return err
143 }
144
145 f, err := os.Create(dst)
146 if err != nil {
147 return err
148 }
149 defer f.Close()
150
151 _, err = Copy(ctx, f, rc)
152 return err
153}
154
155func (g *GCSGetter) parseURL(u *url.URL) (bucket, path string, err error) {
156 if strings.Contains(u.Host, "googleapis.com") {
157 hostParts := strings.Split(u.Host, ".")
158 if len(hostParts) != 3 {
159 err = fmt.Errorf("URL is not a valid GCS URL")
160 return
161 }
162
163 pathParts := strings.SplitN(u.Path, "/", 5)
164 if len(pathParts) != 5 {
165 err = fmt.Errorf("URL is not a valid GCS URL")
166 return
167 }
168 bucket = pathParts[3]
169 path = pathParts[4]
170 }
171 return
172}
diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go
index cb1d029..67e8b2f 100644
--- a/vendor/github.com/hashicorp/go-getter/get_git.go
+++ b/vendor/github.com/hashicorp/go-getter/get_git.go
@@ -1,6 +1,7 @@
1package getter 1package getter
2 2
3import ( 3import (
4 "context"
4 "encoding/base64" 5 "encoding/base64"
5 "fmt" 6 "fmt"
6 "io/ioutil" 7 "io/ioutil"
@@ -8,28 +9,43 @@ import (
8 "os" 9 "os"
9 "os/exec" 10 "os/exec"
10 "path/filepath" 11 "path/filepath"
12 "runtime"
13 "strconv"
11 "strings" 14 "strings"
12 15
13 urlhelper "github.com/hashicorp/go-getter/helper/url" 16 urlhelper "github.com/hashicorp/go-getter/helper/url"
14 "github.com/hashicorp/go-safetemp" 17 safetemp "github.com/hashicorp/go-safetemp"
15 "github.com/hashicorp/go-version" 18 version "github.com/hashicorp/go-version"
16) 19)
17 20
18// GitGetter is a Getter implementation that will download a module from 21// GitGetter is a Getter implementation that will download a module from
19// a git repository. 22// a git repository.
20type GitGetter struct{} 23type GitGetter struct {
24 getter
25}
21 26
22func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) { 27func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) {
23 return ClientModeDir, nil 28 return ClientModeDir, nil
24} 29}
25 30
26func (g *GitGetter) Get(dst string, u *url.URL) error { 31func (g *GitGetter) Get(dst string, u *url.URL) error {
32 ctx := g.Context()
27 if _, err := exec.LookPath("git"); err != nil { 33 if _, err := exec.LookPath("git"); err != nil {
28 return fmt.Errorf("git must be available and on the PATH") 34 return fmt.Errorf("git must be available and on the PATH")
29 } 35 }
30 36
37 // The port number must be parseable as an integer. If not, the user
38 // was probably trying to use a scp-style address, in which case the
39 // ssh:// prefix must be removed to indicate that.
40 if portStr := u.Port(); portStr != "" {
41 if _, err := strconv.ParseUint(portStr, 10, 16); err != nil {
42 return fmt.Errorf("invalid port number %q; if using the \"scp-like\" git address scheme where a colon introduces the path instead, remove the ssh:// portion and use just the git:: prefix", portStr)
43 }
44 }
45
31 // Extract some query parameters we use 46 // Extract some query parameters we use
32 var ref, sshKey string 47 var ref, sshKey string
48 var depth int
33 q := u.Query() 49 q := u.Query()
34 if len(q) > 0 { 50 if len(q) > 0 {
35 ref = q.Get("ref") 51 ref = q.Get("ref")
@@ -38,6 +54,11 @@ func (g *GitGetter) Get(dst string, u *url.URL) error {
38 sshKey = q.Get("sshkey") 54 sshKey = q.Get("sshkey")
39 q.Del("sshkey") 55 q.Del("sshkey")
40 56
57 if n, err := strconv.Atoi(q.Get("depth")); err == nil {
58 depth = n
59 }
60 q.Del("depth")
61
41 // Copy the URL 62 // Copy the URL
42 var newU url.URL = *u 63 var newU url.URL = *u
43 u = &newU 64 u = &newU
@@ -84,9 +105,9 @@ func (g *GitGetter) Get(dst string, u *url.URL) error {
84 return err 105 return err
85 } 106 }
86 if err == nil { 107 if err == nil {
87 err = g.update(dst, sshKeyFile, ref) 108 err = g.update(ctx, dst, sshKeyFile, ref, depth)
88 } else { 109 } else {
89 err = g.clone(dst, sshKeyFile, u) 110 err = g.clone(ctx, dst, sshKeyFile, u, depth)
90 } 111 }
91 if err != nil { 112 if err != nil {
92 return err 113 return err
@@ -100,7 +121,7 @@ func (g *GitGetter) Get(dst string, u *url.URL) error {
100 } 121 }
101 122
102 // Lastly, download any/all submodules. 123 // Lastly, download any/all submodules.
103 return g.fetchSubmodules(dst, sshKeyFile) 124 return g.fetchSubmodules(ctx, dst, sshKeyFile, depth)
104} 125}
105 126
106// GetFile for Git doesn't support updating at this time. It will download 127// GetFile for Git doesn't support updating at this time. It will download
@@ -138,16 +159,23 @@ func (g *GitGetter) checkout(dst string, ref string) error {
138 return getRunCommand(cmd) 159 return getRunCommand(cmd)
139} 160}
140 161
141func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error { 162func (g *GitGetter) clone(ctx context.Context, dst, sshKeyFile string, u *url.URL, depth int) error {
142 cmd := exec.Command("git", "clone", u.String(), dst) 163 args := []string{"clone"}
164
165 if depth > 0 {
166 args = append(args, "--depth", strconv.Itoa(depth))
167 }
168
169 args = append(args, u.String(), dst)
170 cmd := exec.CommandContext(ctx, "git", args...)
143 setupGitEnv(cmd, sshKeyFile) 171 setupGitEnv(cmd, sshKeyFile)
144 return getRunCommand(cmd) 172 return getRunCommand(cmd)
145} 173}
146 174
147func (g *GitGetter) update(dst, sshKeyFile, ref string) error { 175func (g *GitGetter) update(ctx context.Context, dst, sshKeyFile, ref string, depth int) error {
148 // Determine if we're a branch. If we're NOT a branch, then we just 176 // Determine if we're a branch. If we're NOT a branch, then we just
149 // switch to master prior to checking out 177 // switch to master prior to checking out
150 cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref) 178 cmd := exec.CommandContext(ctx, "git", "show-ref", "-q", "--verify", "refs/heads/"+ref)
151 cmd.Dir = dst 179 cmd.Dir = dst
152 180
153 if getRunCommand(cmd) != nil { 181 if getRunCommand(cmd) != nil {
@@ -162,15 +190,24 @@ func (g *GitGetter) update(dst, sshKeyFile, ref string) error {
162 return err 190 return err
163 } 191 }
164 192
165 cmd = exec.Command("git", "pull", "--ff-only") 193 if depth > 0 {
194 cmd = exec.Command("git", "pull", "--depth", strconv.Itoa(depth), "--ff-only")
195 } else {
196 cmd = exec.Command("git", "pull", "--ff-only")
197 }
198
166 cmd.Dir = dst 199 cmd.Dir = dst
167 setupGitEnv(cmd, sshKeyFile) 200 setupGitEnv(cmd, sshKeyFile)
168 return getRunCommand(cmd) 201 return getRunCommand(cmd)
169} 202}
170 203
171// fetchSubmodules downloads any configured submodules recursively. 204// fetchSubmodules downloads any configured submodules recursively.
172func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error { 205func (g *GitGetter) fetchSubmodules(ctx context.Context, dst, sshKeyFile string, depth int) error {
173 cmd := exec.Command("git", "submodule", "update", "--init", "--recursive") 206 args := []string{"submodule", "update", "--init", "--recursive"}
207 if depth > 0 {
208 args = append(args, "--depth", strconv.Itoa(depth))
209 }
210 cmd := exec.CommandContext(ctx, "git", args...)
174 cmd.Dir = dst 211 cmd.Dir = dst
175 setupGitEnv(cmd, sshKeyFile) 212 setupGitEnv(cmd, sshKeyFile)
176 return getRunCommand(cmd) 213 return getRunCommand(cmd)
@@ -187,7 +224,7 @@ func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) {
187 // with versions of Go < 1.9. 224 // with versions of Go < 1.9.
188 env := os.Environ() 225 env := os.Environ()
189 for i, v := range env { 226 for i, v := range env {
190 if strings.HasPrefix(v, gitSSHCommand) { 227 if strings.HasPrefix(v, gitSSHCommand) && len(v) > len(gitSSHCommand) {
191 sshCmd = []string{v} 228 sshCmd = []string{v}
192 229
193 env[i], env[len(env)-1] = env[len(env)-1], env[i] 230 env[i], env[len(env)-1] = env[len(env)-1], env[i]
@@ -202,6 +239,9 @@ func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) {
202 239
203 if sshKeyFile != "" { 240 if sshKeyFile != "" {
204 // We have an SSH key temp file configured, tell ssh about this. 241 // We have an SSH key temp file configured, tell ssh about this.
242 if runtime.GOOS == "windows" {
243 sshKeyFile = strings.Replace(sshKeyFile, `\`, `/`, -1)
244 }
205 sshCmd = append(sshCmd, "-i", sshKeyFile) 245 sshCmd = append(sshCmd, "-i", sshKeyFile)
206 } 246 }
207 247
@@ -224,11 +264,20 @@ func checkGitVersion(min string) error {
224 } 264 }
225 265
226 fields := strings.Fields(string(out)) 266 fields := strings.Fields(string(out))
227 if len(fields) != 3 { 267 if len(fields) < 3 {
228 return fmt.Errorf("Unexpected 'git version' output: %q", string(out)) 268 return fmt.Errorf("Unexpected 'git version' output: %q", string(out))
229 } 269 }
270 v := fields[2]
271 if runtime.GOOS == "windows" && strings.Contains(v, ".windows.") {
272 // on windows, git version will return for example:
273 // git version 2.20.1.windows.1
 274 // Which does not follow the semantic versioning spec
275 // https://semver.org. We remove that part in order for
276 // go-version to not error.
277 v = v[:strings.Index(v, ".windows.")]
278 }
230 279
231 have, err := version.NewVersion(fields[2]) 280 have, err := version.NewVersion(v)
232 if err != nil { 281 if err != nil {
233 return err 282 return err
234 } 283 }
diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go
index f386922..290649c 100644
--- a/vendor/github.com/hashicorp/go-getter/get_hg.go
+++ b/vendor/github.com/hashicorp/go-getter/get_hg.go
@@ -1,6 +1,7 @@
1package getter 1package getter
2 2
3import ( 3import (
4 "context"
4 "fmt" 5 "fmt"
5 "net/url" 6 "net/url"
6 "os" 7 "os"
@@ -9,18 +10,21 @@ import (
9 "runtime" 10 "runtime"
10 11
11 urlhelper "github.com/hashicorp/go-getter/helper/url" 12 urlhelper "github.com/hashicorp/go-getter/helper/url"
12 "github.com/hashicorp/go-safetemp" 13 safetemp "github.com/hashicorp/go-safetemp"
13) 14)
14 15
15// HgGetter is a Getter implementation that will download a module from 16// HgGetter is a Getter implementation that will download a module from
16// a Mercurial repository. 17// a Mercurial repository.
17type HgGetter struct{} 18type HgGetter struct {
19 getter
20}
18 21
19func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) { 22func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) {
20 return ClientModeDir, nil 23 return ClientModeDir, nil
21} 24}
22 25
23func (g *HgGetter) Get(dst string, u *url.URL) error { 26func (g *HgGetter) Get(dst string, u *url.URL) error {
27 ctx := g.Context()
24 if _, err := exec.LookPath("hg"); err != nil { 28 if _, err := exec.LookPath("hg"); err != nil {
25 return fmt.Errorf("hg must be available and on the PATH") 29 return fmt.Errorf("hg must be available and on the PATH")
26 } 30 }
@@ -58,7 +62,7 @@ func (g *HgGetter) Get(dst string, u *url.URL) error {
58 return err 62 return err
59 } 63 }
60 64
61 return g.update(dst, newURL, rev) 65 return g.update(ctx, dst, newURL, rev)
62} 66}
63 67
64// GetFile for Hg doesn't support updating at this time. It will download 68// GetFile for Hg doesn't support updating at this time. It will download
@@ -93,7 +97,7 @@ func (g *HgGetter) GetFile(dst string, u *url.URL) error {
93 return err 97 return err
94 } 98 }
95 99
96 fg := &FileGetter{Copy: true} 100 fg := &FileGetter{Copy: true, getter: g.getter}
97 return fg.GetFile(dst, u) 101 return fg.GetFile(dst, u)
98} 102}
99 103
@@ -108,13 +112,13 @@ func (g *HgGetter) pull(dst string, u *url.URL) error {
108 return getRunCommand(cmd) 112 return getRunCommand(cmd)
109} 113}
110 114
111func (g *HgGetter) update(dst string, u *url.URL, rev string) error { 115func (g *HgGetter) update(ctx context.Context, dst string, u *url.URL, rev string) error {
112 args := []string{"update"} 116 args := []string{"update"}
113 if rev != "" { 117 if rev != "" {
114 args = append(args, rev) 118 args = append(args, rev)
115 } 119 }
116 120
117 cmd := exec.Command("hg", args...) 121 cmd := exec.CommandContext(ctx, "hg", args...)
118 cmd.Dir = dst 122 cmd.Dir = dst
119 return getRunCommand(cmd) 123 return getRunCommand(cmd)
120} 124}
diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go
index d2e2879..7c4541c 100644
--- a/vendor/github.com/hashicorp/go-getter/get_http.go
+++ b/vendor/github.com/hashicorp/go-getter/get_http.go
@@ -1,6 +1,7 @@
1package getter 1package getter
2 2
3import ( 3import (
4 "context"
4 "encoding/xml" 5 "encoding/xml"
5 "fmt" 6 "fmt"
6 "io" 7 "io"
@@ -8,9 +9,10 @@ import (
8 "net/url" 9 "net/url"
9 "os" 10 "os"
10 "path/filepath" 11 "path/filepath"
12 "strconv"
11 "strings" 13 "strings"
12 14
13 "github.com/hashicorp/go-safetemp" 15 safetemp "github.com/hashicorp/go-safetemp"
14) 16)
15 17
16// HttpGetter is a Getter implementation that will download from an HTTP 18// HttpGetter is a Getter implementation that will download from an HTTP
@@ -18,7 +20,7 @@ import (
18// 20//
19// For file downloads, HTTP is used directly. 21// For file downloads, HTTP is used directly.
20// 22//
21// The protocol for downloading a directory from an HTTP endpoing is as follows: 23// The protocol for downloading a directory from an HTTP endpoint is as follows:
22// 24//
23// An HTTP GET request is made to the URL with the additional GET parameter 25// An HTTP GET request is made to the URL with the additional GET parameter
24// "terraform-get=1". This lets you handle that scenario specially if you 26// "terraform-get=1". This lets you handle that scenario specially if you
@@ -34,6 +36,8 @@ import (
34// formed URL. The shorthand syntax of "github.com/foo/bar" or relative 36// formed URL. The shorthand syntax of "github.com/foo/bar" or relative
35// paths are not allowed. 37// paths are not allowed.
36type HttpGetter struct { 38type HttpGetter struct {
39 getter
40
37 // Netrc, if true, will lookup and use auth information found 41 // Netrc, if true, will lookup and use auth information found
38 // in the user's netrc file if available. 42 // in the user's netrc file if available.
39 Netrc bool 43 Netrc bool
@@ -41,6 +45,12 @@ type HttpGetter struct {
41 // Client is the http.Client to use for Get requests. 45 // Client is the http.Client to use for Get requests.
42 // This defaults to a cleanhttp.DefaultClient if left unset. 46 // This defaults to a cleanhttp.DefaultClient if left unset.
43 Client *http.Client 47 Client *http.Client
48
49 // Header contains optional request header fields that should be included
50 // with every HTTP request. Note that the zero value of this field is nil,
51 // and as such it needs to be initialized before use, via something like
52 // make(http.Header).
53 Header http.Header
44} 54}
45 55
46func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) { 56func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) {
@@ -51,6 +61,7 @@ func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) {
51} 61}
52 62
53func (g *HttpGetter) Get(dst string, u *url.URL) error { 63func (g *HttpGetter) Get(dst string, u *url.URL) error {
64 ctx := g.Context()
54 // Copy the URL so we can modify it 65 // Copy the URL so we can modify it
55 var newU url.URL = *u 66 var newU url.URL = *u
56 u = &newU 67 u = &newU
@@ -72,10 +83,17 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error {
72 u.RawQuery = q.Encode() 83 u.RawQuery = q.Encode()
73 84
74 // Get the URL 85 // Get the URL
75 resp, err := g.Client.Get(u.String()) 86 req, err := http.NewRequest("GET", u.String(), nil)
76 if err != nil { 87 if err != nil {
77 return err 88 return err
78 } 89 }
90
91 req.Header = g.Header
92 resp, err := g.Client.Do(req)
93 if err != nil {
94 return err
95 }
96
79 defer resp.Body.Close() 97 defer resp.Body.Close()
80 if resp.StatusCode < 200 || resp.StatusCode >= 300 { 98 if resp.StatusCode < 200 || resp.StatusCode >= 300 {
81 return fmt.Errorf("bad response code: %d", resp.StatusCode) 99 return fmt.Errorf("bad response code: %d", resp.StatusCode)
@@ -99,57 +117,107 @@ func (g *HttpGetter) Get(dst string, u *url.URL) error {
99 // into a temporary directory, then copy over the proper subdir. 117 // into a temporary directory, then copy over the proper subdir.
100 source, subDir := SourceDirSubdir(source) 118 source, subDir := SourceDirSubdir(source)
101 if subDir == "" { 119 if subDir == "" {
102 return Get(dst, source) 120 var opts []ClientOption
121 if g.client != nil {
122 opts = g.client.Options
123 }
124 return Get(dst, source, opts...)
103 } 125 }
104 126
105 // We have a subdir, time to jump some hoops 127 // We have a subdir, time to jump some hoops
106 return g.getSubdir(dst, source, subDir) 128 return g.getSubdir(ctx, dst, source, subDir)
107} 129}
108 130
109func (g *HttpGetter) GetFile(dst string, u *url.URL) error { 131func (g *HttpGetter) GetFile(dst string, src *url.URL) error {
132 ctx := g.Context()
110 if g.Netrc { 133 if g.Netrc {
111 // Add auth from netrc if we can 134 // Add auth from netrc if we can
112 if err := addAuthFromNetrc(u); err != nil { 135 if err := addAuthFromNetrc(src); err != nil {
113 return err 136 return err
114 } 137 }
115 } 138 }
116 139
117 if g.Client == nil { 140 // Create all the parent directories if needed
118 g.Client = httpClient 141 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
142 return err
119 } 143 }
120 144
121 resp, err := g.Client.Get(u.String()) 145 f, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE, os.FileMode(0666))
122 if err != nil { 146 if err != nil {
123 return err 147 return err
124 } 148 }
125 defer resp.Body.Close() 149 defer f.Close()
126 if resp.StatusCode != 200 { 150
127 return fmt.Errorf("bad response code: %d", resp.StatusCode) 151 if g.Client == nil {
152 g.Client = httpClient
128 } 153 }
129 154
130 // Create all the parent directories 155 var currentFileSize int64
131 if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil { 156
157 // We first make a HEAD request so we can check
158 // if the server supports range queries. If the server/URL doesn't
159 // support HEAD requests, we just fall back to GET.
160 req, err := http.NewRequest("HEAD", src.String(), nil)
161 if err != nil {
132 return err 162 return err
133 } 163 }
164 if g.Header != nil {
165 req.Header = g.Header
166 }
167 headResp, err := g.Client.Do(req)
168 if err == nil && headResp != nil {
169 headResp.Body.Close()
170 if headResp.StatusCode == 200 {
171 // If the HEAD request succeeded, then attempt to set the range
172 // query if we can.
173 if headResp.Header.Get("Accept-Ranges") == "bytes" {
174 if fi, err := f.Stat(); err == nil {
175 if _, err = f.Seek(0, os.SEEK_END); err == nil {
176 req.Header.Set("Range", fmt.Sprintf("bytes=%d-", fi.Size()))
177 currentFileSize = fi.Size()
178 totalFileSize, _ := strconv.ParseInt(headResp.Header.Get("Content-Length"), 10, 64)
179 if currentFileSize >= totalFileSize {
180 // file already present
181 return nil
182 }
183 }
184 }
185 }
186 }
187 }
188 req.Method = "GET"
134 189
135 f, err := os.Create(dst) 190 resp, err := g.Client.Do(req)
136 if err != nil { 191 if err != nil {
137 return err 192 return err
138 } 193 }
194 switch resp.StatusCode {
195 case http.StatusOK, http.StatusPartialContent:
196 // all good
197 default:
198 resp.Body.Close()
199 return fmt.Errorf("bad response code: %d", resp.StatusCode)
200 }
201
202 body := resp.Body
139 203
140 n, err := io.Copy(f, resp.Body) 204 if g.client != nil && g.client.ProgressListener != nil {
205 // track download
206 fn := filepath.Base(src.EscapedPath())
207 body = g.client.ProgressListener.TrackProgress(fn, currentFileSize, currentFileSize+resp.ContentLength, resp.Body)
208 }
209 defer body.Close()
210
211 n, err := Copy(ctx, f, body)
141 if err == nil && n < resp.ContentLength { 212 if err == nil && n < resp.ContentLength {
142 err = io.ErrShortWrite 213 err = io.ErrShortWrite
143 } 214 }
144 if err1 := f.Close(); err == nil {
145 err = err1
146 }
147 return err 215 return err
148} 216}
149 217
150// getSubdir downloads the source into the destination, but with 218// getSubdir downloads the source into the destination, but with
151// the proper subdir. 219// the proper subdir.
152func (g *HttpGetter) getSubdir(dst, source, subDir string) error { 220func (g *HttpGetter) getSubdir(ctx context.Context, dst, source, subDir string) error {
153 // Create a temporary directory to store the full source. This has to be 221 // Create a temporary directory to store the full source. This has to be
154 // a non-existent directory. 222 // a non-existent directory.
155 td, tdcloser, err := safetemp.Dir("", "getter") 223 td, tdcloser, err := safetemp.Dir("", "getter")
@@ -158,8 +226,12 @@ func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
158 } 226 }
159 defer tdcloser.Close() 227 defer tdcloser.Close()
160 228
229 var opts []ClientOption
230 if g.client != nil {
231 opts = g.client.Options
232 }
161 // Download that into the given directory 233 // Download that into the given directory
162 if err := Get(td, source); err != nil { 234 if err := Get(td, source, opts...); err != nil {
163 return err 235 return err
164 } 236 }
165 237
@@ -185,7 +257,7 @@ func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
185 return err 257 return err
186 } 258 }
187 259
188 return copyDir(dst, sourcePath, false) 260 return copyDir(ctx, dst, sourcePath, false)
189} 261}
190 262
191// parseMeta looks for the first meta tag in the given reader that 263// parseMeta looks for the first meta tag in the given reader that
diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go
index 882e694..e2a98ea 100644
--- a/vendor/github.com/hashicorp/go-getter/get_mock.go
+++ b/vendor/github.com/hashicorp/go-getter/get_mock.go
@@ -6,6 +6,8 @@ import (
6 6
7// MockGetter is an implementation of Getter that can be used for tests. 7// MockGetter is an implementation of Getter that can be used for tests.
8type MockGetter struct { 8type MockGetter struct {
9 getter
10
9 // Proxy, if set, will be called after recording the calls below. 11 // Proxy, if set, will be called after recording the calls below.
10 // If it isn't set, then the *Err values will be returned. 12 // If it isn't set, then the *Err values will be returned.
11 Proxy Getter 13 Proxy Getter
diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go
index ebb3217..93eeb0b 100644
--- a/vendor/github.com/hashicorp/go-getter/get_s3.go
+++ b/vendor/github.com/hashicorp/go-getter/get_s3.go
@@ -1,8 +1,8 @@
1package getter 1package getter
2 2
3import ( 3import (
4 "context"
4 "fmt" 5 "fmt"
5 "io"
6 "net/url" 6 "net/url"
7 "os" 7 "os"
8 "path/filepath" 8 "path/filepath"
@@ -18,7 +18,9 @@ import (
18 18
19// S3Getter is a Getter implementation that will download a module from 19// S3Getter is a Getter implementation that will download a module from
20// a S3 bucket. 20// a S3 bucket.
21type S3Getter struct{} 21type S3Getter struct {
22 getter
23}
22 24
23func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) { 25func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
24 // Parse URL 26 // Parse URL
@@ -60,6 +62,8 @@ func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
60} 62}
61 63
62func (g *S3Getter) Get(dst string, u *url.URL) error { 64func (g *S3Getter) Get(dst string, u *url.URL) error {
65 ctx := g.Context()
66
63 // Parse URL 67 // Parse URL
64 region, bucket, path, _, creds, err := g.parseUrl(u) 68 region, bucket, path, _, creds, err := g.parseUrl(u)
65 if err != nil { 69 if err != nil {
@@ -124,7 +128,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error {
124 } 128 }
125 objDst = filepath.Join(dst, objDst) 129 objDst = filepath.Join(dst, objDst)
126 130
127 if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil { 131 if err := g.getObject(ctx, client, objDst, bucket, objPath, ""); err != nil {
128 return err 132 return err
129 } 133 }
130 } 134 }
@@ -134,6 +138,7 @@ func (g *S3Getter) Get(dst string, u *url.URL) error {
134} 138}
135 139
136func (g *S3Getter) GetFile(dst string, u *url.URL) error { 140func (g *S3Getter) GetFile(dst string, u *url.URL) error {
141 ctx := g.Context()
137 region, bucket, path, version, creds, err := g.parseUrl(u) 142 region, bucket, path, version, creds, err := g.parseUrl(u)
138 if err != nil { 143 if err != nil {
139 return err 144 return err
@@ -142,10 +147,10 @@ func (g *S3Getter) GetFile(dst string, u *url.URL) error {
142 config := g.getAWSConfig(region, u, creds) 147 config := g.getAWSConfig(region, u, creds)
143 sess := session.New(config) 148 sess := session.New(config)
144 client := s3.New(sess) 149 client := s3.New(sess)
145 return g.getObject(client, dst, bucket, path, version) 150 return g.getObject(ctx, client, dst, bucket, path, version)
146} 151}
147 152
148func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error { 153func (g *S3Getter) getObject(ctx context.Context, client *s3.S3, dst, bucket, key, version string) error {
149 req := &s3.GetObjectInput{ 154 req := &s3.GetObjectInput{
150 Bucket: aws.String(bucket), 155 Bucket: aws.String(bucket),
151 Key: aws.String(key), 156 Key: aws.String(key),
@@ -170,7 +175,7 @@ func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) er
170 } 175 }
171 defer f.Close() 176 defer f.Close()
172 177
173 _, err = io.Copy(f, resp.Body) 178 _, err = Copy(ctx, f, resp.Body)
174 return err 179 return err
175} 180}
176 181
diff --git a/vendor/github.com/hashicorp/go-getter/go.mod b/vendor/github.com/hashicorp/go-getter/go.mod
new file mode 100644
index 0000000..807c0a2
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/go.mod
@@ -0,0 +1,22 @@
1module github.com/hashicorp/go-getter
2
3require (
4 cloud.google.com/go v0.36.0
5 github.com/aws/aws-sdk-go v1.15.78
6 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d
7 github.com/cheggaaa/pb v1.0.27
8 github.com/fatih/color v1.7.0 // indirect
9 github.com/hashicorp/go-cleanhttp v0.5.0
10 github.com/hashicorp/go-safetemp v1.0.0
11 github.com/hashicorp/go-version v1.1.0
12 github.com/mattn/go-colorable v0.0.9 // indirect
13 github.com/mattn/go-isatty v0.0.4 // indirect
14 github.com/mattn/go-runewidth v0.0.4 // indirect
15 github.com/mitchellh/go-homedir v1.0.0
16 github.com/mitchellh/go-testing-interface v1.0.0
17 github.com/ulikunitz/xz v0.5.5
18 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a // indirect
19 golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 // indirect
20 google.golang.org/api v0.1.0
21 gopkg.in/cheggaaa/pb.v1 v1.0.27 // indirect
22)
diff --git a/vendor/github.com/hashicorp/go-getter/go.sum b/vendor/github.com/hashicorp/go-getter/go.sum
new file mode 100644
index 0000000..0fc5088
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-getter/go.sum
@@ -0,0 +1,182 @@
1cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
2cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
3cloud.google.com/go v0.36.0 h1:+aCSj7tOo2LODWVEuZDZeGCckdt6MlSF+X/rB3wUiS8=
4cloud.google.com/go v0.36.0/go.mod h1:RUoy9p/M4ge0HzT8L+SDZ8jg+Q6fth0CiBuhFJpSV40=
5dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
6dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
7dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
8dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
9git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
10github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
11github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
12github.com/aws/aws-sdk-go v1.15.78 h1:LaXy6lWR0YK7LKyuU0QWy2ws/LWTPfYV/UgfiBu4tvY=
13github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
14github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
15github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
16github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
17github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
18github.com/cheggaaa/pb v1.0.27 h1:wIkZHkNfC7R6GI5w7l/PdAdzXzlrbcI3p8OAlnkTsnc=
19github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
20github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
21github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
22github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
23github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
24github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
25github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
26github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
27github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
28github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
29github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
30github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
31github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
32github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
33github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
34github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
35github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
36github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
37github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
38github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
39github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
40github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
41github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
42github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
43github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
44github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
45github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
46github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
47github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
48github.com/googleapis/gax-go/v2 v2.0.3 h1:siORttZ36U2R/WjiJuDz8znElWBiAlO9rVt+mqJt0Cc=
49github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
50github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
51github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
52github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
53github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
54github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
55github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
56github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
57github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
58github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
59github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
60github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE=
61github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
62github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
63github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
64github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
65github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
66github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
67github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
68github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
69github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
70github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
71github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
72github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
73github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
74github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
75github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
76github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
77github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
78github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
79github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
80github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
81github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
82github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
83github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
84github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
85github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
86github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
87github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
88github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
89github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
90github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
91github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
92github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
93github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
94github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
95github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
96github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
97github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
98github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
99github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
100github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
101github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
102github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
103github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
104github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
105github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
106github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
107github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
108github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
109github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
110github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
111github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
112github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
113github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
114github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
115github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
116github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
117github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
118github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
119go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938=
120go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
121go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
122golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
123golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
124golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
125golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
126golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
127golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
128golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
129golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
130golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
131golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
132golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U=
133golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
134golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
135golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
136golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 h1:uESlIz09WIHT2I+pasSXcpLYqYK8wHcdCetU3VuMBJE=
137golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
138golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
139golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
140golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
141golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
142golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
143golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
144golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
145golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06 h1:0oC8rFnE+74kEmuHZ46F6KHsMr5Gx2gUQPuNz28iQZM=
146golang.org/x/sys v0.0.0-20181213200352-4d1cda033e06/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
147golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
148golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
149golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
150golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
151golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
152golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
153golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
154golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
155google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
156google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
157google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI=
158google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
159google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
160google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
161google.golang.org/appengine v1.3.0 h1:FBSsiFRMz3LBeXIomRnVzrQwSDj4ibvcRexLG0LZGQk=
162google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
163google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
164google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
165google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
166google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
167google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 h1:mBVYJnbrXLA/ZCBTCe7PtEgAUP+1bg92qTaFoPHdz+8=
168google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922/go.mod h1:L3J43x8/uS+qIUoksaLKe6OS3nUKxOKuIFz1sl2/jx4=
169google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
170google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
171google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk=
172google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
173gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
174gopkg.in/cheggaaa/pb.v1 v1.0.27 h1:kJdccidYzt3CaHD1crCFTS1hxyhSi059NhOFUf03YFo=
175gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
176gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
177gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
178grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
179honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
180honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
181sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
182sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
index 4655226..4280ec5 100644
--- a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
+++ b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
@@ -11,19 +11,18 @@ func parse(rawURL string) (*url.URL, error) {
11 // Make sure we're using "/" since URLs are "/"-based. 11 // Make sure we're using "/" since URLs are "/"-based.
12 rawURL = filepath.ToSlash(rawURL) 12 rawURL = filepath.ToSlash(rawURL)
13 13
14 if len(rawURL) > 1 && rawURL[1] == ':' {
15 // Assume we're dealing with a drive letter. In which case we
16 // force the 'file' scheme to avoid "net/url" URL.String() prepending
17 // our url with "./".
18 rawURL = "file://" + rawURL
19 }
20
14 u, err := url.Parse(rawURL) 21 u, err := url.Parse(rawURL)
15 if err != nil { 22 if err != nil {
16 return nil, err 23 return nil, err
17 } 24 }
18 25
19 if len(rawURL) > 1 && rawURL[1] == ':' {
20 // Assume we're dealing with a drive letter file path where the drive
21 // letter has been parsed into the URL Scheme, and the rest of the path
22 // has been parsed into the URL Path without the leading ':' character.
23 u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path)
24 u.Scheme = ""
25 }
26
27 if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") { 26 if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") {
28 // Assume we're dealing with a drive letter file path where the drive 27 // Assume we're dealing with a drive letter file path where the drive
29 // letter has been parsed into the URL Host. 28 // letter has been parsed into the URL Host.
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
index c63f2bb..dab6d40 100644
--- a/vendor/github.com/hashicorp/go-getter/source.go
+++ b/vendor/github.com/hashicorp/go-getter/source.go
@@ -6,18 +6,31 @@ import (
6 "strings" 6 "strings"
7) 7)
8 8
9// SourceDirSubdir takes a source and returns a tuple of the URL without 9// SourceDirSubdir takes a source URL and returns a tuple of the URL without
10// the subdir and the URL with the subdir. 10// the subdir and the subdir.
11//
12// ex:
13// dom.com/path/?q=p => dom.com/path/?q=p, ""
14// proto://dom.com/path//*?q=p => proto://dom.com/path?q=p, "*"
15// proto://dom.com/path//path2?q=p => proto://dom.com/path?q=p, "path2"
16//
11func SourceDirSubdir(src string) (string, string) { 17func SourceDirSubdir(src string) (string, string) {
12 // Calcaulate an offset to avoid accidentally marking the scheme 18
19 // URL might contains another url in query parameters
20 stop := len(src)
21 if idx := strings.Index(src, "?"); idx > -1 {
22 stop = idx
23 }
24
25 // Calculate an offset to avoid accidentally marking the scheme
13 // as the dir. 26 // as the dir.
14 var offset int 27 var offset int
15 if idx := strings.Index(src, "://"); idx > -1 { 28 if idx := strings.Index(src[:stop], "://"); idx > -1 {
16 offset = idx + 3 29 offset = idx + 3
17 } 30 }
18 31
19 // First see if we even have an explicit subdir 32 // First see if we even have an explicit subdir
20 idx := strings.Index(src[offset:], "//") 33 idx := strings.Index(src[offset:stop], "//")
21 if idx == -1 { 34 if idx == -1 {
22 return src, "" 35 return src, ""
23 } 36 }
diff --git a/vendor/github.com/hashicorp/go-hclog/.gitignore b/vendor/github.com/hashicorp/go-hclog/.gitignore
new file mode 100644
index 0000000..42cc410
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/.gitignore
@@ -0,0 +1 @@
.idea* \ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-hclog/README.md b/vendor/github.com/hashicorp/go-hclog/README.md
index 614342b..1153e28 100644
--- a/vendor/github.com/hashicorp/go-hclog/README.md
+++ b/vendor/github.com/hashicorp/go-hclog/README.md
@@ -10,8 +10,7 @@ interface for use in development and production environments.
10It provides logging levels that provide decreased output based upon the 10It provides logging levels that provide decreased output based upon the
11desired amount of output, unlike the standard library `log` package. 11desired amount of output, unlike the standard library `log` package.
12 12
13It does not provide `Printf` style logging, only key/value logging that is 13It provides `Printf` style logging of values via `hclog.Fmt()`.
14exposed as arguments to the logging functions for simplicity.
15 14
16It provides a human readable output mode for use in development as well as 15It provides a human readable output mode for use in development as well as
17JSON output mode for production. 16JSON output mode for production.
@@ -100,6 +99,17 @@ requestLogger.Info("we are transporting a request")
100This allows sub Loggers to be context specific without having to thread that 99This allows sub Loggers to be context specific without having to thread that
101into all the callers. 100into all the callers.
102 101
102### Using `hclog.Fmt()`
103
104```go
105var int totalBandwidth = 200
106appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
107```
108
109```text
110... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s"
111```
112
103### Use this with code that uses the standard library logger 113### Use this with code that uses the standard library logger
104 114
105If you want to use the standard library's `log.Logger` interface you can wrap 115If you want to use the standard library's `log.Logger` interface you can wrap
diff --git a/vendor/github.com/hashicorp/go-hclog/go.mod b/vendor/github.com/hashicorp/go-hclog/go.mod
new file mode 100644
index 0000000..0d079a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/go.mod
@@ -0,0 +1,7 @@
1module github.com/hashicorp/go-hclog
2
3require (
4 github.com/davecgh/go-spew v1.1.1 // indirect
5 github.com/pmezard/go-difflib v1.0.0 // indirect
6 github.com/stretchr/testify v1.2.2
7)
diff --git a/vendor/github.com/hashicorp/go-hclog/go.sum b/vendor/github.com/hashicorp/go-hclog/go.sum
new file mode 100644
index 0000000..e03ee77
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/go.sum
@@ -0,0 +1,6 @@
1github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
2github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
3github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
4github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
5github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
6github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
diff --git a/vendor/github.com/hashicorp/go-hclog/int.go b/vendor/github.com/hashicorp/go-hclog/int.go
index 9f90c28..2aaa1f8 100644
--- a/vendor/github.com/hashicorp/go-hclog/int.go
+++ b/vendor/github.com/hashicorp/go-hclog/int.go
@@ -2,14 +2,19 @@ package hclog
2 2
3import ( 3import (
4 "bufio" 4 "bufio"
5 "bytes"
6 "encoding"
5 "encoding/json" 7 "encoding/json"
6 "fmt" 8 "fmt"
7 "log" 9 "log"
8 "os" 10 "os"
11 "reflect"
9 "runtime" 12 "runtime"
13 "sort"
10 "strconv" 14 "strconv"
11 "strings" 15 "strings"
12 "sync" 16 "sync"
17 "sync/atomic"
13 "time" 18 "time"
14) 19)
15 20
@@ -17,8 +22,8 @@ var (
17 _levelToBracket = map[Level]string{ 22 _levelToBracket = map[Level]string{
18 Debug: "[DEBUG]", 23 Debug: "[DEBUG]",
19 Trace: "[TRACE]", 24 Trace: "[TRACE]",
20 Info: "[INFO ]", 25 Info: "[INFO] ",
21 Warn: "[WARN ]", 26 Warn: "[WARN] ",
22 Error: "[ERROR]", 27 Error: "[ERROR]",
23 } 28 }
24) 29)
@@ -39,28 +44,40 @@ func New(opts *LoggerOptions) Logger {
39 level = DefaultLevel 44 level = DefaultLevel
40 } 45 }
41 46
42 return &intLogger{ 47 mtx := opts.Mutex
43 m: new(sync.Mutex), 48 if mtx == nil {
44 json: opts.JSONFormat, 49 mtx = new(sync.Mutex)
45 caller: opts.IncludeLocation,
46 name: opts.Name,
47 w: bufio.NewWriter(output),
48 level: level,
49 } 50 }
51
52 ret := &intLogger{
53 m: mtx,
54 json: opts.JSONFormat,
55 caller: opts.IncludeLocation,
56 name: opts.Name,
57 timeFormat: TimeFormat,
58 w: bufio.NewWriter(output),
59 level: new(int32),
60 }
61 if opts.TimeFormat != "" {
62 ret.timeFormat = opts.TimeFormat
63 }
64 atomic.StoreInt32(ret.level, int32(level))
65 return ret
50} 66}
51 67
52// The internal logger implementation. Internal in that it is defined entirely 68// The internal logger implementation. Internal in that it is defined entirely
53// by this package. 69// by this package.
54type intLogger struct { 70type intLogger struct {
55 json bool 71 json bool
56 caller bool 72 caller bool
57 name string 73 name string
74 timeFormat string
58 75
59 // this is a pointer so that it's shared by any derived loggers, since 76 // this is a pointer so that it's shared by any derived loggers, since
60 // those derived loggers share the bufio.Writer as well. 77 // those derived loggers share the bufio.Writer as well.
61 m *sync.Mutex 78 m *sync.Mutex
62 w *bufio.Writer 79 w *bufio.Writer
63 level Level 80 level *int32
64 81
65 implied []interface{} 82 implied []interface{}
66} 83}
@@ -75,7 +92,7 @@ const TimeFormat = "2006-01-02T15:04:05.000Z0700"
75// Log a message and a set of key/value pairs if the given level is at 92// Log a message and a set of key/value pairs if the given level is at
76// or more severe that the threshold configured in the Logger. 93// or more severe that the threshold configured in the Logger.
77func (z *intLogger) Log(level Level, msg string, args ...interface{}) { 94func (z *intLogger) Log(level Level, msg string, args ...interface{}) {
78 if level < z.level { 95 if level < Level(atomic.LoadInt32(z.level)) {
79 return 96 return
80 } 97 }
81 98
@@ -126,14 +143,14 @@ func trimCallerPath(path string) string {
126 143
127// Non-JSON logging format function 144// Non-JSON logging format function
128func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) { 145func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{}) {
129 z.w.WriteString(t.Format(TimeFormat)) 146 z.w.WriteString(t.Format(z.timeFormat))
130 z.w.WriteByte(' ') 147 z.w.WriteByte(' ')
131 148
132 s, ok := _levelToBracket[level] 149 s, ok := _levelToBracket[level]
133 if ok { 150 if ok {
134 z.w.WriteString(s) 151 z.w.WriteString(s)
135 } else { 152 } else {
136 z.w.WriteString("[UNKN ]") 153 z.w.WriteString("[?????]")
137 } 154 }
138 155
139 if z.caller { 156 if z.caller {
@@ -174,7 +191,10 @@ func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{
174 191
175 FOR: 192 FOR:
176 for i := 0; i < len(args); i = i + 2 { 193 for i := 0; i < len(args); i = i + 2 {
177 var val string 194 var (
195 val string
196 raw bool
197 )
178 198
179 switch st := args[i+1].(type) { 199 switch st := args[i+1].(type) {
180 case string: 200 case string:
@@ -202,15 +222,23 @@ func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{
202 case CapturedStacktrace: 222 case CapturedStacktrace:
203 stacktrace = st 223 stacktrace = st
204 continue FOR 224 continue FOR
225 case Format:
226 val = fmt.Sprintf(st[0].(string), st[1:]...)
205 default: 227 default:
206 val = fmt.Sprintf("%v", st) 228 v := reflect.ValueOf(st)
229 if v.Kind() == reflect.Slice {
230 val = z.renderSlice(v)
231 raw = true
232 } else {
233 val = fmt.Sprintf("%v", st)
234 }
207 } 235 }
208 236
209 z.w.WriteByte(' ') 237 z.w.WriteByte(' ')
210 z.w.WriteString(args[i].(string)) 238 z.w.WriteString(args[i].(string))
211 z.w.WriteByte('=') 239 z.w.WriteByte('=')
212 240
213 if strings.ContainsAny(val, " \t\n\r") { 241 if !raw && strings.ContainsAny(val, " \t\n\r") {
214 z.w.WriteByte('"') 242 z.w.WriteByte('"')
215 z.w.WriteString(val) 243 z.w.WriteString(val)
216 z.w.WriteByte('"') 244 z.w.WriteByte('"')
@@ -227,6 +255,45 @@ func (z *intLogger) log(t time.Time, level Level, msg string, args ...interface{
227 } 255 }
228} 256}
229 257
258func (z *intLogger) renderSlice(v reflect.Value) string {
259 var buf bytes.Buffer
260
261 buf.WriteRune('[')
262
263 for i := 0; i < v.Len(); i++ {
264 if i > 0 {
265 buf.WriteString(", ")
266 }
267
268 sv := v.Index(i)
269
270 var val string
271
272 switch sv.Kind() {
273 case reflect.String:
274 val = sv.String()
275 case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64:
276 val = strconv.FormatInt(sv.Int(), 10)
277 case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
278 val = strconv.FormatUint(sv.Uint(), 10)
279 default:
280 val = fmt.Sprintf("%v", sv.Interface())
281 }
282
283 if strings.ContainsAny(val, " \t\n\r") {
284 buf.WriteByte('"')
285 buf.WriteString(val)
286 buf.WriteByte('"')
287 } else {
288 buf.WriteString(val)
289 }
290 }
291
292 buf.WriteRune(']')
293
294 return buf.String()
295}
296
230// JSON logging function 297// JSON logging function
231func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) { 298func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interface{}) {
232 vals := map[string]interface{}{ 299 vals := map[string]interface{}{
@@ -262,6 +329,8 @@ func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interf
262 } 329 }
263 } 330 }
264 331
332 args = append(z.implied, args...)
333
265 if args != nil && len(args) > 0 { 334 if args != nil && len(args) > 0 {
266 if len(args)%2 != 0 { 335 if len(args)%2 != 0 {
267 cs, ok := args[len(args)-1].(CapturedStacktrace) 336 cs, ok := args[len(args)-1].(CapturedStacktrace)
@@ -279,7 +348,22 @@ func (z *intLogger) logJson(t time.Time, level Level, msg string, args ...interf
279 // without injecting into logs... 348 // without injecting into logs...
280 continue 349 continue
281 } 350 }
282 vals[args[i].(string)] = args[i+1] 351 val := args[i+1]
352 switch sv := val.(type) {
353 case error:
354 // Check if val is of type error. If error type doesn't
355 // implement json.Marshaler or encoding.TextMarshaler
356 // then set val to err.Error() so that it gets marshaled
357 switch sv.(type) {
358 case json.Marshaler, encoding.TextMarshaler:
359 default:
360 val = sv.Error()
361 }
362 case Format:
363 val = fmt.Sprintf(sv[0].(string), sv[1:]...)
364 }
365
366 vals[args[i].(string)] = val
283 } 367 }
284 } 368 }
285 369
@@ -316,36 +400,66 @@ func (z *intLogger) Error(msg string, args ...interface{}) {
316 400
317// Indicate that the logger would emit TRACE level logs 401// Indicate that the logger would emit TRACE level logs
318func (z *intLogger) IsTrace() bool { 402func (z *intLogger) IsTrace() bool {
319 return z.level == Trace 403 return Level(atomic.LoadInt32(z.level)) == Trace
320} 404}
321 405
322// Indicate that the logger would emit DEBUG level logs 406// Indicate that the logger would emit DEBUG level logs
323func (z *intLogger) IsDebug() bool { 407func (z *intLogger) IsDebug() bool {
324 return z.level <= Debug 408 return Level(atomic.LoadInt32(z.level)) <= Debug
325} 409}
326 410
327// Indicate that the logger would emit INFO level logs 411// Indicate that the logger would emit INFO level logs
328func (z *intLogger) IsInfo() bool { 412func (z *intLogger) IsInfo() bool {
329 return z.level <= Info 413 return Level(atomic.LoadInt32(z.level)) <= Info
330} 414}
331 415
332// Indicate that the logger would emit WARN level logs 416// Indicate that the logger would emit WARN level logs
333func (z *intLogger) IsWarn() bool { 417func (z *intLogger) IsWarn() bool {
334 return z.level <= Warn 418 return Level(atomic.LoadInt32(z.level)) <= Warn
335} 419}
336 420
337// Indicate that the logger would emit ERROR level logs 421// Indicate that the logger would emit ERROR level logs
338func (z *intLogger) IsError() bool { 422func (z *intLogger) IsError() bool {
339 return z.level <= Error 423 return Level(atomic.LoadInt32(z.level)) <= Error
340} 424}
341 425
342// Return a sub-Logger for which every emitted log message will contain 426// Return a sub-Logger for which every emitted log message will contain
343// the given key/value pairs. This is used to create a context specific 427// the given key/value pairs. This is used to create a context specific
344// Logger. 428// Logger.
345func (z *intLogger) With(args ...interface{}) Logger { 429func (z *intLogger) With(args ...interface{}) Logger {
430 if len(args)%2 != 0 {
431 panic("With() call requires paired arguments")
432 }
433
346 var nz intLogger = *z 434 var nz intLogger = *z
347 435
348 nz.implied = append(nz.implied, args...) 436 result := make(map[string]interface{}, len(z.implied)+len(args))
437 keys := make([]string, 0, len(z.implied)+len(args))
438
439 // Read existing args, store map and key for consistent sorting
440 for i := 0; i < len(z.implied); i += 2 {
441 key := z.implied[i].(string)
442 keys = append(keys, key)
443 result[key] = z.implied[i+1]
444 }
445 // Read new args, store map and key for consistent sorting
446 for i := 0; i < len(args); i += 2 {
447 key := args[i].(string)
448 _, exists := result[key]
449 if !exists {
450 keys = append(keys, key)
451 }
452 result[key] = args[i+1]
453 }
454
455 // Sort keys to be consistent
456 sort.Strings(keys)
457
458 nz.implied = make([]interface{}, 0, len(z.implied)+len(args))
459 for _, k := range keys {
460 nz.implied = append(nz.implied, k)
461 nz.implied = append(nz.implied, result[k])
462 }
349 463
350 return &nz 464 return &nz
351} 465}
@@ -357,6 +471,8 @@ func (z *intLogger) Named(name string) Logger {
357 471
358 if nz.name != "" { 472 if nz.name != "" {
359 nz.name = nz.name + "." + name 473 nz.name = nz.name + "." + name
474 } else {
475 nz.name = name
360 } 476 }
361 477
362 return &nz 478 return &nz
@@ -373,6 +489,12 @@ func (z *intLogger) ResetNamed(name string) Logger {
373 return &nz 489 return &nz
374} 490}
375 491
492// Update the logging level on-the-fly. This will affect all subloggers as
493// well.
494func (z *intLogger) SetLevel(level Level) {
495 atomic.StoreInt32(z.level, int32(level))
496}
497
376// Create a *log.Logger that will send it's data through this Logger. This 498// Create a *log.Logger that will send it's data through this Logger. This
377// allows packages that expect to be using the standard library log to actually 499// allows packages that expect to be using the standard library log to actually
378// use this logger. 500// use this logger.
diff --git a/vendor/github.com/hashicorp/go-hclog/log.go b/vendor/github.com/hashicorp/go-hclog/log.go
index 6bb16ba..d98714e 100644
--- a/vendor/github.com/hashicorp/go-hclog/log.go
+++ b/vendor/github.com/hashicorp/go-hclog/log.go
@@ -5,6 +5,7 @@ import (
5 "log" 5 "log"
6 "os" 6 "os"
7 "strings" 7 "strings"
8 "sync"
8) 9)
9 10
10var ( 11var (
@@ -12,7 +13,7 @@ var (
12 DefaultLevel = Info 13 DefaultLevel = Info
13) 14)
14 15
15type Level int 16type Level int32
16 17
17const ( 18const (
18 // This is a special level used to indicate that no level has been 19 // This is a special level used to indicate that no level has been
@@ -36,6 +37,18 @@ const (
36 Error Level = 5 37 Error Level = 5
37) 38)
38 39
40// When processing a value of this type, the logger automatically treats the first
41// argument as a Printf formatting string and passes the rest as the values to be
42// formatted. For example: L.Info(Fmt{"%d beans/day", beans}). This is a simple
43// convience type for when formatting is required.
44type Format []interface{}
45
46// Fmt returns a Format type. This is a convience function for creating a Format
47// type.
48func Fmt(str string, args ...interface{}) Format {
49 return append(Format{str}, args...)
50}
51
39// LevelFromString returns a Level type for the named log level, or "NoLevel" if 52// LevelFromString returns a Level type for the named log level, or "NoLevel" if
40// the level string is invalid. This facilitates setting the log level via 53// the level string is invalid. This facilitates setting the log level via
41// config or environment variable by name in a predictable way. 54// config or environment variable by name in a predictable way.
@@ -108,6 +121,10 @@ type Logger interface {
108 // the current name as well. 121 // the current name as well.
109 ResetNamed(name string) Logger 122 ResetNamed(name string) Logger
110 123
124 // Updates the level. This should affect all sub-loggers as well. If an
125 // implementation cannot update the level on the fly, it should no-op.
126 SetLevel(level Level)
127
111 // Return a value that conforms to the stdlib log.Logger interface 128 // Return a value that conforms to the stdlib log.Logger interface
112 StandardLogger(opts *StandardLoggerOptions) *log.Logger 129 StandardLogger(opts *StandardLoggerOptions) *log.Logger
113} 130}
@@ -127,12 +144,18 @@ type LoggerOptions struct {
127 // The threshold for the logger. Anything less severe is supressed 144 // The threshold for the logger. Anything less severe is supressed
128 Level Level 145 Level Level
129 146
130 // Where to write the logs to. Defaults to os.Stdout if nil 147 // Where to write the logs to. Defaults to os.Stderr if nil
131 Output io.Writer 148 Output io.Writer
132 149
150 // An optional mutex pointer in case Output is shared
151 Mutex *sync.Mutex
152
133 // Control if the output should be in JSON. 153 // Control if the output should be in JSON.
134 JSONFormat bool 154 JSONFormat bool
135 155
136 // Intclude file and line information in each log line 156 // Include file and line information in each log line
137 IncludeLocation bool 157 IncludeLocation bool
158
159 // The time format to use instead of the default
160 TimeFormat string
138} 161}
diff --git a/vendor/github.com/hashicorp/go-hclog/nulllogger.go b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
new file mode 100644
index 0000000..0942361
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-hclog/nulllogger.go
@@ -0,0 +1,47 @@
1package hclog
2
3import (
4 "io/ioutil"
5 "log"
6)
7
8// NewNullLogger instantiates a Logger for which all calls
9// will succeed without doing anything.
10// Useful for testing purposes.
11func NewNullLogger() Logger {
12 return &nullLogger{}
13}
14
15type nullLogger struct{}
16
17func (l *nullLogger) Trace(msg string, args ...interface{}) {}
18
19func (l *nullLogger) Debug(msg string, args ...interface{}) {}
20
21func (l *nullLogger) Info(msg string, args ...interface{}) {}
22
23func (l *nullLogger) Warn(msg string, args ...interface{}) {}
24
25func (l *nullLogger) Error(msg string, args ...interface{}) {}
26
27func (l *nullLogger) IsTrace() bool { return false }
28
29func (l *nullLogger) IsDebug() bool { return false }
30
31func (l *nullLogger) IsInfo() bool { return false }
32
33func (l *nullLogger) IsWarn() bool { return false }
34
35func (l *nullLogger) IsError() bool { return false }
36
37func (l *nullLogger) With(args ...interface{}) Logger { return l }
38
39func (l *nullLogger) Named(name string) Logger { return l }
40
41func (l *nullLogger) ResetNamed(name string) Logger { return l }
42
43func (l *nullLogger) SetLevel(level Level) {}
44
45func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
46 return log.New(ioutil.Discard, "", log.LstdFlags)
47}
diff --git a/vendor/github.com/hashicorp/go-multierror/.travis.yml b/vendor/github.com/hashicorp/go-multierror/.travis.yml
new file mode 100644
index 0000000..304a835
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/.travis.yml
@@ -0,0 +1,12 @@
1sudo: false
2
3language: go
4
5go:
6 - 1.x
7
8branches:
9 only:
10 - master
11
12script: make test testrace
diff --git a/vendor/github.com/hashicorp/go-multierror/Makefile b/vendor/github.com/hashicorp/go-multierror/Makefile
new file mode 100644
index 0000000..b97cd6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/Makefile
@@ -0,0 +1,31 @@
1TEST?=./...
2
3default: test
4
5# test runs the test suite and vets the code.
6test: generate
7 @echo "==> Running tests..."
8 @go list $(TEST) \
9 | grep -v "/vendor/" \
10 | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS}
11
12# testrace runs the race checker
13testrace: generate
14 @echo "==> Running tests (race)..."
15 @go list $(TEST) \
16 | grep -v "/vendor/" \
17 | xargs -n1 go test -timeout=60s -race ${TESTARGS}
18
19# updatedeps installs all the dependencies needed to run and build.
20updatedeps:
21 @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'"
22
23# generate runs `go generate` to build the dynamically generated source files.
24generate:
25 @echo "==> Generating..."
26 @find . -type f -name '.DS_Store' -delete
27 @go list ./... \
28 | grep -v "/vendor/" \
29 | xargs -n1 go generate
30
31.PHONY: default test testrace updatedeps generate
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
index e81be50..ead5830 100644
--- a/vendor/github.com/hashicorp/go-multierror/README.md
+++ b/vendor/github.com/hashicorp/go-multierror/README.md
@@ -1,5 +1,11 @@
1# go-multierror 1# go-multierror
2 2
3[![Build Status](http://img.shields.io/travis/hashicorp/go-multierror.svg?style=flat-square)][travis]
4[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
5
6[travis]: https://travis-ci.org/hashicorp/go-multierror
7[godocs]: https://godoc.org/github.com/hashicorp/go-multierror
8
3`go-multierror` is a package for Go that provides a mechanism for 9`go-multierror` is a package for Go that provides a mechanism for
4representing a list of `error` values as a single `error`. 10representing a list of `error` values as a single `error`.
5 11
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
index 00afa9b..775b6e7 100644
--- a/vendor/github.com/hashicorp/go-multierror/append.go
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -18,9 +18,13 @@ func Append(err error, errs ...error) *Error {
18 for _, e := range errs { 18 for _, e := range errs {
19 switch e := e.(type) { 19 switch e := e.(type) {
20 case *Error: 20 case *Error:
21 err.Errors = append(err.Errors, e.Errors...) 21 if e != nil {
22 err.Errors = append(err.Errors, e.Errors...)
23 }
22 default: 24 default:
23 err.Errors = append(err.Errors, e) 25 if e != nil {
26 err.Errors = append(err.Errors, e)
27 }
24 } 28 }
25 } 29 }
26 30
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
index bb65a12..47f13c4 100644
--- a/vendor/github.com/hashicorp/go-multierror/format.go
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -12,12 +12,16 @@ type ErrorFormatFunc func([]error) string
12// ListFormatFunc is a basic formatter that outputs the number of errors 12// ListFormatFunc is a basic formatter that outputs the number of errors
13// that occurred along with a bullet point list of the errors. 13// that occurred along with a bullet point list of the errors.
14func ListFormatFunc(es []error) string { 14func ListFormatFunc(es []error) string {
15 if len(es) == 1 {
16 return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
17 }
18
15 points := make([]string, len(es)) 19 points := make([]string, len(es))
16 for i, err := range es { 20 for i, err := range es {
17 points[i] = fmt.Sprintf("* %s", err) 21 points[i] = fmt.Sprintf("* %s", err)
18 } 22 }
19 23
20 return fmt.Sprintf( 24 return fmt.Sprintf(
21 "%d error(s) occurred:\n\n%s", 25 "%d errors occurred:\n\t%s\n\n",
22 len(es), strings.Join(points, "\n")) 26 len(es), strings.Join(points, "\n\t"))
23} 27}
diff --git a/vendor/github.com/hashicorp/go-multierror/go.mod b/vendor/github.com/hashicorp/go-multierror/go.mod
new file mode 100644
index 0000000..2534331
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/go.mod
@@ -0,0 +1,3 @@
1module github.com/hashicorp/go-multierror
2
3require github.com/hashicorp/errwrap v1.0.0
diff --git a/vendor/github.com/hashicorp/go-multierror/go.sum b/vendor/github.com/hashicorp/go-multierror/go.sum
new file mode 100644
index 0000000..85b1f8f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/go.sum
@@ -0,0 +1,4 @@
1github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce h1:prjrVgOk2Yg6w+PflHoszQNLTUh4kaByUcEWM/9uin4=
2github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
3github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
4github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
index 2ea0827..89b1422 100644
--- a/vendor/github.com/hashicorp/go-multierror/multierror.go
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -40,11 +40,11 @@ func (e *Error) GoString() string {
40} 40}
41 41
42// WrappedErrors returns the list of errors that this Error is wrapping. 42// WrappedErrors returns the list of errors that this Error is wrapping.
43// It is an implementatin of the errwrap.Wrapper interface so that 43// It is an implementation of the errwrap.Wrapper interface so that
44// multierror.Error can be used with that library. 44// multierror.Error can be used with that library.
45// 45//
46// This method is not safe to be called concurrently and is no different 46// This method is not safe to be called concurrently and is no different
47// than accessing the Errors field directly. It is implementd only to 47// than accessing the Errors field directly. It is implemented only to
48// satisfy the errwrap.Wrapper interface. 48// satisfy the errwrap.Wrapper interface.
49func (e *Error) WrappedErrors() []error { 49func (e *Error) WrappedErrors() []error {
50 return e.Errors 50 return e.Errors
diff --git a/vendor/github.com/hashicorp/go-multierror/sort.go b/vendor/github.com/hashicorp/go-multierror/sort.go
new file mode 100644
index 0000000..fecb14e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/sort.go
@@ -0,0 +1,16 @@
1package multierror
2
3// Len implements sort.Interface function for length
4func (err Error) Len() int {
5 return len(err.Errors)
6}
7
8// Swap implements sort.Interface function for swapping elements
9func (err Error) Swap(i, j int) {
10 err.Errors[i], err.Errors[j] = err.Errors[j], err.Errors[i]
11}
12
13// Less implements sort.Interface function for determining order
14func (err Error) Less(i, j int) bool {
15 return err.Errors[i].Error() < err.Errors[j].Error()
16}
diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md
index e4558db..fe305ad 100644
--- a/vendor/github.com/hashicorp/go-plugin/README.md
+++ b/vendor/github.com/hashicorp/go-plugin/README.md
@@ -109,7 +109,7 @@ high-level steps that must be done. Examples are available in the
109 1. Choose the interface(s) you want to expose for plugins. 109 1. Choose the interface(s) you want to expose for plugins.
110 110
111 2. For each interface, implement an implementation of that interface 111 2. For each interface, implement an implementation of that interface
112 that communicates over a `net/rpc` connection or other a 112 that communicates over a `net/rpc` connection or over a
113 [gRPC](http://www.grpc.io) connection or both. You'll have to implement 113 [gRPC](http://www.grpc.io) connection or both. You'll have to implement
114 both a client and server implementation. 114 both a client and server implementation.
115 115
@@ -150,19 +150,19 @@ user experience.
150 150
151When we started using plugins (late 2012, early 2013), plugins over RPC 151When we started using plugins (late 2012, early 2013), plugins over RPC
152were the only option since Go didn't support dynamic library loading. Today, 152were the only option since Go didn't support dynamic library loading. Today,
153Go still doesn't support dynamic library loading, but they do intend to. 153Go supports the [plugin](https://golang.org/pkg/plugin/) standard library with
154Since 2012, our plugin system has stabilized from millions of users using it, 154a number of limitations. Since 2012, our plugin system has stabilized
155and has many benefits we've come to value greatly. 155from tens of millions of users using it, and has many benefits we've come to
156 156value greatly.
157For example, we intend to use this plugin system in 157
158[Vault](https://www.vaultproject.io), and dynamic library loading will 158For example, we use this plugin system in
159simply never be acceptable in Vault for security reasons. That is an extreme 159[Vault](https://www.vaultproject.io) where dynamic library loading is
160not acceptable for security reasons. That is an extreme
160example, but we believe our library system has more upsides than downsides 161example, but we believe our library system has more upsides than downsides
161over dynamic library loading and since we've had it built and tested for years, 162over dynamic library loading and since we've had it built and tested for years,
162we'll likely continue to use it. 163we'll continue to use it.
163 164
164Shared libraries have one major advantage over our system which is much 165Shared libraries have one major advantage over our system which is much
165higher performance. In real world scenarios across our various tools, 166higher performance. In real world scenarios across our various tools,
166we've never required any more performance out of our plugin system and it 167we've never required any more performance out of our plugin system and it
167has seen very high throughput, so this isn't a concern for us at the moment. 168has seen very high throughput, so this isn't a concern for us at the moment.
168
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
index b3e3b78..679e10a 100644
--- a/vendor/github.com/hashicorp/go-plugin/client.go
+++ b/vendor/github.com/hashicorp/go-plugin/client.go
@@ -5,12 +5,13 @@ import (
5 "context" 5 "context"
6 "crypto/subtle" 6 "crypto/subtle"
7 "crypto/tls" 7 "crypto/tls"
8 "crypto/x509"
9 "encoding/base64"
8 "errors" 10 "errors"
9 "fmt" 11 "fmt"
10 "hash" 12 "hash"
11 "io" 13 "io"
12 "io/ioutil" 14 "io/ioutil"
13 "log"
14 "net" 15 "net"
15 "os" 16 "os"
16 "os/exec" 17 "os/exec"
@@ -20,7 +21,6 @@ import (
20 "sync" 21 "sync"
21 "sync/atomic" 22 "sync/atomic"
22 "time" 23 "time"
23 "unicode"
24 24
25 hclog "github.com/hashicorp/go-hclog" 25 hclog "github.com/hashicorp/go-hclog"
26) 26)
@@ -71,16 +71,31 @@ var (
71// 71//
72// See NewClient and ClientConfig for using a Client. 72// See NewClient and ClientConfig for using a Client.
73type Client struct { 73type Client struct {
74 config *ClientConfig 74 config *ClientConfig
75 exited bool 75 exited bool
76 doneLogging chan struct{} 76 l sync.Mutex
77 l sync.Mutex 77 address net.Addr
78 address net.Addr 78 process *os.Process
79 process *os.Process 79 client ClientProtocol
80 client ClientProtocol 80 protocol Protocol
81 protocol Protocol 81 logger hclog.Logger
82 logger hclog.Logger 82 doneCtx context.Context
83 doneCtx context.Context 83 ctxCancel context.CancelFunc
84 negotiatedVersion int
85
86 // clientWaitGroup is used to manage the lifecycle of the plugin management
87 // goroutines.
88 clientWaitGroup sync.WaitGroup
89
90 // processKilled is used for testing only, to flag when the process was
91 // forcefully killed.
92 processKilled bool
93}
94
95// NegotiatedVersion returns the protocol version negotiated with the server.
96// This is only valid after Start() is called.
97func (c *Client) NegotiatedVersion() int {
98 return c.negotiatedVersion
84} 99}
85 100
86// ClientConfig is the configuration used to initialize a new 101// ClientConfig is the configuration used to initialize a new
@@ -91,7 +106,13 @@ type ClientConfig struct {
91 HandshakeConfig 106 HandshakeConfig
92 107
93 // Plugins are the plugins that can be consumed. 108 // Plugins are the plugins that can be consumed.
94 Plugins map[string]Plugin 109 // The implied version of this PluginSet is the Handshake.ProtocolVersion.
110 Plugins PluginSet
111
112 // VersionedPlugins is a map of PluginSets for specific protocol versions.
113 // These can be used to negotiate a compatible version between client and
114 // server. If this is set, Handshake.ProtocolVersion is not required.
115 VersionedPlugins map[int]PluginSet
95 116
96 // One of the following must be set, but not both. 117 // One of the following must be set, but not both.
97 // 118 //
@@ -158,6 +179,29 @@ type ClientConfig struct {
158 // Logger is the logger that the client will used. If none is provided, 179 // Logger is the logger that the client will used. If none is provided,
159 // it will default to hclog's default logger. 180 // it will default to hclog's default logger.
160 Logger hclog.Logger 181 Logger hclog.Logger
182
183 // AutoMTLS has the client and server automatically negotiate mTLS for
184 // transport authentication. This ensures that only the original client will
185 // be allowed to connect to the server, and all other connections will be
186 // rejected. The client will also refuse to connect to any server that isn't
187 // the original instance started by the client.
188 //
189 // In this mode of operation, the client generates a one-time use tls
190 // certificate, sends the public x.509 certificate to the new server, and
191 // the server generates a one-time use tls certificate, and sends the public
192 // x.509 certificate back to the client. These are used to authenticate all
193 // rpc connections between the client and server.
194 //
195 // Setting AutoMTLS to true implies that the server must support the
196 // protocol, and correctly negotiate the tls certificates, or a connection
197 // failure will result.
198 //
199 // The client should not set TLSConfig, nor should the server set a
200 // TLSProvider, because AutoMTLS implies that a new certificate and tls
201 // configuration will be generated at startup.
202 //
203 // You cannot Reattach to a server with this option enabled.
204 AutoMTLS bool
161} 205}
162 206
163// ReattachConfig is used to configure a client to reattach to an 207// ReattachConfig is used to configure a client to reattach to an
@@ -234,7 +278,6 @@ func CleanupClients() {
234 } 278 }
235 managedClientsLock.Unlock() 279 managedClientsLock.Unlock()
236 280
237 log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...")
238 wg.Wait() 281 wg.Wait()
239} 282}
240 283
@@ -333,6 +376,14 @@ func (c *Client) Exited() bool {
333 return c.exited 376 return c.exited
334} 377}
335 378
379// killed is used in tests to check if a process failed to exit gracefully, and
380// needed to be killed.
381func (c *Client) killed() bool {
382 c.l.Lock()
383 defer c.l.Unlock()
384 return c.processKilled
385}
386
336// End the executing subprocess (if it is running) and perform any cleanup 387// End the executing subprocess (if it is running) and perform any cleanup
337// tasks necessary such as capturing any remaining logs and so on. 388// tasks necessary such as capturing any remaining logs and so on.
338// 389//
@@ -344,14 +395,24 @@ func (c *Client) Kill() {
344 c.l.Lock() 395 c.l.Lock()
345 process := c.process 396 process := c.process
346 addr := c.address 397 addr := c.address
347 doneCh := c.doneLogging
348 c.l.Unlock() 398 c.l.Unlock()
349 399
350 // If there is no process, we never started anything. Nothing to kill. 400 // If there is no process, there is nothing to kill.
351 if process == nil { 401 if process == nil {
352 return 402 return
353 } 403 }
354 404
405 defer func() {
406 // Wait for the all client goroutines to finish.
407 c.clientWaitGroup.Wait()
408
409 // Make sure there is no reference to the old process after it has been
410 // killed.
411 c.l.Lock()
412 c.process = nil
413 c.l.Unlock()
414 }()
415
355 // We need to check for address here. It is possible that the plugin 416 // We need to check for address here. It is possible that the plugin
356 // started (process != nil) but has no address (addr == nil) if the 417 // started (process != nil) but has no address (addr == nil) if the
357 // plugin failed at startup. If we do have an address, we need to close 418 // plugin failed at startup. If we do have an address, we need to close
@@ -372,6 +433,8 @@ func (c *Client) Kill() {
372 // kill in a moment anyways. 433 // kill in a moment anyways.
373 c.logger.Warn("error closing client during Kill", "err", err) 434 c.logger.Warn("error closing client during Kill", "err", err)
374 } 435 }
436 } else {
437 c.logger.Error("client", "error", err)
375 } 438 }
376 } 439 }
377 440
@@ -380,17 +443,20 @@ func (c *Client) Kill() {
380 // doneCh which would be closed if the process exits. 443 // doneCh which would be closed if the process exits.
381 if graceful { 444 if graceful {
382 select { 445 select {
383 case <-doneCh: 446 case <-c.doneCtx.Done():
447 c.logger.Debug("plugin exited")
384 return 448 return
385 case <-time.After(250 * time.Millisecond): 449 case <-time.After(2 * time.Second):
386 } 450 }
387 } 451 }
388 452
389 // If graceful exiting failed, just kill it 453 // If graceful exiting failed, just kill it
454 c.logger.Warn("plugin failed to exit gracefully")
390 process.Kill() 455 process.Kill()
391 456
392 // Wait for the client to finish logging so we have a complete log 457 c.l.Lock()
393 <-doneCh 458 c.processKilled = true
459 c.l.Unlock()
394} 460}
395 461
396// Starts the underlying subprocess, communicating with it to negotiate 462// Starts the underlying subprocess, communicating with it to negotiate
@@ -409,7 +475,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
409 475
410 // If one of cmd or reattach isn't set, then it is an error. We wrap 476 // If one of cmd or reattach isn't set, then it is an error. We wrap
411 // this in a {} for scoping reasons, and hopeful that the escape 477 // this in a {} for scoping reasons, and hopeful that the escape
412 // analysis will pop the stock here. 478 // analysis will pop the stack here.
413 { 479 {
414 cmdSet := c.config.Cmd != nil 480 cmdSet := c.config.Cmd != nil
415 attachSet := c.config.Reattach != nil 481 attachSet := c.config.Reattach != nil
@@ -423,77 +489,49 @@ func (c *Client) Start() (addr net.Addr, err error) {
423 } 489 }
424 } 490 }
425 491
426 // Create the logging channel for when we kill
427 c.doneLogging = make(chan struct{})
428 // Create a context for when we kill
429 var ctxCancel context.CancelFunc
430 c.doneCtx, ctxCancel = context.WithCancel(context.Background())
431
432 if c.config.Reattach != nil { 492 if c.config.Reattach != nil {
433 // Verify the process still exists. If not, then it is an error 493 return c.reattach()
434 p, err := os.FindProcess(c.config.Reattach.Pid) 494 }
435 if err != nil {
436 return nil, err
437 }
438 495
439 // Attempt to connect to the addr since on Unix systems FindProcess 496 if c.config.VersionedPlugins == nil {
440 // doesn't actually return an error if it can't find the process. 497 c.config.VersionedPlugins = make(map[int]PluginSet)
441 conn, err := net.Dial( 498 }
442 c.config.Reattach.Addr.Network(),
443 c.config.Reattach.Addr.String())
444 if err != nil {
445 p.Kill()
446 return nil, ErrProcessNotFound
447 }
448 conn.Close()
449
450 // Goroutine to mark exit status
451 go func(pid int) {
452 // Wait for the process to die
453 pidWait(pid)
454
455 // Log so we can see it
456 c.logger.Debug("reattached plugin process exited")
457
458 // Mark it
459 c.l.Lock()
460 defer c.l.Unlock()
461 c.exited = true
462
463 // Close the logging channel since that doesn't work on reattach
464 close(c.doneLogging)
465
466 // Cancel the context
467 ctxCancel()
468 }(p.Pid)
469
470 // Set the address and process
471 c.address = c.config.Reattach.Addr
472 c.process = p
473 c.protocol = c.config.Reattach.Protocol
474 if c.protocol == "" {
475 // Default the protocol to net/rpc for backwards compatibility
476 c.protocol = ProtocolNetRPC
477 }
478 499
479 return c.address, nil 500 // handle all plugins as versioned, using the handshake config as the default.
501 version := int(c.config.ProtocolVersion)
502
503 // Make sure we're not overwriting a real version 0. If ProtocolVersion was
504 // non-zero, then we have to just assume the user made sure that
505 // VersionedPlugins doesn't conflict.
506 if _, ok := c.config.VersionedPlugins[version]; !ok && c.config.Plugins != nil {
507 c.config.VersionedPlugins[version] = c.config.Plugins
508 }
509
510 var versionStrings []string
511 for v := range c.config.VersionedPlugins {
512 versionStrings = append(versionStrings, strconv.Itoa(v))
480 } 513 }
481 514
482 env := []string{ 515 env := []string{
483 fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue), 516 fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
484 fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort), 517 fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
485 fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), 518 fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
519 fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")),
486 } 520 }
487 521
488 stdout_r, stdout_w := io.Pipe()
489 stderr_r, stderr_w := io.Pipe()
490
491 cmd := c.config.Cmd 522 cmd := c.config.Cmd
492 cmd.Env = append(cmd.Env, os.Environ()...) 523 cmd.Env = append(cmd.Env, os.Environ()...)
493 cmd.Env = append(cmd.Env, env...) 524 cmd.Env = append(cmd.Env, env...)
494 cmd.Stdin = os.Stdin 525 cmd.Stdin = os.Stdin
495 cmd.Stderr = stderr_w 526
496 cmd.Stdout = stdout_w 527 cmdStdout, err := cmd.StdoutPipe()
528 if err != nil {
529 return nil, err
530 }
531 cmdStderr, err := cmd.StderrPipe()
532 if err != nil {
533 return nil, err
534 }
497 535
498 if c.config.SecureConfig != nil { 536 if c.config.SecureConfig != nil {
499 if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil { 537 if ok, err := c.config.SecureConfig.Check(cmd.Path); err != nil {
@@ -503,6 +541,29 @@ func (c *Client) Start() (addr net.Addr, err error) {
503 } 541 }
504 } 542 }
505 543
544 // Setup a temporary certificate for client/server mtls, and send the public
545 // certificate to the plugin.
546 if c.config.AutoMTLS {
547 c.logger.Info("configuring client automatic mTLS")
548 certPEM, keyPEM, err := generateCert()
549 if err != nil {
550 c.logger.Error("failed to generate client certificate", "error", err)
551 return nil, err
552 }
553 cert, err := tls.X509KeyPair(certPEM, keyPEM)
554 if err != nil {
555 c.logger.Error("failed to parse client certificate", "error", err)
556 return nil, err
557 }
558
559 cmd.Env = append(cmd.Env, fmt.Sprintf("PLUGIN_CLIENT_CERT=%s", certPEM))
560
561 c.config.TLSConfig = &tls.Config{
562 Certificates: []tls.Certificate{cert},
563 ServerName: "localhost",
564 }
565 }
566
506 c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args) 567 c.logger.Debug("starting plugin", "path", cmd.Path, "args", cmd.Args)
507 err = cmd.Start() 568 err = cmd.Start()
508 if err != nil { 569 if err != nil {
@@ -511,6 +572,7 @@ func (c *Client) Start() (addr net.Addr, err error) {
511 572
512 // Set the process 573 // Set the process
513 c.process = cmd.Process 574 c.process = cmd.Process
575 c.logger.Debug("plugin started", "path", cmd.Path, "pid", c.process.Pid)
514 576
515 // Make sure the command is properly cleaned up if there is an error 577 // Make sure the command is properly cleaned up if there is an error
516 defer func() { 578 defer func() {
@@ -525,27 +587,37 @@ func (c *Client) Start() (addr net.Addr, err error) {
525 } 587 }
526 }() 588 }()
527 589
528 // Start goroutine to wait for process to exit 590 // Create a context for when we kill
529 exitCh := make(chan struct{}) 591 c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
592
593 c.clientWaitGroup.Add(1)
530 go func() { 594 go func() {
531 // Make sure we close the write end of our stderr/stdout so 595 // ensure the context is cancelled when we're done
532 // that the readers send EOF properly. 596 defer c.ctxCancel()
533 defer stderr_w.Close() 597
534 defer stdout_w.Close() 598 defer c.clientWaitGroup.Done()
599
600 // get the cmd info early, since the process information will be removed
601 // in Kill.
602 pid := c.process.Pid
603 path := cmd.Path
535 604
536 // Wait for the command to end. 605 // Wait for the command to end.
537 cmd.Wait() 606 err := cmd.Wait()
607
608 debugMsgArgs := []interface{}{
609 "path", path,
610 "pid", pid,
611 }
612 if err != nil {
613 debugMsgArgs = append(debugMsgArgs,
614 []interface{}{"error", err.Error()}...)
615 }
538 616
539 // Log and make sure to flush the logs write away 617 // Log and make sure to flush the logs write away
540 c.logger.Debug("plugin process exited", "path", cmd.Path) 618 c.logger.Debug("plugin process exited", debugMsgArgs...)
541 os.Stderr.Sync() 619 os.Stderr.Sync()
542 620
543 // Mark that we exited
544 close(exitCh)
545
546 // Cancel the context, marking that we exited
547 ctxCancel()
548
549 // Set that we exited, which takes a lock 621 // Set that we exited, which takes a lock
550 c.l.Lock() 622 c.l.Lock()
551 defer c.l.Unlock() 623 defer c.l.Unlock()
@@ -553,32 +625,33 @@ func (c *Client) Start() (addr net.Addr, err error) {
553 }() 625 }()
554 626
555 // Start goroutine that logs the stderr 627 // Start goroutine that logs the stderr
556 go c.logStderr(stderr_r) 628 c.clientWaitGroup.Add(1)
629 // logStderr calls Done()
630 go c.logStderr(cmdStderr)
557 631
558 // Start a goroutine that is going to be reading the lines 632 // Start a goroutine that is going to be reading the lines
559 // out of stdout 633 // out of stdout
560 linesCh := make(chan []byte) 634 linesCh := make(chan string)
635 c.clientWaitGroup.Add(1)
561 go func() { 636 go func() {
637 defer c.clientWaitGroup.Done()
562 defer close(linesCh) 638 defer close(linesCh)
563 639
564 buf := bufio.NewReader(stdout_r) 640 scanner := bufio.NewScanner(cmdStdout)
565 for { 641 for scanner.Scan() {
566 line, err := buf.ReadBytes('\n') 642 linesCh <- scanner.Text()
567 if line != nil {
568 linesCh <- line
569 }
570
571 if err == io.EOF {
572 return
573 }
574 } 643 }
575 }() 644 }()
576 645
577 // Make sure after we exit we read the lines from stdout forever 646 // Make sure after we exit we read the lines from stdout forever
578 // so they don't block since it is an io.Pipe 647 // so they don't block since it is a pipe.
648 // The scanner goroutine above will close this, but track it with a wait
649 // group for completeness.
650 c.clientWaitGroup.Add(1)
579 defer func() { 651 defer func() {
580 go func() { 652 go func() {
581 for _ = range linesCh { 653 defer c.clientWaitGroup.Done()
654 for range linesCh {
582 } 655 }
583 }() 656 }()
584 }() 657 }()
@@ -591,12 +664,12 @@ func (c *Client) Start() (addr net.Addr, err error) {
591 select { 664 select {
592 case <-timeout: 665 case <-timeout:
593 err = errors.New("timeout while waiting for plugin to start") 666 err = errors.New("timeout while waiting for plugin to start")
594 case <-exitCh: 667 case <-c.doneCtx.Done():
595 err = errors.New("plugin exited before we could connect") 668 err = errors.New("plugin exited before we could connect")
596 case lineBytes := <-linesCh: 669 case line := <-linesCh:
597 // Trim the line and split by "|" in order to get the parts of 670 // Trim the line and split by "|" in order to get the parts of
598 // the output. 671 // the output.
599 line := strings.TrimSpace(string(lineBytes)) 672 line = strings.TrimSpace(line)
600 parts := strings.SplitN(line, "|", 6) 673 parts := strings.SplitN(line, "|", 6)
601 if len(parts) < 4 { 674 if len(parts) < 4 {
602 err = fmt.Errorf( 675 err = fmt.Errorf(
@@ -624,20 +697,18 @@ func (c *Client) Start() (addr net.Addr, err error) {
624 } 697 }
625 } 698 }
626 699
627 // Parse the protocol version 700 // Test the API version
628 var protocol int64 701 version, pluginSet, err := c.checkProtoVersion(parts[1])
629 protocol, err = strconv.ParseInt(parts[1], 10, 0)
630 if err != nil { 702 if err != nil {
631 err = fmt.Errorf("Error parsing protocol version: %s", err) 703 return addr, err
632 return
633 } 704 }
634 705
635 // Test the API version 706 // set the Plugins value to the compatible set, so the version
636 if uint(protocol) != c.config.ProtocolVersion { 707 // doesn't need to be passed through to the ClientProtocol
637 err = fmt.Errorf("Incompatible API version with plugin. "+ 708 // implementation.
638 "Plugin version: %s, Core version: %d", parts[1], c.config.ProtocolVersion) 709 c.config.Plugins = pluginSet
639 return 710 c.negotiatedVersion = version
640 } 711 c.logger.Debug("using plugin", "version", version)
641 712
642 switch parts[2] { 713 switch parts[2] {
643 case "tcp": 714 case "tcp":
@@ -665,15 +736,125 @@ func (c *Client) Start() (addr net.Addr, err error) {
665 if !found { 736 if !found {
666 err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v", 737 err = fmt.Errorf("Unsupported plugin protocol %q. Supported: %v",
667 c.protocol, c.config.AllowedProtocols) 738 c.protocol, c.config.AllowedProtocols)
668 return 739 return addr, err
669 } 740 }
670 741
742 // See if we have a TLS certificate from the server.
743 // Checking if the length is > 50 rules out catching the unused "extra"
744 // data returned from some older implementations.
745 if len(parts) >= 6 && len(parts[5]) > 50 {
746 err := c.loadServerCert(parts[5])
747 if err != nil {
748 return nil, fmt.Errorf("error parsing server cert: %s", err)
749 }
750 }
671 } 751 }
672 752
673 c.address = addr 753 c.address = addr
674 return 754 return
675} 755}
676 756
757// loadServerCert is used by AutoMTLS to read an x.509 cert returned by the
758// server, and load it as the RootCA for the client TLSConfig.
759func (c *Client) loadServerCert(cert string) error {
760 certPool := x509.NewCertPool()
761
762 asn1, err := base64.RawStdEncoding.DecodeString(cert)
763 if err != nil {
764 return err
765 }
766
767 x509Cert, err := x509.ParseCertificate([]byte(asn1))
768 if err != nil {
769 return err
770 }
771
772 certPool.AddCert(x509Cert)
773
774 c.config.TLSConfig.RootCAs = certPool
775 return nil
776}
777
778func (c *Client) reattach() (net.Addr, error) {
779 // Verify the process still exists. If not, then it is an error
780 p, err := os.FindProcess(c.config.Reattach.Pid)
781 if err != nil {
782 return nil, err
783 }
784
785 // Attempt to connect to the addr since on Unix systems FindProcess
786 // doesn't actually return an error if it can't find the process.
787 conn, err := net.Dial(
788 c.config.Reattach.Addr.Network(),
789 c.config.Reattach.Addr.String())
790 if err != nil {
791 p.Kill()
792 return nil, ErrProcessNotFound
793 }
794 conn.Close()
795
796 // Create a context for when we kill
797 c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
798
799 c.clientWaitGroup.Add(1)
800 // Goroutine to mark exit status
801 go func(pid int) {
802 defer c.clientWaitGroup.Done()
803
804 // ensure the context is cancelled when we're done
805 defer c.ctxCancel()
806
807 // Wait for the process to die
808 pidWait(pid)
809
810 // Log so we can see it
811 c.logger.Debug("reattached plugin process exited")
812
813 // Mark it
814 c.l.Lock()
815 defer c.l.Unlock()
816 c.exited = true
817 }(p.Pid)
818
819 // Set the address and process
820 c.address = c.config.Reattach.Addr
821 c.process = p
822 c.protocol = c.config.Reattach.Protocol
823 if c.protocol == "" {
824 // Default the protocol to net/rpc for backwards compatibility
825 c.protocol = ProtocolNetRPC
826 }
827
828 return c.address, nil
829}
830
831// checkProtoVersion returns the negotiated version and PluginSet.
832// This returns an error if the server returned an incompatible protocol
833// version, or an invalid handshake response.
834func (c *Client) checkProtoVersion(protoVersion string) (int, PluginSet, error) {
835 serverVersion, err := strconv.Atoi(protoVersion)
836 if err != nil {
837 return 0, nil, fmt.Errorf("Error parsing protocol version %q: %s", protoVersion, err)
838 }
839
840 // record these for the error message
841 var clientVersions []int
842
843 // all versions, including the legacy ProtocolVersion have been added to
844 // the versions set
845 for version, plugins := range c.config.VersionedPlugins {
846 clientVersions = append(clientVersions, version)
847
848 if serverVersion != version {
849 continue
850 }
851 return version, plugins, nil
852 }
853
854 return 0, nil, fmt.Errorf("Incompatible API version with plugin. "+
855 "Plugin version: %d, Client versions: %d", serverVersion, clientVersions)
856}
857
677// ReattachConfig returns the information that must be provided to NewClient 858// ReattachConfig returns the information that must be provided to NewClient
678// to reattach to the plugin process that this client started. This is 859// to reattach to the plugin process that this client started. This is
679// useful for plugins that detach from their parent process. 860// useful for plugins that detach from their parent process.
@@ -751,44 +932,84 @@ func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
751 return conn, nil 932 return conn, nil
752} 933}
753 934
935var stdErrBufferSize = 64 * 1024
936
754func (c *Client) logStderr(r io.Reader) { 937func (c *Client) logStderr(r io.Reader) {
755 bufR := bufio.NewReader(r) 938 defer c.clientWaitGroup.Done()
939 l := c.logger.Named(filepath.Base(c.config.Cmd.Path))
940
941 reader := bufio.NewReaderSize(r, stdErrBufferSize)
942 // continuation indicates the previous line was a prefix
943 continuation := false
944
756 for { 945 for {
757 line, err := bufR.ReadString('\n') 946 line, isPrefix, err := reader.ReadLine()
758 if line != "" { 947 switch {
759 c.config.Stderr.Write([]byte(line)) 948 case err == io.EOF:
760 line = strings.TrimRightFunc(line, unicode.IsSpace) 949 return
950 case err != nil:
951 l.Error("reading plugin stderr", "error", err)
952 return
953 }
761 954
762 l := c.logger.Named(filepath.Base(c.config.Cmd.Path)) 955 c.config.Stderr.Write(line)
763 956
764 entry, err := parseJSON(line) 957 // The line was longer than our max token size, so it's likely
765 // If output is not JSON format, print directly to Debug 958 // incomplete and won't unmarshal.
766 if err != nil { 959 if isPrefix || continuation {
767 l.Debug(line) 960 l.Debug(string(line))
768 } else { 961
769 out := flattenKVPairs(entry.KVPairs) 962 // if we're finishing a continued line, add the newline back in
770 963 if !isPrefix {
771 l = l.With("timestamp", entry.Timestamp.Format(hclog.TimeFormat)) 964 c.config.Stderr.Write([]byte{'\n'})
772 switch hclog.LevelFromString(entry.Level) {
773 case hclog.Trace:
774 l.Trace(entry.Message, out...)
775 case hclog.Debug:
776 l.Debug(entry.Message, out...)
777 case hclog.Info:
778 l.Info(entry.Message, out...)
779 case hclog.Warn:
780 l.Warn(entry.Message, out...)
781 case hclog.Error:
782 l.Error(entry.Message, out...)
783 }
784 } 965 }
966
967 continuation = isPrefix
968 continue
785 } 969 }
786 970
787 if err == io.EOF { 971 c.config.Stderr.Write([]byte{'\n'})
788 break 972
973 entry, err := parseJSON(line)
974 // If output is not JSON format, print directly to Debug
975 if err != nil {
976 // Attempt to infer the desired log level from the commonly used
977 // string prefixes
978 switch line := string(line); {
979 case strings.HasPrefix(line, "[TRACE]"):
980 l.Trace(line)
981 case strings.HasPrefix(line, "[DEBUG]"):
982 l.Debug(line)
983 case strings.HasPrefix(line, "[INFO]"):
984 l.Info(line)
985 case strings.HasPrefix(line, "[WARN]"):
986 l.Warn(line)
987 case strings.HasPrefix(line, "[ERROR]"):
988 l.Error(line)
989 default:
990 l.Debug(line)
991 }
992 } else {
993 out := flattenKVPairs(entry.KVPairs)
994
995 out = append(out, "timestamp", entry.Timestamp.Format(hclog.TimeFormat))
996 switch hclog.LevelFromString(entry.Level) {
997 case hclog.Trace:
998 l.Trace(entry.Message, out...)
999 case hclog.Debug:
1000 l.Debug(entry.Message, out...)
1001 case hclog.Info:
1002 l.Info(entry.Message, out...)
1003 case hclog.Warn:
1004 l.Warn(entry.Message, out...)
1005 case hclog.Error:
1006 l.Error(entry.Message, out...)
1007 default:
1008 // if there was no log level, it's likely this is unexpected
1009 // json from something other than hclog, and we should output
1010 // it verbatim.
1011 l.Debug(string(line))
1012 }
789 } 1013 }
790 } 1014 }
791
792 // Flag that we've completed logging for others
793 close(c.doneLogging)
794} 1015}
diff --git a/vendor/github.com/hashicorp/go-plugin/go.mod b/vendor/github.com/hashicorp/go-plugin/go.mod
new file mode 100644
index 0000000..f3ddf44
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/go.mod
@@ -0,0 +1,17 @@
1module github.com/hashicorp/go-plugin
2
3require (
4 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
5 github.com/golang/protobuf v1.2.0
6 github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd
7 github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb
8 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77
9 github.com/oklog/run v1.0.0
10 github.com/stretchr/testify v1.3.0 // indirect
11 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d
12 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect
13 golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc // indirect
14 golang.org/x/text v0.3.0 // indirect
15 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 // indirect
16 google.golang.org/grpc v1.14.0
17)
diff --git a/vendor/github.com/hashicorp/go-plugin/go.sum b/vendor/github.com/hashicorp/go-plugin/go.sum
new file mode 100644
index 0000000..21b14e9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/go.sum
@@ -0,0 +1,31 @@
1github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
2github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
3github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
4github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
5github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
6github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
7github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd h1:rNuUHR+CvK1IS89MMtcF0EpcVMZtjKfPRp4MEmt/aTs=
8github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
9github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
10github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
11github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77 h1:7GoSOOW2jpsfkntVKaS2rAr1TJqfcxotyaUcuxoZSzg=
12github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
13github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
14github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
15github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
16github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
17github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
18github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
19github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
20golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I=
21golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
22golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
23golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
24golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc h1:WiYx1rIFmx8c0mXAFtv5D/mHyKe1+jmuP7PViuwqwuQ=
25golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
26golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
27golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
28google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
29google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
30google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo=
31google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
index 49fd21c..daf142d 100644
--- a/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_broker.go
@@ -11,6 +11,8 @@ import (
11 "sync/atomic" 11 "sync/atomic"
12 "time" 12 "time"
13 13
14 "github.com/hashicorp/go-plugin/internal/plugin"
15
14 "github.com/oklog/run" 16 "github.com/oklog/run"
15 "google.golang.org/grpc" 17 "google.golang.org/grpc"
16 "google.golang.org/grpc/credentials" 18 "google.golang.org/grpc/credentials"
@@ -19,14 +21,14 @@ import (
19// streamer interface is used in the broker to send/receive connection 21// streamer interface is used in the broker to send/receive connection
20// information. 22// information.
21type streamer interface { 23type streamer interface {
22 Send(*ConnInfo) error 24 Send(*plugin.ConnInfo) error
23 Recv() (*ConnInfo, error) 25 Recv() (*plugin.ConnInfo, error)
24 Close() 26 Close()
25} 27}
26 28
27// sendErr is used to pass errors back during a send. 29// sendErr is used to pass errors back during a send.
28type sendErr struct { 30type sendErr struct {
29 i *ConnInfo 31 i *plugin.ConnInfo
30 ch chan error 32 ch chan error
31} 33}
32 34
@@ -38,7 +40,7 @@ type gRPCBrokerServer struct {
38 send chan *sendErr 40 send chan *sendErr
39 41
40 // recv is used to receive connection info from the gRPC stream. 42 // recv is used to receive connection info from the gRPC stream.
41 recv chan *ConnInfo 43 recv chan *plugin.ConnInfo
42 44
43 // quit closes down the stream. 45 // quit closes down the stream.
44 quit chan struct{} 46 quit chan struct{}
@@ -50,7 +52,7 @@ type gRPCBrokerServer struct {
50func newGRPCBrokerServer() *gRPCBrokerServer { 52func newGRPCBrokerServer() *gRPCBrokerServer {
51 return &gRPCBrokerServer{ 53 return &gRPCBrokerServer{
52 send: make(chan *sendErr), 54 send: make(chan *sendErr),
53 recv: make(chan *ConnInfo), 55 recv: make(chan *plugin.ConnInfo),
54 quit: make(chan struct{}), 56 quit: make(chan struct{}),
55 } 57 }
56} 58}
@@ -58,7 +60,7 @@ func newGRPCBrokerServer() *gRPCBrokerServer {
58// StartStream implements the GRPCBrokerServer interface and will block until 60// StartStream implements the GRPCBrokerServer interface and will block until
59// the quit channel is closed or the context reports Done. The stream will pass 61// the quit channel is closed or the context reports Done. The stream will pass
60// connection information to/from the client. 62// connection information to/from the client.
61func (s *gRPCBrokerServer) StartStream(stream GRPCBroker_StartStreamServer) error { 63func (s *gRPCBrokerServer) StartStream(stream plugin.GRPCBroker_StartStreamServer) error {
62 doneCh := stream.Context().Done() 64 doneCh := stream.Context().Done()
63 defer s.Close() 65 defer s.Close()
64 66
@@ -97,7 +99,7 @@ func (s *gRPCBrokerServer) StartStream(stream GRPCBroker_StartStreamServer) erro
97 99
98// Send is used by the GRPCBroker to pass connection information into the stream 100// Send is used by the GRPCBroker to pass connection information into the stream
99// to the client. 101// to the client.
100func (s *gRPCBrokerServer) Send(i *ConnInfo) error { 102func (s *gRPCBrokerServer) Send(i *plugin.ConnInfo) error {
101 ch := make(chan error) 103 ch := make(chan error)
102 defer close(ch) 104 defer close(ch)
103 105
@@ -115,7 +117,7 @@ func (s *gRPCBrokerServer) Send(i *ConnInfo) error {
115 117
116// Recv is used by the GRPCBroker to pass connection information that has been 118// Recv is used by the GRPCBroker to pass connection information that has been
117// sent from the client from the stream to the broker. 119// sent from the client from the stream to the broker.
118func (s *gRPCBrokerServer) Recv() (*ConnInfo, error) { 120func (s *gRPCBrokerServer) Recv() (*plugin.ConnInfo, error) {
119 select { 121 select {
120 case <-s.quit: 122 case <-s.quit:
121 return nil, errors.New("broker closed") 123 return nil, errors.New("broker closed")
@@ -136,13 +138,13 @@ func (s *gRPCBrokerServer) Close() {
136// streamer interfaces. 138// streamer interfaces.
137type gRPCBrokerClientImpl struct { 139type gRPCBrokerClientImpl struct {
138 // client is the underlying GRPC client used to make calls to the server. 140 // client is the underlying GRPC client used to make calls to the server.
139 client GRPCBrokerClient 141 client plugin.GRPCBrokerClient
140 142
141 // send is used to send connection info to the gRPC stream. 143 // send is used to send connection info to the gRPC stream.
142 send chan *sendErr 144 send chan *sendErr
143 145
144 // recv is used to receive connection info from the gRPC stream. 146 // recv is used to receive connection info from the gRPC stream.
145 recv chan *ConnInfo 147 recv chan *plugin.ConnInfo
146 148
147 // quit closes down the stream. 149 // quit closes down the stream.
148 quit chan struct{} 150 quit chan struct{}
@@ -153,9 +155,9 @@ type gRPCBrokerClientImpl struct {
153 155
154func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl { 156func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl {
155 return &gRPCBrokerClientImpl{ 157 return &gRPCBrokerClientImpl{
156 client: NewGRPCBrokerClient(conn), 158 client: plugin.NewGRPCBrokerClient(conn),
157 send: make(chan *sendErr), 159 send: make(chan *sendErr),
158 recv: make(chan *ConnInfo), 160 recv: make(chan *plugin.ConnInfo),
159 quit: make(chan struct{}), 161 quit: make(chan struct{}),
160 } 162 }
161} 163}
@@ -207,7 +209,7 @@ func (s *gRPCBrokerClientImpl) StartStream() error {
207 209
208// Send is used by the GRPCBroker to pass connection information into the stream 210// Send is used by the GRPCBroker to pass connection information into the stream
209// to the plugin. 211// to the plugin.
210func (s *gRPCBrokerClientImpl) Send(i *ConnInfo) error { 212func (s *gRPCBrokerClientImpl) Send(i *plugin.ConnInfo) error {
211 ch := make(chan error) 213 ch := make(chan error)
212 defer close(ch) 214 defer close(ch)
213 215
@@ -225,7 +227,7 @@ func (s *gRPCBrokerClientImpl) Send(i *ConnInfo) error {
225 227
226// Recv is used by the GRPCBroker to pass connection information that has been 228// Recv is used by the GRPCBroker to pass connection information that has been
227// sent from the plugin to the broker. 229// sent from the plugin to the broker.
228func (s *gRPCBrokerClientImpl) Recv() (*ConnInfo, error) { 230func (s *gRPCBrokerClientImpl) Recv() (*plugin.ConnInfo, error) {
229 select { 231 select {
230 case <-s.quit: 232 case <-s.quit:
231 return nil, errors.New("broker closed") 233 return nil, errors.New("broker closed")
@@ -266,7 +268,7 @@ type GRPCBroker struct {
266} 268}
267 269
268type gRPCBrokerPending struct { 270type gRPCBrokerPending struct {
269 ch chan *ConnInfo 271 ch chan *plugin.ConnInfo
270 doneCh chan struct{} 272 doneCh chan struct{}
271} 273}
272 274
@@ -288,7 +290,7 @@ func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) {
288 return nil, err 290 return nil, err
289 } 291 }
290 292
291 err = b.streamer.Send(&ConnInfo{ 293 err = b.streamer.Send(&plugin.ConnInfo{
292 ServiceId: id, 294 ServiceId: id,
293 Network: listener.Addr().Network(), 295 Network: listener.Addr().Network(),
294 Address: listener.Addr().String(), 296 Address: listener.Addr().String(),
@@ -363,7 +365,7 @@ func (b *GRPCBroker) Close() error {
363 365
364// Dial opens a connection by ID. 366// Dial opens a connection by ID.
365func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { 367func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) {
366 var c *ConnInfo 368 var c *plugin.ConnInfo
367 369
368 // Open the stream 370 // Open the stream
369 p := b.getStream(id) 371 p := b.getStream(id)
@@ -433,7 +435,7 @@ func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending {
433 } 435 }
434 436
435 m.streams[id] = &gRPCBrokerPending{ 437 m.streams[id] = &gRPCBrokerPending{
436 ch: make(chan *ConnInfo, 1), 438 ch: make(chan *plugin.ConnInfo, 1),
437 doneCh: make(chan struct{}), 439 doneCh: make(chan struct{}),
438 } 440 }
439 return m.streams[id] 441 return m.streams[id]
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/vendor/github.com/hashicorp/go-plugin/grpc_client.go
index 44294d0..294518e 100644
--- a/vendor/github.com/hashicorp/go-plugin/grpc_client.go
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_client.go
@@ -6,6 +6,7 @@ import (
6 "net" 6 "net"
7 "time" 7 "time"
8 8
9 "github.com/hashicorp/go-plugin/internal/plugin"
9 "golang.org/x/net/context" 10 "golang.org/x/net/context"
10 "google.golang.org/grpc" 11 "google.golang.org/grpc"
11 "google.golang.org/grpc/credentials" 12 "google.golang.org/grpc/credentials"
@@ -16,12 +17,9 @@ func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn,
16 // Build dialing options. 17 // Build dialing options.
17 opts := make([]grpc.DialOption, 0, 5) 18 opts := make([]grpc.DialOption, 0, 5)
18 19
19 // We use a custom dialer so that we can connect over unix domain sockets 20 // We use a custom dialer so that we can connect over unix domain sockets.
20 opts = append(opts, grpc.WithDialer(dialer)) 21 opts = append(opts, grpc.WithDialer(dialer))
21 22
22 // go-plugin expects to block the connection
23 opts = append(opts, grpc.WithBlock())
24
25 // Fail right away 23 // Fail right away
26 opts = append(opts, grpc.FailOnNonTempDialError(true)) 24 opts = append(opts, grpc.FailOnNonTempDialError(true))
27 25
@@ -58,12 +56,15 @@ func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) {
58 go broker.Run() 56 go broker.Run()
59 go brokerGRPCClient.StartStream() 57 go brokerGRPCClient.StartStream()
60 58
61 return &GRPCClient{ 59 cl := &GRPCClient{
62 Conn: conn, 60 Conn: conn,
63 Plugins: c.config.Plugins, 61 Plugins: c.config.Plugins,
64 doneCtx: doneCtx, 62 doneCtx: doneCtx,
65 broker: broker, 63 broker: broker,
66 }, nil 64 controller: plugin.NewGRPCControllerClient(conn),
65 }
66
67 return cl, nil
67} 68}
68 69
69// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types. 70// GRPCClient connects to a GRPCServer over gRPC to dispense plugin types.
@@ -73,11 +74,14 @@ type GRPCClient struct {
73 74
74 doneCtx context.Context 75 doneCtx context.Context
75 broker *GRPCBroker 76 broker *GRPCBroker
77
78 controller plugin.GRPCControllerClient
76} 79}
77 80
78// ClientProtocol impl. 81// ClientProtocol impl.
79func (c *GRPCClient) Close() error { 82func (c *GRPCClient) Close() error {
80 c.broker.Close() 83 c.broker.Close()
84 c.controller.Shutdown(c.doneCtx, &plugin.Empty{})
81 return c.Conn.Close() 85 return c.Conn.Close()
82} 86}
83 87
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_controller.go b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go
new file mode 100644
index 0000000..1a8a8e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_controller.go
@@ -0,0 +1,23 @@
1package plugin
2
3import (
4 "context"
5
6 "github.com/hashicorp/go-plugin/internal/plugin"
7)
8
9// GRPCControllerServer handles shutdown calls to terminate the server when the
10// plugin client is closed.
11type grpcControllerServer struct {
12 server *GRPCServer
13}
14
15// Shutdown stops the grpc server. It first will attempt a graceful stop, then a
16// full stop on the server.
17func (s *grpcControllerServer) Shutdown(ctx context.Context, _ *plugin.Empty) (*plugin.Empty, error) {
18 resp := &plugin.Empty{}
19
20 // TODO: figure out why GracefullStop doesn't work.
21 s.server.Stop()
22 return resp, nil
23}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
index 3a72739..d3dbf1c 100644
--- a/vendor/github.com/hashicorp/go-plugin/grpc_server.go
+++ b/vendor/github.com/hashicorp/go-plugin/grpc_server.go
@@ -8,6 +8,8 @@ import (
8 "io" 8 "io"
9 "net" 9 "net"
10 10
11 hclog "github.com/hashicorp/go-hclog"
12 "github.com/hashicorp/go-plugin/internal/plugin"
11 "google.golang.org/grpc" 13 "google.golang.org/grpc"
12 "google.golang.org/grpc/credentials" 14 "google.golang.org/grpc/credentials"
13 "google.golang.org/grpc/health" 15 "google.golang.org/grpc/health"
@@ -52,6 +54,8 @@ type GRPCServer struct {
52 config GRPCServerConfig 54 config GRPCServerConfig
53 server *grpc.Server 55 server *grpc.Server
54 broker *GRPCBroker 56 broker *GRPCBroker
57
58 logger hclog.Logger
55} 59}
56 60
57// ServerProtocol impl. 61// ServerProtocol impl.
@@ -71,10 +75,16 @@ func (s *GRPCServer) Init() error {
71 75
72 // Register the broker service 76 // Register the broker service
73 brokerServer := newGRPCBrokerServer() 77 brokerServer := newGRPCBrokerServer()
74 RegisterGRPCBrokerServer(s.server, brokerServer) 78 plugin.RegisterGRPCBrokerServer(s.server, brokerServer)
75 s.broker = newGRPCBroker(brokerServer, s.TLS) 79 s.broker = newGRPCBroker(brokerServer, s.TLS)
76 go s.broker.Run() 80 go s.broker.Run()
77 81
82 // Register the controller
83 controllerServer := &grpcControllerServer{
84 server: s,
85 }
86 plugin.RegisterGRPCControllerServer(s.server, controllerServer)
87
78 // Register all our plugins onto the gRPC server. 88 // Register all our plugins onto the gRPC server.
79 for k, raw := range s.Plugins { 89 for k, raw := range s.Plugins {
80 p, ok := raw.(GRPCPlugin) 90 p, ok := raw.(GRPCPlugin)
@@ -83,7 +93,7 @@ func (s *GRPCServer) Init() error {
83 } 93 }
84 94
85 if err := p.GRPCServer(s.broker, s.server); err != nil { 95 if err := p.GRPCServer(s.broker, s.server); err != nil {
86 return fmt.Errorf("error registring %q: %s", k, err) 96 return fmt.Errorf("error registering %q: %s", k, err)
87 } 97 }
88 } 98 }
89 99
@@ -117,11 +127,11 @@ func (s *GRPCServer) Config() string {
117} 127}
118 128
119func (s *GRPCServer) Serve(lis net.Listener) { 129func (s *GRPCServer) Serve(lis net.Listener) {
120 // Start serving in a goroutine 130 defer close(s.DoneCh)
121 go s.server.Serve(lis) 131 err := s.server.Serve(lis)
122 132 if err != nil {
123 // Wait until graceful completion 133 s.logger.Error("grpc server", "error", err)
124 <-s.DoneCh 134 }
125} 135}
126 136
127// GRPCServerConfig is the extra configuration passed along for consumers 137// GRPCServerConfig is the extra configuration passed along for consumers
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go
new file mode 100644
index 0000000..aa2fdc8
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go
@@ -0,0 +1,3 @@
1//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto --go_out=plugins=grpc:.
2
3package plugin
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
index d490daf..b6850aa 100644
--- a/vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go
@@ -1,24 +1,14 @@
1// Code generated by protoc-gen-go. DO NOT EDIT. 1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: grpc_broker.proto 2// source: grpc_broker.proto
3 3
4/*
5Package plugin is a generated protocol buffer package.
6
7It is generated from these files:
8 grpc_broker.proto
9
10It has these top-level messages:
11 ConnInfo
12*/
13package plugin 4package plugin
14 5
15import proto "github.com/golang/protobuf/proto"
16import fmt "fmt"
17import math "math"
18
19import ( 6import (
7 fmt "fmt"
8 proto "github.com/golang/protobuf/proto"
20 context "golang.org/x/net/context" 9 context "golang.org/x/net/context"
21 grpc "google.golang.org/grpc" 10 grpc "google.golang.org/grpc"
11 math "math"
22) 12)
23 13
24// Reference imports to suppress errors if they are not otherwise used. 14// Reference imports to suppress errors if they are not otherwise used.
@@ -33,15 +23,38 @@ var _ = math.Inf
33const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package 23const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
34 24
35type ConnInfo struct { 25type ConnInfo struct {
36 ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId" json:"service_id,omitempty"` 26 ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
37 Network string `protobuf:"bytes,2,opt,name=network" json:"network,omitempty"` 27 Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"`
38 Address string `protobuf:"bytes,3,opt,name=address" json:"address,omitempty"` 28 Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
29 XXX_NoUnkeyedLiteral struct{} `json:"-"`
30 XXX_unrecognized []byte `json:"-"`
31 XXX_sizecache int32 `json:"-"`
32}
33
34func (m *ConnInfo) Reset() { *m = ConnInfo{} }
35func (m *ConnInfo) String() string { return proto.CompactTextString(m) }
36func (*ConnInfo) ProtoMessage() {}
37func (*ConnInfo) Descriptor() ([]byte, []int) {
38 return fileDescriptor_802e9beed3ec3b28, []int{0}
39} 39}
40 40
41func (m *ConnInfo) Reset() { *m = ConnInfo{} } 41func (m *ConnInfo) XXX_Unmarshal(b []byte) error {
42func (m *ConnInfo) String() string { return proto.CompactTextString(m) } 42 return xxx_messageInfo_ConnInfo.Unmarshal(m, b)
43func (*ConnInfo) ProtoMessage() {} 43}
44func (*ConnInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } 44func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
45 return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic)
46}
47func (m *ConnInfo) XXX_Merge(src proto.Message) {
48 xxx_messageInfo_ConnInfo.Merge(m, src)
49}
50func (m *ConnInfo) XXX_Size() int {
51 return xxx_messageInfo_ConnInfo.Size(m)
52}
53func (m *ConnInfo) XXX_DiscardUnknown() {
54 xxx_messageInfo_ConnInfo.DiscardUnknown(m)
55}
56
57var xxx_messageInfo_ConnInfo proto.InternalMessageInfo
45 58
46func (m *ConnInfo) GetServiceId() uint32 { 59func (m *ConnInfo) GetServiceId() uint32 {
47 if m != nil { 60 if m != nil {
@@ -68,6 +81,23 @@ func init() {
68 proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") 81 proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo")
69} 82}
70 83
84func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_802e9beed3ec3b28) }
85
86var fileDescriptor_802e9beed3ec3b28 = []byte{
87 // 175 bytes of a gzipped FileDescriptorProto
88 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48,
89 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b,
90 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b,
91 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91,
92 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7,
93 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20,
94 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc,
95 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1,
96 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b,
97 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b,
98 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00,
99}
100
71// Reference imports to suppress errors if they are not otherwise used. 101// Reference imports to suppress errors if they are not otherwise used.
72var _ context.Context 102var _ context.Context
73var _ grpc.ClientConn 103var _ grpc.ClientConn
@@ -76,8 +106,9 @@ var _ grpc.ClientConn
76// is compatible with the grpc package it is being compiled against. 106// is compatible with the grpc package it is being compiled against.
77const _ = grpc.SupportPackageIsVersion4 107const _ = grpc.SupportPackageIsVersion4
78 108
79// Client API for GRPCBroker service 109// GRPCBrokerClient is the client API for GRPCBroker service.
80 110//
111// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
81type GRPCBrokerClient interface { 112type GRPCBrokerClient interface {
82 StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) 113 StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error)
83} 114}
@@ -91,7 +122,7 @@ func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient {
91} 122}
92 123
93func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { 124func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) {
94 stream, err := grpc.NewClientStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], c.cc, "/plugin.GRPCBroker/StartStream", opts...) 125 stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...)
95 if err != nil { 126 if err != nil {
96 return nil, err 127 return nil, err
97 } 128 }
@@ -121,8 +152,7 @@ func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) {
121 return m, nil 152 return m, nil
122} 153}
123 154
124// Server API for GRPCBroker service 155// GRPCBrokerServer is the server API for GRPCBroker service.
125
126type GRPCBrokerServer interface { 156type GRPCBrokerServer interface {
127 StartStream(GRPCBroker_StartStreamServer) error 157 StartStream(GRPCBroker_StartStreamServer) error
128} 158}
@@ -171,20 +201,3 @@ var _GRPCBroker_serviceDesc = grpc.ServiceDesc{
171 }, 201 },
172 Metadata: "grpc_broker.proto", 202 Metadata: "grpc_broker.proto",
173} 203}
174
175func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor0) }
176
177var fileDescriptor0 = []byte{
178 // 170 bytes of a gzipped FileDescriptorProto
179 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48,
180 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b,
181 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b,
182 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91,
183 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7,
184 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20,
185 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc,
186 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1,
187 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b,
188 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x93, 0xd8, 0xc0, 0x4e, 0x36, 0x06, 0x04, 0x00, 0x00,
189 0xff, 0xff, 0x7b, 0x5d, 0xfb, 0xe1, 0xc7, 0x00, 0x00, 0x00,
190}
diff --git a/vendor/github.com/hashicorp/go-plugin/grpc_broker.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto
index f578348..3fa79e8 100644
--- a/vendor/github.com/hashicorp/go-plugin/grpc_broker.proto
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto
@@ -1,5 +1,6 @@
1syntax = "proto3"; 1syntax = "proto3";
2package plugin; 2package plugin;
3option go_package = "plugin";
3 4
4message ConnInfo { 5message ConnInfo {
5 uint32 service_id = 1; 6 uint32 service_id = 1;
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go
new file mode 100644
index 0000000..38b4204
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go
@@ -0,0 +1,143 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: grpc_controller.proto
3
4package plugin
5
6import (
7 fmt "fmt"
8 proto "github.com/golang/protobuf/proto"
9 context "golang.org/x/net/context"
10 grpc "google.golang.org/grpc"
11 math "math"
12)
13
14// Reference imports to suppress errors if they are not otherwise used.
15var _ = proto.Marshal
16var _ = fmt.Errorf
17var _ = math.Inf
18
19// This is a compile-time assertion to ensure that this generated file
20// is compatible with the proto package it is being compiled against.
21// A compilation error at this line likely means your copy of the
22// proto package needs to be updated.
23const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
24
25type Empty struct {
26 XXX_NoUnkeyedLiteral struct{} `json:"-"`
27 XXX_unrecognized []byte `json:"-"`
28 XXX_sizecache int32 `json:"-"`
29}
30
31func (m *Empty) Reset() { *m = Empty{} }
32func (m *Empty) String() string { return proto.CompactTextString(m) }
33func (*Empty) ProtoMessage() {}
34func (*Empty) Descriptor() ([]byte, []int) {
35 return fileDescriptor_23c2c7e42feab570, []int{0}
36}
37
38func (m *Empty) XXX_Unmarshal(b []byte) error {
39 return xxx_messageInfo_Empty.Unmarshal(m, b)
40}
41func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
42 return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
43}
44func (m *Empty) XXX_Merge(src proto.Message) {
45 xxx_messageInfo_Empty.Merge(m, src)
46}
47func (m *Empty) XXX_Size() int {
48 return xxx_messageInfo_Empty.Size(m)
49}
50func (m *Empty) XXX_DiscardUnknown() {
51 xxx_messageInfo_Empty.DiscardUnknown(m)
52}
53
54var xxx_messageInfo_Empty proto.InternalMessageInfo
55
56func init() {
57 proto.RegisterType((*Empty)(nil), "plugin.Empty")
58}
59
60func init() { proto.RegisterFile("grpc_controller.proto", fileDescriptor_23c2c7e42feab570) }
61
62var fileDescriptor_23c2c7e42feab570 = []byte{
63 // 108 bytes of a gzipped FileDescriptorProto
64 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48,
65 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f,
66 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d,
67 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0,
68 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03,
69 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08,
70 0x00, 0x00, 0xff, 0xff, 0xab, 0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00,
71}
72
73// Reference imports to suppress errors if they are not otherwise used.
74var _ context.Context
75var _ grpc.ClientConn
76
77// This is a compile-time assertion to ensure that this generated file
78// is compatible with the grpc package it is being compiled against.
79const _ = grpc.SupportPackageIsVersion4
80
81// GRPCControllerClient is the client API for GRPCController service.
82//
83// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
84type GRPCControllerClient interface {
85 Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
86}
87
88type gRPCControllerClient struct {
89 cc *grpc.ClientConn
90}
91
92func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient {
93 return &gRPCControllerClient{cc}
94}
95
96func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
97 out := new(Empty)
98 err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...)
99 if err != nil {
100 return nil, err
101 }
102 return out, nil
103}
104
105// GRPCControllerServer is the server API for GRPCController service.
106type GRPCControllerServer interface {
107 Shutdown(context.Context, *Empty) (*Empty, error)
108}
109
110func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) {
111 s.RegisterService(&_GRPCController_serviceDesc, srv)
112}
113
114func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
115 in := new(Empty)
116 if err := dec(in); err != nil {
117 return nil, err
118 }
119 if interceptor == nil {
120 return srv.(GRPCControllerServer).Shutdown(ctx, in)
121 }
122 info := &grpc.UnaryServerInfo{
123 Server: srv,
124 FullMethod: "/plugin.GRPCController/Shutdown",
125 }
126 handler := func(ctx context.Context, req interface{}) (interface{}, error) {
127 return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty))
128 }
129 return interceptor(ctx, in, info, handler)
130}
131
132var _GRPCController_serviceDesc = grpc.ServiceDesc{
133 ServiceName: "plugin.GRPCController",
134 HandlerType: (*GRPCControllerServer)(nil),
135 Methods: []grpc.MethodDesc{
136 {
137 MethodName: "Shutdown",
138 Handler: _GRPCController_Shutdown_Handler,
139 },
140 },
141 Streams: []grpc.StreamDesc{},
142 Metadata: "grpc_controller.proto",
143}
diff --git a/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto
new file mode 100644
index 0000000..345d0a1
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto
@@ -0,0 +1,11 @@
1syntax = "proto3";
2package plugin;
3option go_package = "plugin";
4
5message Empty {
6}
7
8// The GRPCController is responsible for telling the plugin server to shutdown.
9service GRPCController {
10 rpc Shutdown(Empty) returns (Empty);
11}
diff --git a/vendor/github.com/hashicorp/go-plugin/log_entry.go b/vendor/github.com/hashicorp/go-plugin/log_entry.go
index 2996c14..fb2ef93 100644
--- a/vendor/github.com/hashicorp/go-plugin/log_entry.go
+++ b/vendor/github.com/hashicorp/go-plugin/log_entry.go
@@ -32,11 +32,11 @@ func flattenKVPairs(kvs []*logEntryKV) []interface{} {
32} 32}
33 33
34// parseJSON handles parsing JSON output 34// parseJSON handles parsing JSON output
35func parseJSON(input string) (*logEntry, error) { 35func parseJSON(input []byte) (*logEntry, error) {
36 var raw map[string]interface{} 36 var raw map[string]interface{}
37 entry := &logEntry{} 37 entry := &logEntry{}
38 38
39 err := json.Unmarshal([]byte(input), &raw) 39 err := json.Unmarshal(input, &raw)
40 if err != nil { 40 if err != nil {
41 return nil, err 41 return nil, err
42 } 42 }
diff --git a/vendor/github.com/hashicorp/go-plugin/mtls.go b/vendor/github.com/hashicorp/go-plugin/mtls.go
new file mode 100644
index 0000000..8895524
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-plugin/mtls.go
@@ -0,0 +1,73 @@
1package plugin
2
3import (
4 "bytes"
5 "crypto/ecdsa"
6 "crypto/elliptic"
7 "crypto/rand"
8 "crypto/x509"
9 "crypto/x509/pkix"
10 "encoding/pem"
11 "math/big"
12 "time"
13)
14
15// generateCert generates a temporary certificate for plugin authentication. The
16// certificate and private key are returns in PEM format.
17func generateCert() (cert []byte, privateKey []byte, err error) {
18 key, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
19 if err != nil {
20 return nil, nil, err
21 }
22
23 serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
24 sn, err := rand.Int(rand.Reader, serialNumberLimit)
25 if err != nil {
26 return nil, nil, err
27 }
28
29 host := "localhost"
30
31 template := &x509.Certificate{
32 Subject: pkix.Name{
33 CommonName: host,
34 Organization: []string{"HashiCorp"},
35 },
36 DNSNames: []string{host},
37 ExtKeyUsage: []x509.ExtKeyUsage{
38 x509.ExtKeyUsageClientAuth,
39 x509.ExtKeyUsageServerAuth,
40 },
41 KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment | x509.KeyUsageKeyAgreement | x509.KeyUsageCertSign,
42 BasicConstraintsValid: true,
43 SerialNumber: sn,
44 NotBefore: time.Now().Add(-30 * time.Second),
45 NotAfter: time.Now().Add(262980 * time.Hour),
46 IsCA: true,
47 }
48
49 der, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)
50 if err != nil {
51 return nil, nil, err
52 }
53
54 var certOut bytes.Buffer
55 if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: der}); err != nil {
56 return nil, nil, err
57 }
58
59 keyBytes, err := x509.MarshalECPrivateKey(key)
60 if err != nil {
61 return nil, nil, err
62 }
63
64 var keyOut bytes.Buffer
65 if err := pem.Encode(&keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil {
66 return nil, nil, err
67 }
68
69 cert = certOut.Bytes()
70 privateKey = keyOut.Bytes()
71
72 return cert, privateKey, nil
73}
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
index 1e808b9..fc9f05a 100644
--- a/vendor/github.com/hashicorp/go-plugin/server.go
+++ b/vendor/github.com/hashicorp/go-plugin/server.go
@@ -2,6 +2,7 @@ package plugin
2 2
3import ( 3import (
4 "crypto/tls" 4 "crypto/tls"
5 "crypto/x509"
5 "encoding/base64" 6 "encoding/base64"
6 "errors" 7 "errors"
7 "fmt" 8 "fmt"
@@ -11,7 +12,9 @@ import (
11 "os" 12 "os"
12 "os/signal" 13 "os/signal"
13 "runtime" 14 "runtime"
15 "sort"
14 "strconv" 16 "strconv"
17 "strings"
15 "sync/atomic" 18 "sync/atomic"
16 19
17 "github.com/hashicorp/go-hclog" 20 "github.com/hashicorp/go-hclog"
@@ -36,6 +39,8 @@ type HandshakeConfig struct {
36 // ProtocolVersion is the version that clients must match on to 39 // ProtocolVersion is the version that clients must match on to
37 // agree they can communicate. This should match the ProtocolVersion 40 // agree they can communicate. This should match the ProtocolVersion
38 // set on ClientConfig when using a plugin. 41 // set on ClientConfig when using a plugin.
42 // This field is not required if VersionedPlugins are being used in the
43 // Client or Server configurations.
39 ProtocolVersion uint 44 ProtocolVersion uint
40 45
41 // MagicCookieKey and value are used as a very basic verification 46 // MagicCookieKey and value are used as a very basic verification
@@ -46,6 +51,10 @@ type HandshakeConfig struct {
46 MagicCookieValue string 51 MagicCookieValue string
47} 52}
48 53
54// PluginSet is a set of plugins provided to be registered in the plugin
55// server.
56type PluginSet map[string]Plugin
57
49// ServeConfig configures what sorts of plugins are served. 58// ServeConfig configures what sorts of plugins are served.
50type ServeConfig struct { 59type ServeConfig struct {
51 // HandshakeConfig is the configuration that must match clients. 60 // HandshakeConfig is the configuration that must match clients.
@@ -55,7 +64,13 @@ type ServeConfig struct {
55 TLSProvider func() (*tls.Config, error) 64 TLSProvider func() (*tls.Config, error)
56 65
57 // Plugins are the plugins that are served. 66 // Plugins are the plugins that are served.
58 Plugins map[string]Plugin 67 // The implied version of this PluginSet is the Handshake.ProtocolVersion.
68 Plugins PluginSet
69
70 // VersionedPlugins is a map of PluginSets for specific protocol versions.
71 // These can be used to negotiate a compatible version between client and
72 // server. If this is set, Handshake.ProtocolVersion is not required.
73 VersionedPlugins map[int]PluginSet
59 74
60 // GRPCServer should be non-nil to enable serving the plugins over 75 // GRPCServer should be non-nil to enable serving the plugins over
61 // gRPC. This is a function to create the server when needed with the 76 // gRPC. This is a function to create the server when needed with the
@@ -72,14 +87,83 @@ type ServeConfig struct {
72 Logger hclog.Logger 87 Logger hclog.Logger
73} 88}
74 89
75// Protocol returns the protocol that this server should speak. 90// protocolVersion determines the protocol version and plugin set to be used by
76func (c *ServeConfig) Protocol() Protocol { 91// the server. In the event that there is no suitable version, the last version
77 result := ProtocolNetRPC 92// in the config is returned leaving the client to report the incompatibility.
78 if c.GRPCServer != nil { 93func protocolVersion(opts *ServeConfig) (int, Protocol, PluginSet) {
79 result = ProtocolGRPC 94 protoVersion := int(opts.ProtocolVersion)
95 pluginSet := opts.Plugins
96 protoType := ProtocolNetRPC
97 // Check if the client sent a list of acceptable versions
98 var clientVersions []int
99 if vs := os.Getenv("PLUGIN_PROTOCOL_VERSIONS"); vs != "" {
100 for _, s := range strings.Split(vs, ",") {
101 v, err := strconv.Atoi(s)
102 if err != nil {
103 fmt.Fprintf(os.Stderr, "server sent invalid plugin version %q", s)
104 continue
105 }
106 clientVersions = append(clientVersions, v)
107 }
108 }
109
110 // We want to iterate in reverse order, to ensure we match the newest
111 // compatible plugin version.
112 sort.Sort(sort.Reverse(sort.IntSlice(clientVersions)))
113
114 // set the old un-versioned fields as if they were versioned plugins
115 if opts.VersionedPlugins == nil {
116 opts.VersionedPlugins = make(map[int]PluginSet)
117 }
118
119 if pluginSet != nil {
120 opts.VersionedPlugins[protoVersion] = pluginSet
80 } 121 }
81 122
82 return result 123 // Sort the version to make sure we match the latest first
124 var versions []int
125 for v := range opts.VersionedPlugins {
126 versions = append(versions, v)
127 }
128
129 sort.Sort(sort.Reverse(sort.IntSlice(versions)))
130
131 // See if we have multiple versions of Plugins to choose from
132 for _, version := range versions {
133 // Record each version, since we guarantee that this returns valid
134 // values even if they are not a protocol match.
135 protoVersion = version
136 pluginSet = opts.VersionedPlugins[version]
137
138 // If we have a configured gRPC server we should select a protocol
139 if opts.GRPCServer != nil {
140 // All plugins in a set must use the same transport, so check the first
141 // for the protocol type
142 for _, p := range pluginSet {
143 switch p.(type) {
144 case GRPCPlugin:
145 protoType = ProtocolGRPC
146 default:
147 protoType = ProtocolNetRPC
148 }
149 break
150 }
151 }
152
153 for _, clientVersion := range clientVersions {
154 if clientVersion == protoVersion {
155 return protoVersion, protoType, pluginSet
156 }
157 }
158 }
159
160 // Return the lowest version as the fallback.
161 // Since we iterated over all the versions in reverse order above, these
162 // values are from the lowest version number plugins (which may be from
163 // a combination of the Handshake.ProtocolVersion and ServeConfig.Plugins
164 // fields). This allows serving the oldest version of our plugins to a
165 // legacy client that did not send a PLUGIN_PROTOCOL_VERSIONS list.
166 return protoVersion, protoType, pluginSet
83} 167}
84 168
85// Serve serves the plugins given by ServeConfig. 169// Serve serves the plugins given by ServeConfig.
@@ -107,6 +191,10 @@ func Serve(opts *ServeConfig) {
107 os.Exit(1) 191 os.Exit(1)
108 } 192 }
109 193
194 // negotiate the version and plugins
195 // start with default version in the handshake config
196 protoVersion, protoType, pluginSet := protocolVersion(opts)
197
110 // Logging goes to the original stderr 198 // Logging goes to the original stderr
111 log.SetOutput(os.Stderr) 199 log.SetOutput(os.Stderr)
112 200
@@ -155,12 +243,47 @@ func Serve(opts *ServeConfig) {
155 } 243 }
156 } 244 }
157 245
246 var serverCert string
247 clientCert := os.Getenv("PLUGIN_CLIENT_CERT")
248 // If the client is configured using AutoMTLS, the certificate will be here,
249 // and we need to generate our own in response.
250 if tlsConfig == nil && clientCert != "" {
251 logger.Info("configuring server automatic mTLS")
252 clientCertPool := x509.NewCertPool()
253 if !clientCertPool.AppendCertsFromPEM([]byte(clientCert)) {
254 logger.Error("client cert provided but failed to parse", "cert", clientCert)
255 }
256
257 certPEM, keyPEM, err := generateCert()
258 if err != nil {
259 logger.Error("failed to generate client certificate", "error", err)
260 panic(err)
261 }
262
263 cert, err := tls.X509KeyPair(certPEM, keyPEM)
264 if err != nil {
265 logger.Error("failed to parse client certificate", "error", err)
266 panic(err)
267 }
268
269 tlsConfig = &tls.Config{
270 Certificates: []tls.Certificate{cert},
271 ClientAuth: tls.RequireAndVerifyClientCert,
272 ClientCAs: clientCertPool,
273 MinVersion: tls.VersionTLS12,
274 }
275
276 // We send back the raw leaf cert data for the client rather than the
277 // PEM, since the protocol can't handle newlines.
278 serverCert = base64.RawStdEncoding.EncodeToString(cert.Certificate[0])
279 }
280
158 // Create the channel to tell us when we're done 281 // Create the channel to tell us when we're done
159 doneCh := make(chan struct{}) 282 doneCh := make(chan struct{})
160 283
161 // Build the server type 284 // Build the server type
162 var server ServerProtocol 285 var server ServerProtocol
163 switch opts.Protocol() { 286 switch protoType {
164 case ProtocolNetRPC: 287 case ProtocolNetRPC:
165 // If we have a TLS configuration then we wrap the listener 288 // If we have a TLS configuration then we wrap the listener
166 // ourselves and do it at that level. 289 // ourselves and do it at that level.
@@ -170,7 +293,7 @@ func Serve(opts *ServeConfig) {
170 293
171 // Create the RPC server to dispense 294 // Create the RPC server to dispense
172 server = &RPCServer{ 295 server = &RPCServer{
173 Plugins: opts.Plugins, 296 Plugins: pluginSet,
174 Stdout: stdout_r, 297 Stdout: stdout_r,
175 Stderr: stderr_r, 298 Stderr: stderr_r,
176 DoneCh: doneCh, 299 DoneCh: doneCh,
@@ -179,16 +302,17 @@ func Serve(opts *ServeConfig) {
179 case ProtocolGRPC: 302 case ProtocolGRPC:
180 // Create the gRPC server 303 // Create the gRPC server
181 server = &GRPCServer{ 304 server = &GRPCServer{
182 Plugins: opts.Plugins, 305 Plugins: pluginSet,
183 Server: opts.GRPCServer, 306 Server: opts.GRPCServer,
184 TLS: tlsConfig, 307 TLS: tlsConfig,
185 Stdout: stdout_r, 308 Stdout: stdout_r,
186 Stderr: stderr_r, 309 Stderr: stderr_r,
187 DoneCh: doneCh, 310 DoneCh: doneCh,
311 logger: logger,
188 } 312 }
189 313
190 default: 314 default:
191 panic("unknown server protocol: " + opts.Protocol()) 315 panic("unknown server protocol: " + protoType)
192 } 316 }
193 317
194 // Initialize the servers 318 // Initialize the servers
@@ -197,25 +321,16 @@ func Serve(opts *ServeConfig) {
197 return 321 return
198 } 322 }
199 323
200 // Build the extra configuration
201 extra := ""
202 if v := server.Config(); v != "" {
203 extra = base64.StdEncoding.EncodeToString([]byte(v))
204 }
205 if extra != "" {
206 extra = "|" + extra
207 }
208
209 logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String()) 324 logger.Debug("plugin address", "network", listener.Addr().Network(), "address", listener.Addr().String())
210 325
211 // Output the address and service name to stdout so that core can bring it up. 326 // Output the address and service name to stdout so that the client can bring it up.
212 fmt.Printf("%d|%d|%s|%s|%s%s\n", 327 fmt.Printf("%d|%d|%s|%s|%s|%s\n",
213 CoreProtocolVersion, 328 CoreProtocolVersion,
214 opts.ProtocolVersion, 329 protoVersion,
215 listener.Addr().Network(), 330 listener.Addr().Network(),
216 listener.Addr().String(), 331 listener.Addr().String(),
217 opts.Protocol(), 332 protoType,
218 extra) 333 serverCert)
219 os.Stdout.Sync() 334 os.Stdout.Sync()
220 335
221 // Eat the interrupts 336 // Eat the interrupts
diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go
index df29593..2cf2c26 100644
--- a/vendor/github.com/hashicorp/go-plugin/testing.go
+++ b/vendor/github.com/hashicorp/go-plugin/testing.go
@@ -3,13 +3,28 @@ package plugin
3import ( 3import (
4 "bytes" 4 "bytes"
5 "context" 5 "context"
6 "io"
6 "net" 7 "net"
7 "net/rpc" 8 "net/rpc"
8 9
9 "github.com/mitchellh/go-testing-interface" 10 "github.com/mitchellh/go-testing-interface"
11 hclog "github.com/hashicorp/go-hclog"
12 "github.com/hashicorp/go-plugin/internal/plugin"
10 "google.golang.org/grpc" 13 "google.golang.org/grpc"
11) 14)
12 15
16// TestOptions allows specifying options that can affect the behavior of the
17// test functions
18type TestOptions struct {
19 //ServerStdout causes the given value to be used in place of a blank buffer
20 //for RPCServer's Stdout
21 ServerStdout io.ReadCloser
22
23 //ServerStderr causes the given value to be used in place of a blank buffer
24 //for RPCServer's Stderr
25 ServerStderr io.ReadCloser
26}
27
13// The testing file contains test helpers that you can use outside of 28// The testing file contains test helpers that you can use outside of
14// this package for making it easier to test plugins themselves. 29// this package for making it easier to test plugins themselves.
15 30
@@ -61,12 +76,20 @@ func TestRPCConn(t testing.T) (*rpc.Client, *rpc.Server) {
61 76
62// TestPluginRPCConn returns a plugin RPC client and server that are connected 77// TestPluginRPCConn returns a plugin RPC client and server that are connected
63// together and configured. 78// together and configured.
64func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) { 79func TestPluginRPCConn(t testing.T, ps map[string]Plugin, opts *TestOptions) (*RPCClient, *RPCServer) {
65 // Create two net.Conns we can use to shuttle our control connection 80 // Create two net.Conns we can use to shuttle our control connection
66 clientConn, serverConn := TestConn(t) 81 clientConn, serverConn := TestConn(t)
67 82
68 // Start up the server 83 // Start up the server
69 server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)} 84 server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
85 if opts != nil {
86 if opts.ServerStdout != nil {
87 server.Stdout = opts.ServerStdout
88 }
89 if opts.ServerStderr != nil {
90 server.Stderr = opts.ServerStderr
91 }
92 }
70 go server.ServeConn(serverConn) 93 go server.ServeConn(serverConn)
71 94
72 // Connect the client to the server 95 // Connect the client to the server
@@ -119,9 +142,11 @@ func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCSe
119 // Start up the server 142 // Start up the server
120 server := &GRPCServer{ 143 server := &GRPCServer{
121 Plugins: ps, 144 Plugins: ps,
145 DoneCh: make(chan struct{}),
122 Server: DefaultGRPCServer, 146 Server: DefaultGRPCServer,
123 Stdout: new(bytes.Buffer), 147 Stdout: new(bytes.Buffer),
124 Stderr: new(bytes.Buffer), 148 Stderr: new(bytes.Buffer),
149 logger: hclog.Default(),
125 } 150 }
126 if err := server.Init(); err != nil { 151 if err := server.Init(); err != nil {
127 t.Fatalf("err: %s", err) 152 t.Fatalf("err: %s", err)
@@ -144,10 +169,11 @@ func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCSe
144 169
145 // Create the client 170 // Create the client
146 client := &GRPCClient{ 171 client := &GRPCClient{
147 Conn: conn, 172 Conn: conn,
148 Plugins: ps, 173 Plugins: ps,
149 broker: broker, 174 broker: broker,
150 doneCtx: context.Background(), 175 doneCtx: context.Background(),
176 controller: plugin.NewGRPCControllerClient(conn),
151 } 177 }
152 178
153 return client, server 179 return client, server
diff --git a/vendor/github.com/hashicorp/go-safetemp/go.mod b/vendor/github.com/hashicorp/go-safetemp/go.mod
new file mode 100644
index 0000000..02bc5f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-safetemp/go.mod
@@ -0,0 +1 @@
module github.com/hashicorp/go-safetemp
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
index ff9364c..911227f 100644
--- a/vendor/github.com/hashicorp/go-uuid/uuid.go
+++ b/vendor/github.com/hashicorp/go-uuid/uuid.go
@@ -15,9 +15,11 @@ func GenerateRandomBytes(size int) ([]byte, error) {
15 return buf, nil 15 return buf, nil
16} 16}
17 17
18const uuidLen = 16
19
18// GenerateUUID is used to generate a random UUID 20// GenerateUUID is used to generate a random UUID
19func GenerateUUID() (string, error) { 21func GenerateUUID() (string, error) {
20 buf, err := GenerateRandomBytes(16) 22 buf, err := GenerateRandomBytes(uuidLen)
21 if err != nil { 23 if err != nil {
22 return "", err 24 return "", err
23 } 25 }
@@ -25,11 +27,11 @@ func GenerateUUID() (string, error) {
25} 27}
26 28
27func FormatUUID(buf []byte) (string, error) { 29func FormatUUID(buf []byte) (string, error) {
28 if len(buf) != 16 { 30 if buflen := len(buf); buflen != uuidLen {
29 return "", fmt.Errorf("wrong length byte slice (%d)", len(buf)) 31 return "", fmt.Errorf("wrong length byte slice (%d)", buflen)
30 } 32 }
31 33
32 return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x", 34 return fmt.Sprintf("%x-%x-%x-%x-%x",
33 buf[0:4], 35 buf[0:4],
34 buf[4:6], 36 buf[4:6],
35 buf[6:8], 37 buf[6:8],
@@ -38,16 +40,14 @@ func FormatUUID(buf []byte) (string, error) {
38} 40}
39 41
40func ParseUUID(uuid string) ([]byte, error) { 42func ParseUUID(uuid string) ([]byte, error) {
41 if len(uuid) != 36 { 43 if len(uuid) != 2 * uuidLen + 4 {
42 return nil, fmt.Errorf("uuid string is wrong length") 44 return nil, fmt.Errorf("uuid string is wrong length")
43 } 45 }
44 46
45 hyph := []byte("-") 47 if uuid[8] != '-' ||
46 48 uuid[13] != '-' ||
47 if uuid[8] != hyph[0] || 49 uuid[18] != '-' ||
48 uuid[13] != hyph[0] || 50 uuid[23] != '-' {
49 uuid[18] != hyph[0] ||
50 uuid[23] != hyph[0] {
51 return nil, fmt.Errorf("uuid is improperly formatted") 51 return nil, fmt.Errorf("uuid is improperly formatted")
52 } 52 }
53 53
@@ -57,7 +57,7 @@ func ParseUUID(uuid string) ([]byte, error) {
57 if err != nil { 57 if err != nil {
58 return nil, err 58 return nil, err
59 } 59 }
60 if len(ret) != 16 { 60 if len(ret) != uuidLen {
61 return nil, fmt.Errorf("decoded hex is the wrong length") 61 return nil, fmt.Errorf("decoded hex is the wrong length")
62 } 62 }
63 63
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
index 4d1e6e2..186fd7c 100644
--- a/vendor/github.com/hashicorp/go-version/version.go
+++ b/vendor/github.com/hashicorp/go-version/version.go
@@ -10,14 +10,25 @@ import (
10) 10)
11 11
12// The compiled regular expression used to test the validity of a version. 12// The compiled regular expression used to test the validity of a version.
13var versionRegexp *regexp.Regexp 13var (
14 versionRegexp *regexp.Regexp
15 semverRegexp *regexp.Regexp
16)
14 17
15// The raw regular expression string used for testing the validity 18// The raw regular expression string used for testing the validity
16// of a version. 19// of a version.
17const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + 20const (
18 `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + 21 VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
19 `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + 22 `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
20 `?` 23 `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
24 `?`
25
26 // SemverRegexpRaw requires a separator between version and prerelease
27 SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
28 `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` +
29 `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` +
30 `?`
31)
21 32
22// Version represents a single version. 33// Version represents a single version.
23type Version struct { 34type Version struct {
@@ -30,12 +41,24 @@ type Version struct {
30 41
31func init() { 42func init() {
32 versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") 43 versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
44 semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$")
33} 45}
34 46
35// NewVersion parses the given version and returns a new 47// NewVersion parses the given version and returns a new
36// Version. 48// Version.
37func NewVersion(v string) (*Version, error) { 49func NewVersion(v string) (*Version, error) {
38 matches := versionRegexp.FindStringSubmatch(v) 50 return newVersion(v, versionRegexp)
51}
52
53// NewSemver parses the given version and returns a new
54// Version that adheres strictly to SemVer specs
55// https://semver.org/
56func NewSemver(v string) (*Version, error) {
57 return newVersion(v, semverRegexp)
58}
59
60func newVersion(v string, pattern *regexp.Regexp) (*Version, error) {
61 matches := pattern.FindStringSubmatch(v)
39 if matches == nil { 62 if matches == nil {
40 return nil, fmt.Errorf("Malformed version: %s", v) 63 return nil, fmt.Errorf("Malformed version: %s", v)
41 } 64 }
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md b/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md
new file mode 100644
index 0000000..2b24fdb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md
@@ -0,0 +1,184 @@
1# HCL Dynamic Blocks Extension
2
3This HCL extension implements a special block type named "dynamic" that can
4be used to dynamically generate blocks of other types by iterating over
5collection values.
6
7Normally the block structure in an HCL configuration file is rigid, even
8though dynamic expressions can be used within attribute values. This is
9convenient for most applications since it allows the overall structure of
10the document to be decoded easily, but in some applications it is desirable
11to allow dynamic block generation within certain portions of the configuration.
12
13Dynamic block generation is performed using the `dynamic` block type:
14
15```hcl
16toplevel {
17 nested {
18 foo = "static block 1"
19 }
20
21 dynamic "nested" {
22 for_each = ["a", "b", "c"]
23 iterator = nested
24 content {
25 foo = "dynamic block ${nested.value}"
26 }
27 }
28
29 nested {
30 foo = "static block 2"
31 }
32}
33```
34
35The above is interpreted as if it were written as follows:
36
37```hcl
38toplevel {
39 nested {
40 foo = "static block 1"
41 }
42
43 nested {
44 foo = "dynamic block a"
45 }
46
47 nested {
48 foo = "dynamic block b"
49 }
50
51 nested {
52 foo = "dynamic block c"
53 }
54
55 nested {
56 foo = "static block 2"
57 }
58}
59```
60
61Since HCL block syntax is not normally exposed to the possibility of unknown
62values, this extension must make some compromises when asked to iterate over
63an unknown collection. If the length of the collection cannot be statically
64recognized (because it is an unknown value of list, map, or set type) then
65the `dynamic` construct will generate a _single_ dynamic block whose iterator
66key and value are both unknown values of the dynamic pseudo-type, thus causing
67any attribute values derived from iteration to appear as unknown values. There
68is no explicit representation of the fact that the length of the collection may
69eventually be different than one.
70
71## Usage
72
73Pass a body to function `Expand` to obtain a new body that will, on access
74to its content, evaluate and expand any nested `dynamic` blocks.
75Dynamic block processing is also automatically propagated into any nested
76blocks that are returned, allowing users to nest dynamic blocks inside
77one another and to nest dynamic blocks inside other static blocks.
78
79HCL structural decoding does not normally have access to an `EvalContext`, so
80any variables and functions that should be available to the `for_each`
81and `labels` expressions must be passed in when calling `Expand`. Expressions
82within the `content` block are evaluated separately and so can be passed a
83separate `EvalContext` if desired, during normal attribute expression
84evaluation.
85
86## Detecting Variables
87
88Some applications dynamically generate an `EvalContext` by analyzing which
89variables are referenced by an expression before evaluating it.
90
91This unfortunately requires some extra effort when this analysis is required
92for the context passed to `Expand`: the HCL API requires a schema to be
93provided in order to do any analysis of the blocks in a body, but the low-level
94schema model provides a description of only one level of nested blocks at
95a time, and thus a new schema must be provided for each additional level of
96nesting.
97
98To make this arduous process as convenient as possible, this package provides
99a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode`
100instance that can be used to find variables directly in a given body and also
101determine which nested blocks require recursive calls. Using this mechanism
102requires that the caller be able to look up a schema given a nested block type.
103For _simple_ formats where a specific block type name always has the same schema
104regardless of context, a walk can be implemented as follows:
105
106```go
107func walkVariables(node dynblock.WalkVariablesNode, schema *hcl.BodySchema) []hcl.Traversal {
108 vars, children := node.Visit(schema)
109
110 for _, child := range children {
111 var childSchema *hcl.BodySchema
112 switch child.BlockTypeName {
113 case "a":
114 childSchema = &hcl.BodySchema{
115 Blocks: []hcl.BlockHeaderSchema{
116 {
117 Type: "b",
118 LabelNames: []string{"key"},
119 },
120 },
121 }
122 case "b":
123 childSchema = &hcl.BodySchema{
124 Attributes: []hcl.AttributeSchema{
125 {
126 Name: "val",
127 Required: true,
128 },
129 },
130 }
131 default:
132 // Should never happen, because the above cases should be exhaustive
133 // for the application's configuration format.
134 panic(fmt.Errorf("can't find schema for unknown block type %q", child.BlockTypeName))
135 }
136
137 vars = append(vars, testWalkAndAccumVars(child.Node, childSchema)...)
138 }
139}
140```
141
142### Detecting Variables with `hcldec` Specifications
143
144For applications that use the higher-level `hcldec` package to decode nested
145configuration structures into `cty` values, the same specification can be used
146to automatically drive the recursive variable-detection walk described above.
147
148The helper function `ForEachVariablesHCLDec` allows an entire recursive
149configuration structure to be analyzed in a single call given a `hcldec.Spec`
150that describes the nested block structure. This means a `hcldec`-based
151application can support dynamic blocks with only a little additional effort:
152
153```go
154func decodeBody(body hcl.Body, spec hcldec.Spec) (cty.Value, hcl.Diagnostics) {
155 // Determine which variables are needed to expand dynamic blocks
156 neededForDynamic := dynblock.ForEachVariablesHCLDec(body, spec)
157
158 // Build a suitable EvalContext and expand dynamic blocks
159 dynCtx := buildEvalContext(neededForDynamic)
160 dynBody := dynblock.Expand(body, dynCtx)
161
162 // Determine which variables are needed to fully decode the expanded body
163 // This will analyze expressions that came both from static blocks in the
164 // original body and from blocks that were dynamically added by Expand.
165 neededForDecode := hcldec.Variables(dynBody, spec)
166
167 // Build a suitable EvalContext and then fully decode the body as per the
168 // hcldec specification.
169 decCtx := buildEvalContext(neededForDecode)
170 return hcldec.Decode(dynBody, spec, decCtx)
171}
172
173func buildEvalContext(needed []hcl.Traversal) *hcl.EvalContext {
174 // (to be implemented by your application)
175}
176```
177
178# Performance
179
180This extension is going quite harshly against the grain of the HCL API, and
181so it uses lots of wrapping objects and temporary data structures to get its
182work done. HCL in general is not suitable for use in high-performance situations
183or situations sensitive to memory pressure, but that is _especially_ true for
184this extension.
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go
new file mode 100644
index 0000000..dd30822
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_body.go
@@ -0,0 +1,262 @@
1package dynblock
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/zclconf/go-cty/cty"
8)
9
10// expandBody wraps another hcl.Body and expands any "dynamic" blocks found
11// inside whenever Content or PartialContent is called.
12type expandBody struct {
13 original hcl.Body
14 forEachCtx *hcl.EvalContext
15 iteration *iteration // non-nil if we're nested inside another "dynamic" block
16
17 // These are used with PartialContent to produce a "remaining items"
18 // body to return. They are nil on all bodies fresh out of the transformer.
19 //
20 // Note that this is re-implemented here rather than delegating to the
21 // existing support required by the underlying body because we need to
22 // retain access to the entire original body on subsequent decode operations
23 // so we can retain any "dynamic" blocks for types we didn't consume
24 // on the first pass.
25 hiddenAttrs map[string]struct{}
26 hiddenBlocks map[string]hcl.BlockHeaderSchema
27}
28
29func (b *expandBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
30 extSchema := b.extendSchema(schema)
31 rawContent, diags := b.original.Content(extSchema)
32
33 blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, false)
34 diags = append(diags, blockDiags...)
35 attrs := b.prepareAttributes(rawContent.Attributes)
36
37 content := &hcl.BodyContent{
38 Attributes: attrs,
39 Blocks: blocks,
40 MissingItemRange: b.original.MissingItemRange(),
41 }
42
43 return content, diags
44}
45
46func (b *expandBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
47 extSchema := b.extendSchema(schema)
48 rawContent, _, diags := b.original.PartialContent(extSchema)
49 // We discard the "remain" argument above because we're going to construct
50 // our own remain that also takes into account remaining "dynamic" blocks.
51
52 blocks, blockDiags := b.expandBlocks(schema, rawContent.Blocks, true)
53 diags = append(diags, blockDiags...)
54 attrs := b.prepareAttributes(rawContent.Attributes)
55
56 content := &hcl.BodyContent{
57 Attributes: attrs,
58 Blocks: blocks,
59 MissingItemRange: b.original.MissingItemRange(),
60 }
61
62 remain := &expandBody{
63 original: b.original,
64 forEachCtx: b.forEachCtx,
65 iteration: b.iteration,
66 hiddenAttrs: make(map[string]struct{}),
67 hiddenBlocks: make(map[string]hcl.BlockHeaderSchema),
68 }
69 for name := range b.hiddenAttrs {
70 remain.hiddenAttrs[name] = struct{}{}
71 }
72 for typeName, blockS := range b.hiddenBlocks {
73 remain.hiddenBlocks[typeName] = blockS
74 }
75 for _, attrS := range schema.Attributes {
76 remain.hiddenAttrs[attrS.Name] = struct{}{}
77 }
78 for _, blockS := range schema.Blocks {
79 remain.hiddenBlocks[blockS.Type] = blockS
80 }
81
82 return content, remain, diags
83}
84
85func (b *expandBody) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema {
86 // We augment the requested schema to also include our special "dynamic"
87 // block type, since then we'll get instances of it interleaved with
88 // all of the literal child blocks we must also include.
89 extSchema := &hcl.BodySchema{
90 Attributes: schema.Attributes,
91 Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+len(b.hiddenBlocks)+1),
92 }
93 copy(extSchema.Blocks, schema.Blocks)
94 extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema)
95
96 // If we have any hiddenBlocks then we also need to register those here
97 // so that a call to "Content" on the underlying body won't fail.
98 // (We'll filter these out again once we process the result of either
99 // Content or PartialContent.)
100 for _, blockS := range b.hiddenBlocks {
101 extSchema.Blocks = append(extSchema.Blocks, blockS)
102 }
103
104 // If we have any hiddenAttrs then we also need to register these, for
105 // the same reason as we deal with hiddenBlocks above.
106 if len(b.hiddenAttrs) != 0 {
107 newAttrs := make([]hcl.AttributeSchema, len(schema.Attributes), len(schema.Attributes)+len(b.hiddenAttrs))
108 copy(newAttrs, extSchema.Attributes)
109 for name := range b.hiddenAttrs {
110 newAttrs = append(newAttrs, hcl.AttributeSchema{
111 Name: name,
112 Required: false,
113 })
114 }
115 extSchema.Attributes = newAttrs
116 }
117
118 return extSchema
119}
120
121func (b *expandBody) prepareAttributes(rawAttrs hcl.Attributes) hcl.Attributes {
122 if len(b.hiddenAttrs) == 0 && b.iteration == nil {
123 // Easy path: just pass through the attrs from the original body verbatim
124 return rawAttrs
125 }
126
127 // Otherwise we have some work to do: we must filter out any attributes
128 // that are hidden (since a previous PartialContent call already saw these)
129 // and wrap the expressions of the inner attributes so that they will
130 // have access to our iteration variables.
131 attrs := make(hcl.Attributes, len(rawAttrs))
132 for name, rawAttr := range rawAttrs {
133 if _, hidden := b.hiddenAttrs[name]; hidden {
134 continue
135 }
136 if b.iteration != nil {
137 attr := *rawAttr // shallow copy so we can mutate it
138 attr.Expr = exprWrap{
139 Expression: attr.Expr,
140 i: b.iteration,
141 }
142 attrs[name] = &attr
143 } else {
144 // If we have no active iteration then no wrapping is required.
145 attrs[name] = rawAttr
146 }
147 }
148 return attrs
149}
150
151func (b *expandBody) expandBlocks(schema *hcl.BodySchema, rawBlocks hcl.Blocks, partial bool) (hcl.Blocks, hcl.Diagnostics) {
152 var blocks hcl.Blocks
153 var diags hcl.Diagnostics
154
155 for _, rawBlock := range rawBlocks {
156 switch rawBlock.Type {
157 case "dynamic":
158 realBlockType := rawBlock.Labels[0]
159 if _, hidden := b.hiddenBlocks[realBlockType]; hidden {
160 continue
161 }
162
163 var blockS *hcl.BlockHeaderSchema
164 for _, candidate := range schema.Blocks {
165 if candidate.Type == realBlockType {
166 blockS = &candidate
167 break
168 }
169 }
170 if blockS == nil {
171 // Not a block type that the caller requested.
172 if !partial {
173 diags = append(diags, &hcl.Diagnostic{
174 Severity: hcl.DiagError,
175 Summary: "Unsupported block type",
176 Detail: fmt.Sprintf("Blocks of type %q are not expected here.", realBlockType),
177 Subject: &rawBlock.LabelRanges[0],
178 })
179 }
180 continue
181 }
182
183 spec, specDiags := b.decodeSpec(blockS, rawBlock)
184 diags = append(diags, specDiags...)
185 if specDiags.HasErrors() {
186 continue
187 }
188
189 if spec.forEachVal.IsKnown() {
190 for it := spec.forEachVal.ElementIterator(); it.Next(); {
191 key, value := it.Element()
192 i := b.iteration.MakeChild(spec.iteratorName, key, value)
193
194 block, blockDiags := spec.newBlock(i, b.forEachCtx)
195 diags = append(diags, blockDiags...)
196 if block != nil {
197 // Attach our new iteration context so that attributes
198 // and other nested blocks can refer to our iterator.
199 block.Body = b.expandChild(block.Body, i)
200 blocks = append(blocks, block)
201 }
202 }
203 } else {
204 // If our top-level iteration value isn't known then we're forced
205 // to compromise since HCL doesn't have any concept of an
206 // "unknown block". In this case then, we'll produce a single
207 // dynamic block with the iterator values set to DynamicVal,
208 // which at least makes the potential for a block visible
209 // in our result, even though it's not represented in a fully-accurate
210 // way.
211 i := b.iteration.MakeChild(spec.iteratorName, cty.DynamicVal, cty.DynamicVal)
212 block, blockDiags := spec.newBlock(i, b.forEachCtx)
213 diags = append(diags, blockDiags...)
214 if block != nil {
215 block.Body = b.expandChild(block.Body, i)
216
217 // We additionally force all of the leaf attribute values
218 // in the result to be unknown so the calling application
219 // can, if necessary, use that as a heuristic to detect
220 // when a single nested block might be standing in for
221 // multiple blocks yet to be expanded. This retains the
222 // structure of the generated body but forces all of its
223 // leaf attribute values to be unknown.
224 block.Body = unknownBody{block.Body}
225
226 blocks = append(blocks, block)
227 }
228 }
229
230 default:
231 if _, hidden := b.hiddenBlocks[rawBlock.Type]; !hidden {
232 // A static block doesn't create a new iteration context, but
233 // it does need to inherit _our own_ iteration context in
234 // case it contains expressions that refer to our inherited
235 // iterators, or nested "dynamic" blocks.
236 expandedBlock := *rawBlock // shallow copy
237 expandedBlock.Body = b.expandChild(rawBlock.Body, b.iteration)
238 blocks = append(blocks, &expandedBlock)
239 }
240 }
241 }
242
243 return blocks, diags
244}
245
246func (b *expandBody) expandChild(child hcl.Body, i *iteration) hcl.Body {
247 chiCtx := i.EvalContext(b.forEachCtx)
248 ret := Expand(child, chiCtx)
249 ret.(*expandBody).iteration = i
250 return ret
251}
252
253func (b *expandBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
254 // blocks aren't allowed in JustAttributes mode and this body can
255 // only produce blocks, so we'll just pass straight through to our
256 // underlying body here.
257 return b.original.JustAttributes()
258}
259
260func (b *expandBody) MissingItemRange() hcl.Range {
261 return b.original.MissingItemRange()
262}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go
new file mode 100644
index 0000000..41c0be2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expand_spec.go
@@ -0,0 +1,215 @@
1package dynblock
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/convert"
9)
10
11type expandSpec struct {
12 blockType string
13 blockTypeRange hcl.Range
14 defRange hcl.Range
15 forEachVal cty.Value
16 iteratorName string
17 labelExprs []hcl.Expression
18 contentBody hcl.Body
19 inherited map[string]*iteration
20}
21
22func (b *expandBody) decodeSpec(blockS *hcl.BlockHeaderSchema, rawSpec *hcl.Block) (*expandSpec, hcl.Diagnostics) {
23 var diags hcl.Diagnostics
24
25 var schema *hcl.BodySchema
26 if len(blockS.LabelNames) != 0 {
27 schema = dynamicBlockBodySchemaLabels
28 } else {
29 schema = dynamicBlockBodySchemaNoLabels
30 }
31
32 specContent, specDiags := rawSpec.Body.Content(schema)
33 diags = append(diags, specDiags...)
34 if specDiags.HasErrors() {
35 return nil, diags
36 }
37
38 //// for_each attribute
39
40 eachAttr := specContent.Attributes["for_each"]
41 eachVal, eachDiags := eachAttr.Expr.Value(b.forEachCtx)
42 diags = append(diags, eachDiags...)
43
44 if !eachVal.CanIterateElements() && eachVal.Type() != cty.DynamicPseudoType {
45 // We skip this error for DynamicPseudoType because that means we either
46 // have a null (which is checked immediately below) or an unknown
47 // (which is handled in the expandBody Content methods).
48 diags = append(diags, &hcl.Diagnostic{
49 Severity: hcl.DiagError,
50 Summary: "Invalid dynamic for_each value",
51 Detail: fmt.Sprintf("Cannot use a %s value in for_each. An iterable collection is required.", eachVal.Type().FriendlyName()),
52 Subject: eachAttr.Expr.Range().Ptr(),
53 Expression: eachAttr.Expr,
54 EvalContext: b.forEachCtx,
55 })
56 return nil, diags
57 }
58 if eachVal.IsNull() {
59 diags = append(diags, &hcl.Diagnostic{
60 Severity: hcl.DiagError,
61 Summary: "Invalid dynamic for_each value",
62 Detail: "Cannot use a null value in for_each.",
63 Subject: eachAttr.Expr.Range().Ptr(),
64 Expression: eachAttr.Expr,
65 EvalContext: b.forEachCtx,
66 })
67 return nil, diags
68 }
69
70 //// iterator attribute
71
72 iteratorName := blockS.Type
73 if iteratorAttr := specContent.Attributes["iterator"]; iteratorAttr != nil {
74 itTraversal, itDiags := hcl.AbsTraversalForExpr(iteratorAttr.Expr)
75 diags = append(diags, itDiags...)
76 if itDiags.HasErrors() {
77 return nil, diags
78 }
79
80 if len(itTraversal) != 1 {
81 diags = append(diags, &hcl.Diagnostic{
82 Severity: hcl.DiagError,
83 Summary: "Invalid dynamic iterator name",
84 Detail: "Dynamic iterator must be a single variable name.",
85 Subject: itTraversal.SourceRange().Ptr(),
86 })
87 return nil, diags
88 }
89
90 iteratorName = itTraversal.RootName()
91 }
92
93 var labelExprs []hcl.Expression
94 if labelsAttr := specContent.Attributes["labels"]; labelsAttr != nil {
95 var labelDiags hcl.Diagnostics
96 labelExprs, labelDiags = hcl.ExprList(labelsAttr.Expr)
97 diags = append(diags, labelDiags...)
98 if labelDiags.HasErrors() {
99 return nil, diags
100 }
101
102 if len(labelExprs) > len(blockS.LabelNames) {
103 diags = append(diags, &hcl.Diagnostic{
104 Severity: hcl.DiagError,
105 Summary: "Extraneous dynamic block label",
106 Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)),
107 Subject: labelExprs[len(blockS.LabelNames)].Range().Ptr(),
108 })
109 return nil, diags
110 } else if len(labelExprs) < len(blockS.LabelNames) {
111 diags = append(diags, &hcl.Diagnostic{
112 Severity: hcl.DiagError,
113 Summary: "Insufficient dynamic block labels",
114 Detail: fmt.Sprintf("Blocks of type %q require %d label(s).", blockS.Type, len(blockS.LabelNames)),
115 Subject: labelsAttr.Expr.Range().Ptr(),
116 })
117 return nil, diags
118 }
119 }
120
121 // Since our schema requests only blocks of type "content", we can assume
122 // that all entries in specContent.Blocks are content blocks.
123 if len(specContent.Blocks) == 0 {
124 diags = append(diags, &hcl.Diagnostic{
125 Severity: hcl.DiagError,
126 Summary: "Missing dynamic content block",
127 Detail: "A dynamic block must have a nested block of type \"content\" to describe the body of each generated block.",
128 Subject: &specContent.MissingItemRange,
129 })
130 return nil, diags
131 }
132 if len(specContent.Blocks) > 1 {
133 diags = append(diags, &hcl.Diagnostic{
134 Severity: hcl.DiagError,
135 Summary: "Extraneous dynamic content block",
136 Detail: "Only one nested content block is allowed for each dynamic block.",
137 Subject: &specContent.Blocks[1].DefRange,
138 })
139 return nil, diags
140 }
141
142 return &expandSpec{
143 blockType: blockS.Type,
144 blockTypeRange: rawSpec.LabelRanges[0],
145 defRange: rawSpec.DefRange,
146 forEachVal: eachVal,
147 iteratorName: iteratorName,
148 labelExprs: labelExprs,
149 contentBody: specContent.Blocks[0].Body,
150 }, diags
151}
152
153func (s *expandSpec) newBlock(i *iteration, ctx *hcl.EvalContext) (*hcl.Block, hcl.Diagnostics) {
154 var diags hcl.Diagnostics
155 var labels []string
156 var labelRanges []hcl.Range
157 lCtx := i.EvalContext(ctx)
158 for _, labelExpr := range s.labelExprs {
159 labelVal, labelDiags := labelExpr.Value(lCtx)
160 diags = append(diags, labelDiags...)
161 if labelDiags.HasErrors() {
162 return nil, diags
163 }
164
165 var convErr error
166 labelVal, convErr = convert.Convert(labelVal, cty.String)
167 if convErr != nil {
168 diags = append(diags, &hcl.Diagnostic{
169 Severity: hcl.DiagError,
170 Summary: "Invalid dynamic block label",
171 Detail: fmt.Sprintf("Cannot use this value as a dynamic block label: %s.", convErr),
172 Subject: labelExpr.Range().Ptr(),
173 Expression: labelExpr,
174 EvalContext: lCtx,
175 })
176 return nil, diags
177 }
178 if labelVal.IsNull() {
179 diags = append(diags, &hcl.Diagnostic{
180 Severity: hcl.DiagError,
181 Summary: "Invalid dynamic block label",
182 Detail: "Cannot use a null value as a dynamic block label.",
183 Subject: labelExpr.Range().Ptr(),
184 Expression: labelExpr,
185 EvalContext: lCtx,
186 })
187 return nil, diags
188 }
189 if !labelVal.IsKnown() {
190 diags = append(diags, &hcl.Diagnostic{
191 Severity: hcl.DiagError,
192 Summary: "Invalid dynamic block label",
193 Detail: "This value is not yet known. Dynamic block labels must be immediately-known values.",
194 Subject: labelExpr.Range().Ptr(),
195 Expression: labelExpr,
196 EvalContext: lCtx,
197 })
198 return nil, diags
199 }
200
201 labels = append(labels, labelVal.AsString())
202 labelRanges = append(labelRanges, labelExpr.Range())
203 }
204
205 block := &hcl.Block{
206 Type: s.blockType,
207 TypeRange: s.blockTypeRange,
208 Labels: labels,
209 LabelRanges: labelRanges,
210 DefRange: s.defRange,
211 Body: s.contentBody,
212 }
213
214 return block, diags
215}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go
new file mode 100644
index 0000000..6916fc1
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/expr_wrap.go
@@ -0,0 +1,42 @@
1package dynblock
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6)
7
8type exprWrap struct {
9 hcl.Expression
10 i *iteration
11}
12
13func (e exprWrap) Variables() []hcl.Traversal {
14 raw := e.Expression.Variables()
15 ret := make([]hcl.Traversal, 0, len(raw))
16
17 // Filter out traversals that refer to our iterator name or any
18 // iterator we've inherited; we're going to provide those in
19 // our Value wrapper, so the caller doesn't need to know about them.
20 for _, traversal := range raw {
21 rootName := traversal.RootName()
22 if rootName == e.i.IteratorName {
23 continue
24 }
25 if _, inherited := e.i.Inherited[rootName]; inherited {
26 continue
27 }
28 ret = append(ret, traversal)
29 }
30 return ret
31}
32
33func (e exprWrap) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
34 extCtx := e.i.EvalContext(ctx)
35 return e.Expression.Value(extCtx)
36}
37
38// UnwrapExpression returns the expression being wrapped by this instance.
39// This allows the original expression to be recovered by hcl.UnwrapExpression.
40func (e exprWrap) UnwrapExpression() hcl.Expression {
41 return e.Expression
42}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go
new file mode 100644
index 0000000..7056d33
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/iteration.go
@@ -0,0 +1,66 @@
1package dynblock
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6)
7
8type iteration struct {
9 IteratorName string
10 Key cty.Value
11 Value cty.Value
12 Inherited map[string]*iteration
13}
14
15func (s *expandSpec) MakeIteration(key, value cty.Value) *iteration {
16 return &iteration{
17 IteratorName: s.iteratorName,
18 Key: key,
19 Value: value,
20 Inherited: s.inherited,
21 }
22}
23
24func (i *iteration) Object() cty.Value {
25 return cty.ObjectVal(map[string]cty.Value{
26 "key": i.Key,
27 "value": i.Value,
28 })
29}
30
31func (i *iteration) EvalContext(base *hcl.EvalContext) *hcl.EvalContext {
32 new := base.NewChild()
33
34 if i != nil {
35 new.Variables = map[string]cty.Value{}
36 for name, otherIt := range i.Inherited {
37 new.Variables[name] = otherIt.Object()
38 }
39 new.Variables[i.IteratorName] = i.Object()
40 }
41
42 return new
43}
44
45func (i *iteration) MakeChild(iteratorName string, key, value cty.Value) *iteration {
46 if i == nil {
47 // Create entirely new root iteration, then
48 return &iteration{
49 IteratorName: iteratorName,
50 Key: key,
51 Value: value,
52 }
53 }
54
55 inherited := map[string]*iteration{}
56 for name, otherIt := range i.Inherited {
57 inherited[name] = otherIt
58 }
59 inherited[i.IteratorName] = i
60 return &iteration{
61 IteratorName: iteratorName,
62 Key: key,
63 Value: value,
64 Inherited: inherited,
65 }
66}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go
new file mode 100644
index 0000000..b7e8ca9
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/public.go
@@ -0,0 +1,44 @@
1package dynblock
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// Expand "dynamic" blocks in the given body, returning a new body that
8// has those blocks expanded.
9//
10// The given EvalContext is used when evaluating "for_each" and "labels"
11// attributes within dynamic blocks, allowing those expressions access to
12// variables and functions beyond the iterator variable created by the
13// iteration.
14//
15// Expand returns no diagnostics because no blocks are actually expanded
16// until a call to Content or PartialContent on the returned body, which
17// will then expand only the blocks selected by the schema.
18//
19// "dynamic" blocks are also expanded automatically within nested blocks
20// in the given body, including within other dynamic blocks, thus allowing
21// multi-dimensional iteration. However, it is not possible to
22// dynamically-generate the "dynamic" blocks themselves except through nesting.
23//
24// parent {
25// dynamic "child" {
26// for_each = child_objs
27// content {
28// dynamic "grandchild" {
29// for_each = child.value.children
30// labels = [grandchild.key]
31// content {
32// parent_key = child.key
33// value = grandchild.value
34// }
35// }
36// }
37// }
38// }
39func Expand(body hcl.Body, ctx *hcl.EvalContext) hcl.Body {
40 return &expandBody{
41 original: body,
42 forEachCtx: ctx,
43 }
44}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go
new file mode 100644
index 0000000..dc8ed5a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/schema.go
@@ -0,0 +1,50 @@
1package dynblock
2
3import "github.com/hashicorp/hcl2/hcl"
4
5var dynamicBlockHeaderSchema = hcl.BlockHeaderSchema{
6 Type: "dynamic",
7 LabelNames: []string{"type"},
8}
9
10var dynamicBlockBodySchemaLabels = &hcl.BodySchema{
11 Attributes: []hcl.AttributeSchema{
12 {
13 Name: "for_each",
14 Required: true,
15 },
16 {
17 Name: "iterator",
18 Required: false,
19 },
20 {
21 Name: "labels",
22 Required: true,
23 },
24 },
25 Blocks: []hcl.BlockHeaderSchema{
26 {
27 Type: "content",
28 LabelNames: nil,
29 },
30 },
31}
32
33var dynamicBlockBodySchemaNoLabels = &hcl.BodySchema{
34 Attributes: []hcl.AttributeSchema{
35 {
36 Name: "for_each",
37 Required: true,
38 },
39 {
40 Name: "iterator",
41 Required: false,
42 },
43 },
44 Blocks: []hcl.BlockHeaderSchema{
45 {
46 Type: "content",
47 LabelNames: nil,
48 },
49 },
50}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go
new file mode 100644
index 0000000..932f6a3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/unknown_body.go
@@ -0,0 +1,84 @@
1package dynblock
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// unknownBody is a funny body that just reports everything inside it as
9// unknown. It uses a given other body as a sort of template for what attributes
10// and blocks are inside -- including source location information -- but
11// subsitutes unknown values of unknown type for all attributes.
12//
13// This rather odd process is used to handle expansion of dynamic blocks whose
14// for_each expression is unknown. Since a block cannot itself be unknown,
15// we instead arrange for everything _inside_ the block to be unknown instead,
16// to give the best possible approximation.
17type unknownBody struct {
18 template hcl.Body
19}
20
21var _ hcl.Body = unknownBody{}
22
23func (b unknownBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
24 content, diags := b.template.Content(schema)
25 content = b.fixupContent(content)
26
27 // We're intentionally preserving the diagnostics reported from the
28 // inner body so that we can still report where the template body doesn't
29 // match the requested schema.
30 return content, diags
31}
32
33func (b unknownBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
34 content, remain, diags := b.template.PartialContent(schema)
35 content = b.fixupContent(content)
36 remain = unknownBody{remain} // remaining content must also be wrapped
37
38 // We're intentionally preserving the diagnostics reported from the
39 // inner body so that we can still report where the template body doesn't
40 // match the requested schema.
41 return content, remain, diags
42}
43
44func (b unknownBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
45 attrs, diags := b.template.JustAttributes()
46 attrs = b.fixupAttrs(attrs)
47
48 // We're intentionally preserving the diagnostics reported from the
49 // inner body so that we can still report where the template body doesn't
50 // match the requested schema.
51 return attrs, diags
52}
53
54func (b unknownBody) MissingItemRange() hcl.Range {
55 return b.template.MissingItemRange()
56}
57
58func (b unknownBody) fixupContent(got *hcl.BodyContent) *hcl.BodyContent {
59 ret := &hcl.BodyContent{}
60 ret.Attributes = b.fixupAttrs(got.Attributes)
61 if len(got.Blocks) > 0 {
62 ret.Blocks = make(hcl.Blocks, 0, len(got.Blocks))
63 for _, gotBlock := range got.Blocks {
64 new := *gotBlock // shallow copy
65 new.Body = unknownBody{gotBlock.Body} // nested content must also be marked unknown
66 ret.Blocks = append(ret.Blocks, &new)
67 }
68 }
69
70 return ret
71}
72
73func (b unknownBody) fixupAttrs(got hcl.Attributes) hcl.Attributes {
74 if len(got) == 0 {
75 return nil
76 }
77 ret := make(hcl.Attributes, len(got))
78 for name, gotAttr := range got {
79 new := *gotAttr // shallow copy
80 new.Expr = hcl.StaticExpr(cty.DynamicVal, gotAttr.Expr.Range())
81 ret[name] = &new
82 }
83 return ret
84}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go
new file mode 100644
index 0000000..ad838f3
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables.go
@@ -0,0 +1,209 @@
1package dynblock
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// WalkVariables begins the recursive process of walking all expressions and
9// nested blocks in the given body and its child bodies while taking into
10// account any "dynamic" blocks.
11//
12// This function requires that the caller walk through the nested block
13// structure in the given body level-by-level so that an appropriate schema
14// can be provided at each level to inform further processing. This workflow
15// is thus easiest to use for calling applications that have some higher-level
16// schema representation available with which to drive this multi-step
17// process. If your application uses the hcldec package, you may be able to
18// use VariablesHCLDec instead for a more automatic approach.
19func WalkVariables(body hcl.Body) WalkVariablesNode {
20 return WalkVariablesNode{
21 body: body,
22 includeContent: true,
23 }
24}
25
26// WalkExpandVariables is like Variables but it includes only the variables
27// required for successful block expansion, ignoring any variables referenced
28// inside block contents. The result is the minimal set of all variables
29// required for a call to Expand, excluding variables that would only be
30// needed to subsequently call Content or PartialContent on the expanded
31// body.
32func WalkExpandVariables(body hcl.Body) WalkVariablesNode {
33 return WalkVariablesNode{
34 body: body,
35 }
36}
37
38type WalkVariablesNode struct {
39 body hcl.Body
40 it *iteration
41
42 includeContent bool
43}
44
45type WalkVariablesChild struct {
46 BlockTypeName string
47 Node WalkVariablesNode
48}
49
50// Body returns the HCL Body associated with the child node, in case the caller
51// wants to do some sort of inspection of it in order to decide what schema
52// to pass to Visit.
53//
54// Most implementations should just fetch a fixed schema based on the
55// BlockTypeName field and not access this. Deciding on a schema dynamically
56// based on the body is a strange thing to do and generally necessary only if
57// your caller is already doing other bizarre things with HCL bodies.
58func (c WalkVariablesChild) Body() hcl.Body {
59 return c.Node.body
60}
61
62// Visit returns the variable traversals required for any "dynamic" blocks
63// directly in the body associated with this node, and also returns any child
64// nodes that must be visited in order to continue the walk.
65//
66// Each child node has its associated block type name given in its BlockTypeName
67// field, which the calling application should use to determine the appropriate
68// schema for the content of each child node and pass it to the child node's
69// own Visit method to continue the walk recursively.
70func (n WalkVariablesNode) Visit(schema *hcl.BodySchema) (vars []hcl.Traversal, children []WalkVariablesChild) {
71 extSchema := n.extendSchema(schema)
72 container, _, _ := n.body.PartialContent(extSchema)
73 if container == nil {
74 return vars, children
75 }
76
77 children = make([]WalkVariablesChild, 0, len(container.Blocks))
78
79 if n.includeContent {
80 for _, attr := range container.Attributes {
81 for _, traversal := range attr.Expr.Variables() {
82 var ours, inherited bool
83 if n.it != nil {
84 ours = traversal.RootName() == n.it.IteratorName
85 _, inherited = n.it.Inherited[traversal.RootName()]
86 }
87
88 if !(ours || inherited) {
89 vars = append(vars, traversal)
90 }
91 }
92 }
93 }
94
95 for _, block := range container.Blocks {
96 switch block.Type {
97
98 case "dynamic":
99 blockTypeName := block.Labels[0]
100 inner, _, _ := block.Body.PartialContent(variableDetectionInnerSchema)
101 if inner == nil {
102 continue
103 }
104
105 iteratorName := blockTypeName
106 if attr, exists := inner.Attributes["iterator"]; exists {
107 iterTraversal, _ := hcl.AbsTraversalForExpr(attr.Expr)
108 if len(iterTraversal) == 0 {
109 // Ignore this invalid dynamic block, since it'll produce
110 // an error if someone tries to extract content from it
111 // later anyway.
112 continue
113 }
114 iteratorName = iterTraversal.RootName()
115 }
116 blockIt := n.it.MakeChild(iteratorName, cty.DynamicVal, cty.DynamicVal)
117
118 if attr, exists := inner.Attributes["for_each"]; exists {
119 // Filter out iterator names inherited from parent blocks
120 for _, traversal := range attr.Expr.Variables() {
121 if _, inherited := blockIt.Inherited[traversal.RootName()]; !inherited {
122 vars = append(vars, traversal)
123 }
124 }
125 }
126 if attr, exists := inner.Attributes["labels"]; exists {
127 // Filter out both our own iterator name _and_ those inherited
128 // from parent blocks, since we provide _both_ of these to the
129 // label expressions.
130 for _, traversal := range attr.Expr.Variables() {
131 ours := traversal.RootName() == iteratorName
132 _, inherited := blockIt.Inherited[traversal.RootName()]
133
134 if !(ours || inherited) {
135 vars = append(vars, traversal)
136 }
137 }
138 }
139
140 for _, contentBlock := range inner.Blocks {
141 // We only request "content" blocks in our schema, so we know
142 // any blocks we find here will be content blocks. We require
143 // exactly one content block for actual expansion, but we'll
144 // be more liberal here so that callers can still collect
145 // variables from erroneous "dynamic" blocks.
146 children = append(children, WalkVariablesChild{
147 BlockTypeName: blockTypeName,
148 Node: WalkVariablesNode{
149 body: contentBlock.Body,
150 it: blockIt,
151 includeContent: n.includeContent,
152 },
153 })
154 }
155
156 default:
157 children = append(children, WalkVariablesChild{
158 BlockTypeName: block.Type,
159 Node: WalkVariablesNode{
160 body: block.Body,
161 it: n.it,
162 includeContent: n.includeContent,
163 },
164 })
165
166 }
167 }
168
169 return vars, children
170}
171
172func (n WalkVariablesNode) extendSchema(schema *hcl.BodySchema) *hcl.BodySchema {
173 // We augment the requested schema to also include our special "dynamic"
174 // block type, since then we'll get instances of it interleaved with
175 // all of the literal child blocks we must also include.
176 extSchema := &hcl.BodySchema{
177 Attributes: schema.Attributes,
178 Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1),
179 }
180 copy(extSchema.Blocks, schema.Blocks)
181 extSchema.Blocks = append(extSchema.Blocks, dynamicBlockHeaderSchema)
182
183 return extSchema
184}
185
186// This is a more relaxed schema than what's in schema.go, since we
187// want to maximize the amount of variables we can find even if there
188// are erroneous blocks.
189var variableDetectionInnerSchema = &hcl.BodySchema{
190 Attributes: []hcl.AttributeSchema{
191 {
192 Name: "for_each",
193 Required: false,
194 },
195 {
196 Name: "labels",
197 Required: false,
198 },
199 {
200 Name: "iterator",
201 Required: false,
202 },
203 },
204 Blocks: []hcl.BlockHeaderSchema{
205 {
206 Type: "content",
207 },
208 },
209}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go
new file mode 100644
index 0000000..a078d91
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/dynblock/variables_hcldec.go
@@ -0,0 +1,43 @@
1package dynblock
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcldec"
6)
7
8// VariablesHCLDec is a wrapper around WalkVariables that uses the given hcldec
9// specification to automatically drive the recursive walk through nested
10// blocks in the given body.
11//
12// This is a drop-in replacement for hcldec.Variables which is able to treat
13// blocks of type "dynamic" in the same special way that dynblock.Expand would,
14// exposing both the variables referenced in the "for_each" and "labels"
15// arguments and variables used in the nested "content" block.
16func VariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal {
17 rootNode := WalkVariables(body)
18 return walkVariablesWithHCLDec(rootNode, spec)
19}
20
21// ExpandVariablesHCLDec is like VariablesHCLDec but it includes only the
22// minimal set of variables required to call Expand, ignoring variables that
23// are referenced only inside normal block contents. See WalkExpandVariables
24// for more information.
25func ExpandVariablesHCLDec(body hcl.Body, spec hcldec.Spec) []hcl.Traversal {
26 rootNode := WalkExpandVariables(body)
27 return walkVariablesWithHCLDec(rootNode, spec)
28}
29
30func walkVariablesWithHCLDec(node WalkVariablesNode, spec hcldec.Spec) []hcl.Traversal {
31 vars, children := node.Visit(hcldec.ImpliedSchema(spec))
32
33 if len(children) > 0 {
34 childSpecs := hcldec.ChildBlockTypes(spec)
35 for _, child := range children {
36 if childSpec, exists := childSpecs[child.BlockTypeName]; exists {
37 vars = append(vars, walkVariablesWithHCLDec(child.Node, childSpec)...)
38 }
39 }
40 }
41
42 return vars
43}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md
new file mode 100644
index 0000000..ff2b3f2
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/README.md
@@ -0,0 +1,67 @@
1# HCL Type Expressions Extension
2
3This HCL extension defines a convention for describing HCL types using function
4call and variable reference syntax, allowing configuration formats to include
5type information provided by users.
6
7The type syntax is processed statically from a hcl.Expression, so it cannot
8use any of the usual language operators. This is similar to type expressions
9in statically-typed programming languages.
10
11```hcl
12variable "example" {
13 type = list(string)
14}
15```
16
17The extension is built using the `hcl.ExprAsKeyword` and `hcl.ExprCall`
18functions, and so it relies on the underlying syntax to define how "keyword"
19and "call" are interpreted. The above shows how they are interpreted in
20the HCL native syntax, while the following shows the same information
21expressed in JSON:
22
23```json
24{
25 "variable": {
26 "example": {
27 "type": "list(string)"
28 }
29 }
30}
31```
32
33Notice that since we have additional contextual information that we intend
34to allow only calls and keywords the JSON syntax is able to parse the given
35string directly as an expression, rather than as a template as would be
36the case for normal expression evaluation.
37
38For more information, see [the godoc reference](http://godoc.org/github.com/hashicorp/hcl2/ext/typeexpr).
39
40## Type Expression Syntax
41
42When expressed in the native syntax, the following expressions are permitted
43in a type expression:
44
45* `string` - string
46* `bool` - boolean
47* `number` - number
48* `any` - `cty.DynamicPseudoType` (in function `TypeConstraint` only)
49* `list(<type_expr>)` - list of the type given as an argument
50* `set(<type_expr>)` - set of the type given as an argument
51* `map(<type_expr>)` - map of the type given as an argument
52* `tuple([<type_exprs...>])` - tuple with the element types given in the single list argument
53* `object({<attr_name>=<type_expr>, ...}` - object with the attributes and corresponding types given in the single map argument
54
55For example:
56
57* `list(string)`
58* `object({name=string,age=number})`
59* `map(object({name=string,age=number}))`
60
61Note that the object constructor syntax is not fully-general for all possible
62object types because it requires the attribute names to be valid identifiers.
63In practice it is expected that any time an object type is being fixed for
64type checking it will be one that has identifiers as its attributes; object
65types with weird attributes generally show up only from arbitrary object
66constructors in configuration files, which are usually treated either as maps
67or as the dynamic pseudo-type.
diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go
new file mode 100644
index 0000000..c4b3795
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/doc.go
@@ -0,0 +1,11 @@
1// Package typeexpr extends HCL with a convention for describing HCL types
2// within configuration files.
3//
4// The type syntax is processed statically from a hcl.Expression, so it cannot
5// use any of the usual language operators. This is similar to type expressions
6// in statically-typed programming languages.
7//
8// variable "example" {
9// type = list(string)
10// }
11package typeexpr
diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go
new file mode 100644
index 0000000..a84338a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/get_type.go
@@ -0,0 +1,196 @@
1package typeexpr
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/zclconf/go-cty/cty"
8)
9
10const invalidTypeSummary = "Invalid type specification"
11
12// getType is the internal implementation of both Type and TypeConstraint,
13// using the passed flag to distinguish. When constraint is false, the "any"
14// keyword will produce an error.
15func getType(expr hcl.Expression, constraint bool) (cty.Type, hcl.Diagnostics) {
16 // First we'll try for one of our keywords
17 kw := hcl.ExprAsKeyword(expr)
18 switch kw {
19 case "bool":
20 return cty.Bool, nil
21 case "string":
22 return cty.String, nil
23 case "number":
24 return cty.Number, nil
25 case "any":
26 if constraint {
27 return cty.DynamicPseudoType, nil
28 }
29 return cty.DynamicPseudoType, hcl.Diagnostics{{
30 Severity: hcl.DiagError,
31 Summary: invalidTypeSummary,
32 Detail: fmt.Sprintf("The keyword %q cannot be used in this type specification: an exact type is required.", kw),
33 Subject: expr.Range().Ptr(),
34 }}
35 case "list", "map", "set":
36 return cty.DynamicPseudoType, hcl.Diagnostics{{
37 Severity: hcl.DiagError,
38 Summary: invalidTypeSummary,
39 Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", kw),
40 Subject: expr.Range().Ptr(),
41 }}
42 case "object":
43 return cty.DynamicPseudoType, hcl.Diagnostics{{
44 Severity: hcl.DiagError,
45 Summary: invalidTypeSummary,
46 Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.",
47 Subject: expr.Range().Ptr(),
48 }}
49 case "tuple":
50 return cty.DynamicPseudoType, hcl.Diagnostics{{
51 Severity: hcl.DiagError,
52 Summary: invalidTypeSummary,
53 Detail: "The tuple type constructor requires one argument specifying the element types as a list.",
54 Subject: expr.Range().Ptr(),
55 }}
56 case "":
57 // okay! we'll fall through and try processing as a call, then.
58 default:
59 return cty.DynamicPseudoType, hcl.Diagnostics{{
60 Severity: hcl.DiagError,
61 Summary: invalidTypeSummary,
62 Detail: fmt.Sprintf("The keyword %q is not a valid type specification.", kw),
63 Subject: expr.Range().Ptr(),
64 }}
65 }
66
67 // If we get down here then our expression isn't just a keyword, so we'll
68 // try to process it as a call instead.
69 call, diags := hcl.ExprCall(expr)
70 if diags.HasErrors() {
71 return cty.DynamicPseudoType, hcl.Diagnostics{{
72 Severity: hcl.DiagError,
73 Summary: invalidTypeSummary,
74 Detail: "A type specification is either a primitive type keyword (bool, number, string) or a complex type constructor call, like list(string).",
75 Subject: expr.Range().Ptr(),
76 }}
77 }
78
79 switch call.Name {
80 case "bool", "string", "number", "any":
81 return cty.DynamicPseudoType, hcl.Diagnostics{{
82 Severity: hcl.DiagError,
83 Summary: invalidTypeSummary,
84 Detail: fmt.Sprintf("Primitive type keyword %q does not expect arguments.", call.Name),
85 Subject: &call.ArgsRange,
86 }}
87 }
88
89 if len(call.Arguments) != 1 {
90 contextRange := call.ArgsRange
91 subjectRange := call.ArgsRange
92 if len(call.Arguments) > 1 {
93 // If we have too many arguments (as opposed to too _few_) then
94 // we'll highlight the extraneous arguments as the diagnostic
95 // subject.
96 subjectRange = hcl.RangeBetween(call.Arguments[1].Range(), call.Arguments[len(call.Arguments)-1].Range())
97 }
98
99 switch call.Name {
100 case "list", "set", "map":
101 return cty.DynamicPseudoType, hcl.Diagnostics{{
102 Severity: hcl.DiagError,
103 Summary: invalidTypeSummary,
104 Detail: fmt.Sprintf("The %s type constructor requires one argument specifying the element type.", call.Name),
105 Subject: &subjectRange,
106 Context: &contextRange,
107 }}
108 case "object":
109 return cty.DynamicPseudoType, hcl.Diagnostics{{
110 Severity: hcl.DiagError,
111 Summary: invalidTypeSummary,
112 Detail: "The object type constructor requires one argument specifying the attribute types and values as a map.",
113 Subject: &subjectRange,
114 Context: &contextRange,
115 }}
116 case "tuple":
117 return cty.DynamicPseudoType, hcl.Diagnostics{{
118 Severity: hcl.DiagError,
119 Summary: invalidTypeSummary,
120 Detail: "The tuple type constructor requires one argument specifying the element types as a list.",
121 Subject: &subjectRange,
122 Context: &contextRange,
123 }}
124 }
125 }
126
127 switch call.Name {
128
129 case "list":
130 ety, diags := getType(call.Arguments[0], constraint)
131 return cty.List(ety), diags
132 case "set":
133 ety, diags := getType(call.Arguments[0], constraint)
134 return cty.Set(ety), diags
135 case "map":
136 ety, diags := getType(call.Arguments[0], constraint)
137 return cty.Map(ety), diags
138 case "object":
139 attrDefs, diags := hcl.ExprMap(call.Arguments[0])
140 if diags.HasErrors() {
141 return cty.DynamicPseudoType, hcl.Diagnostics{{
142 Severity: hcl.DiagError,
143 Summary: invalidTypeSummary,
144 Detail: "Object type constructor requires a map whose keys are attribute names and whose values are the corresponding attribute types.",
145 Subject: call.Arguments[0].Range().Ptr(),
146 Context: expr.Range().Ptr(),
147 }}
148 }
149
150 atys := make(map[string]cty.Type)
151 for _, attrDef := range attrDefs {
152 attrName := hcl.ExprAsKeyword(attrDef.Key)
153 if attrName == "" {
154 diags = append(diags, &hcl.Diagnostic{
155 Severity: hcl.DiagError,
156 Summary: invalidTypeSummary,
157 Detail: "Object constructor map keys must be attribute names.",
158 Subject: attrDef.Key.Range().Ptr(),
159 Context: expr.Range().Ptr(),
160 })
161 continue
162 }
163 aty, attrDiags := getType(attrDef.Value, constraint)
164 diags = append(diags, attrDiags...)
165 atys[attrName] = aty
166 }
167 return cty.Object(atys), diags
168 case "tuple":
169 elemDefs, diags := hcl.ExprList(call.Arguments[0])
170 if diags.HasErrors() {
171 return cty.DynamicPseudoType, hcl.Diagnostics{{
172 Severity: hcl.DiagError,
173 Summary: invalidTypeSummary,
174 Detail: "Tuple type constructor requires a list of element types.",
175 Subject: call.Arguments[0].Range().Ptr(),
176 Context: expr.Range().Ptr(),
177 }}
178 }
179 etys := make([]cty.Type, len(elemDefs))
180 for i, defExpr := range elemDefs {
181 ety, elemDiags := getType(defExpr, constraint)
182 diags = append(diags, elemDiags...)
183 etys[i] = ety
184 }
185 return cty.Tuple(etys), diags
186 default:
187 // Can't access call.Arguments in this path because we've not validated
188 // that it contains exactly one expression here.
189 return cty.DynamicPseudoType, hcl.Diagnostics{{
190 Severity: hcl.DiagError,
191 Summary: invalidTypeSummary,
192 Detail: fmt.Sprintf("Keyword %q is not a valid type constructor.", call.Name),
193 Subject: expr.Range().Ptr(),
194 }}
195 }
196}
diff --git a/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go
new file mode 100644
index 0000000..e3f5eef
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/ext/typeexpr/public.go
@@ -0,0 +1,129 @@
1package typeexpr
2
3import (
4 "bytes"
5 "fmt"
6 "sort"
7
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9
10 "github.com/hashicorp/hcl2/hcl"
11 "github.com/zclconf/go-cty/cty"
12)
13
14// Type attempts to process the given expression as a type expression and, if
15// successful, returns the resulting type. If unsuccessful, error diagnostics
16// are returned.
17func Type(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
18 return getType(expr, false)
19}
20
21// TypeConstraint attempts to parse the given expression as a type constraint
22// and, if successful, returns the resulting type. If unsuccessful, error
23// diagnostics are returned.
24//
25// A type constraint has the same structure as a type, but it additionally
26// allows the keyword "any" to represent cty.DynamicPseudoType, which is often
27// used as a wildcard in type checking and type conversion operations.
28func TypeConstraint(expr hcl.Expression) (cty.Type, hcl.Diagnostics) {
29 return getType(expr, true)
30}
31
32// TypeString returns a string rendering of the given type as it would be
33// expected to appear in the HCL native syntax.
34//
35// This is primarily intended for showing types to the user in an application
36// that uses typexpr, where the user can be assumed to be familiar with the
37// type expression syntax. In applications that do not use typeexpr these
38// results may be confusing to the user and so type.FriendlyName may be
39// preferable, even though it's less precise.
40//
41// TypeString produces reasonable results only for types like what would be
42// produced by the Type and TypeConstraint functions. In particular, it cannot
43// support capsule types.
44func TypeString(ty cty.Type) string {
45 // Easy cases first
46 switch ty {
47 case cty.String:
48 return "string"
49 case cty.Bool:
50 return "bool"
51 case cty.Number:
52 return "number"
53 case cty.DynamicPseudoType:
54 return "any"
55 }
56
57 if ty.IsCapsuleType() {
58 panic("TypeString does not support capsule types")
59 }
60
61 if ty.IsCollectionType() {
62 ety := ty.ElementType()
63 etyString := TypeString(ety)
64 switch {
65 case ty.IsListType():
66 return fmt.Sprintf("list(%s)", etyString)
67 case ty.IsSetType():
68 return fmt.Sprintf("set(%s)", etyString)
69 case ty.IsMapType():
70 return fmt.Sprintf("map(%s)", etyString)
71 default:
72 // Should never happen because the above is exhaustive
73 panic("unsupported collection type")
74 }
75 }
76
77 if ty.IsObjectType() {
78 var buf bytes.Buffer
79 buf.WriteString("object({")
80 atys := ty.AttributeTypes()
81 names := make([]string, 0, len(atys))
82 for name := range atys {
83 names = append(names, name)
84 }
85 sort.Strings(names)
86 first := true
87 for _, name := range names {
88 aty := atys[name]
89 if !first {
90 buf.WriteByte(',')
91 }
92 if !hclsyntax.ValidIdentifier(name) {
93 // Should never happen for any type produced by this package,
94 // but we'll do something reasonable here just so we don't
95 // produce garbage if someone gives us a hand-assembled object
96 // type that has weird attribute names.
97 // Using Go-style quoting here isn't perfect, since it doesn't
98 // exactly match HCL syntax, but it's fine for an edge-case.
99 buf.WriteString(fmt.Sprintf("%q", name))
100 } else {
101 buf.WriteString(name)
102 }
103 buf.WriteByte('=')
104 buf.WriteString(TypeString(aty))
105 first = false
106 }
107 buf.WriteString("})")
108 return buf.String()
109 }
110
111 if ty.IsTupleType() {
112 var buf bytes.Buffer
113 buf.WriteString("tuple([")
114 etys := ty.TupleElementTypes()
115 first := true
116 for _, ety := range etys {
117 if !first {
118 buf.WriteByte(',')
119 }
120 buf.WriteString(TypeString(ety))
121 first = false
122 }
123 buf.WriteString("])")
124 return buf.String()
125 }
126
127 // Should never happen because we covered all cases above.
128 panic(fmt.Errorf("unsupported type %#v", ty))
129}
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/doc.go b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
index 8500214..aa3c6ea 100644
--- a/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/doc.go
@@ -40,6 +40,10 @@
40// present then any attributes or blocks not matched by another valid tag 40// present then any attributes or blocks not matched by another valid tag
41// will cause an error diagnostic. 41// will cause an error diagnostic.
42// 42//
43// Only a subset of this tagging/typing vocabulary is supported for the
44// "Encode" family of functions. See the EncodeIntoBody docs for full details
45// on the constraints there.
46//
43// Broadly-speaking this package deals with two types of error. The first is 47// Broadly-speaking this package deals with two types of error. The first is
44// errors in the configuration itself, which are returned as diagnostics 48// errors in the configuration itself, which are returned as diagnostics
45// written with the configuration author as the target audience. The second 49// written with the configuration author as the target audience. The second
diff --git a/vendor/github.com/hashicorp/hcl2/gohcl/encode.go b/vendor/github.com/hashicorp/hcl2/gohcl/encode.go
new file mode 100644
index 0000000..3cbf7e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/gohcl/encode.go
@@ -0,0 +1,191 @@
1package gohcl
2
3import (
4 "fmt"
5 "reflect"
6 "sort"
7
8 "github.com/hashicorp/hcl2/hclwrite"
9 "github.com/zclconf/go-cty/cty/gocty"
10)
11
12// EncodeIntoBody replaces the contents of the given hclwrite Body with
13// attributes and blocks derived from the given value, which must be a
14// struct value or a pointer to a struct value with the struct tags defined
15// in this package.
16//
17// This function can work only with fully-decoded data. It will ignore any
18// fields tagged as "remain", any fields that decode attributes into either
19// hcl.Attribute or hcl.Expression values, and any fields that decode blocks
20// into hcl.Attributes values. This function does not have enough information
21// to complete the decoding of these types.
22//
23// Any fields tagged as "label" are ignored by this function. Use EncodeAsBlock
24// to produce a whole hclwrite.Block including block labels.
25//
26// As long as a suitable value is given to encode and the destination body
27// is non-nil, this function will always complete. It will panic in case of
28// any errors in the calling program, such as passing an inappropriate type
29// or a nil body.
30//
31// The layout of the resulting HCL source is derived from the ordering of
32// the struct fields, with blank lines around nested blocks of different types.
33// Fields representing attributes should usually precede those representing
34// blocks so that the attributes can group togather in the result. For more
35// control, use the hclwrite API directly.
36func EncodeIntoBody(val interface{}, dst *hclwrite.Body) {
37 rv := reflect.ValueOf(val)
38 ty := rv.Type()
39 if ty.Kind() == reflect.Ptr {
40 rv = rv.Elem()
41 ty = rv.Type()
42 }
43 if ty.Kind() != reflect.Struct {
44 panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
45 }
46
47 tags := getFieldTags(ty)
48 populateBody(rv, ty, tags, dst)
49}
50
51// EncodeAsBlock creates a new hclwrite.Block populated with the data from
52// the given value, which must be a struct or pointer to struct with the
53// struct tags defined in this package.
54//
55// If the given struct type has fields tagged with "label" tags then they
56// will be used in order to annotate the created block with labels.
57//
58// This function has the same constraints as EncodeIntoBody and will panic
59// if they are violated.
60func EncodeAsBlock(val interface{}, blockType string) *hclwrite.Block {
61 rv := reflect.ValueOf(val)
62 ty := rv.Type()
63 if ty.Kind() == reflect.Ptr {
64 rv = rv.Elem()
65 ty = rv.Type()
66 }
67 if ty.Kind() != reflect.Struct {
68 panic(fmt.Sprintf("value is %s, not struct", ty.Kind()))
69 }
70
71 tags := getFieldTags(ty)
72 labels := make([]string, len(tags.Labels))
73 for i, lf := range tags.Labels {
74 lv := rv.Field(lf.FieldIndex)
75 // We just stringify whatever we find. It should always be a string
76 // but if not then we'll still do something reasonable.
77 labels[i] = fmt.Sprintf("%s", lv.Interface())
78 }
79
80 block := hclwrite.NewBlock(blockType, labels)
81 populateBody(rv, ty, tags, block.Body())
82 return block
83}
84
// populateBody writes the attributes and nested blocks implied by the
// struct value rv (of type ty, with tags pre-extracted into tags) into dst,
// after clearing dst's existing content.
//
// Attributes and blocks are emitted in struct-field declaration order
// (stable-sorted by field index below). The prevWasBlock flag tracks
// whether the previous emission was a block so that a blank line can be
// inserted between a run of blocks and a following attribute, and before
// each new run of blocks.
func populateBody(rv reflect.Value, ty reflect.Type, tags *fieldTags, dst *hclwrite.Body) {
	// Merge attribute and block names into one ordering keyed by the
	// struct field index each name came from.
	nameIdxs := make(map[string]int, len(tags.Attributes)+len(tags.Blocks))
	namesOrder := make([]string, 0, len(tags.Attributes)+len(tags.Blocks))
	for n, i := range tags.Attributes {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	for n, i := range tags.Blocks {
		nameIdxs[n] = i
		namesOrder = append(namesOrder, n)
	}
	// Stable sort by field index so output follows struct declaration order.
	sort.SliceStable(namesOrder, func(i, j int) bool {
		ni, nj := namesOrder[i], namesOrder[j]
		return nameIdxs[ni] < nameIdxs[nj]
	})

	// This function *replaces* the body contents, per EncodeIntoBody's doc.
	dst.Clear()

	prevWasBlock := false
	for _, name := range namesOrder {
		fieldIdx := nameIdxs[name]
		field := ty.Field(fieldIdx)
		fieldTy := field.Type
		fieldVal := rv.Field(fieldIdx)

		// Dereference pointer fields; a nil pointer yields an invalid
		// fieldVal, which the checks below treat as "omit this field".
		if fieldTy.Kind() == reflect.Ptr {
			fieldTy = fieldTy.Elem()
			fieldVal = fieldVal.Elem()
		}

		if _, isAttr := tags.Attributes[name]; isAttr {

			// exprType/attrType are package-level reflect.Type vars
			// (declared elsewhere in this package) identifying fields that
			// hold undecoded hcl.Expression/hcl.Attribute values; those
			// can't be re-encoded, so skip them.
			if exprType.AssignableTo(fieldTy) || attrType.AssignableTo(fieldTy) {
				continue // ignore undecoded fields
			}
			if !fieldVal.IsValid() {
				continue // ignore (field value is nil pointer)
			}
			if fieldTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
				continue // ignore
			}
			// Separate an attribute from a preceding run of blocks with a
			// blank line.
			if prevWasBlock {
				dst.AppendNewline()
				prevWasBlock = false
			}

			valTy, err := gocty.ImpliedType(fieldVal.Interface())
			if err != nil {
				panic(fmt.Sprintf("cannot encode %T as HCL expression: %s", fieldVal.Interface(), err))
			}

			val, err := gocty.ToCtyValue(fieldVal.Interface(), valTy)
			if err != nil {
				// This should never happen, since we should always be able
				// to decode into the implied type.
				panic(fmt.Sprintf("failed to encode %T as %#v: %s", fieldVal.Interface(), valTy, err))
			}

			dst.SetAttributeValue(name, val)

		} else { // must be a block, then
			elemTy := fieldTy
			isSeq := false
			if elemTy.Kind() == reflect.Slice || elemTy.Kind() == reflect.Array {
				isSeq = true
				elemTy = elemTy.Elem()
			}

			// bodyType/attrsType are package-level reflect.Type vars
			// (declared elsewhere) marking undecodable block field types.
			if bodyType.AssignableTo(elemTy) || attrsType.AssignableTo(elemTy) {
				continue // ignore undecoded fields
			}
			prevWasBlock = false

			if isSeq {
				// One nested block per slice/array element, skipping nil
				// pointer elements. The blank line is emitted lazily so an
				// all-nil sequence produces no output at all.
				l := fieldVal.Len()
				for i := 0; i < l; i++ {
					elemVal := fieldVal.Index(i)
					if !elemVal.IsValid() {
						continue // ignore (elem value is nil pointer)
					}
					if elemTy.Kind() == reflect.Ptr && elemVal.IsNil() {
						continue // ignore
					}
					block := EncodeAsBlock(elemVal.Interface(), name)
					if !prevWasBlock {
						dst.AppendNewline()
						prevWasBlock = true
					}
					dst.AppendBlock(block)
				}
			} else {
				if !fieldVal.IsValid() {
					continue // ignore (field value is nil pointer)
				}
				if elemTy.Kind() == reflect.Ptr && fieldVal.IsNil() {
					continue // ignore
				}
				block := EncodeAsBlock(fieldVal.Interface(), name)
				if !prevWasBlock {
					dst.AppendNewline()
					prevWasBlock = true
				}
				dst.AppendBlock(block)
			}
		}
	}
}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go
index 6ecf744..c320961 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic.go
@@ -26,14 +26,43 @@ const (
26type Diagnostic struct { 26type Diagnostic struct {
27 Severity DiagnosticSeverity 27 Severity DiagnosticSeverity
28 28
29 // Summary and detail contain the English-language description of the 29 // Summary and Detail contain the English-language description of the
30 // problem. Summary is a terse description of the general problem and 30 // problem. Summary is a terse description of the general problem and
31 // detail is a more elaborate, often-multi-sentence description of 31 // detail is a more elaborate, often-multi-sentence description of
32 // the probem and what might be done to solve it. 32 // the probem and what might be done to solve it.
33 Summary string 33 Summary string
34 Detail string 34 Detail string
35
36 // Subject and Context are both source ranges relating to the diagnostic.
37 //
38 // Subject is a tight range referring to exactly the construct that
39 // is problematic, while Context is an optional broader range (which should
40 // fully contain Subject) that ought to be shown around Subject when
41 // generating isolated source-code snippets in diagnostic messages.
42 // If Context is nil, the Subject is also the Context.
43 //
44 // Some diagnostics have no source ranges at all. If Context is set then
45 // Subject should always also be set.
35 Subject *Range 46 Subject *Range
36 Context *Range 47 Context *Range
48
49 // For diagnostics that occur when evaluating an expression, Expression
50 // may refer to that expression and EvalContext may point to the
51 // EvalContext that was active when evaluating it. This may allow for the
52 // inclusion of additional useful information when rendering a diagnostic
53 // message to the user.
54 //
55 // It is not always possible to select a single EvalContext for a
56 // diagnostic, and so in some cases this field may be nil even when an
57 // expression causes a problem.
58 //
59 // EvalContexts form a tree, so the given EvalContext may refer to a parent
60 // which in turn refers to another parent, etc. For a full picture of all
61 // of the active variables and functions the caller must walk up this
62 // chain, preferring definitions that are "closer" to the expression in
63 // case of colliding names.
64 Expression Expression
65 EvalContext *EvalContext
37} 66}
38 67
39// Diagnostics is a list of Diagnostic instances. 68// Diagnostics is a list of Diagnostic instances.
@@ -96,6 +125,17 @@ func (d Diagnostics) HasErrors() bool {
96 return false 125 return false
97} 126}
98 127
// Errs returns the error-severity diagnostics in the receiver as a slice of
// error values, preserving their original order. Diagnostics with other
// severities are omitted, and the result is nil when there are no errors.
// (Each Diagnostic is used directly as an error value, so it presumably
// implements the error interface elsewhere in this package.)
func (d Diagnostics) Errs() []error {
	var errs []error
	for _, diag := range d {
		if diag.Severity == DiagError {
			errs = append(errs, diag)
		}
	}

	return errs
}
138
99// A DiagnosticWriter emits diagnostics somehow. 139// A DiagnosticWriter emits diagnostics somehow.
100type DiagnosticWriter interface { 140type DiagnosticWriter interface {
101 WriteDiagnostic(*Diagnostic) error 141 WriteDiagnostic(*Diagnostic) error
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go
index dfa473a..0b4a262 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go
@@ -2,11 +2,14 @@ package hcl
2 2
3import ( 3import (
4 "bufio" 4 "bufio"
5 "bytes"
5 "errors" 6 "errors"
6 "fmt" 7 "fmt"
7 "io" 8 "io"
9 "sort"
8 10
9 wordwrap "github.com/mitchellh/go-wordwrap" 11 wordwrap "github.com/mitchellh/go-wordwrap"
12 "github.com/zclconf/go-cty/cty"
10) 13)
11 14
12type diagnosticTextWriter struct { 15type diagnosticTextWriter struct {
@@ -133,6 +136,62 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
133 136
134 w.wr.Write([]byte{'\n'}) 137 w.wr.Write([]byte{'\n'})
135 } 138 }
139
140 if diag.Expression != nil && diag.EvalContext != nil {
141 // We will attempt to render the values for any variables
142 // referenced in the given expression as additional context, for
143 // situations where the same expression is evaluated multiple
144 // times in different scopes.
145 expr := diag.Expression
146 ctx := diag.EvalContext
147
148 vars := expr.Variables()
149 stmts := make([]string, 0, len(vars))
150 seen := make(map[string]struct{}, len(vars))
151 for _, traversal := range vars {
152 val, diags := traversal.TraverseAbs(ctx)
153 if diags.HasErrors() {
154 // Skip anything that generates errors, since we probably
155 // already have the same error in our diagnostics set
156 // already.
157 continue
158 }
159
160 traversalStr := w.traversalStr(traversal)
161 if _, exists := seen[traversalStr]; exists {
162 continue // don't show duplicates when the same variable is referenced multiple times
163 }
164 switch {
165 case !val.IsKnown():
166 // Can't say anything about this yet, then.
167 continue
168 case val.IsNull():
169 stmts = append(stmts, fmt.Sprintf("%s set to null", traversalStr))
170 default:
171 stmts = append(stmts, fmt.Sprintf("%s as %s", traversalStr, w.valueStr(val)))
172 }
173 seen[traversalStr] = struct{}{}
174 }
175
176 sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly?
177 last := len(stmts) - 1
178
179 for i, stmt := range stmts {
180 switch i {
181 case 0:
182 w.wr.Write([]byte{'w', 'i', 't', 'h', ' '})
183 default:
184 w.wr.Write([]byte{' ', ' ', ' ', ' ', ' '})
185 }
186 w.wr.Write([]byte(stmt))
187 switch i {
188 case last:
189 w.wr.Write([]byte{'.', '\n', '\n'})
190 default:
191 w.wr.Write([]byte{',', '\n'})
192 }
193 }
194 }
136 } 195 }
137 196
138 if diag.Detail != "" { 197 if diag.Detail != "" {
@@ -156,6 +215,90 @@ func (w *diagnosticTextWriter) WriteDiagnostics(diags Diagnostics) error {
156 return nil 215 return nil
157} 216}
158 217
// traversalStr renders a variable traversal (e.g. foo.bar[0]) as a compact
// string for inclusion in diagnostic messages.
func (w *diagnosticTextWriter) traversalStr(traversal Traversal) string {
	// This is a specialized subset of traversal rendering tailored to
	// producing helpful contextual messages in diagnostics. It is not
	// comprehensive nor intended to be used for other purposes.

	var buf bytes.Buffer
	for _, step := range traversal {
		// NOTE: step types other than the three handled below are silently
		// skipped, since the switch has no default case.
		switch tStep := step.(type) {
		case TraverseRoot:
			buf.WriteString(tStep.Name)
		case TraverseAttr:
			buf.WriteByte('.')
			buf.WriteString(tStep.Name)
		case TraverseIndex:
			buf.WriteByte('[')
			// Only primitive keys (string/number/bool) are rendered inline.
			if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
				buf.WriteString(w.valueStr(tStep.Key))
			} else {
				// We'll just use a placeholder for more complex values,
				// since otherwise our result could grow ridiculously long.
				buf.WriteString("...")
			}
			buf.WriteByte(']')
		}
	}
	return buf.String()
}
245
// valueStr renders a cty.Value as a short human-readable phrase for use in
// diagnostic messages: primitives are shown literally, while collections,
// tuples and objects are summarized by type and size rather than expanded.
func (w *diagnosticTextWriter) valueStr(val cty.Value) string {
	// This is a specialized subset of value rendering tailored to producing
	// helpful but concise messages in diagnostics. It is not comprehensive
	// nor intended to be used for other purposes.

	ty := val.Type()
	switch {
	case val.IsNull():
		return "null"
	case !val.IsKnown():
		// Should never happen here because we should filter before we get
		// in here, but we'll do something reasonable rather than panic.
		return "(not yet known)"
	case ty == cty.Bool:
		if val.True() {
			return "true"
		}
		return "false"
	case ty == cty.Number:
		// 'g' format with 10 significant digits keeps numbers compact.
		bf := val.AsBigFloat()
		return bf.Text('g', 10)
	case ty == cty.String:
		// Go string syntax is not exactly the same as HCL native string syntax,
		// but we'll accept the minor edge-cases where this is different here
		// for now, just to get something reasonable here.
		return fmt.Sprintf("%q", val.AsString())
	case ty.IsCollectionType() || ty.IsTupleType():
		// Summarize by element count rather than printing elements.
		l := val.LengthInt()
		switch l {
		case 0:
			return "empty " + ty.FriendlyName()
		case 1:
			return ty.FriendlyName() + " with 1 element"
		default:
			return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
		}
	case ty.IsObjectType():
		atys := ty.AttributeTypes()
		l := len(atys)
		switch l {
		case 0:
			return "object with no attributes"
		case 1:
			// Loop over the single-entry map purely to extract its one key.
			var name string
			for k := range atys {
				name = k
			}
			return fmt.Sprintf("object with 1 attribute %q", name)
		default:
			return fmt.Sprintf("object with %d attributes", l)
		}
	default:
		return ty.FriendlyName()
	}
}
301
159func contextString(file *File, offset int) string { 302func contextString(file *File, offset int) string {
160 type contextStringer interface { 303 type contextStringer interface {
161 ContextString(offset int) string 304 ContextString(offset int) string
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go
new file mode 100644
index 0000000..94eaf58
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/diagnostics.go
@@ -0,0 +1,23 @@
1package hclsyntax
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// setDiagEvalContext is an internal helper that will impose a particular
8// EvalContext on a set of diagnostics in-place, for any diagnostic that
9// does not already have an EvalContext set.
10//
11// We generally expect diagnostics to be immutable, but this is safe to use
12// on any Diagnostics where none of the contained Diagnostic objects have yet
13// been seen by a caller. Its purpose is to apply additional context to a
14// set of diagnostics produced by a "deeper" component as the stack unwinds
15// during expression evaluation.
16func setDiagEvalContext(diags hcl.Diagnostics, expr hcl.Expression, ctx *hcl.EvalContext) {
17 for _, diag := range diags {
18 if diag.Expression == nil {
19 diag.Expression = expr
20 diag.EvalContext = ctx
21 }
22 }
23}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
index cfc7cd9..26819a2 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
@@ -2,6 +2,7 @@ package hclsyntax
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "sync"
5 6
6 "github.com/hashicorp/hcl2/hcl" 7 "github.com/hashicorp/hcl2/hcl"
7 "github.com/zclconf/go-cty/cty" 8 "github.com/zclconf/go-cty/cty"
@@ -104,7 +105,9 @@ func (e *ScopeTraversalExpr) walkChildNodes(w internalWalkFunc) {
104} 105}
105 106
106func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 107func (e *ScopeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
107 return e.Traversal.TraverseAbs(ctx) 108 val, diags := e.Traversal.TraverseAbs(ctx)
109 setDiagEvalContext(diags, e, ctx)
110 return val, diags
108} 111}
109 112
110func (e *ScopeTraversalExpr) Range() hcl.Range { 113func (e *ScopeTraversalExpr) Range() hcl.Range {
@@ -129,12 +132,13 @@ type RelativeTraversalExpr struct {
129} 132}
130 133
131func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) { 134func (e *RelativeTraversalExpr) walkChildNodes(w internalWalkFunc) {
132 // Scope traversals have no child nodes 135 w(e.Source)
133} 136}
134 137
135func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 138func (e *RelativeTraversalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
136 src, diags := e.Source.Value(ctx) 139 src, diags := e.Source.Value(ctx)
137 ret, travDiags := e.Traversal.TraverseRel(src) 140 ret, travDiags := e.Traversal.TraverseRel(src)
141 setDiagEvalContext(travDiags, e, ctx)
138 diags = append(diags, travDiags...) 142 diags = append(diags, travDiags...)
139 return ret, diags 143 return ret, diags
140} 144}
@@ -177,8 +181,8 @@ type FunctionCallExpr struct {
177} 181}
178 182
179func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) { 183func (e *FunctionCallExpr) walkChildNodes(w internalWalkFunc) {
180 for i, arg := range e.Args { 184 for _, arg := range e.Args {
181 e.Args[i] = w(arg).(Expression) 185 w(arg)
182 } 186 }
183} 187}
184 188
@@ -206,10 +210,12 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
206 if !hasNonNilMap { 210 if !hasNonNilMap {
207 return cty.DynamicVal, hcl.Diagnostics{ 211 return cty.DynamicVal, hcl.Diagnostics{
208 { 212 {
209 Severity: hcl.DiagError, 213 Severity: hcl.DiagError,
210 Summary: "Function calls not allowed", 214 Summary: "Function calls not allowed",
211 Detail: "Functions may not be called here.", 215 Detail: "Functions may not be called here.",
212 Subject: e.Range().Ptr(), 216 Subject: e.Range().Ptr(),
217 Expression: e,
218 EvalContext: ctx,
213 }, 219 },
214 } 220 }
215 } 221 }
@@ -225,11 +231,13 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
225 231
226 return cty.DynamicVal, hcl.Diagnostics{ 232 return cty.DynamicVal, hcl.Diagnostics{
227 { 233 {
228 Severity: hcl.DiagError, 234 Severity: hcl.DiagError,
229 Summary: "Call to unknown function", 235 Summary: "Call to unknown function",
230 Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion), 236 Detail: fmt.Sprintf("There is no function named %q.%s", e.Name, suggestion),
231 Subject: &e.NameRange, 237 Subject: &e.NameRange,
232 Context: e.Range().Ptr(), 238 Context: e.Range().Ptr(),
239 Expression: e,
240 EvalContext: ctx,
233 }, 241 },
234 } 242 }
235 } 243 }
@@ -254,11 +262,13 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
254 case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType(): 262 case expandVal.Type().IsTupleType() || expandVal.Type().IsListType() || expandVal.Type().IsSetType():
255 if expandVal.IsNull() { 263 if expandVal.IsNull() {
256 diags = append(diags, &hcl.Diagnostic{ 264 diags = append(diags, &hcl.Diagnostic{
257 Severity: hcl.DiagError, 265 Severity: hcl.DiagError,
258 Summary: "Invalid expanding argument value", 266 Summary: "Invalid expanding argument value",
259 Detail: "The expanding argument (indicated by ...) must not be null.", 267 Detail: "The expanding argument (indicated by ...) must not be null.",
260 Context: expandExpr.Range().Ptr(), 268 Subject: expandExpr.Range().Ptr(),
261 Subject: e.Range().Ptr(), 269 Context: e.Range().Ptr(),
270 Expression: expandExpr,
271 EvalContext: ctx,
262 }) 272 })
263 return cty.DynamicVal, diags 273 return cty.DynamicVal, diags
264 } 274 }
@@ -279,11 +289,13 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
279 args = newArgs 289 args = newArgs
280 default: 290 default:
281 diags = append(diags, &hcl.Diagnostic{ 291 diags = append(diags, &hcl.Diagnostic{
282 Severity: hcl.DiagError, 292 Severity: hcl.DiagError,
283 Summary: "Invalid expanding argument value", 293 Summary: "Invalid expanding argument value",
284 Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.", 294 Detail: "The expanding argument (indicated by ...) must be of a tuple, list, or set type.",
285 Context: expandExpr.Range().Ptr(), 295 Subject: expandExpr.Range().Ptr(),
286 Subject: e.Range().Ptr(), 296 Context: e.Range().Ptr(),
297 Expression: expandExpr,
298 EvalContext: ctx,
287 }) 299 })
288 return cty.DynamicVal, diags 300 return cty.DynamicVal, diags
289 } 301 }
@@ -303,8 +315,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
303 "Function %q expects%s %d argument(s). Missing value for %q.", 315 "Function %q expects%s %d argument(s). Missing value for %q.",
304 e.Name, qual, len(params), missing.Name, 316 e.Name, qual, len(params), missing.Name,
305 ), 317 ),
306 Subject: &e.CloseParenRange, 318 Subject: &e.CloseParenRange,
307 Context: e.Range().Ptr(), 319 Context: e.Range().Ptr(),
320 Expression: e,
321 EvalContext: ctx,
308 }, 322 },
309 } 323 }
310 } 324 }
@@ -318,8 +332,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
318 "Function %q expects only %d argument(s).", 332 "Function %q expects only %d argument(s).",
319 e.Name, len(params), 333 e.Name, len(params),
320 ), 334 ),
321 Subject: args[len(params)].StartRange().Ptr(), 335 Subject: args[len(params)].StartRange().Ptr(),
322 Context: e.Range().Ptr(), 336 Context: e.Range().Ptr(),
337 Expression: e,
338 EvalContext: ctx,
323 }, 339 },
324 } 340 }
325 } 341 }
@@ -349,8 +365,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
349 "Invalid value for %q parameter: %s.", 365 "Invalid value for %q parameter: %s.",
350 param.Name, err, 366 param.Name, err,
351 ), 367 ),
352 Subject: argExpr.StartRange().Ptr(), 368 Subject: argExpr.StartRange().Ptr(),
353 Context: e.Range().Ptr(), 369 Context: e.Range().Ptr(),
370 Expression: argExpr,
371 EvalContext: ctx,
354 }) 372 })
355 } 373 }
356 374
@@ -386,8 +404,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
386 "Invalid value for %q parameter: %s.", 404 "Invalid value for %q parameter: %s.",
387 param.Name, err, 405 param.Name, err,
388 ), 406 ),
389 Subject: argExpr.StartRange().Ptr(), 407 Subject: argExpr.StartRange().Ptr(),
390 Context: e.Range().Ptr(), 408 Context: e.Range().Ptr(),
409 Expression: argExpr,
410 EvalContext: ctx,
391 }) 411 })
392 412
393 default: 413 default:
@@ -398,8 +418,10 @@ func (e *FunctionCallExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
398 "Call to function %q failed: %s.", 418 "Call to function %q failed: %s.",
399 e.Name, err, 419 e.Name, err,
400 ), 420 ),
401 Subject: e.StartRange().Ptr(), 421 Subject: e.StartRange().Ptr(),
402 Context: e.Range().Ptr(), 422 Context: e.Range().Ptr(),
423 Expression: e,
424 EvalContext: ctx,
403 }) 425 })
404 } 426 }
405 427
@@ -441,9 +463,9 @@ type ConditionalExpr struct {
441} 463}
442 464
443func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) { 465func (e *ConditionalExpr) walkChildNodes(w internalWalkFunc) {
444 e.Condition = w(e.Condition).(Expression) 466 w(e.Condition)
445 e.TrueResult = w(e.TrueResult).(Expression) 467 w(e.TrueResult)
446 e.FalseResult = w(e.FalseResult).(Expression) 468 w(e.FalseResult)
447} 469}
448 470
449func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 471func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
@@ -464,10 +486,12 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic
464 // "These expressions are object and object respectively" if the 486 // "These expressions are object and object respectively" if the
465 // object types don't exactly match. 487 // object types don't exactly match.
466 "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.", 488 "The true and false result expressions must have consistent types. The given expressions are %s and %s, respectively.",
467 trueResult.Type(), falseResult.Type(), 489 trueResult.Type().FriendlyName(), falseResult.Type().FriendlyName(),
468 ), 490 ),
469 Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(), 491 Subject: hcl.RangeBetween(e.TrueResult.Range(), e.FalseResult.Range()).Ptr(),
470 Context: &e.SrcRange, 492 Context: &e.SrcRange,
493 Expression: e,
494 EvalContext: ctx,
471 }, 495 },
472 } 496 }
473 } 497 }
@@ -476,11 +500,13 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic
476 diags = append(diags, condDiags...) 500 diags = append(diags, condDiags...)
477 if condResult.IsNull() { 501 if condResult.IsNull() {
478 diags = append(diags, &hcl.Diagnostic{ 502 diags = append(diags, &hcl.Diagnostic{
479 Severity: hcl.DiagError, 503 Severity: hcl.DiagError,
480 Summary: "Null condition", 504 Summary: "Null condition",
481 Detail: "The condition value is null. Conditions must either be true or false.", 505 Detail: "The condition value is null. Conditions must either be true or false.",
482 Subject: e.Condition.Range().Ptr(), 506 Subject: e.Condition.Range().Ptr(),
483 Context: &e.SrcRange, 507 Context: &e.SrcRange,
508 Expression: e.Condition,
509 EvalContext: ctx,
484 }) 510 })
485 return cty.UnknownVal(resultType), diags 511 return cty.UnknownVal(resultType), diags
486 } 512 }
@@ -490,11 +516,13 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic
490 condResult, err := convert.Convert(condResult, cty.Bool) 516 condResult, err := convert.Convert(condResult, cty.Bool)
491 if err != nil { 517 if err != nil {
492 diags = append(diags, &hcl.Diagnostic{ 518 diags = append(diags, &hcl.Diagnostic{
493 Severity: hcl.DiagError, 519 Severity: hcl.DiagError,
494 Summary: "Incorrect condition type", 520 Summary: "Incorrect condition type",
495 Detail: fmt.Sprintf("The condition expression must be of type bool."), 521 Detail: fmt.Sprintf("The condition expression must be of type bool."),
496 Subject: e.Condition.Range().Ptr(), 522 Subject: e.Condition.Range().Ptr(),
497 Context: &e.SrcRange, 523 Context: &e.SrcRange,
524 Expression: e.Condition,
525 EvalContext: ctx,
498 }) 526 })
499 return cty.UnknownVal(resultType), diags 527 return cty.UnknownVal(resultType), diags
500 } 528 }
@@ -513,8 +541,10 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic
513 "The true result value has the wrong type: %s.", 541 "The true result value has the wrong type: %s.",
514 err.Error(), 542 err.Error(),
515 ), 543 ),
516 Subject: e.TrueResult.Range().Ptr(), 544 Subject: e.TrueResult.Range().Ptr(),
517 Context: &e.SrcRange, 545 Context: &e.SrcRange,
546 Expression: e.TrueResult,
547 EvalContext: ctx,
518 }) 548 })
519 trueResult = cty.UnknownVal(resultType) 549 trueResult = cty.UnknownVal(resultType)
520 } 550 }
@@ -534,8 +564,10 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic
534 "The false result value has the wrong type: %s.", 564 "The false result value has the wrong type: %s.",
535 err.Error(), 565 err.Error(),
536 ), 566 ),
537 Subject: e.TrueResult.Range().Ptr(), 567 Subject: e.FalseResult.Range().Ptr(),
538 Context: &e.SrcRange, 568 Context: &e.SrcRange,
569 Expression: e.FalseResult,
570 EvalContext: ctx,
539 }) 571 })
540 falseResult = cty.UnknownVal(resultType) 572 falseResult = cty.UnknownVal(resultType)
541 } 573 }
@@ -561,8 +593,8 @@ type IndexExpr struct {
561} 593}
562 594
563func (e *IndexExpr) walkChildNodes(w internalWalkFunc) { 595func (e *IndexExpr) walkChildNodes(w internalWalkFunc) {
564 e.Collection = w(e.Collection).(Expression) 596 w(e.Collection)
565 e.Key = w(e.Key).(Expression) 597 w(e.Key)
566} 598}
567 599
568func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 600func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
@@ -572,7 +604,10 @@ func (e *IndexExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
572 diags = append(diags, collDiags...) 604 diags = append(diags, collDiags...)
573 diags = append(diags, keyDiags...) 605 diags = append(diags, keyDiags...)
574 606
575 return hcl.Index(coll, key, &e.SrcRange) 607 val, indexDiags := hcl.Index(coll, key, &e.SrcRange)
608 setDiagEvalContext(indexDiags, e, ctx)
609 diags = append(diags, indexDiags...)
610 return val, diags
576} 611}
577 612
578func (e *IndexExpr) Range() hcl.Range { 613func (e *IndexExpr) Range() hcl.Range {
@@ -591,8 +626,8 @@ type TupleConsExpr struct {
591} 626}
592 627
593func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) { 628func (e *TupleConsExpr) walkChildNodes(w internalWalkFunc) {
594 for i, expr := range e.Exprs { 629 for _, expr := range e.Exprs {
595 e.Exprs[i] = w(expr).(Expression) 630 w(expr)
596 } 631 }
597} 632}
598 633
@@ -640,9 +675,9 @@ type ObjectConsItem struct {
640} 675}
641 676
642func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) { 677func (e *ObjectConsExpr) walkChildNodes(w internalWalkFunc) {
643 for i, item := range e.Items { 678 for _, item := range e.Items {
644 e.Items[i].KeyExpr = w(item.KeyExpr).(Expression) 679 w(item.KeyExpr)
645 e.Items[i].ValueExpr = w(item.ValueExpr).(Expression) 680 w(item.ValueExpr)
646 } 681 }
647} 682}
648 683
@@ -675,10 +710,12 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics
675 710
676 if key.IsNull() { 711 if key.IsNull() {
677 diags = append(diags, &hcl.Diagnostic{ 712 diags = append(diags, &hcl.Diagnostic{
678 Severity: hcl.DiagError, 713 Severity: hcl.DiagError,
679 Summary: "Null value as key", 714 Summary: "Null value as key",
680 Detail: "Can't use a null value as a key.", 715 Detail: "Can't use a null value as a key.",
681 Subject: item.ValueExpr.Range().Ptr(), 716 Subject: item.ValueExpr.Range().Ptr(),
717 Expression: item.KeyExpr,
718 EvalContext: ctx,
682 }) 719 })
683 known = false 720 known = false
684 continue 721 continue
@@ -688,10 +725,12 @@ func (e *ObjectConsExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics
688 key, err = convert.Convert(key, cty.String) 725 key, err = convert.Convert(key, cty.String)
689 if err != nil { 726 if err != nil {
690 diags = append(diags, &hcl.Diagnostic{ 727 diags = append(diags, &hcl.Diagnostic{
691 Severity: hcl.DiagError, 728 Severity: hcl.DiagError,
692 Summary: "Incorrect key type", 729 Summary: "Incorrect key type",
693 Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()), 730 Detail: fmt.Sprintf("Can't use this value as a key: %s.", err.Error()),
694 Subject: item.ValueExpr.Range().Ptr(), 731 Subject: item.KeyExpr.Range().Ptr(),
732 Expression: item.KeyExpr,
733 EvalContext: ctx,
695 }) 734 })
696 known = false 735 known = false
697 continue 736 continue
@@ -754,11 +793,31 @@ func (e *ObjectConsKeyExpr) walkChildNodes(w internalWalkFunc) {
754 // We only treat our wrapped expression as a real expression if we're 793 // We only treat our wrapped expression as a real expression if we're
755 // not going to interpret it as a literal. 794 // not going to interpret it as a literal.
756 if e.literalName() == "" { 795 if e.literalName() == "" {
757 e.Wrapped = w(e.Wrapped).(Expression) 796 w(e.Wrapped)
758 } 797 }
759} 798}
760 799
761func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 800func (e *ObjectConsKeyExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
801 // Because we accept a naked identifier as a literal key rather than a
802 // reference, it's confusing to accept a traversal containing periods
803 // here since we can't tell if the user intends to create a key with
804 // periods or actually reference something. To avoid confusing downstream
805 // errors we'll just prohibit a naked multi-step traversal here and
806 // require the user to state their intent more clearly.
807 // (This is handled at evaluation time rather than parse time because
808 // an application using static analysis _can_ accept a naked multi-step
809 // traversal here, if desired.)
810 if travExpr, isTraversal := e.Wrapped.(*ScopeTraversalExpr); isTraversal && len(travExpr.Traversal) > 1 {
811 var diags hcl.Diagnostics
812 diags = append(diags, &hcl.Diagnostic{
813 Severity: hcl.DiagError,
814 Summary: "Ambiguous attribute key",
815 Detail: "If this expression is intended to be a reference, wrap it in parentheses. If it's instead intended as a literal name containing periods, wrap it in quotes to create a string literal.",
816 Subject: e.Range().Ptr(),
817 })
818 return cty.DynamicVal, diags
819 }
820
762 if ln := e.literalName(); ln != "" { 821 if ln := e.literalName(); ln != "" {
763 return cty.StringVal(ln), nil 822 return cty.StringVal(ln), nil
764 } 823 }
@@ -818,11 +877,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
818 877
819 if collVal.IsNull() { 878 if collVal.IsNull() {
820 diags = append(diags, &hcl.Diagnostic{ 879 diags = append(diags, &hcl.Diagnostic{
821 Severity: hcl.DiagError, 880 Severity: hcl.DiagError,
822 Summary: "Iteration over null value", 881 Summary: "Iteration over null value",
823 Detail: "A null value cannot be used as the collection in a 'for' expression.", 882 Detail: "A null value cannot be used as the collection in a 'for' expression.",
824 Subject: e.CollExpr.Range().Ptr(), 883 Subject: e.CollExpr.Range().Ptr(),
825 Context: &e.SrcRange, 884 Context: &e.SrcRange,
885 Expression: e.CollExpr,
886 EvalContext: ctx,
826 }) 887 })
827 return cty.DynamicVal, diags 888 return cty.DynamicVal, diags
828 } 889 }
@@ -837,8 +898,10 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
837 "A value of type %s cannot be used as the collection in a 'for' expression.", 898 "A value of type %s cannot be used as the collection in a 'for' expression.",
838 collVal.Type().FriendlyName(), 899 collVal.Type().FriendlyName(),
839 ), 900 ),
840 Subject: e.CollExpr.Range().Ptr(), 901 Subject: e.CollExpr.Range().Ptr(),
841 Context: &e.SrcRange, 902 Context: &e.SrcRange,
903 Expression: e.CollExpr,
904 EvalContext: ctx,
842 }) 905 })
843 return cty.DynamicVal, diags 906 return cty.DynamicVal, diags
844 } 907 }
@@ -846,14 +909,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
846 return cty.DynamicVal, diags 909 return cty.DynamicVal, diags
847 } 910 }
848 911
849 childCtx := ctx.NewChild()
850 childCtx.Variables = map[string]cty.Value{}
851
852 // Before we start we'll do an early check to see if any CondExpr we've 912 // Before we start we'll do an early check to see if any CondExpr we've
853 // been given is of the wrong type. This isn't 100% reliable (it may 913 // been given is of the wrong type. This isn't 100% reliable (it may
854 // be DynamicVal until real values are given) but it should catch some 914 // be DynamicVal until real values are given) but it should catch some
855 // straightforward cases and prevent a barrage of repeated errors. 915 // straightforward cases and prevent a barrage of repeated errors.
856 if e.CondExpr != nil { 916 if e.CondExpr != nil {
917 childCtx := ctx.NewChild()
918 childCtx.Variables = map[string]cty.Value{}
857 if e.KeyVar != "" { 919 if e.KeyVar != "" {
858 childCtx.Variables[e.KeyVar] = cty.DynamicVal 920 childCtx.Variables[e.KeyVar] = cty.DynamicVal
859 } 921 }
@@ -863,22 +925,26 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
863 diags = append(diags, condDiags...) 925 diags = append(diags, condDiags...)
864 if result.IsNull() { 926 if result.IsNull() {
865 diags = append(diags, &hcl.Diagnostic{ 927 diags = append(diags, &hcl.Diagnostic{
866 Severity: hcl.DiagError, 928 Severity: hcl.DiagError,
867 Summary: "Condition is null", 929 Summary: "Condition is null",
868 Detail: "The value of the 'if' clause must not be null.", 930 Detail: "The value of the 'if' clause must not be null.",
869 Subject: e.CondExpr.Range().Ptr(), 931 Subject: e.CondExpr.Range().Ptr(),
870 Context: &e.SrcRange, 932 Context: &e.SrcRange,
933 Expression: e.CondExpr,
934 EvalContext: ctx,
871 }) 935 })
872 return cty.DynamicVal, diags 936 return cty.DynamicVal, diags
873 } 937 }
874 _, err := convert.Convert(result, cty.Bool) 938 _, err := convert.Convert(result, cty.Bool)
875 if err != nil { 939 if err != nil {
876 diags = append(diags, &hcl.Diagnostic{ 940 diags = append(diags, &hcl.Diagnostic{
877 Severity: hcl.DiagError, 941 Severity: hcl.DiagError,
878 Summary: "Invalid 'for' condition", 942 Summary: "Invalid 'for' condition",
879 Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), 943 Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()),
880 Subject: e.CondExpr.Range().Ptr(), 944 Subject: e.CondExpr.Range().Ptr(),
881 Context: &e.SrcRange, 945 Context: &e.SrcRange,
946 Expression: e.CondExpr,
947 EvalContext: ctx,
882 }) 948 })
883 return cty.DynamicVal, diags 949 return cty.DynamicVal, diags
884 } 950 }
@@ -902,6 +968,8 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
902 known := true 968 known := true
903 for it.Next() { 969 for it.Next() {
904 k, v := it.Element() 970 k, v := it.Element()
971 childCtx := ctx.NewChild()
972 childCtx.Variables = map[string]cty.Value{}
905 if e.KeyVar != "" { 973 if e.KeyVar != "" {
906 childCtx.Variables[e.KeyVar] = k 974 childCtx.Variables[e.KeyVar] = k
907 } 975 }
@@ -913,11 +981,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
913 if includeRaw.IsNull() { 981 if includeRaw.IsNull() {
914 if known { 982 if known {
915 diags = append(diags, &hcl.Diagnostic{ 983 diags = append(diags, &hcl.Diagnostic{
916 Severity: hcl.DiagError, 984 Severity: hcl.DiagError,
917 Summary: "Condition is null", 985 Summary: "Invalid 'for' condition",
918 Detail: "The value of the 'if' clause must not be null.", 986 Detail: "The value of the 'if' clause must not be null.",
919 Subject: e.CondExpr.Range().Ptr(), 987 Subject: e.CondExpr.Range().Ptr(),
920 Context: &e.SrcRange, 988 Context: &e.SrcRange,
989 Expression: e.CondExpr,
990 EvalContext: childCtx,
921 }) 991 })
922 } 992 }
923 known = false 993 known = false
@@ -927,11 +997,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
927 if err != nil { 997 if err != nil {
928 if known { 998 if known {
929 diags = append(diags, &hcl.Diagnostic{ 999 diags = append(diags, &hcl.Diagnostic{
930 Severity: hcl.DiagError, 1000 Severity: hcl.DiagError,
931 Summary: "Invalid 'for' condition", 1001 Summary: "Invalid 'for' condition",
932 Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), 1002 Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()),
933 Subject: e.CondExpr.Range().Ptr(), 1003 Subject: e.CondExpr.Range().Ptr(),
934 Context: &e.SrcRange, 1004 Context: &e.SrcRange,
1005 Expression: e.CondExpr,
1006 EvalContext: childCtx,
935 }) 1007 })
936 } 1008 }
937 known = false 1009 known = false
@@ -953,11 +1025,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
953 if keyRaw.IsNull() { 1025 if keyRaw.IsNull() {
954 if known { 1026 if known {
955 diags = append(diags, &hcl.Diagnostic{ 1027 diags = append(diags, &hcl.Diagnostic{
956 Severity: hcl.DiagError, 1028 Severity: hcl.DiagError,
957 Summary: "Invalid object key", 1029 Summary: "Invalid object key",
958 Detail: "Key expression in 'for' expression must not produce a null value.", 1030 Detail: "Key expression in 'for' expression must not produce a null value.",
959 Subject: e.KeyExpr.Range().Ptr(), 1031 Subject: e.KeyExpr.Range().Ptr(),
960 Context: &e.SrcRange, 1032 Context: &e.SrcRange,
1033 Expression: e.KeyExpr,
1034 EvalContext: childCtx,
961 }) 1035 })
962 } 1036 }
963 known = false 1037 known = false
@@ -972,11 +1046,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
972 if err != nil { 1046 if err != nil {
973 if known { 1047 if known {
974 diags = append(diags, &hcl.Diagnostic{ 1048 diags = append(diags, &hcl.Diagnostic{
975 Severity: hcl.DiagError, 1049 Severity: hcl.DiagError,
976 Summary: "Invalid object key", 1050 Summary: "Invalid object key",
977 Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()), 1051 Detail: fmt.Sprintf("The key expression produced an invalid result: %s.", err.Error()),
978 Subject: e.KeyExpr.Range().Ptr(), 1052 Subject: e.KeyExpr.Range().Ptr(),
979 Context: &e.SrcRange, 1053 Context: &e.SrcRange,
1054 Expression: e.KeyExpr,
1055 EvalContext: childCtx,
980 }) 1056 })
981 } 1057 }
982 known = false 1058 known = false
@@ -996,11 +1072,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
996 Severity: hcl.DiagError, 1072 Severity: hcl.DiagError,
997 Summary: "Duplicate object key", 1073 Summary: "Duplicate object key",
998 Detail: fmt.Sprintf( 1074 Detail: fmt.Sprintf(
999 "Two different items produced the key %q in this for expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.", 1075 "Two different items produced the key %q in this 'for' expression. If duplicates are expected, use the ellipsis (...) after the value expression to enable grouping by key.",
1000 k, 1076 k,
1001 ), 1077 ),
1002 Subject: e.KeyExpr.Range().Ptr(), 1078 Subject: e.KeyExpr.Range().Ptr(),
1003 Context: &e.SrcRange, 1079 Context: &e.SrcRange,
1080 Expression: e.KeyExpr,
1081 EvalContext: childCtx,
1004 }) 1082 })
1005 } else { 1083 } else {
1006 vals[key.AsString()] = val 1084 vals[key.AsString()] = val
@@ -1030,6 +1108,8 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1030 known := true 1108 known := true
1031 for it.Next() { 1109 for it.Next() {
1032 k, v := it.Element() 1110 k, v := it.Element()
1111 childCtx := ctx.NewChild()
1112 childCtx.Variables = map[string]cty.Value{}
1033 if e.KeyVar != "" { 1113 if e.KeyVar != "" {
1034 childCtx.Variables[e.KeyVar] = k 1114 childCtx.Variables[e.KeyVar] = k
1035 } 1115 }
@@ -1041,11 +1121,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1041 if includeRaw.IsNull() { 1121 if includeRaw.IsNull() {
1042 if known { 1122 if known {
1043 diags = append(diags, &hcl.Diagnostic{ 1123 diags = append(diags, &hcl.Diagnostic{
1044 Severity: hcl.DiagError, 1124 Severity: hcl.DiagError,
1045 Summary: "Condition is null", 1125 Summary: "Invalid 'for' condition",
1046 Detail: "The value of the 'if' clause must not be null.", 1126 Detail: "The value of the 'if' clause must not be null.",
1047 Subject: e.CondExpr.Range().Ptr(), 1127 Subject: e.CondExpr.Range().Ptr(),
1048 Context: &e.SrcRange, 1128 Context: &e.SrcRange,
1129 Expression: e.CondExpr,
1130 EvalContext: childCtx,
1049 }) 1131 })
1050 } 1132 }
1051 known = false 1133 known = false
@@ -1063,11 +1145,13 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1063 if err != nil { 1145 if err != nil {
1064 if known { 1146 if known {
1065 diags = append(diags, &hcl.Diagnostic{ 1147 diags = append(diags, &hcl.Diagnostic{
1066 Severity: hcl.DiagError, 1148 Severity: hcl.DiagError,
1067 Summary: "Invalid 'for' condition", 1149 Summary: "Invalid 'for' condition",
1068 Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()), 1150 Detail: fmt.Sprintf("The 'if' clause value is invalid: %s.", err.Error()),
1069 Subject: e.CondExpr.Range().Ptr(), 1151 Subject: e.CondExpr.Range().Ptr(),
1070 Context: &e.SrcRange, 1152 Context: &e.SrcRange,
1153 Expression: e.CondExpr,
1154 EvalContext: childCtx,
1071 }) 1155 })
1072 } 1156 }
1073 known = false 1157 known = false
@@ -1094,7 +1178,7 @@ func (e *ForExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1094} 1178}
1095 1179
1096func (e *ForExpr) walkChildNodes(w internalWalkFunc) { 1180func (e *ForExpr) walkChildNodes(w internalWalkFunc) {
1097 e.CollExpr = w(e.CollExpr).(Expression) 1181 w(e.CollExpr)
1098 1182
1099 scopeNames := map[string]struct{}{} 1183 scopeNames := map[string]struct{}{}
1100 if e.KeyVar != "" { 1184 if e.KeyVar != "" {
@@ -1107,17 +1191,17 @@ func (e *ForExpr) walkChildNodes(w internalWalkFunc) {
1107 if e.KeyExpr != nil { 1191 if e.KeyExpr != nil {
1108 w(ChildScope{ 1192 w(ChildScope{
1109 LocalNames: scopeNames, 1193 LocalNames: scopeNames,
1110 Expr: &e.KeyExpr, 1194 Expr: e.KeyExpr,
1111 }) 1195 })
1112 } 1196 }
1113 w(ChildScope{ 1197 w(ChildScope{
1114 LocalNames: scopeNames, 1198 LocalNames: scopeNames,
1115 Expr: &e.ValExpr, 1199 Expr: e.ValExpr,
1116 }) 1200 })
1117 if e.CondExpr != nil { 1201 if e.CondExpr != nil {
1118 w(ChildScope{ 1202 w(ChildScope{
1119 LocalNames: scopeNames, 1203 LocalNames: scopeNames,
1120 Expr: &e.CondExpr, 1204 Expr: e.CondExpr,
1121 }) 1205 })
1122 } 1206 }
1123} 1207}
@@ -1151,26 +1235,78 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1151 return cty.DynamicVal, diags 1235 return cty.DynamicVal, diags
1152 } 1236 }
1153 1237
1238 sourceTy := sourceVal.Type()
1239 if sourceTy == cty.DynamicPseudoType {
1240 // If we don't even know the _type_ of our source value yet then
1241 // we'll need to defer all processing, since we can't decide our
1242 // result type either.
1243 return cty.DynamicVal, diags
1244 }
1245
1246 // A "special power" of splat expressions is that they can be applied
1247 // both to tuples/lists and to other values, and in the latter case
1248 // the value will be treated as an implicit single-item tuple, or as
1249 // an empty tuple if the value is null.
1250 autoUpgrade := !(sourceTy.IsTupleType() || sourceTy.IsListType() || sourceTy.IsSetType())
1251
1154 if sourceVal.IsNull() { 1252 if sourceVal.IsNull() {
1253 if autoUpgrade {
1254 return cty.EmptyTupleVal, diags
1255 }
1155 diags = append(diags, &hcl.Diagnostic{ 1256 diags = append(diags, &hcl.Diagnostic{
1156 Severity: hcl.DiagError, 1257 Severity: hcl.DiagError,
1157 Summary: "Splat of null value", 1258 Summary: "Splat of null value",
1158 Detail: "Splat expressions (with the * symbol) cannot be applied to null values.", 1259 Detail: "Splat expressions (with the * symbol) cannot be applied to null sequences.",
1159 Subject: e.Source.Range().Ptr(), 1260 Subject: e.Source.Range().Ptr(),
1160 Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(), 1261 Context: hcl.RangeBetween(e.Source.Range(), e.MarkerRange).Ptr(),
1262 Expression: e.Source,
1263 EvalContext: ctx,
1161 }) 1264 })
1162 return cty.DynamicVal, diags 1265 return cty.DynamicVal, diags
1163 } 1266 }
1164 if !sourceVal.IsKnown() { 1267
1165 return cty.DynamicVal, diags 1268 if autoUpgrade {
1269 sourceVal = cty.TupleVal([]cty.Value{sourceVal})
1270 sourceTy = sourceVal.Type()
1166 } 1271 }
1167 1272
1168 // A "special power" of splat expressions is that they can be applied 1273 // We'll compute our result type lazily if we need it. In the normal case
1169 // both to tuples/lists and to other values, and in the latter case 1274 // it's inferred automatically from the value we construct.
1170 // the value will be treated as an implicit single-value list. We'll 1275 resultTy := func() (cty.Type, hcl.Diagnostics) {
1171 // deal with that here first. 1276 chiCtx := ctx.NewChild()
1172 if !(sourceVal.Type().IsTupleType() || sourceVal.Type().IsListType()) { 1277 var diags hcl.Diagnostics
1173 sourceVal = cty.ListVal([]cty.Value{sourceVal}) 1278 switch {
1279 case sourceTy.IsListType() || sourceTy.IsSetType():
1280 ety := sourceTy.ElementType()
1281 e.Item.setValue(chiCtx, cty.UnknownVal(ety))
1282 val, itemDiags := e.Each.Value(chiCtx)
1283 diags = append(diags, itemDiags...)
1284 e.Item.clearValue(chiCtx) // clean up our temporary value
1285 return cty.List(val.Type()), diags
1286 case sourceTy.IsTupleType():
1287 etys := sourceTy.TupleElementTypes()
1288 resultTys := make([]cty.Type, 0, len(etys))
1289 for _, ety := range etys {
1290 e.Item.setValue(chiCtx, cty.UnknownVal(ety))
1291 val, itemDiags := e.Each.Value(chiCtx)
1292 diags = append(diags, itemDiags...)
1293 e.Item.clearValue(chiCtx) // clean up our temporary value
1294 resultTys = append(resultTys, val.Type())
1295 }
1296 return cty.Tuple(resultTys), diags
1297 default:
1298 // Should never happen because of our promotion to list above.
1299 return cty.DynamicPseudoType, diags
1300 }
1301 }
1302
1303 if !sourceVal.IsKnown() {
1304 // We can't produce a known result in this case, but we'll still
1305 // indicate what the result type would be, allowing any downstream type
1306 // checking to proceed.
1307 ty, tyDiags := resultTy()
1308 diags = append(diags, tyDiags...)
1309 return cty.UnknownVal(ty), diags
1174 } 1310 }
1175 1311
1176 vals := make([]cty.Value, 0, sourceVal.LengthInt()) 1312 vals := make([]cty.Value, 0, sourceVal.LengthInt())
@@ -1194,15 +1330,28 @@ func (e *SplatExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1194 e.Item.clearValue(ctx) // clean up our temporary value 1330 e.Item.clearValue(ctx) // clean up our temporary value
1195 1331
1196 if !isKnown { 1332 if !isKnown {
1197 return cty.DynamicVal, diags 1333 // We'll ingore the resultTy diagnostics in this case since they
1334 // will just be the same errors we saw while iterating above.
1335 ty, _ := resultTy()
1336 return cty.UnknownVal(ty), diags
1198 } 1337 }
1199 1338
1200 return cty.TupleVal(vals), diags 1339 switch {
1340 case sourceTy.IsListType() || sourceTy.IsSetType():
1341 if len(vals) == 0 {
1342 ty, tyDiags := resultTy()
1343 diags = append(diags, tyDiags...)
1344 return cty.ListValEmpty(ty.ElementType()), diags
1345 }
1346 return cty.ListVal(vals), diags
1347 default:
1348 return cty.TupleVal(vals), diags
1349 }
1201} 1350}
1202 1351
1203func (e *SplatExpr) walkChildNodes(w internalWalkFunc) { 1352func (e *SplatExpr) walkChildNodes(w internalWalkFunc) {
1204 e.Source = w(e.Source).(Expression) 1353 w(e.Source)
1205 e.Each = w(e.Each).(Expression) 1354 w(e.Each)
1206} 1355}
1207 1356
1208func (e *SplatExpr) Range() hcl.Range { 1357func (e *SplatExpr) Range() hcl.Range {
@@ -1226,13 +1375,24 @@ func (e *SplatExpr) StartRange() hcl.Range {
1226// assigns it a value. 1375// assigns it a value.
1227type AnonSymbolExpr struct { 1376type AnonSymbolExpr struct {
1228 SrcRange hcl.Range 1377 SrcRange hcl.Range
1229 values map[*hcl.EvalContext]cty.Value 1378
1379 // values and its associated lock are used to isolate concurrent
1380 // evaluations of a symbol from one another. It is the calling application's
1381 // responsibility to ensure that the same splat expression is not evalauted
1382 // concurrently within the _same_ EvalContext, but it is fine and safe to
1383 // do cuncurrent evaluations with distinct EvalContexts.
1384 values map[*hcl.EvalContext]cty.Value
1385 valuesLock sync.RWMutex
1230} 1386}
1231 1387
1232func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 1388func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1233 if ctx == nil { 1389 if ctx == nil {
1234 return cty.DynamicVal, nil 1390 return cty.DynamicVal, nil
1235 } 1391 }
1392
1393 e.valuesLock.RLock()
1394 defer e.valuesLock.RUnlock()
1395
1236 val, exists := e.values[ctx] 1396 val, exists := e.values[ctx]
1237 if !exists { 1397 if !exists {
1238 return cty.DynamicVal, nil 1398 return cty.DynamicVal, nil
@@ -1243,6 +1403,9 @@ func (e *AnonSymbolExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics
1243// setValue sets a temporary local value for the expression when evaluated 1403// setValue sets a temporary local value for the expression when evaluated
1244// in the given context, which must be non-nil. 1404// in the given context, which must be non-nil.
1245func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) { 1405func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) {
1406 e.valuesLock.Lock()
1407 defer e.valuesLock.Unlock()
1408
1246 if e.values == nil { 1409 if e.values == nil {
1247 e.values = make(map[*hcl.EvalContext]cty.Value) 1410 e.values = make(map[*hcl.EvalContext]cty.Value)
1248 } 1411 }
@@ -1253,6 +1416,9 @@ func (e *AnonSymbolExpr) setValue(ctx *hcl.EvalContext, val cty.Value) {
1253} 1416}
1254 1417
1255func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) { 1418func (e *AnonSymbolExpr) clearValue(ctx *hcl.EvalContext) {
1419 e.valuesLock.Lock()
1420 defer e.valuesLock.Unlock()
1421
1256 if e.values == nil { 1422 if e.values == nil {
1257 return 1423 return
1258 } 1424 }
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go
index 9a5da04..7f59f1a 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_ops.go
@@ -129,8 +129,8 @@ type BinaryOpExpr struct {
129} 129}
130 130
131func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) { 131func (e *BinaryOpExpr) walkChildNodes(w internalWalkFunc) {
132 e.LHS = w(e.LHS).(Expression) 132 w(e.LHS)
133 e.RHS = w(e.RHS).(Expression) 133 w(e.RHS)
134} 134}
135 135
136func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 136func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
@@ -149,21 +149,25 @@ func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
149 lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type) 149 lhsVal, err := convert.Convert(givenLHSVal, lhsParam.Type)
150 if err != nil { 150 if err != nil {
151 diags = append(diags, &hcl.Diagnostic{ 151 diags = append(diags, &hcl.Diagnostic{
152 Severity: hcl.DiagError, 152 Severity: hcl.DiagError,
153 Summary: "Invalid operand", 153 Summary: "Invalid operand",
154 Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err), 154 Detail: fmt.Sprintf("Unsuitable value for left operand: %s.", err),
155 Subject: e.LHS.Range().Ptr(), 155 Subject: e.LHS.Range().Ptr(),
156 Context: &e.SrcRange, 156 Context: &e.SrcRange,
157 Expression: e.LHS,
158 EvalContext: ctx,
157 }) 159 })
158 } 160 }
159 rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type) 161 rhsVal, err := convert.Convert(givenRHSVal, rhsParam.Type)
160 if err != nil { 162 if err != nil {
161 diags = append(diags, &hcl.Diagnostic{ 163 diags = append(diags, &hcl.Diagnostic{
162 Severity: hcl.DiagError, 164 Severity: hcl.DiagError,
163 Summary: "Invalid operand", 165 Summary: "Invalid operand",
164 Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err), 166 Detail: fmt.Sprintf("Unsuitable value for right operand: %s.", err),
165 Subject: e.RHS.Range().Ptr(), 167 Subject: e.RHS.Range().Ptr(),
166 Context: &e.SrcRange, 168 Context: &e.SrcRange,
169 Expression: e.RHS,
170 EvalContext: ctx,
167 }) 171 })
168 } 172 }
169 173
@@ -178,10 +182,12 @@ func (e *BinaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
178 if err != nil { 182 if err != nil {
179 diags = append(diags, &hcl.Diagnostic{ 183 diags = append(diags, &hcl.Diagnostic{
180 // FIXME: This diagnostic is useless. 184 // FIXME: This diagnostic is useless.
181 Severity: hcl.DiagError, 185 Severity: hcl.DiagError,
182 Summary: "Operation failed", 186 Summary: "Operation failed",
183 Detail: fmt.Sprintf("Error during operation: %s.", err), 187 Detail: fmt.Sprintf("Error during operation: %s.", err),
184 Subject: &e.SrcRange, 188 Subject: &e.SrcRange,
189 Expression: e,
190 EvalContext: ctx,
185 }) 191 })
186 return cty.UnknownVal(e.Op.Type), diags 192 return cty.UnknownVal(e.Op.Type), diags
187 } 193 }
@@ -206,7 +212,7 @@ type UnaryOpExpr struct {
206} 212}
207 213
208func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) { 214func (e *UnaryOpExpr) walkChildNodes(w internalWalkFunc) {
209 e.Val = w(e.Val).(Expression) 215 w(e.Val)
210} 216}
211 217
212func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 218func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
@@ -219,11 +225,13 @@ func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
219 val, err := convert.Convert(givenVal, param.Type) 225 val, err := convert.Convert(givenVal, param.Type)
220 if err != nil { 226 if err != nil {
221 diags = append(diags, &hcl.Diagnostic{ 227 diags = append(diags, &hcl.Diagnostic{
222 Severity: hcl.DiagError, 228 Severity: hcl.DiagError,
223 Summary: "Invalid operand", 229 Summary: "Invalid operand",
224 Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err), 230 Detail: fmt.Sprintf("Unsuitable value for unary operand: %s.", err),
225 Subject: e.Val.Range().Ptr(), 231 Subject: e.Val.Range().Ptr(),
226 Context: &e.SrcRange, 232 Context: &e.SrcRange,
233 Expression: e.Val,
234 EvalContext: ctx,
227 }) 235 })
228 } 236 }
229 237
@@ -238,10 +246,12 @@ func (e *UnaryOpExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
238 if err != nil { 246 if err != nil {
239 diags = append(diags, &hcl.Diagnostic{ 247 diags = append(diags, &hcl.Diagnostic{
240 // FIXME: This diagnostic is useless. 248 // FIXME: This diagnostic is useless.
241 Severity: hcl.DiagError, 249 Severity: hcl.DiagError,
242 Summary: "Operation failed", 250 Summary: "Operation failed",
243 Detail: fmt.Sprintf("Error during operation: %s.", err), 251 Detail: fmt.Sprintf("Error during operation: %s.", err),
244 Subject: &e.SrcRange, 252 Subject: &e.SrcRange,
253 Expression: e,
254 EvalContext: ctx,
245 }) 255 })
246 return cty.UnknownVal(e.Op.Type), diags 256 return cty.UnknownVal(e.Op.Type), diags
247 } 257 }
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
index a1c4727..fa79e3d 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
@@ -16,8 +16,8 @@ type TemplateExpr struct {
16} 16}
17 17
18func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) { 18func (e *TemplateExpr) walkChildNodes(w internalWalkFunc) {
19 for i, part := range e.Parts { 19 for _, part := range e.Parts {
20 e.Parts[i] = w(part).(Expression) 20 w(part)
21 } 21 }
22} 22}
23 23
@@ -37,8 +37,10 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
37 Detail: fmt.Sprintf( 37 Detail: fmt.Sprintf(
38 "The expression result is null. Cannot include a null value in a string template.", 38 "The expression result is null. Cannot include a null value in a string template.",
39 ), 39 ),
40 Subject: part.Range().Ptr(), 40 Subject: part.Range().Ptr(),
41 Context: &e.SrcRange, 41 Context: &e.SrcRange,
42 Expression: part,
43 EvalContext: ctx,
42 }) 44 })
43 continue 45 continue
44 } 46 }
@@ -61,8 +63,10 @@ func (e *TemplateExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics)
61 "Cannot include the given value in a string template: %s.", 63 "Cannot include the given value in a string template: %s.",
62 err.Error(), 64 err.Error(),
63 ), 65 ),
64 Subject: part.Range().Ptr(), 66 Subject: part.Range().Ptr(),
65 Context: &e.SrcRange, 67 Context: &e.SrcRange,
68 Expression: part,
69 EvalContext: ctx,
66 }) 70 })
67 continue 71 continue
68 } 72 }
@@ -94,7 +98,7 @@ type TemplateJoinExpr struct {
94} 98}
95 99
96func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) { 100func (e *TemplateJoinExpr) walkChildNodes(w internalWalkFunc) {
97 e.Tuple = w(e.Tuple).(Expression) 101 w(e.Tuple)
98} 102}
99 103
100func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 104func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
@@ -127,7 +131,9 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
127 Detail: fmt.Sprintf( 131 Detail: fmt.Sprintf(
128 "An iteration result is null. Cannot include a null value in a string template.", 132 "An iteration result is null. Cannot include a null value in a string template.",
129 ), 133 ),
130 Subject: e.Range().Ptr(), 134 Subject: e.Range().Ptr(),
135 Expression: e,
136 EvalContext: ctx,
131 }) 137 })
132 continue 138 continue
133 } 139 }
@@ -143,7 +149,9 @@ func (e *TemplateJoinExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnosti
143 "Cannot include one of the interpolation results into the string template: %s.", 149 "Cannot include one of the interpolation results into the string template: %s.",
144 err.Error(), 150 err.Error(),
145 ), 151 ),
146 Subject: e.Range().Ptr(), 152 Subject: e.Range().Ptr(),
153 Expression: e,
154 EvalContext: ctx,
147 }) 155 })
148 continue 156 continue
149 } 157 }
@@ -176,7 +184,7 @@ type TemplateWrapExpr struct {
176} 184}
177 185
178func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) { 186func (e *TemplateWrapExpr) walkChildNodes(w internalWalkFunc) {
179 e.Wrapped = w(e.Wrapped).(Expression) 187 w(e.Wrapped)
180} 188}
181 189
182func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) { 190func (e *TemplateWrapExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go
index 4d41b6b..c8c97f3 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/navigation.go
@@ -3,6 +3,8 @@ package hclsyntax
3import ( 3import (
4 "bytes" 4 "bytes"
5 "fmt" 5 "fmt"
6
7 "github.com/hashicorp/hcl2/hcl"
6) 8)
7 9
8type navigation struct { 10type navigation struct {
@@ -39,3 +41,19 @@ func (n navigation) ContextString(offset int) string {
39 } 41 }
40 return buf.String() 42 return buf.String()
41} 43}
44
45func (n navigation) ContextDefRange(offset int) hcl.Range {
46 var block *Block
47 for _, candidate := range n.root.Blocks {
48 if candidate.Range().ContainsOffset(offset) {
49 block = candidate
50 break
51 }
52 }
53
54 if block == nil {
55 return hcl.Range{}
56 }
57
58 return block.DefRange()
59}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go
index fd426d4..75812e6 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/node.go
@@ -19,4 +19,4 @@ type Node interface {
19 Range() hcl.Range 19 Range() hcl.Range
20} 20}
21 21
22type internalWalkFunc func(Node) Node 22type internalWalkFunc func(Node)
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
index 002858f..253ad50 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
@@ -9,7 +9,6 @@ import (
9 "github.com/apparentlymart/go-textseg/textseg" 9 "github.com/apparentlymart/go-textseg/textseg"
10 "github.com/hashicorp/hcl2/hcl" 10 "github.com/hashicorp/hcl2/hcl"
11 "github.com/zclconf/go-cty/cty" 11 "github.com/zclconf/go-cty/cty"
12 "github.com/zclconf/go-cty/cty/convert"
13) 12)
14 13
15type parser struct { 14type parser struct {
@@ -55,7 +54,7 @@ Token:
55 Severity: hcl.DiagError, 54 Severity: hcl.DiagError,
56 Summary: "Attribute redefined", 55 Summary: "Attribute redefined",
57 Detail: fmt.Sprintf( 56 Detail: fmt.Sprintf(
58 "The attribute %q was already defined at %s. Each attribute may be defined only once.", 57 "The argument %q was already set at %s. Each argument may be set only once.",
59 titem.Name, existing.NameRange.String(), 58 titem.Name, existing.NameRange.String(),
60 ), 59 ),
61 Subject: &titem.NameRange, 60 Subject: &titem.NameRange,
@@ -80,15 +79,15 @@ Token:
80 if bad.Type == TokenOQuote { 79 if bad.Type == TokenOQuote {
81 diags = append(diags, &hcl.Diagnostic{ 80 diags = append(diags, &hcl.Diagnostic{
82 Severity: hcl.DiagError, 81 Severity: hcl.DiagError,
83 Summary: "Invalid attribute name", 82 Summary: "Invalid argument name",
84 Detail: "Attribute names must not be quoted.", 83 Detail: "Argument names must not be quoted.",
85 Subject: &bad.Range, 84 Subject: &bad.Range,
86 }) 85 })
87 } else { 86 } else {
88 diags = append(diags, &hcl.Diagnostic{ 87 diags = append(diags, &hcl.Diagnostic{
89 Severity: hcl.DiagError, 88 Severity: hcl.DiagError,
90 Summary: "Attribute or block definition required", 89 Summary: "Argument or block definition required",
91 Detail: "An attribute or block definition is required here.", 90 Detail: "An argument or block definition is required here.",
92 Subject: &bad.Range, 91 Subject: &bad.Range,
93 }) 92 })
94 } 93 }
@@ -120,8 +119,8 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
120 return nil, hcl.Diagnostics{ 119 return nil, hcl.Diagnostics{
121 { 120 {
122 Severity: hcl.DiagError, 121 Severity: hcl.DiagError,
123 Summary: "Attribute or block definition required", 122 Summary: "Argument or block definition required",
124 Detail: "An attribute or block definition is required here.", 123 Detail: "An argument or block definition is required here.",
125 Subject: &ident.Range, 124 Subject: &ident.Range,
126 }, 125 },
127 } 126 }
@@ -131,7 +130,7 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
131 130
132 switch next.Type { 131 switch next.Type {
133 case TokenEqual: 132 case TokenEqual:
134 return p.finishParsingBodyAttribute(ident) 133 return p.finishParsingBodyAttribute(ident, false)
135 case TokenOQuote, TokenOBrace, TokenIdent: 134 case TokenOQuote, TokenOBrace, TokenIdent:
136 return p.finishParsingBodyBlock(ident) 135 return p.finishParsingBodyBlock(ident)
137 default: 136 default:
@@ -139,8 +138,8 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
139 return nil, hcl.Diagnostics{ 138 return nil, hcl.Diagnostics{
140 { 139 {
141 Severity: hcl.DiagError, 140 Severity: hcl.DiagError,
142 Summary: "Attribute or block definition required", 141 Summary: "Argument or block definition required",
143 Detail: "An attribute or block definition is required here. To define an attribute, use the equals sign \"=\" to introduce the attribute value.", 142 Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.",
144 Subject: &ident.Range, 143 Subject: &ident.Range,
145 }, 144 },
146 } 145 }
@@ -149,7 +148,72 @@ func (p *parser) ParseBodyItem() (Node, hcl.Diagnostics) {
149 return nil, nil 148 return nil, nil
150} 149}
151 150
152func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics) { 151// parseSingleAttrBody is a weird variant of ParseBody that deals with the
152// body of a nested block containing only one attribute value all on a single
153// line, like foo { bar = baz } . It expects to find a single attribute item
154// immediately followed by the end token type with no intervening newlines.
155func (p *parser) parseSingleAttrBody(end TokenType) (*Body, hcl.Diagnostics) {
156 ident := p.Read()
157 if ident.Type != TokenIdent {
158 p.recoverAfterBodyItem()
159 return nil, hcl.Diagnostics{
160 {
161 Severity: hcl.DiagError,
162 Summary: "Argument or block definition required",
163 Detail: "An argument or block definition is required here.",
164 Subject: &ident.Range,
165 },
166 }
167 }
168
169 var attr *Attribute
170 var diags hcl.Diagnostics
171
172 next := p.Peek()
173
174 switch next.Type {
175 case TokenEqual:
176 node, attrDiags := p.finishParsingBodyAttribute(ident, true)
177 diags = append(diags, attrDiags...)
178 attr = node.(*Attribute)
179 case TokenOQuote, TokenOBrace, TokenIdent:
180 p.recoverAfterBodyItem()
181 return nil, hcl.Diagnostics{
182 {
183 Severity: hcl.DiagError,
184 Summary: "Argument definition required",
185 Detail: fmt.Sprintf("A single-line block definition can contain only a single argument. If you meant to define argument %q, use an equals sign to assign it a value. To define a nested block, place it on a line of its own within its parent block.", ident.Bytes),
186 Subject: hcl.RangeBetween(ident.Range, next.Range).Ptr(),
187 },
188 }
189 default:
190 p.recoverAfterBodyItem()
191 return nil, hcl.Diagnostics{
192 {
193 Severity: hcl.DiagError,
194 Summary: "Argument or block definition required",
195 Detail: "An argument or block definition is required here. To set an argument, use the equals sign \"=\" to introduce the argument value.",
196 Subject: &ident.Range,
197 },
198 }
199 }
200
201 return &Body{
202 Attributes: Attributes{
203 string(ident.Bytes): attr,
204 },
205
206 SrcRange: attr.SrcRange,
207 EndRange: hcl.Range{
208 Filename: attr.SrcRange.Filename,
209 Start: attr.SrcRange.End,
210 End: attr.SrcRange.End,
211 },
212 }, diags
213
214}
215
216func (p *parser) finishParsingBodyAttribute(ident Token, singleLine bool) (Node, hcl.Diagnostics) {
153 eqTok := p.Read() // eat equals token 217 eqTok := p.Read() // eat equals token
154 if eqTok.Type != TokenEqual { 218 if eqTok.Type != TokenEqual {
155 // should never happen if caller behaves 219 // should never happen if caller behaves
@@ -166,22 +230,33 @@ func (p *parser) finishParsingBodyAttribute(ident Token) (Node, hcl.Diagnostics)
166 endRange = p.PrevRange() 230 endRange = p.PrevRange()
167 p.recoverAfterBodyItem() 231 p.recoverAfterBodyItem()
168 } else { 232 } else {
169 end := p.Peek() 233 endRange = p.PrevRange()
170 if end.Type != TokenNewline && end.Type != TokenEOF { 234 if !singleLine {
171 if !p.recovery { 235 end := p.Peek()
172 diags = append(diags, &hcl.Diagnostic{ 236 if end.Type != TokenNewline && end.Type != TokenEOF {
173 Severity: hcl.DiagError, 237 if !p.recovery {
174 Summary: "Missing newline after attribute definition", 238 summary := "Missing newline after argument"
175 Detail: "An attribute definition must end with a newline.", 239 detail := "An argument definition must end with a newline."
176 Subject: &end.Range, 240
177 Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(), 241 if end.Type == TokenComma {
178 }) 242 summary = "Unexpected comma after argument"
243 detail = "Argument definitions must be separated by newlines, not commas. " + detail
244 }
245
246 diags = append(diags, &hcl.Diagnostic{
247 Severity: hcl.DiagError,
248 Summary: summary,
249 Detail: detail,
250 Subject: &end.Range,
251 Context: hcl.RangeBetween(ident.Range, end.Range).Ptr(),
252 })
253 }
254 endRange = p.PrevRange()
255 p.recoverAfterBodyItem()
256 } else {
257 endRange = p.PrevRange()
258 p.Read() // eat newline
179 } 259 }
180 endRange = p.PrevRange()
181 p.recoverAfterBodyItem()
182 } else {
183 endRange = p.PrevRange()
184 p.Read() // eat newline
185 } 260 }
186 } 261 }
187 262
@@ -218,19 +293,9 @@ Token:
218 diags = append(diags, labelDiags...) 293 diags = append(diags, labelDiags...)
219 labels = append(labels, label) 294 labels = append(labels, label)
220 labelRanges = append(labelRanges, labelRange) 295 labelRanges = append(labelRanges, labelRange)
221 if labelDiags.HasErrors() { 296 // parseQuoteStringLiteral recovers up to the closing quote
222 p.recoverAfterBodyItem() 297 // if it encounters problems, so we can continue looking for
223 return &Block{ 298 // more labels and eventually the block body even.
224 Type: blockType,
225 Labels: labels,
226 Body: nil,
227
228 TypeRange: ident.Range,
229 LabelRanges: labelRanges,
230 OpenBraceRange: ident.Range, // placeholder
231 CloseBraceRange: ident.Range, // placeholder
232 }, diags
233 }
234 299
235 case TokenIdent: 300 case TokenIdent:
236 tok = p.Read() // eat token 301 tok = p.Read() // eat token
@@ -244,7 +309,7 @@ Token:
244 diags = append(diags, &hcl.Diagnostic{ 309 diags = append(diags, &hcl.Diagnostic{
245 Severity: hcl.DiagError, 310 Severity: hcl.DiagError,
246 Summary: "Invalid block definition", 311 Summary: "Invalid block definition",
247 Detail: "The equals sign \"=\" indicates an attribute definition, and must not be used when defining a block.", 312 Detail: "The equals sign \"=\" indicates an argument definition, and must not be used when defining a block.",
248 Subject: &tok.Range, 313 Subject: &tok.Range,
249 Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(), 314 Context: hcl.RangeBetween(ident.Range, tok.Range).Ptr(),
250 }) 315 })
@@ -273,7 +338,10 @@ Token:
273 return &Block{ 338 return &Block{
274 Type: blockType, 339 Type: blockType,
275 Labels: labels, 340 Labels: labels,
276 Body: nil, 341 Body: &Body{
342 SrcRange: ident.Range,
343 EndRange: ident.Range,
344 },
277 345
278 TypeRange: ident.Range, 346 TypeRange: ident.Range,
279 LabelRanges: labelRanges, 347 LabelRanges: labelRanges,
@@ -285,7 +353,51 @@ Token:
285 353
286 // Once we fall out here, the peeker is pointed just after our opening 354 // Once we fall out here, the peeker is pointed just after our opening
287 // brace, so we can begin our nested body parsing. 355 // brace, so we can begin our nested body parsing.
288 body, bodyDiags := p.ParseBody(TokenCBrace) 356 var body *Body
357 var bodyDiags hcl.Diagnostics
358 switch p.Peek().Type {
359 case TokenNewline, TokenEOF, TokenCBrace:
360 body, bodyDiags = p.ParseBody(TokenCBrace)
361 default:
362 // Special one-line, single-attribute block parsing mode.
363 body, bodyDiags = p.parseSingleAttrBody(TokenCBrace)
364 switch p.Peek().Type {
365 case TokenCBrace:
366 p.Read() // the happy path - just consume the closing brace
367 case TokenComma:
368 // User seems to be trying to use the object-constructor
369 // comma-separated style, which isn't permitted for blocks.
370 diags = append(diags, &hcl.Diagnostic{
371 Severity: hcl.DiagError,
372 Summary: "Invalid single-argument block definition",
373 Detail: "Single-line block syntax can include only one argument definition. To define multiple arguments, use the multi-line block syntax with one argument definition per line.",
374 Subject: p.Peek().Range.Ptr(),
375 })
376 p.recover(TokenCBrace)
377 case TokenNewline:
378 // We don't allow weird mixtures of single and multi-line syntax.
379 diags = append(diags, &hcl.Diagnostic{
380 Severity: hcl.DiagError,
381 Summary: "Invalid single-argument block definition",
382 Detail: "An argument definition on the same line as its containing block creates a single-line block definition, which must also be closed on the same line. Place the block's closing brace immediately after the argument definition.",
383 Subject: p.Peek().Range.Ptr(),
384 })
385 p.recover(TokenCBrace)
386 default:
387 // Some other weird thing is going on. Since we can't guess a likely
388 // user intent for this one, we'll skip it if we're already in
389 // recovery mode.
390 if !p.recovery {
391 diags = append(diags, &hcl.Diagnostic{
392 Severity: hcl.DiagError,
393 Summary: "Invalid single-argument block definition",
394 Detail: "A single-line block definition must end with a closing brace immediately after its single argument definition.",
395 Subject: p.Peek().Range.Ptr(),
396 })
397 }
398 p.recover(TokenCBrace)
399 }
400 }
289 diags = append(diags, bodyDiags...) 401 diags = append(diags, bodyDiags...)
290 cBraceRange := p.PrevRange() 402 cBraceRange := p.PrevRange()
291 403
@@ -305,6 +417,17 @@ Token:
305 p.recoverAfterBodyItem() 417 p.recoverAfterBodyItem()
306 } 418 }
307 419
420 // We must never produce a nil body, since the caller may attempt to
421 // do analysis of a partial result when there's an error, so we'll
422 // insert a placeholder if we otherwise failed to produce a valid
423 // body due to one of the syntax error paths above.
424 if body == nil && diags.HasErrors() {
425 body = &Body{
426 SrcRange: hcl.RangeBetween(oBrace.Range, cBraceRange),
427 EndRange: cBraceRange,
428 }
429 }
430
308 return &Block{ 431 return &Block{
309 Type: blockType, 432 Type: blockType,
310 Labels: labels, 433 Labels: labels,
@@ -459,7 +582,14 @@ func (p *parser) parseBinaryOps(ops []map[TokenType]*Operation) (Expression, hcl
459 582
460func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) { 583func (p *parser) parseExpressionWithTraversals() (Expression, hcl.Diagnostics) {
461 term, diags := p.parseExpressionTerm() 584 term, diags := p.parseExpressionTerm()
462 ret := term 585 ret, moreDiags := p.parseExpressionTraversals(term)
586 diags = append(diags, moreDiags...)
587 return ret, diags
588}
589
590func (p *parser) parseExpressionTraversals(from Expression) (Expression, hcl.Diagnostics) {
591 var diags hcl.Diagnostics
592 ret := from
463 593
464Traversal: 594Traversal:
465 for { 595 for {
@@ -657,44 +787,81 @@ Traversal:
657 // the key value is something constant. 787 // the key value is something constant.
658 788
659 open := p.Read() 789 open := p.Read()
660 // TODO: If we have a TokenStar inside our brackets, parse as 790 switch p.Peek().Type {
661 // a Splat expression: foo[*].baz[0]. 791 case TokenStar:
662 var close Token 792 // This is a full splat expression, like foo[*], which consumes
663 p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets 793 // the rest of the traversal steps after it using a recursive
664 keyExpr, keyDiags := p.ParseExpression() 794 // call to this function.
665 diags = append(diags, keyDiags...) 795 p.Read() // consume star
666 if p.recovery && keyDiags.HasErrors() { 796 close := p.Read()
667 close = p.recover(TokenCBrack)
668 } else {
669 close = p.Read()
670 if close.Type != TokenCBrack && !p.recovery { 797 if close.Type != TokenCBrack && !p.recovery {
671 diags = append(diags, &hcl.Diagnostic{ 798 diags = append(diags, &hcl.Diagnostic{
672 Severity: hcl.DiagError, 799 Severity: hcl.DiagError,
673 Summary: "Missing close bracket on index", 800 Summary: "Missing close bracket on splat index",
674 Detail: "The index operator must end with a closing bracket (\"]\").", 801 Detail: "The star for a full splat operator must be immediately followed by a closing bracket (\"]\").",
675 Subject: &close.Range, 802 Subject: &close.Range,
676 }) 803 })
677 close = p.recover(TokenCBrack) 804 close = p.recover(TokenCBrack)
678 } 805 }
679 } 806 // Splat expressions use a special "anonymous symbol" as a
680 p.PopIncludeNewlines() 807 // placeholder in an expression to be evaluated once for each
808 // item in the source expression.
809 itemExpr := &AnonSymbolExpr{
810 SrcRange: hcl.RangeBetween(open.Range, close.Range),
811 }
812 // Now we'll recursively call this same function to eat any
813 // remaining traversal steps against the anonymous symbol.
814 travExpr, nestedDiags := p.parseExpressionTraversals(itemExpr)
815 diags = append(diags, nestedDiags...)
681 816
682 if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { 817 ret = &SplatExpr{
683 litKey, _ := lit.Value(nil) 818 Source: ret,
684 rng := hcl.RangeBetween(open.Range, close.Range) 819 Each: travExpr,
685 step := hcl.TraverseIndex{ 820 Item: itemExpr,
686 Key: litKey, 821
687 SrcRange: rng, 822 SrcRange: hcl.RangeBetween(open.Range, travExpr.Range()),
823 MarkerRange: hcl.RangeBetween(open.Range, close.Range),
688 } 824 }
689 ret = makeRelativeTraversal(ret, step, rng)
690 } else {
691 rng := hcl.RangeBetween(open.Range, close.Range)
692 ret = &IndexExpr{
693 Collection: ret,
694 Key: keyExpr,
695 825
696 SrcRange: rng, 826 default:
697 OpenRange: open.Range, 827
828 var close Token
829 p.PushIncludeNewlines(false) // arbitrary newlines allowed in brackets
830 keyExpr, keyDiags := p.ParseExpression()
831 diags = append(diags, keyDiags...)
832 if p.recovery && keyDiags.HasErrors() {
833 close = p.recover(TokenCBrack)
834 } else {
835 close = p.Read()
836 if close.Type != TokenCBrack && !p.recovery {
837 diags = append(diags, &hcl.Diagnostic{
838 Severity: hcl.DiagError,
839 Summary: "Missing close bracket on index",
840 Detail: "The index operator must end with a closing bracket (\"]\").",
841 Subject: &close.Range,
842 })
843 close = p.recover(TokenCBrack)
844 }
845 }
846 p.PopIncludeNewlines()
847
848 if lit, isLit := keyExpr.(*LiteralValueExpr); isLit {
849 litKey, _ := lit.Value(nil)
850 rng := hcl.RangeBetween(open.Range, close.Range)
851 step := hcl.TraverseIndex{
852 Key: litKey,
853 SrcRange: rng,
854 }
855 ret = makeRelativeTraversal(ret, step, rng)
856 } else {
857 rng := hcl.RangeBetween(open.Range, close.Range)
858 ret = &IndexExpr{
859 Collection: ret,
860 Key: keyExpr,
861
862 SrcRange: rng,
863 OpenRange: open.Range,
864 }
698 } 865 }
699 } 866 }
700 867
@@ -813,7 +980,7 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) {
813 case TokenOQuote, TokenOHeredoc: 980 case TokenOQuote, TokenOHeredoc:
814 open := p.Read() // eat opening marker 981 open := p.Read() // eat opening marker
815 closer := p.oppositeBracket(open.Type) 982 closer := p.oppositeBracket(open.Type)
816 exprs, passthru, _, diags := p.parseTemplateInner(closer) 983 exprs, passthru, _, diags := p.parseTemplateInner(closer, tokenOpensFlushHeredoc(open))
817 984
818 closeRange := p.PrevRange() 985 closeRange := p.PrevRange()
819 986
@@ -891,11 +1058,10 @@ func (p *parser) parseExpressionTerm() (Expression, hcl.Diagnostics) {
891} 1058}
892 1059
893func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) { 1060func (p *parser) numberLitValue(tok Token) (cty.Value, hcl.Diagnostics) {
894 // We'll lean on the cty converter to do the conversion, to ensure that 1061 // The cty.ParseNumberVal is always the same behavior as converting a
895 // the behavior is the same as what would happen if converting a 1062 // string to a number, ensuring we always interpret decimal numbers in
896 // non-literal string to a number. 1063 // the same way.
897 numStrVal := cty.StringVal(string(tok.Bytes)) 1064 numVal, err := cty.ParseNumberVal(string(tok.Bytes))
898 numVal, err := convert.Convert(numStrVal, cty.Number)
899 if err != nil { 1065 if err != nil {
900 ret := cty.UnknownVal(cty.Number) 1066 ret := cty.UnknownVal(cty.Number)
901 return ret, hcl.Diagnostics{ 1067 return ret, hcl.Diagnostics{
@@ -1087,13 +1253,19 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
1087 panic("parseObjectCons called without peeker pointing to open brace") 1253 panic("parseObjectCons called without peeker pointing to open brace")
1088 } 1254 }
1089 1255
1090 p.PushIncludeNewlines(true) 1256 // We must temporarily stop looking at newlines here while we check for
1091 defer p.PopIncludeNewlines() 1257 // a "for" keyword, since for expressions are _not_ newline-sensitive,
1092 1258 // even though object constructors are.
1093 if forKeyword.TokenMatches(p.Peek()) { 1259 p.PushIncludeNewlines(false)
1260 isFor := forKeyword.TokenMatches(p.Peek())
1261 p.PopIncludeNewlines()
1262 if isFor {
1094 return p.finishParsingForExpr(open) 1263 return p.finishParsingForExpr(open)
1095 } 1264 }
1096 1265
1266 p.PushIncludeNewlines(true)
1267 defer p.PopIncludeNewlines()
1268
1097 var close Token 1269 var close Token
1098 1270
1099 var diags hcl.Diagnostics 1271 var diags hcl.Diagnostics
@@ -1132,19 +1304,36 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
1132 next = p.Peek() 1304 next = p.Peek()
1133 if next.Type != TokenEqual && next.Type != TokenColon { 1305 if next.Type != TokenEqual && next.Type != TokenColon {
1134 if !p.recovery { 1306 if !p.recovery {
1135 if next.Type == TokenNewline || next.Type == TokenComma { 1307 switch next.Type {
1308 case TokenNewline, TokenComma:
1136 diags = append(diags, &hcl.Diagnostic{ 1309 diags = append(diags, &hcl.Diagnostic{
1137 Severity: hcl.DiagError, 1310 Severity: hcl.DiagError,
1138 Summary: "Missing item value", 1311 Summary: "Missing attribute value",
1139 Detail: "Expected an item value, introduced by an equals sign (\"=\").", 1312 Detail: "Expected an attribute value, introduced by an equals sign (\"=\").",
1140 Subject: &next.Range, 1313 Subject: &next.Range,
1141 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), 1314 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
1142 }) 1315 })
1143 } else { 1316 case TokenIdent:
1317 // Although this might just be a plain old missing equals
1318 // sign before a reference, one way to get here is to try
1319 // to write an attribute name containing a period followed
1320 // by a digit, which was valid in HCL1, like this:
1321 // foo1.2_bar = "baz"
1322 // We can't know exactly what the user intended here, but
1323 // we'll augment our message with an extra hint in this case
1324 // in case it is helpful.
1144 diags = append(diags, &hcl.Diagnostic{ 1325 diags = append(diags, &hcl.Diagnostic{
1145 Severity: hcl.DiagError, 1326 Severity: hcl.DiagError,
1146 Summary: "Missing key/value separator", 1327 Summary: "Missing key/value separator",
1147 Detail: "Expected an equals sign (\"=\") to mark the beginning of the item value.", 1328 Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value. If you intended to given an attribute name containing periods or spaces, write the name in quotes to create a string literal.",
1329 Subject: &next.Range,
1330 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
1331 })
1332 default:
1333 diags = append(diags, &hcl.Diagnostic{
1334 Severity: hcl.DiagError,
1335 Summary: "Missing key/value separator",
1336 Detail: "Expected an equals sign (\"=\") to mark the beginning of the attribute value.",
1148 Subject: &next.Range, 1337 Subject: &next.Range,
1149 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), 1338 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
1150 }) 1339 })
@@ -1182,8 +1371,8 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
1182 if !p.recovery { 1371 if !p.recovery {
1183 diags = append(diags, &hcl.Diagnostic{ 1372 diags = append(diags, &hcl.Diagnostic{
1184 Severity: hcl.DiagError, 1373 Severity: hcl.DiagError,
1185 Summary: "Missing item separator", 1374 Summary: "Missing attribute separator",
1186 Detail: "Expected a newline or comma to mark the beginning of the next item.", 1375 Detail: "Expected a newline or comma to mark the beginning of the next attribute.",
1187 Subject: &next.Range, 1376 Subject: &next.Range,
1188 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(), 1377 Context: hcl.RangeBetween(open.Range, next.Range).Ptr(),
1189 }) 1378 })
@@ -1205,6 +1394,8 @@ func (p *parser) parseObjectCons() (Expression, hcl.Diagnostics) {
1205} 1394}
1206 1395
1207func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) { 1396func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics) {
1397 p.PushIncludeNewlines(false)
1398 defer p.PopIncludeNewlines()
1208 introducer := p.Read() 1399 introducer := p.Read()
1209 if !forKeyword.TokenMatches(introducer) { 1400 if !forKeyword.TokenMatches(introducer) {
1210 // Should never happen if callers are behaving 1401 // Should never happen if callers are behaving
@@ -1277,7 +1468,7 @@ func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics)
1277 diags = append(diags, &hcl.Diagnostic{ 1468 diags = append(diags, &hcl.Diagnostic{
1278 Severity: hcl.DiagError, 1469 Severity: hcl.DiagError,
1279 Summary: "Invalid 'for' expression", 1470 Summary: "Invalid 'for' expression",
1280 Detail: "For expression requires 'in' keyword after names.", 1471 Detail: "For expression requires the 'in' keyword after its name declarations.",
1281 Subject: p.Peek().Range.Ptr(), 1472 Subject: p.Peek().Range.Ptr(),
1282 Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), 1473 Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
1283 }) 1474 })
@@ -1305,7 +1496,7 @@ func (p *parser) finishParsingForExpr(open Token) (Expression, hcl.Diagnostics)
1305 diags = append(diags, &hcl.Diagnostic{ 1496 diags = append(diags, &hcl.Diagnostic{
1306 Severity: hcl.DiagError, 1497 Severity: hcl.DiagError,
1307 Summary: "Invalid 'for' expression", 1498 Summary: "Invalid 'for' expression",
1308 Detail: "For expression requires colon after collection expression.", 1499 Detail: "For expression requires a colon after the collection expression.",
1309 Subject: p.Peek().Range.Ptr(), 1500 Subject: p.Peek().Range.Ptr(),
1310 Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(), 1501 Context: hcl.RangeBetween(open.Range, p.Peek().Range).Ptr(),
1311 }) 1502 })
@@ -1459,7 +1650,7 @@ Token:
1459 case TokenTemplateControl, TokenTemplateInterp: 1650 case TokenTemplateControl, TokenTemplateInterp:
1460 which := "$" 1651 which := "$"
1461 if tok.Type == TokenTemplateControl { 1652 if tok.Type == TokenTemplateControl {
1462 which = "!" 1653 which = "%"
1463 } 1654 }
1464 1655
1465 diags = append(diags, &hcl.Diagnostic{ 1656 diags = append(diags, &hcl.Diagnostic{
@@ -1472,7 +1663,16 @@ Token:
1472 Subject: &tok.Range, 1663 Subject: &tok.Range,
1473 Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), 1664 Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
1474 }) 1665 })
1475 p.recover(TokenTemplateSeqEnd) 1666
1667 // Now that we're returning an error callers won't attempt to use
1668 // the result for any real operations, but they might try to use
1669 // the partial AST for other analyses, so we'll leave a marker
1670 // to indicate that there was something invalid in the string to
1671 // help avoid misinterpretation of the partial result
1672 ret.WriteString(which)
1673 ret.WriteString("{ ... }")
1674
1675 p.recover(TokenTemplateSeqEnd) // we'll try to keep parsing after the sequence ends
1476 1676
1477 case TokenEOF: 1677 case TokenEOF:
1478 diags = append(diags, &hcl.Diagnostic{ 1678 diags = append(diags, &hcl.Diagnostic{
@@ -1493,7 +1693,7 @@ Token:
1493 Subject: &tok.Range, 1693 Subject: &tok.Range,
1494 Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(), 1694 Context: hcl.RangeBetween(oQuote.Range, tok.Range).Ptr(),
1495 }) 1695 })
1496 p.recover(TokenOQuote) 1696 p.recover(TokenCQuote)
1497 break Token 1697 break Token
1498 1698
1499 } 1699 }
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go
index 3711067..a141626 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser_template.go
@@ -5,16 +5,17 @@ import (
5 "strings" 5 "strings"
6 "unicode" 6 "unicode"
7 7
8 "github.com/apparentlymart/go-textseg/textseg"
8 "github.com/hashicorp/hcl2/hcl" 9 "github.com/hashicorp/hcl2/hcl"
9 "github.com/zclconf/go-cty/cty" 10 "github.com/zclconf/go-cty/cty"
10) 11)
11 12
12func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) { 13func (p *parser) ParseTemplate() (Expression, hcl.Diagnostics) {
13 return p.parseTemplate(TokenEOF) 14 return p.parseTemplate(TokenEOF, false)
14} 15}
15 16
16func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) { 17func (p *parser) parseTemplate(end TokenType, flushHeredoc bool) (Expression, hcl.Diagnostics) {
17 exprs, passthru, rng, diags := p.parseTemplateInner(end) 18 exprs, passthru, rng, diags := p.parseTemplateInner(end, flushHeredoc)
18 19
19 if passthru { 20 if passthru {
20 if len(exprs) != 1 { 21 if len(exprs) != 1 {
@@ -32,8 +33,11 @@ func (p *parser) parseTemplate(end TokenType) (Expression, hcl.Diagnostics) {
32 }, diags 33 }, diags
33} 34}
34 35
35func (p *parser) parseTemplateInner(end TokenType) ([]Expression, bool, hcl.Range, hcl.Diagnostics) { 36func (p *parser) parseTemplateInner(end TokenType, flushHeredoc bool) ([]Expression, bool, hcl.Range, hcl.Diagnostics) {
36 parts, diags := p.parseTemplateParts(end) 37 parts, diags := p.parseTemplateParts(end)
38 if flushHeredoc {
39 flushHeredocTemplateParts(parts) // Trim off leading spaces on lines per the flush heredoc spec
40 }
37 tp := templateParser{ 41 tp := templateParser{
38 Tokens: parts.Tokens, 42 Tokens: parts.Tokens,
39 SrcRange: parts.SrcRange, 43 SrcRange: parts.SrcRange,
@@ -649,6 +653,73 @@ Token:
649 return ret, diags 653 return ret, diags
650} 654}
651 655
656// flushHeredocTemplateParts modifies in-place the line-leading literal strings
657// to apply the flush heredoc processing rule: find the line with the smallest
658// number of whitespace characters as prefix and then trim that number of
659// characters from all of the lines.
660//
661// This rule is applied to static tokens rather than to the rendered result,
662// so interpolating a string with leading whitespace cannot affect the chosen
663// prefix length.
664func flushHeredocTemplateParts(parts *templateParts) {
665 if len(parts.Tokens) == 0 {
666 // Nothing to do
667 return
668 }
669
670 const maxInt = int((^uint(0)) >> 1)
671
672 minSpaces := maxInt
673 newline := true
674 var adjust []*templateLiteralToken
675 for _, ttok := range parts.Tokens {
676 if newline {
677 newline = false
678 var spaces int
679 if lit, ok := ttok.(*templateLiteralToken); ok {
680 orig := lit.Val
681 trimmed := strings.TrimLeftFunc(orig, unicode.IsSpace)
682 // If a token is entirely spaces and ends with a newline
683 // then it's a "blank line" and thus not considered for
684 // space-prefix-counting purposes.
685 if len(trimmed) == 0 && strings.HasSuffix(orig, "\n") {
686 spaces = maxInt
687 } else {
688 spaceBytes := len(lit.Val) - len(trimmed)
689 spaces, _ = textseg.TokenCount([]byte(orig[:spaceBytes]), textseg.ScanGraphemeClusters)
690 adjust = append(adjust, lit)
691 }
692 } else if _, ok := ttok.(*templateEndToken); ok {
693 break // don't process the end token since it never has spaces before it
694 }
695 if spaces < minSpaces {
696 minSpaces = spaces
697 }
698 }
699 if lit, ok := ttok.(*templateLiteralToken); ok {
700 if strings.HasSuffix(lit.Val, "\n") {
701 newline = true // The following token, if any, begins a new line
702 }
703 }
704 }
705
706 for _, lit := range adjust {
707 // Since we want to count space _characters_ rather than space _bytes_,
708 // we can't just do a straightforward slice operation here and instead
709 // need to hunt for the split point with a scanner.
710 valBytes := []byte(lit.Val)
711 spaceByteCount := 0
712 for i := 0; i < minSpaces; i++ {
713 adv, _, _ := textseg.ScanGraphemeClusters(valBytes, true)
714 spaceByteCount += adv
715 valBytes = valBytes[adv:]
716 }
717 lit.Val = lit.Val[spaceByteCount:]
718 lit.SrcRange.Start.Column += minSpaces
719 lit.SrcRange.Start.Byte += spaceByteCount
720 }
721}
722
652type templateParts struct { 723type templateParts struct {
653 Tokens []templateToken 724 Tokens []templateToken
654 SrcRange hcl.Range 725 SrcRange hcl.Range
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go
index de1f524..2895ade 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_string_lit.go
@@ -1,10 +1,10 @@
1// line 1 "scan_string_lit.rl" 1//line scan_string_lit.rl:1
2 2
3package hclsyntax 3package hclsyntax
4 4
5// This file is generated from scan_string_lit.rl. DO NOT EDIT. 5// This file is generated from scan_string_lit.rl. DO NOT EDIT.
6 6
7// line 9 "scan_string_lit.go" 7//line scan_string_lit.go:9
8var _hclstrtok_actions []byte = []byte{ 8var _hclstrtok_actions []byte = []byte{
9 0, 1, 0, 1, 1, 2, 1, 0, 9 0, 1, 0, 1, 1, 2, 1, 0,
10} 10}
@@ -114,12 +114,12 @@ const hclstrtok_error int = 0
114const hclstrtok_en_quoted int = 10 114const hclstrtok_en_quoted int = 10
115const hclstrtok_en_unquoted int = 4 115const hclstrtok_en_unquoted int = 4
116 116
117// line 10 "scan_string_lit.rl" 117//line scan_string_lit.rl:10
118 118
119func scanStringLit(data []byte, quoted bool) [][]byte { 119func scanStringLit(data []byte, quoted bool) [][]byte {
120 var ret [][]byte 120 var ret [][]byte
121 121
122 // line 61 "scan_string_lit.rl" 122//line scan_string_lit.rl:61
123 123
124 // Ragel state 124 // Ragel state
125 p := 0 // "Pointer" into data 125 p := 0 // "Pointer" into data
@@ -144,11 +144,11 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
144 ret = append(ret, data[ts:te]) 144 ret = append(ret, data[ts:te])
145 }*/ 145 }*/
146 146
147 // line 154 "scan_string_lit.go" 147//line scan_string_lit.go:154
148 { 148 {
149 } 149 }
150 150
151 // line 158 "scan_string_lit.go" 151//line scan_string_lit.go:158
152 { 152 {
153 var _klen int 153 var _klen int
154 var _trans int 154 var _trans int
@@ -229,7 +229,7 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
229 _acts++ 229 _acts++
230 switch _hclstrtok_actions[_acts-1] { 230 switch _hclstrtok_actions[_acts-1] {
231 case 0: 231 case 0:
232 // line 40 "scan_string_lit.rl" 232//line scan_string_lit.rl:40
233 233
234 // If te is behind p then we've skipped over some literal 234 // If te is behind p then we've skipped over some literal
235 // characters which we must now return. 235 // characters which we must now return.
@@ -239,12 +239,12 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
239 ts = p 239 ts = p
240 240
241 case 1: 241 case 1:
242 // line 48 "scan_string_lit.rl" 242//line scan_string_lit.rl:48
243 243
244 te = p 244 te = p
245 ret = append(ret, data[ts:te]) 245 ret = append(ret, data[ts:te])
246 246
247 // line 255 "scan_string_lit.go" 247//line scan_string_lit.go:253
248 } 248 }
249 } 249 }
250 250
@@ -267,12 +267,12 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
267 __acts++ 267 __acts++
268 switch _hclstrtok_actions[__acts-1] { 268 switch _hclstrtok_actions[__acts-1] {
269 case 1: 269 case 1:
270 // line 48 "scan_string_lit.rl" 270//line scan_string_lit.rl:48
271 271
272 te = p 272 te = p
273 ret = append(ret, data[ts:te]) 273 ret = append(ret, data[ts:te])
274 274
275 // line 281 "scan_string_lit.go" 275//line scan_string_lit.go:278
276 } 276 }
277 } 277 }
278 } 278 }
@@ -282,7 +282,7 @@ func scanStringLit(data []byte, quoted bool) [][]byte {
282 } 282 }
283 } 283 }
284 284
285 // line 89 "scan_string_lit.rl" 285//line scan_string_lit.rl:89
286 286
287 if te < p { 287 if te < p {
288 // Collect any leftover literal characters at the end of the input 288 // Collect any leftover literal characters at the end of the input
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
index 395e9c1..581e35e 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.go
@@ -1,4 +1,4 @@
1// line 1 "scan_tokens.rl" 1//line scan_tokens.rl:1
2 2
3package hclsyntax 3package hclsyntax
4 4
@@ -10,570 +10,256 @@ import (
10 10
11// This file is generated from scan_tokens.rl. DO NOT EDIT. 11// This file is generated from scan_tokens.rl. DO NOT EDIT.
12 12
13// line 15 "scan_tokens.go" 13//line scan_tokens.go:15
14var _hcltok_actions []byte = []byte{ 14var _hcltok_actions []byte = []byte{
15 0, 1, 0, 1, 1, 1, 2, 1, 3, 15 0, 1, 0, 1, 1, 1, 3, 1, 4,
16 1, 4, 1, 6, 1, 7, 1, 8, 16 1, 7, 1, 8, 1, 9, 1, 10,
17 1, 9, 1, 10, 1, 11, 1, 12, 17 1, 11, 1, 12, 1, 13, 1, 14,
18 1, 13, 1, 14, 1, 15, 1, 16, 18 1, 15, 1, 16, 1, 17, 1, 18,
19 1, 17, 1, 18, 1, 19, 1, 22, 19 1, 19, 1, 20, 1, 23, 1, 24,
20 1, 23, 1, 24, 1, 25, 1, 26, 20 1, 25, 1, 26, 1, 27, 1, 28,
21 1, 27, 1, 28, 1, 29, 1, 30, 21 1, 29, 1, 30, 1, 31, 1, 32,
22 1, 31, 1, 34, 1, 35, 1, 36, 22 1, 35, 1, 36, 1, 37, 1, 38,
23 1, 37, 1, 38, 1, 39, 1, 40, 23 1, 39, 1, 40, 1, 41, 1, 42,
24 1, 41, 1, 42, 1, 43, 1, 46, 24 1, 43, 1, 44, 1, 47, 1, 48,
25 1, 47, 1, 48, 1, 49, 1, 50, 25 1, 49, 1, 50, 1, 51, 1, 52,
26 1, 51, 1, 52, 1, 58, 1, 59, 26 1, 53, 1, 56, 1, 57, 1, 58,
27 1, 60, 1, 61, 1, 62, 1, 63, 27 1, 59, 1, 60, 1, 61, 1, 62,
28 1, 64, 1, 65, 1, 66, 1, 67, 28 1, 63, 1, 64, 1, 65, 1, 66,
29 1, 68, 1, 69, 1, 70, 1, 71, 29 1, 67, 1, 68, 1, 69, 1, 70,
30 1, 72, 1, 73, 1, 74, 1, 75, 30 1, 71, 1, 72, 1, 73, 1, 74,
31 1, 76, 1, 77, 1, 78, 1, 79, 31 1, 75, 1, 76, 1, 77, 1, 78,
32 1, 80, 1, 81, 1, 82, 1, 83, 32 1, 79, 1, 80, 1, 81, 1, 82,
33 1, 84, 1, 85, 1, 86, 1, 87, 33 1, 83, 1, 84, 1, 85, 2, 0,
34 2, 0, 15, 2, 1, 15, 2, 2, 34 14, 2, 0, 25, 2, 0, 29, 2,
35 24, 2, 2, 28, 2, 3, 24, 2, 35 0, 37, 2, 0, 41, 2, 1, 2,
36 3, 28, 2, 4, 5, 2, 7, 0, 36 2, 4, 5, 2, 4, 6, 2, 4,
37 2, 7, 1, 2, 7, 20, 2, 7, 37 21, 2, 4, 22, 2, 4, 33, 2,
38 21, 2, 7, 32, 2, 7, 33, 2, 38 4, 34, 2, 4, 45, 2, 4, 46,
39 7, 44, 2, 7, 45, 2, 7, 53, 39 2, 4, 54, 2, 4, 55,
40 2, 7, 54, 2, 7, 55, 2, 7,
41 56, 2, 7, 57, 3, 7, 2, 20,
42 3, 7, 3, 20,
43} 40}
44 41
45var _hcltok_key_offsets []int16 = []int16{ 42var _hcltok_key_offsets []int16 = []int16{
46 0, 0, 1, 2, 3, 5, 10, 14, 43 0, 0, 1, 2, 4, 9, 13, 15,
47 16, 58, 99, 145, 146, 150, 156, 156, 44 57, 98, 144, 145, 149, 155, 155, 157,
48 158, 160, 169, 175, 182, 183, 186, 187, 45 159, 168, 174, 181, 182, 185, 186, 190,
49 191, 196, 205, 209, 213, 221, 223, 225, 46 195, 204, 208, 212, 220, 222, 224, 226,
50 227, 230, 262, 264, 266, 270, 274, 277, 47 229, 261, 263, 265, 269, 273, 276, 287,
51 288, 301, 320, 333, 349, 361, 377, 392, 48 300, 319, 332, 348, 360, 376, 391, 412,
52 413, 423, 435, 446, 460, 475, 485, 497, 49 422, 434, 445, 459, 474, 484, 496, 505,
53 506, 518, 520, 524, 545, 554, 564, 570, 50 517, 519, 523, 544, 553, 563, 569, 575,
54 576, 577, 626, 628, 632, 634, 640, 647, 51 576, 625, 627, 631, 633, 639, 646, 654,
55 655, 662, 665, 671, 675, 679, 681, 685, 52 661, 664, 670, 674, 678, 680, 684, 688,
56 689, 693, 699, 707, 715, 721, 723, 727, 53 692, 698, 706, 714, 720, 722, 726, 728,
57 729, 735, 739, 743, 747, 751, 756, 763, 54 734, 738, 742, 746, 750, 755, 762, 768,
58 769, 771, 773, 777, 779, 785, 789, 793, 55 770, 772, 776, 778, 784, 788, 792, 802,
59 803, 808, 822, 837, 839, 847, 849, 854, 56 807, 821, 836, 838, 846, 848, 853, 867,
60 868, 873, 875, 879, 880, 884, 890, 896, 57 872, 874, 878, 879, 883, 889, 895, 905,
61 906, 916, 927, 935, 938, 941, 945, 949, 58 915, 926, 934, 937, 940, 944, 948, 950,
62 951, 954, 954, 957, 959, 989, 991, 993, 59 953, 953, 956, 958, 988, 990, 992, 996,
63 997, 1002, 1006, 1011, 1013, 1015, 1017, 1026, 60 1001, 1005, 1010, 1012, 1014, 1016, 1025, 1029,
64 1030, 1034, 1040, 1042, 1050, 1058, 1070, 1073, 61 1033, 1039, 1041, 1049, 1057, 1069, 1072, 1078,
65 1079, 1083, 1085, 1089, 1109, 1111, 1113, 1124, 62 1082, 1084, 1088, 1108, 1110, 1112, 1123, 1129,
66 1130, 1132, 1134, 1136, 1140, 1146, 1152, 1154, 63 1131, 1133, 1135, 1139, 1145, 1151, 1153, 1158,
67 1159, 1163, 1165, 1173, 1191, 1231, 1241, 1245, 64 1162, 1164, 1172, 1190, 1230, 1240, 1244, 1246,
68 1247, 1249, 1250, 1254, 1258, 1262, 1266, 1270, 65 1248, 1249, 1253, 1257, 1261, 1265, 1269, 1274,
69 1275, 1279, 1283, 1287, 1289, 1291, 1295, 1305, 66 1278, 1282, 1286, 1288, 1290, 1294, 1304, 1308,
70 1309, 1311, 1315, 1319, 1323, 1336, 1338, 1340, 67 1310, 1314, 1318, 1322, 1335, 1337, 1339, 1343,
71 1344, 1346, 1350, 1352, 1354, 1384, 1388, 1392, 68 1345, 1349, 1351, 1353, 1383, 1387, 1391, 1395,
72 1396, 1399, 1406, 1411, 1422, 1426, 1442, 1456, 69 1398, 1405, 1410, 1421, 1425, 1441, 1455, 1459,
73 1460, 1465, 1469, 1473, 1479, 1481, 1487, 1489, 70 1464, 1468, 1472, 1478, 1480, 1486, 1488, 1492,
74 1493, 1495, 1501, 1506, 1511, 1521, 1523, 1525, 71 1494, 1500, 1505, 1510, 1520, 1522, 1524, 1528,
75 1529, 1533, 1535, 1548, 1550, 1554, 1558, 1566, 72 1532, 1534, 1547, 1549, 1553, 1557, 1565, 1567,
76 1568, 1572, 1574, 1575, 1578, 1583, 1585, 1587, 73 1571, 1573, 1574, 1577, 1582, 1584, 1586, 1590,
77 1591, 1593, 1597, 1603, 1623, 1629, 1635, 1637, 74 1592, 1596, 1602, 1622, 1628, 1634, 1636, 1637,
78 1638, 1648, 1649, 1657, 1664, 1666, 1669, 1671, 75 1647, 1648, 1656, 1663, 1665, 1668, 1670, 1672,
79 1673, 1675, 1680, 1684, 1688, 1693, 1703, 1713, 76 1674, 1679, 1683, 1687, 1692, 1702, 1712, 1716,
80 1717, 1721, 1735, 1761, 1771, 1773, 1775, 1778, 77 1720, 1734, 1760, 1770, 1772, 1774, 1777, 1779,
81 1780, 1783, 1785, 1789, 1791, 1792, 1796, 1798, 78 1782, 1784, 1788, 1790, 1791, 1795, 1797, 1800,
82 1801, 1808, 1816, 1818, 1820, 1824, 1826, 1832, 79 1807, 1815, 1817, 1819, 1823, 1825, 1831, 1842,
83 1843, 1846, 1848, 1852, 1857, 1887, 1892, 1894, 80 1845, 1847, 1851, 1856, 1886, 1891, 1893, 1896,
84 1897, 1902, 1916, 1923, 1937, 1942, 1955, 1959, 81 1901, 1915, 1922, 1936, 1941, 1954, 1958, 1971,
85 1972, 1977, 1995, 1996, 2005, 2009, 2021, 2026, 82 1976, 1994, 1995, 2004, 2008, 2020, 2025, 2032,
86 2033, 2040, 2047, 2049, 2053, 2075, 2080, 2081, 83 2039, 2046, 2048, 2052, 2074, 2079, 2080, 2084,
87 2085, 2087, 2137, 2140, 2151, 2155, 2157, 2163, 84 2086, 2136, 2139, 2150, 2154, 2156, 2162, 2168,
88 2169, 2171, 2176, 2178, 2182, 2184, 2185, 2187, 85 2170, 2175, 2177, 2181, 2183, 2184, 2186, 2188,
89 2189, 2195, 2197, 2199, 2203, 2209, 2222, 2224, 86 2194, 2196, 2198, 2202, 2208, 2221, 2223, 2229,
90 2230, 2234, 2242, 2253, 2261, 2264, 2294, 2300, 87 2233, 2241, 2252, 2260, 2263, 2293, 2299, 2302,
91 2303, 2308, 2310, 2314, 2318, 2322, 2324, 2331, 88 2307, 2309, 2313, 2317, 2321, 2323, 2330, 2332,
92 2333, 2342, 2349, 2357, 2359, 2379, 2391, 2395, 89 2341, 2348, 2356, 2358, 2378, 2390, 2394, 2396,
93 2397, 2415, 2454, 2456, 2460, 2462, 2469, 2473, 90 2414, 2453, 2455, 2459, 2461, 2468, 2472, 2500,
94 2501, 2503, 2505, 2507, 2509, 2512, 2514, 2518, 91 2502, 2504, 2506, 2508, 2511, 2513, 2517, 2521,
95 2522, 2524, 2527, 2529, 2531, 2534, 2536, 2538, 92 2523, 2526, 2528, 2530, 2533, 2535, 2537, 2538,
96 2539, 2541, 2543, 2547, 2551, 2554, 2567, 2569, 93 2540, 2542, 2546, 2550, 2553, 2566, 2568, 2574,
97 2575, 2579, 2581, 2585, 2589, 2603, 2606, 2615, 94 2578, 2580, 2584, 2588, 2602, 2605, 2614, 2616,
98 2617, 2621, 2627, 2627, 2629, 2631, 2640, 2646, 95 2620, 2626, 2626, 2628, 2630, 2639, 2645, 2652,
99 2653, 2654, 2657, 2658, 2662, 2667, 2676, 2680, 96 2653, 2656, 2657, 2661, 2666, 2675, 2679, 2683,
100 2684, 2692, 2694, 2696, 2698, 2701, 2733, 2735, 97 2691, 2693, 2695, 2697, 2700, 2732, 2734, 2736,
101 2737, 2741, 2745, 2748, 2759, 2772, 2791, 2804, 98 2740, 2744, 2747, 2758, 2771, 2790, 2803, 2819,
102 2820, 2832, 2848, 2863, 2884, 2894, 2906, 2917, 99 2831, 2847, 2862, 2883, 2893, 2905, 2916, 2930,
103 2931, 2946, 2956, 2968, 2977, 2989, 2991, 2995, 100 2945, 2955, 2967, 2976, 2988, 2990, 2994, 3015,
104 3016, 3025, 3035, 3041, 3047, 3048, 3097, 3099, 101 3024, 3034, 3040, 3046, 3047, 3096, 3098, 3102,
105 3103, 3105, 3111, 3118, 3126, 3133, 3136, 3142, 102 3104, 3110, 3117, 3125, 3132, 3135, 3141, 3145,
106 3146, 3150, 3152, 3156, 3160, 3164, 3170, 3178, 103 3149, 3151, 3155, 3159, 3163, 3169, 3177, 3185,
107 3186, 3192, 3194, 3198, 3200, 3206, 3210, 3214, 104 3191, 3193, 3197, 3199, 3205, 3209, 3213, 3217,
108 3218, 3222, 3227, 3234, 3240, 3242, 3244, 3248, 105 3221, 3226, 3233, 3239, 3241, 3243, 3247, 3249,
109 3250, 3256, 3260, 3264, 3274, 3279, 3293, 3308, 106 3255, 3259, 3263, 3273, 3278, 3292, 3307, 3309,
110 3310, 3318, 3320, 3325, 3339, 3344, 3346, 3350, 107 3317, 3319, 3324, 3338, 3343, 3345, 3349, 3350,
111 3351, 3355, 3361, 3367, 3377, 3387, 3398, 3406, 108 3354, 3360, 3366, 3376, 3386, 3397, 3405, 3408,
112 3409, 3412, 3416, 3420, 3422, 3425, 3425, 3428, 109 3411, 3415, 3419, 3421, 3424, 3424, 3427, 3429,
113 3430, 3460, 3462, 3464, 3468, 3473, 3477, 3482, 110 3459, 3461, 3463, 3467, 3472, 3476, 3481, 3483,
114 3484, 3486, 3488, 3497, 3501, 3505, 3511, 3513, 111 3485, 3487, 3496, 3500, 3504, 3510, 3512, 3520,
115 3521, 3529, 3541, 3544, 3550, 3554, 3556, 3560, 112 3528, 3540, 3543, 3549, 3553, 3555, 3559, 3579,
116 3580, 3582, 3584, 3595, 3601, 3603, 3605, 3607, 113 3581, 3583, 3594, 3600, 3602, 3604, 3606, 3610,
117 3611, 3617, 3623, 3625, 3630, 3634, 3636, 3644, 114 3616, 3622, 3624, 3629, 3633, 3635, 3643, 3661,
118 3662, 3702, 3712, 3716, 3718, 3720, 3721, 3725, 115 3701, 3711, 3715, 3717, 3719, 3720, 3724, 3728,
119 3729, 3733, 3737, 3741, 3746, 3750, 3754, 3758, 116 3732, 3736, 3740, 3745, 3749, 3753, 3757, 3759,
120 3760, 3762, 3766, 3776, 3780, 3782, 3786, 3790, 117 3761, 3765, 3775, 3779, 3781, 3785, 3789, 3793,
121 3794, 3807, 3809, 3811, 3815, 3817, 3821, 3823, 118 3806, 3808, 3810, 3814, 3816, 3820, 3822, 3824,
122 3825, 3855, 3859, 3863, 3867, 3870, 3877, 3882, 119 3854, 3858, 3862, 3866, 3869, 3876, 3881, 3892,
123 3893, 3897, 3913, 3927, 3931, 3936, 3940, 3944, 120 3896, 3912, 3926, 3930, 3935, 3939, 3943, 3949,
124 3950, 3952, 3958, 3960, 3964, 3966, 3972, 3977, 121 3951, 3957, 3959, 3963, 3965, 3971, 3976, 3981,
125 3982, 3992, 3994, 3996, 4000, 4004, 4006, 4019, 122 3991, 3993, 3995, 3999, 4003, 4005, 4018, 4020,
126 4021, 4025, 4029, 4037, 4039, 4043, 4045, 4046, 123 4024, 4028, 4036, 4038, 4042, 4044, 4045, 4048,
127 4049, 4054, 4056, 4058, 4062, 4064, 4068, 4074, 124 4053, 4055, 4057, 4061, 4063, 4067, 4073, 4093,
128 4094, 4100, 4106, 4108, 4109, 4119, 4120, 4128, 125 4099, 4105, 4107, 4108, 4118, 4119, 4127, 4134,
129 4135, 4137, 4140, 4142, 4144, 4146, 4151, 4155, 126 4136, 4139, 4141, 4143, 4145, 4150, 4154, 4158,
130 4159, 4164, 4174, 4184, 4188, 4192, 4206, 4232, 127 4163, 4173, 4183, 4187, 4191, 4205, 4231, 4241,
131 4242, 4244, 4246, 4249, 4251, 4254, 4256, 4260, 128 4243, 4245, 4248, 4250, 4253, 4255, 4259, 4261,
132 4262, 4263, 4267, 4269, 4271, 4278, 4282, 4289, 129 4262, 4266, 4268, 4270, 4277, 4281, 4288, 4295,
133 4296, 4305, 4321, 4333, 4351, 4362, 4374, 4382, 130 4304, 4320, 4332, 4350, 4361, 4373, 4381, 4399,
134 4400, 4408, 4438, 4441, 4451, 4461, 4473, 4484, 131 4407, 4437, 4440, 4450, 4460, 4472, 4483, 4492,
135 4493, 4506, 4518, 4522, 4528, 4555, 4564, 4567, 132 4505, 4517, 4521, 4527, 4554, 4563, 4566, 4571,
136 4572, 4578, 4583, 4604, 4608, 4614, 4614, 4621, 133 4577, 4582, 4603, 4607, 4613, 4613, 4620, 4629,
137 4630, 4638, 4641, 4645, 4651, 4657, 4660, 4664, 134 4637, 4640, 4644, 4650, 4656, 4659, 4663, 4670,
138 4671, 4677, 4686, 4695, 4699, 4703, 4707, 4711, 135 4676, 4685, 4694, 4698, 4702, 4706, 4710, 4717,
139 4718, 4722, 4726, 4736, 4742, 4746, 4752, 4756, 136 4721, 4725, 4735, 4741, 4745, 4751, 4755, 4758,
140 4759, 4765, 4771, 4783, 4787, 4791, 4801, 4805, 137 4764, 4770, 4782, 4786, 4790, 4800, 4804, 4815,
141 4816, 4818, 4820, 4824, 4836, 4841, 4865, 4869, 138 4817, 4819, 4823, 4835, 4840, 4864, 4868, 4874,
142 4875, 4897, 4906, 4910, 4913, 4914, 4922, 4930, 139 4896, 4905, 4909, 4912, 4913, 4921, 4929, 4935,
143 4936, 4946, 4953, 4971, 4974, 4977, 4985, 4991, 140 4945, 4952, 4970, 4973, 4976, 4984, 4990, 4994,
144 4995, 4999, 5003, 5009, 5017, 5022, 5028, 5032, 141 4998, 5002, 5008, 5016, 5021, 5027, 5031, 5039,
145 5040, 5047, 5051, 5058, 5064, 5072, 5080, 5086, 142 5046, 5050, 5057, 5063, 5071, 5079, 5085, 5091,
146 5092, 5103, 5107, 5119, 5128, 5145, 5162, 5165, 143 5102, 5106, 5118, 5127, 5144, 5161, 5164, 5168,
147 5169, 5171, 5177, 5179, 5183, 5198, 5202, 5206, 144 5170, 5176, 5178, 5182, 5197, 5201, 5205, 5209,
148 5210, 5214, 5218, 5220, 5226, 5231, 5235, 5241, 145 5213, 5217, 5219, 5225, 5230, 5234, 5240, 5247,
149 5248, 5251, 5269, 5271, 5316, 5322, 5328, 5332, 146 5250, 5268, 5270, 5315, 5321, 5327, 5331, 5335,
150 5336, 5342, 5346, 5352, 5358, 5365, 5367, 5373, 147 5341, 5345, 5351, 5357, 5364, 5366, 5372, 5378,
151 5379, 5383, 5387, 5395, 5408, 5414, 5421, 5429, 148 5382, 5386, 5394, 5407, 5413, 5420, 5428, 5434,
152 5435, 5444, 5450, 5454, 5459, 5463, 5471, 5475, 149 5443, 5449, 5453, 5458, 5462, 5470, 5474, 5478,
153 5479, 5509, 5515, 5521, 5527, 5533, 5540, 5546, 150 5508, 5514, 5520, 5526, 5532, 5539, 5545, 5552,
154 5553, 5558, 5568, 5572, 5579, 5585, 5589, 5596, 151 5557, 5567, 5571, 5578, 5584, 5588, 5595, 5599,
155 5600, 5606, 5609, 5613, 5617, 5621, 5625, 5630, 152 5605, 5608, 5612, 5616, 5620, 5624, 5629, 5634,
156 5635, 5639, 5650, 5654, 5658, 5664, 5672, 5676, 153 5638, 5649, 5653, 5657, 5663, 5671, 5675, 5692,
157 5693, 5697, 5703, 5713, 5719, 5725, 5728, 5733, 154 5696, 5702, 5712, 5718, 5724, 5727, 5732, 5741,
158 5742, 5746, 5750, 5756, 5760, 5766, 5774, 5792, 155 5745, 5749, 5755, 5759, 5765, 5773, 5791, 5792,
159 5793, 5803, 5804, 5813, 5821, 5823, 5826, 5828, 156 5802, 5803, 5812, 5820, 5822, 5825, 5827, 5829,
160 5830, 5832, 5837, 5850, 5854, 5869, 5898, 5909, 157 5831, 5836, 5849, 5853, 5868, 5897, 5908, 5910,
161 5911, 5915, 5919, 5924, 5928, 5930, 5937, 5941, 158 5914, 5918, 5923, 5927, 5929, 5936, 5940, 5948,
162 5949, 5953, 5954, 5955, 5957, 5959, 5961, 5963, 159 5952, 5964, 5966, 5968, 5970, 5972, 5974, 5975,
163 5965, 5966, 5967, 5968, 5970, 5972, 5974, 5975, 160 5977, 5979, 5981, 5983, 5985, 5986, 5988, 5990,
164 5976, 5977, 5978, 5980, 5982, 5984, 5985, 5986, 161 5992, 5994, 5996, 6000, 6006, 6006, 6008, 6010,
165 5990, 5996, 5996, 5998, 6000, 6009, 6015, 6022, 162 6019, 6025, 6032, 6033, 6036, 6037, 6041, 6046,
166 6023, 6026, 6027, 6031, 6036, 6045, 6049, 6053, 163 6055, 6059, 6063, 6071, 6073, 6075, 6077, 6080,
167 6061, 6063, 6065, 6067, 6070, 6102, 6104, 6106, 164 6112, 6114, 6116, 6120, 6124, 6127, 6138, 6151,
168 6110, 6114, 6117, 6128, 6141, 6160, 6173, 6189, 165 6170, 6183, 6199, 6211, 6227, 6242, 6263, 6273,
169 6201, 6217, 6232, 6253, 6263, 6275, 6286, 6300, 166 6285, 6296, 6310, 6325, 6335, 6347, 6356, 6368,
170 6315, 6325, 6337, 6346, 6358, 6360, 6364, 6385, 167 6370, 6374, 6395, 6404, 6414, 6420, 6426, 6427,
171 6394, 6404, 6410, 6416, 6417, 6466, 6468, 6472, 168 6476, 6478, 6482, 6484, 6490, 6497, 6505, 6512,
172 6474, 6480, 6487, 6495, 6502, 6505, 6511, 6515, 169 6515, 6521, 6525, 6529, 6531, 6535, 6539, 6543,
173 6519, 6521, 6525, 6529, 6533, 6539, 6547, 6555, 170 6549, 6557, 6565, 6571, 6573, 6577, 6579, 6585,
174 6561, 6563, 6567, 6569, 6575, 6579, 6583, 6587, 171 6589, 6593, 6597, 6601, 6606, 6613, 6619, 6621,
175 6591, 6596, 6603, 6609, 6611, 6613, 6617, 6619, 172 6623, 6627, 6629, 6635, 6639, 6643, 6653, 6658,
176 6625, 6629, 6633, 6643, 6648, 6662, 6677, 6679, 173 6672, 6687, 6689, 6697, 6699, 6704, 6718, 6723,
177 6687, 6689, 6694, 6708, 6713, 6715, 6719, 6720, 174 6725, 6729, 6730, 6734, 6740, 6746, 6756, 6766,
178 6724, 6730, 6736, 6746, 6756, 6767, 6775, 6778, 175 6777, 6785, 6788, 6791, 6795, 6799, 6801, 6804,
179 6781, 6785, 6789, 6791, 6794, 6794, 6797, 6799, 176 6804, 6807, 6809, 6839, 6841, 6843, 6847, 6852,
180 6829, 6831, 6833, 6837, 6842, 6846, 6851, 6853, 177 6856, 6861, 6863, 6865, 6867, 6876, 6880, 6884,
181 6855, 6857, 6866, 6870, 6874, 6880, 6882, 6890, 178 6890, 6892, 6900, 6908, 6920, 6923, 6929, 6933,
182 6898, 6910, 6913, 6919, 6923, 6925, 6929, 6949, 179 6935, 6939, 6959, 6961, 6963, 6974, 6980, 6982,
183 6951, 6953, 6964, 6970, 6972, 6974, 6976, 6980, 180 6984, 6986, 6990, 6996, 7002, 7004, 7009, 7013,
184 6986, 6992, 6994, 6999, 7003, 7005, 7013, 7031, 181 7015, 7023, 7041, 7081, 7091, 7095, 7097, 7099,
185 7071, 7081, 7085, 7087, 7089, 7090, 7094, 7098, 182 7100, 7104, 7108, 7112, 7116, 7120, 7125, 7129,
186 7102, 7106, 7110, 7115, 7119, 7123, 7127, 7129, 183 7133, 7137, 7139, 7141, 7145, 7155, 7159, 7161,
187 7131, 7135, 7145, 7149, 7151, 7155, 7159, 7163, 184 7165, 7169, 7173, 7186, 7188, 7190, 7194, 7196,
188 7176, 7178, 7180, 7184, 7186, 7190, 7192, 7194, 185 7200, 7202, 7204, 7234, 7238, 7242, 7246, 7249,
189 7224, 7228, 7232, 7236, 7239, 7246, 7251, 7262, 186 7256, 7261, 7272, 7276, 7292, 7306, 7310, 7315,
190 7266, 7282, 7296, 7300, 7305, 7309, 7313, 7319, 187 7319, 7323, 7329, 7331, 7337, 7339, 7343, 7345,
191 7321, 7327, 7329, 7333, 7335, 7341, 7346, 7351, 188 7351, 7356, 7361, 7371, 7373, 7375, 7379, 7383,
192 7361, 7363, 7365, 7369, 7373, 7375, 7388, 7390, 189 7385, 7398, 7400, 7404, 7408, 7416, 7418, 7422,
193 7394, 7398, 7406, 7408, 7412, 7414, 7415, 7418, 190 7424, 7425, 7428, 7433, 7435, 7437, 7441, 7443,
194 7423, 7425, 7427, 7431, 7433, 7437, 7443, 7463, 191 7447, 7453, 7473, 7479, 7485, 7487, 7488, 7498,
195 7469, 7475, 7477, 7478, 7488, 7489, 7497, 7504, 192 7499, 7507, 7514, 7516, 7519, 7521, 7523, 7525,
196 7506, 7509, 7511, 7513, 7515, 7520, 7524, 7528, 193 7530, 7534, 7538, 7543, 7553, 7563, 7567, 7571,
197 7533, 7543, 7553, 7557, 7561, 7575, 7601, 7611, 194 7585, 7611, 7621, 7623, 7625, 7628, 7630, 7633,
198 7613, 7615, 7618, 7620, 7623, 7625, 7629, 7631, 195 7635, 7639, 7641, 7642, 7646, 7648, 7650, 7657,
199 7632, 7636, 7638, 7640, 7647, 7651, 7658, 7665, 196 7661, 7668, 7675, 7684, 7700, 7712, 7730, 7741,
200 7674, 7690, 7702, 7720, 7731, 7743, 7751, 7769, 197 7753, 7761, 7779, 7787, 7817, 7820, 7830, 7840,
201 7777, 7807, 7810, 7820, 7830, 7842, 7853, 7862, 198 7852, 7863, 7872, 7885, 7897, 7901, 7907, 7934,
202 7875, 7887, 7891, 7897, 7924, 7933, 7936, 7941, 199 7943, 7946, 7951, 7957, 7962, 7983, 7987, 7993,
203 7947, 7952, 7973, 7977, 7983, 7983, 7990, 7999, 200 7993, 8000, 8009, 8017, 8020, 8024, 8030, 8036,
204 8007, 8010, 8014, 8020, 8026, 8029, 8033, 8040, 201 8039, 8043, 8050, 8056, 8065, 8074, 8078, 8082,
205 8046, 8055, 8064, 8068, 8072, 8076, 8080, 8087, 202 8086, 8090, 8097, 8101, 8105, 8115, 8121, 8125,
206 8091, 8095, 8105, 8111, 8115, 8121, 8125, 8128, 203 8131, 8135, 8138, 8144, 8150, 8162, 8166, 8170,
207 8134, 8140, 8152, 8156, 8160, 8170, 8174, 8185, 204 8180, 8184, 8195, 8197, 8199, 8203, 8215, 8220,
208 8187, 8189, 8193, 8205, 8210, 8234, 8238, 8244, 205 8244, 8248, 8254, 8276, 8285, 8289, 8292, 8293,
209 8266, 8275, 8279, 8282, 8283, 8291, 8299, 8305, 206 8301, 8309, 8315, 8325, 8332, 8350, 8353, 8356,
210 8315, 8322, 8340, 8343, 8346, 8354, 8360, 8364, 207 8364, 8370, 8374, 8378, 8382, 8388, 8396, 8401,
211 8368, 8372, 8378, 8386, 8391, 8397, 8401, 8409, 208 8407, 8411, 8419, 8426, 8430, 8437, 8443, 8451,
212 8416, 8420, 8427, 8433, 8441, 8449, 8455, 8461, 209 8459, 8465, 8471, 8482, 8486, 8498, 8507, 8524,
213 8472, 8476, 8488, 8497, 8514, 8531, 8534, 8538, 210 8541, 8544, 8548, 8550, 8556, 8558, 8562, 8577,
214 8540, 8546, 8548, 8552, 8567, 8571, 8575, 8579, 211 8581, 8585, 8589, 8593, 8597, 8599, 8605, 8610,
215 8583, 8587, 8589, 8595, 8600, 8604, 8610, 8617, 212 8614, 8620, 8627, 8630, 8648, 8650, 8695, 8701,
216 8620, 8638, 8640, 8685, 8691, 8697, 8701, 8705, 213 8707, 8711, 8715, 8721, 8725, 8731, 8737, 8744,
217 8711, 8715, 8721, 8727, 8734, 8736, 8742, 8748, 214 8746, 8752, 8758, 8762, 8766, 8774, 8787, 8793,
218 8752, 8756, 8764, 8777, 8783, 8790, 8798, 8804, 215 8800, 8808, 8814, 8823, 8829, 8833, 8838, 8842,
219 8813, 8819, 8823, 8828, 8832, 8840, 8844, 8848, 216 8850, 8854, 8858, 8888, 8894, 8900, 8906, 8912,
220 8878, 8884, 8890, 8896, 8902, 8909, 8915, 8922, 217 8919, 8925, 8932, 8937, 8947, 8951, 8958, 8964,
221 8927, 8937, 8941, 8948, 8954, 8958, 8965, 8969, 218 8968, 8975, 8979, 8985, 8988, 8992, 8996, 9000,
222 8975, 8978, 8982, 8986, 8990, 8994, 8999, 9004, 219 9004, 9009, 9014, 9018, 9029, 9033, 9037, 9043,
223 9008, 9019, 9023, 9027, 9033, 9041, 9045, 9062, 220 9051, 9055, 9072, 9076, 9082, 9092, 9098, 9104,
224 9066, 9072, 9082, 9088, 9094, 9097, 9102, 9111, 221 9107, 9112, 9121, 9125, 9129, 9135, 9139, 9145,
225 9115, 9119, 9125, 9129, 9135, 9143, 9161, 9162, 222 9153, 9171, 9172, 9182, 9183, 9192, 9200, 9202,
226 9172, 9173, 9182, 9190, 9192, 9195, 9197, 9199, 223 9205, 9207, 9209, 9211, 9216, 9229, 9233, 9248,
227 9201, 9206, 9219, 9223, 9238, 9267, 9278, 9280, 224 9277, 9288, 9290, 9294, 9298, 9303, 9307, 9309,
228 9284, 9288, 9293, 9297, 9299, 9306, 9310, 9318, 225 9316, 9320, 9328, 9332, 9407, 9409, 9410, 9411,
229 9322, 9398, 9400, 9401, 9402, 9403, 9404, 9405, 226 9412, 9413, 9414, 9416, 9421, 9423, 9425, 9426,
230 9407, 9408, 9413, 9415, 9417, 9418, 9462, 9463, 227 9470, 9471, 9472, 9474, 9479, 9483, 9483, 9485,
231 9464, 9466, 9471, 9475, 9475, 9477, 9479, 9490, 228 9487, 9498, 9508, 9516, 9517, 9519, 9520, 9524,
232 9500, 9508, 9509, 9511, 9512, 9516, 9520, 9530, 229 9528, 9538, 9542, 9549, 9560, 9567, 9571, 9577,
233 9534, 9541, 9552, 9559, 9563, 9569, 9580, 9612, 230 9588, 9620, 9669, 9684, 9699, 9704, 9706, 9711,
234 9661, 9676, 9691, 9696, 9698, 9703, 9735, 9743, 231 9743, 9751, 9753, 9775, 9797, 9799, 9815, 9831,
235 9745, 9767, 9789, 9791, 9807, 9823, 9839, 9855, 232 9833, 9835, 9835, 9836, 9837, 9838, 9840, 9841,
236 9870, 9880, 9897, 9914, 9931, 9947, 9957, 9974, 233 9853, 9855, 9857, 9859, 9873, 9887, 9889, 9892,
237 9990, 10006, 10022, 10038, 10054, 10070, 10086, 10087, 234 9895, 9897, 9898, 9899, 9901, 9903, 9905, 9919,
238 10088, 10089, 10090, 10092, 10094, 10096, 10110, 10124, 235 9933, 9935, 9938, 9941, 9943, 9944, 9945, 9947,
239 10138, 10152, 10153, 10154, 10156, 10158, 10160, 10174, 236 9949, 9951, 10000, 10044, 10046, 10051, 10055, 10055,
240 10188, 10189, 10190, 10192, 10194, 10196, 10245, 10289, 237 10057, 10059, 10070, 10080, 10088, 10089, 10091, 10092,
241 10291, 10296, 10300, 10300, 10302, 10304, 10315, 10325, 238 10096, 10100, 10110, 10114, 10121, 10132, 10139, 10143,
242 10333, 10334, 10336, 10337, 10341, 10345, 10355, 10359, 239 10149, 10160, 10192, 10241, 10256, 10271, 10276, 10278,
243 10366, 10377, 10384, 10388, 10394, 10405, 10437, 10486, 240 10283, 10315, 10323, 10325, 10347, 10369,
244 10501, 10516, 10521, 10523, 10528, 10560, 10568, 10570,
245 10592, 10614,
246} 241}
247 242
248var _hcltok_trans_keys []byte = []byte{ 243var _hcltok_trans_keys []byte = []byte{
249 10, 46, 42, 42, 47, 46, 69, 101, 244 46, 42, 42, 47, 46, 69, 101, 48,
250 48, 57, 43, 45, 48, 57, 48, 57, 245 57, 43, 45, 48, 57, 48, 57, 45,
251 45, 95, 194, 195, 198, 199, 203, 205, 246 95, 194, 195, 198, 199, 203, 205, 206,
247 207, 210, 212, 213, 214, 215, 216, 217,
248 219, 220, 221, 222, 223, 224, 225, 226,
249 227, 228, 233, 234, 237, 239, 240, 65,
250 90, 97, 122, 196, 202, 208, 218, 229,
251 236, 95, 194, 195, 198, 199, 203, 205,
252 206, 207, 210, 212, 213, 214, 215, 216, 252 206, 207, 210, 212, 213, 214, 215, 216,
253 217, 219, 220, 221, 222, 223, 224, 225, 253 217, 219, 220, 221, 222, 223, 224, 225,
254 226, 227, 228, 233, 234, 237, 239, 240, 254 226, 227, 228, 233, 234, 237, 239, 240,
255 65, 90, 97, 122, 196, 202, 208, 218, 255 65, 90, 97, 122, 196, 202, 208, 218,
256 229, 236, 95, 194, 195, 198, 199, 203, 256 229, 236, 10, 13, 45, 95, 194, 195,
257 205, 206, 207, 210, 212, 213, 214, 215, 257 198, 199, 203, 204, 205, 206, 207, 210,
258 216, 217, 219, 220, 221, 222, 223, 224, 258 212, 213, 214, 215, 216, 217, 219, 220,
259 225, 226, 227, 228, 233, 234, 237, 239, 259 221, 222, 223, 224, 225, 226, 227, 228,
260 240, 65, 90, 97, 122, 196, 202, 208, 260 233, 234, 237, 239, 240, 243, 48, 57,
261 218, 229, 236, 10, 13, 45, 95, 194, 261 65, 90, 97, 122, 196, 218, 229, 236,
262 195, 198, 199, 203, 204, 205, 206, 207, 262 10, 170, 181, 183, 186, 128, 150, 152,
263 210, 212, 213, 214, 215, 216, 217, 219,
264 220, 221, 222, 223, 224, 225, 226, 227,
265 228, 233, 234, 237, 239, 240, 243, 48,
266 57, 65, 90, 97, 122, 196, 218, 229,
267 236, 10, 170, 181, 183, 186, 128, 150,
268 152, 182, 184, 255, 192, 255, 0, 127,
269 173, 130, 133, 146, 159, 165, 171, 175,
270 255, 181, 190, 184, 185, 192, 255, 140,
271 134, 138, 142, 161, 163, 255, 182, 130,
272 136, 137, 176, 151, 152, 154, 160, 190,
273 136, 144, 192, 255, 135, 129, 130, 132,
274 133, 144, 170, 176, 178, 144, 154, 160,
275 191, 128, 169, 174, 255, 148, 169, 157,
276 158, 189, 190, 192, 255, 144, 255, 139,
277 140, 178, 255, 186, 128, 181, 160, 161,
278 162, 163, 164, 165, 166, 167, 168, 169,
279 170, 171, 172, 173, 174, 175, 176, 177,
280 178, 179, 180, 181, 182, 183, 184, 185,
281 186, 187, 188, 189, 190, 191, 128, 173,
282 128, 155, 160, 180, 182, 189, 148, 161,
283 163, 255, 176, 164, 165, 132, 169, 177,
284 141, 142, 145, 146, 179, 181, 186, 187,
285 158, 133, 134, 137, 138, 143, 150, 152,
286 155, 164, 165, 178, 255, 188, 129, 131,
287 133, 138, 143, 144, 147, 168, 170, 176,
288 178, 179, 181, 182, 184, 185, 190, 255,
289 157, 131, 134, 137, 138, 142, 144, 146,
290 152, 159, 165, 182, 255, 129, 131, 133,
291 141, 143, 145, 147, 168, 170, 176, 178,
292 179, 181, 185, 188, 255, 134, 138, 142,
293 143, 145, 159, 164, 165, 176, 184, 186,
294 255, 129, 131, 133, 140, 143, 144, 147,
295 168, 170, 176, 178, 179, 181, 185, 188,
296 191, 177, 128, 132, 135, 136, 139, 141,
297 150, 151, 156, 157, 159, 163, 166, 175,
298 156, 130, 131, 133, 138, 142, 144, 146,
299 149, 153, 154, 158, 159, 163, 164, 168,
300 170, 174, 185, 190, 191, 144, 151, 128,
301 130, 134, 136, 138, 141, 166, 175, 128,
302 131, 133, 140, 142, 144, 146, 168, 170,
303 185, 189, 255, 133, 137, 151, 142, 148,
304 155, 159, 164, 165, 176, 255, 128, 131,
305 133, 140, 142, 144, 146, 168, 170, 179,
306 181, 185, 188, 191, 158, 128, 132, 134,
307 136, 138, 141, 149, 150, 160, 163, 166,
308 175, 177, 178, 129, 131, 133, 140, 142,
309 144, 146, 186, 189, 255, 133, 137, 143,
310 147, 152, 158, 164, 165, 176, 185, 192,
311 255, 189, 130, 131, 133, 150, 154, 177,
312 179, 187, 138, 150, 128, 134, 143, 148,
313 152, 159, 166, 175, 178, 179, 129, 186,
314 128, 142, 144, 153, 132, 138, 141, 165,
315 167, 129, 130, 135, 136, 148, 151, 153,
316 159, 161, 163, 170, 171, 173, 185, 187,
317 189, 134, 128, 132, 136, 141, 144, 153,
318 156, 159, 128, 181, 183, 185, 152, 153,
319 160, 169, 190, 191, 128, 135, 137, 172,
320 177, 191, 128, 132, 134, 151, 153, 188,
321 134, 128, 129, 130, 131, 137, 138, 139,
322 140, 141, 142, 143, 144, 153, 154, 155,
323 156, 157, 158, 159, 160, 161, 162, 163,
324 164, 165, 166, 167, 168, 169, 170, 173,
325 175, 176, 177, 178, 179, 181, 182, 183,
326 188, 189, 190, 191, 132, 152, 172, 184,
327 185, 187, 128, 191, 128, 137, 144, 255,
328 158, 159, 134, 187, 136, 140, 142, 143,
329 137, 151, 153, 142, 143, 158, 159, 137,
330 177, 142, 143, 182, 183, 191, 255, 128,
331 130, 133, 136, 150, 152, 255, 145, 150,
332 151, 155, 156, 160, 168, 178, 255, 128,
333 143, 160, 255, 182, 183, 190, 255, 129,
334 255, 173, 174, 192, 255, 129, 154, 160,
335 255, 171, 173, 185, 255, 128, 140, 142,
336 148, 160, 180, 128, 147, 160, 172, 174,
337 176, 178, 179, 148, 150, 152, 155, 158,
338 159, 170, 255, 139, 141, 144, 153, 160,
339 255, 184, 255, 128, 170, 176, 255, 182,
340 255, 128, 158, 160, 171, 176, 187, 134,
341 173, 176, 180, 128, 171, 176, 255, 138,
342 143, 155, 255, 128, 155, 160, 255, 159,
343 189, 190, 192, 255, 167, 128, 137, 144,
344 153, 176, 189, 140, 143, 154, 170, 180,
345 255, 180, 255, 128, 183, 128, 137, 141,
346 189, 128, 136, 144, 146, 148, 182, 184,
347 185, 128, 181, 187, 191, 150, 151, 158,
348 159, 152, 154, 156, 158, 134, 135, 142,
349 143, 190, 255, 190, 128, 180, 182, 188,
350 130, 132, 134, 140, 144, 147, 150, 155,
351 160, 172, 178, 180, 182, 188, 128, 129,
352 130, 131, 132, 133, 134, 176, 177, 178,
353 179, 180, 181, 182, 183, 191, 255, 129,
354 147, 149, 176, 178, 190, 192, 255, 144,
355 156, 161, 144, 156, 165, 176, 130, 135,
356 149, 164, 166, 168, 138, 147, 152, 157,
357 170, 185, 188, 191, 142, 133, 137, 160,
358 255, 137, 255, 128, 174, 176, 255, 159,
359 165, 170, 180, 255, 167, 173, 128, 165,
360 176, 255, 168, 174, 176, 190, 192, 255,
361 128, 150, 160, 166, 168, 174, 176, 182,
362 184, 190, 128, 134, 136, 142, 144, 150,
363 152, 158, 160, 191, 128, 129, 130, 131,
364 132, 133, 134, 135, 144, 145, 255, 133,
365 135, 161, 175, 177, 181, 184, 188, 160,
366 151, 152, 187, 192, 255, 133, 173, 177,
367 255, 143, 159, 187, 255, 176, 191, 182,
368 183, 184, 191, 192, 255, 150, 255, 128,
369 146, 147, 148, 152, 153, 154, 155, 156,
370 158, 159, 160, 161, 162, 163, 164, 165,
371 166, 167, 168, 169, 170, 171, 172, 173,
372 174, 175, 176, 129, 255, 141, 255, 144,
373 189, 141, 143, 172, 255, 191, 128, 175,
374 180, 189, 151, 159, 162, 255, 175, 137,
375 138, 184, 255, 183, 255, 168, 255, 128,
376 179, 188, 134, 143, 154, 159, 184, 186,
377 190, 255, 128, 173, 176, 255, 148, 159,
378 189, 255, 129, 142, 154, 159, 191, 255,
379 128, 182, 128, 141, 144, 153, 160, 182,
380 186, 255, 128, 130, 155, 157, 160, 175,
381 178, 182, 129, 134, 137, 142, 145, 150,
382 160, 166, 168, 174, 176, 255, 155, 166,
383 175, 128, 170, 172, 173, 176, 185, 158,
384 159, 160, 255, 164, 175, 135, 138, 188,
385 255, 164, 169, 171, 172, 173, 174, 175,
386 180, 181, 182, 183, 184, 185, 187, 188,
387 189, 190, 191, 165, 186, 174, 175, 154,
388 255, 190, 128, 134, 147, 151, 157, 168,
389 170, 182, 184, 188, 128, 129, 131, 132,
390 134, 255, 147, 255, 190, 255, 144, 145,
391 136, 175, 188, 255, 128, 143, 160, 175,
392 179, 180, 141, 143, 176, 180, 182, 255,
393 189, 255, 191, 144, 153, 161, 186, 129,
394 154, 166, 255, 191, 255, 130, 135, 138,
395 143, 146, 151, 154, 156, 144, 145, 146,
396 147, 148, 150, 151, 152, 155, 157, 158,
397 160, 170, 171, 172, 175, 161, 169, 128,
398 129, 130, 131, 133, 135, 138, 139, 140,
399 141, 142, 143, 144, 145, 146, 147, 148,
400 149, 152, 156, 157, 160, 161, 162, 163,
401 164, 166, 168, 169, 170, 171, 172, 173,
402 174, 176, 177, 153, 155, 178, 179, 128,
403 139, 141, 166, 168, 186, 188, 189, 191,
404 255, 142, 143, 158, 255, 187, 255, 128,
405 180, 189, 128, 156, 160, 255, 145, 159,
406 161, 255, 128, 159, 176, 255, 139, 143,
407 187, 255, 128, 157, 160, 255, 144, 132,
408 135, 150, 255, 158, 159, 170, 175, 148,
409 151, 188, 255, 128, 167, 176, 255, 164,
410 255, 183, 255, 128, 149, 160, 167, 136,
411 188, 128, 133, 138, 181, 183, 184, 191,
412 255, 150, 159, 183, 255, 128, 158, 160,
413 178, 180, 181, 128, 149, 160, 185, 128,
414 183, 190, 191, 191, 128, 131, 133, 134,
415 140, 147, 149, 151, 153, 179, 184, 186,
416 160, 188, 128, 156, 128, 135, 137, 166,
417 128, 181, 128, 149, 160, 178, 128, 145,
418 128, 178, 129, 130, 131, 132, 133, 135,
419 136, 138, 139, 140, 141, 144, 145, 146,
420 147, 150, 151, 152, 153, 154, 155, 156,
421 162, 163, 171, 176, 177, 178, 128, 134,
422 135, 165, 176, 190, 144, 168, 176, 185,
423 128, 180, 182, 191, 182, 144, 179, 155,
424 133, 137, 141, 143, 157, 255, 190, 128,
425 145, 147, 183, 136, 128, 134, 138, 141,
426 143, 157, 159, 168, 176, 255, 171, 175,
427 186, 255, 128, 131, 133, 140, 143, 144,
428 147, 168, 170, 176, 178, 179, 181, 185,
429 188, 191, 144, 151, 128, 132, 135, 136,
430 139, 141, 157, 163, 166, 172, 176, 180,
431 128, 138, 144, 153, 134, 136, 143, 154,
432 255, 128, 181, 184, 255, 129, 151, 158,
433 255, 129, 131, 133, 143, 154, 255, 128,
434 137, 128, 153, 157, 171, 176, 185, 160,
435 255, 170, 190, 192, 255, 128, 184, 128,
436 136, 138, 182, 184, 191, 128, 144, 153,
437 178, 255, 168, 144, 145, 183, 255, 128,
438 142, 145, 149, 129, 141, 144, 146, 147,
439 148, 175, 255, 132, 255, 128, 144, 129,
440 143, 144, 153, 145, 152, 135, 255, 160,
441 168, 169, 171, 172, 173, 174, 188, 189,
442 190, 191, 161, 167, 185, 255, 128, 158,
443 160, 169, 144, 173, 176, 180, 128, 131,
444 144, 153, 163, 183, 189, 255, 144, 255,
445 133, 143, 191, 255, 143, 159, 160, 128,
446 129, 255, 159, 160, 171, 172, 255, 173,
447 255, 179, 255, 128, 176, 177, 178, 128,
448 129, 171, 175, 189, 255, 128, 136, 144,
449 153, 157, 158, 133, 134, 137, 144, 145,
450 146, 147, 148, 149, 154, 155, 156, 157,
451 158, 159, 168, 169, 170, 150, 153, 165,
452 169, 173, 178, 187, 255, 131, 132, 140,
453 169, 174, 255, 130, 132, 149, 157, 173,
454 186, 188, 160, 161, 163, 164, 167, 168,
455 132, 134, 149, 157, 186, 139, 140, 191,
456 255, 134, 128, 132, 138, 144, 146, 255,
457 166, 167, 129, 155, 187, 149, 181, 143,
458 175, 137, 169, 131, 140, 141, 192, 255,
459 128, 182, 187, 255, 173, 180, 182, 255,
460 132, 155, 159, 161, 175, 128, 160, 163,
461 164, 165, 184, 185, 186, 161, 162, 128,
462 134, 136, 152, 155, 161, 163, 164, 166,
463 170, 133, 143, 151, 255, 139, 143, 154,
464 255, 164, 167, 185, 187, 128, 131, 133,
465 159, 161, 162, 169, 178, 180, 183, 130,
466 135, 137, 139, 148, 151, 153, 155, 157,
467 159, 164, 190, 141, 143, 145, 146, 161,
468 162, 167, 170, 172, 178, 180, 183, 185,
469 188, 128, 137, 139, 155, 161, 163, 165,
470 169, 171, 187, 155, 156, 151, 255, 156,
471 157, 160, 181, 255, 186, 187, 255, 162,
472 255, 160, 168, 161, 167, 158, 255, 160,
473 132, 135, 133, 134, 176, 255, 170, 181,
474 186, 191, 176, 180, 182, 183, 186, 189,
475 134, 140, 136, 138, 142, 161, 163, 255,
476 130, 137, 136, 255, 144, 170, 176, 178,
477 160, 191, 128, 138, 174, 175, 177, 255,
478 148, 150, 164, 167, 173, 176, 185, 189,
479 190, 192, 255, 144, 146, 175, 141, 255,
480 166, 176, 178, 255, 186, 138, 170, 180,
481 181, 160, 161, 162, 164, 165, 166, 167,
482 168, 169, 170, 171, 172, 173, 174, 175,
483 176, 177, 178, 179, 180, 181, 182, 184,
484 186, 187, 188, 189, 190, 183, 185, 154,
485 164, 168, 128, 149, 128, 152, 189, 132,
486 185, 144, 152, 161, 177, 255, 169, 177,
487 129, 132, 141, 142, 145, 146, 179, 181,
488 186, 188, 190, 255, 142, 156, 157, 159,
489 161, 176, 177, 133, 138, 143, 144, 147,
490 168, 170, 176, 178, 179, 181, 182, 184,
491 185, 158, 153, 156, 178, 180, 189, 133,
492 141, 143, 145, 147, 168, 170, 176, 178,
493 179, 181, 185, 144, 185, 160, 161, 189,
494 133, 140, 143, 144, 147, 168, 170, 176,
495 178, 179, 181, 185, 177, 156, 157, 159,
496 161, 131, 156, 133, 138, 142, 144, 146,
497 149, 153, 154, 158, 159, 163, 164, 168,
498 170, 174, 185, 144, 189, 133, 140, 142,
499 144, 146, 168, 170, 185, 152, 154, 160,
500 161, 128, 189, 133, 140, 142, 144, 146,
501 168, 170, 179, 181, 185, 158, 160, 161,
502 177, 178, 189, 133, 140, 142, 144, 146,
503 186, 142, 148, 150, 159, 161, 186, 191,
504 189, 133, 150, 154, 177, 179, 187, 128,
505 134, 129, 176, 178, 179, 132, 138, 141,
506 165, 167, 189, 129, 130, 135, 136, 148,
507 151, 153, 159, 161, 163, 170, 171, 173,
508 176, 178, 179, 134, 128, 132, 156, 159,
509 128, 128, 135, 137, 172, 136, 140, 128,
510 129, 130, 131, 137, 138, 139, 140, 141,
511 142, 143, 144, 153, 154, 155, 156, 157,
512 158, 159, 160, 161, 162, 163, 164, 165,
513 166, 167, 168, 169, 170, 172, 173, 174,
514 175, 176, 177, 178, 179, 180, 181, 182,
515 184, 188, 189, 190, 191, 132, 152, 185,
516 187, 191, 128, 170, 161, 144, 149, 154,
517 157, 165, 166, 174, 176, 181, 255, 130,
518 141, 143, 159, 155, 255, 128, 140, 142,
519 145, 160, 177, 128, 145, 160, 172, 174,
520 176, 151, 156, 170, 128, 168, 176, 255,
521 138, 255, 128, 150, 160, 255, 149, 255,
522 167, 133, 179, 133, 139, 131, 160, 174,
523 175, 186, 255, 166, 255, 128, 163, 141,
524 143, 154, 189, 169, 172, 174, 177, 181,
525 182, 129, 130, 132, 133, 134, 176, 177,
526 178, 179, 180, 181, 182, 183, 177, 191,
527 165, 170, 175, 177, 180, 255, 168, 174,
528 176, 255, 128, 134, 136, 142, 144, 150,
529 152, 158, 128, 129, 130, 131, 132, 133,
530 134, 135, 144, 145, 255, 133, 135, 161,
531 169, 177, 181, 184, 188, 160, 151, 154,
532 128, 146, 147, 148, 152, 153, 154, 155,
533 156, 158, 159, 160, 161, 162, 163, 164,
534 165, 166, 167, 168, 169, 170, 171, 172,
535 173, 174, 175, 176, 129, 255, 141, 143,
536 160, 169, 172, 255, 191, 128, 174, 130,
537 134, 139, 163, 255, 130, 179, 187, 189,
538 178, 183, 138, 165, 176, 255, 135, 159,
539 189, 255, 132, 178, 143, 160, 164, 166,
540 175, 186, 190, 128, 168, 186, 128, 130,
541 132, 139, 160, 182, 190, 255, 176, 178,
542 180, 183, 184, 190, 255, 128, 130, 155,
543 157, 160, 170, 178, 180, 128, 162, 164,
544 169, 171, 172, 173, 174, 175, 180, 181,
545 182, 183, 185, 186, 187, 188, 189, 190,
546 191, 165, 179, 157, 190, 128, 134, 147,
547 151, 159, 168, 170, 182, 184, 188, 176,
548 180, 182, 255, 161, 186, 144, 145, 146,
549 147, 148, 150, 151, 152, 155, 157, 158,
550 160, 170, 171, 172, 175, 161, 169, 128,
551 129, 130, 131, 133, 138, 139, 140, 141,
552 142, 143, 144, 145, 146, 147, 148, 149,
553 152, 156, 157, 160, 161, 162, 163, 164,
554 166, 168, 169, 170, 171, 172, 173, 174,
555 176, 177, 153, 155, 178, 179, 145, 255,
556 139, 143, 182, 255, 158, 175, 128, 144,
557 147, 149, 151, 153, 179, 128, 135, 137,
558 164, 128, 130, 131, 132, 133, 134, 135,
559 136, 138, 139, 140, 141, 144, 145, 146,
560 147, 150, 151, 152, 153, 154, 156, 162,
561 163, 171, 176, 177, 178, 131, 183, 131,
562 175, 144, 168, 131, 166, 182, 144, 178,
563 131, 178, 154, 156, 129, 132, 128, 145,
564 147, 171, 159, 255, 144, 157, 161, 135,
565 138, 128, 175, 135, 132, 133, 128, 174,
566 152, 155, 132, 128, 170, 128, 153, 160,
567 190, 192, 255, 128, 136, 138, 174, 128,
568 178, 255, 160, 168, 169, 171, 172, 173,
569 174, 188, 189, 190, 191, 161, 167, 144,
570 173, 128, 131, 163, 183, 189, 255, 133,
571 143, 145, 255, 147, 159, 128, 176, 177,
572 178, 128, 136, 144, 153, 144, 145, 146,
573 147, 148, 149, 154, 155, 156, 157, 158,
574 159, 150, 153, 131, 140, 255, 160, 163,
575 164, 165, 184, 185, 186, 161, 162, 133,
576 255, 170, 181, 183, 186, 128, 150, 152,
577 182, 184, 255, 192, 255, 128, 255, 173, 263 182, 184, 255, 192, 255, 128, 255, 173,
578 130, 133, 146, 159, 165, 171, 175, 255, 264 130, 133, 146, 159, 165, 171, 175, 255,
579 181, 190, 184, 185, 192, 255, 140, 134, 265 181, 190, 184, 185, 192, 255, 140, 134,
@@ -779,649 +465,959 @@ var _hcltok_trans_keys []byte = []byte{
779 171, 187, 155, 156, 151, 255, 156, 157, 465 171, 187, 155, 156, 151, 255, 156, 157,
780 160, 181, 255, 186, 187, 255, 162, 255, 466 160, 181, 255, 186, 187, 255, 162, 255,
781 160, 168, 161, 167, 158, 255, 160, 132, 467 160, 168, 161, 167, 158, 255, 160, 132,
782 135, 133, 134, 176, 255, 128, 191, 154, 468 135, 133, 134, 176, 255, 170, 181, 186,
783 164, 168, 128, 149, 150, 191, 128, 152, 469 191, 176, 180, 182, 183, 186, 189, 134,
784 153, 191, 181, 128, 159, 160, 189, 190, 470 140, 136, 138, 142, 161, 163, 255, 130,
785 191, 189, 128, 131, 132, 185, 186, 191, 471 137, 136, 255, 144, 170, 176, 178, 160,
786 144, 128, 151, 152, 161, 162, 176, 177, 472 191, 128, 138, 174, 175, 177, 255, 148,
787 255, 169, 177, 129, 132, 141, 142, 145, 473 150, 164, 167, 173, 176, 185, 189, 190,
788 146, 179, 181, 186, 188, 190, 191, 192, 474 192, 255, 144, 146, 175, 141, 255, 166,
789 255, 142, 158, 128, 155, 156, 161, 162, 475 176, 178, 255, 186, 138, 170, 180, 181,
790 175, 176, 177, 178, 191, 169, 177, 180, 476 160, 161, 162, 164, 165, 166, 167, 168,
791 183, 128, 132, 133, 138, 139, 142, 143, 477 169, 170, 171, 172, 173, 174, 175, 176,
792 144, 145, 146, 147, 185, 186, 191, 157, 478 177, 178, 179, 180, 181, 182, 184, 186,
793 128, 152, 153, 158, 159, 177, 178, 180, 479 187, 188, 189, 190, 183, 185, 154, 164,
794 181, 191, 142, 146, 169, 177, 180, 189, 480 168, 128, 149, 128, 152, 189, 132, 185,
795 128, 132, 133, 185, 186, 191, 144, 185, 481 144, 152, 161, 177, 255, 169, 177, 129,
796 128, 159, 160, 161, 162, 191, 169, 177, 482 132, 141, 142, 145, 146, 179, 181, 186,
797 180, 189, 128, 132, 133, 140, 141, 142, 483 188, 190, 255, 142, 156, 157, 159, 161,
798 143, 144, 145, 146, 147, 185, 186, 191, 484 176, 177, 133, 138, 143, 144, 147, 168,
799 158, 177, 128, 155, 156, 161, 162, 191, 485 170, 176, 178, 179, 181, 182, 184, 185,
800 131, 145, 155, 157, 128, 132, 133, 138, 486 158, 153, 156, 178, 180, 189, 133, 141,
801 139, 141, 142, 149, 150, 152, 153, 159, 487 143, 145, 147, 168, 170, 176, 178, 179,
802 160, 162, 163, 164, 165, 167, 168, 170, 488 181, 185, 144, 185, 160, 161, 189, 133,
803 171, 173, 174, 185, 186, 191, 144, 128, 489 140, 143, 144, 147, 168, 170, 176, 178,
804 191, 141, 145, 169, 189, 128, 132, 133, 490 179, 181, 185, 177, 156, 157, 159, 161,
805 185, 186, 191, 128, 151, 152, 154, 155, 491 131, 156, 133, 138, 142, 144, 146, 149,
806 159, 160, 161, 162, 191, 128, 141, 145, 492 153, 154, 158, 159, 163, 164, 168, 170,
807 169, 180, 189, 129, 132, 133, 185, 186, 493 174, 185, 144, 189, 133, 140, 142, 144,
808 191, 158, 128, 159, 160, 161, 162, 176, 494 146, 168, 170, 185, 152, 154, 160, 161,
809 177, 178, 179, 191, 141, 145, 189, 128, 495 128, 189, 133, 140, 142, 144, 146, 168,
810 132, 133, 186, 187, 191, 142, 128, 147, 496 170, 179, 181, 185, 158, 160, 161, 177,
811 148, 150, 151, 158, 159, 161, 162, 185, 497 178, 189, 133, 140, 142, 144, 146, 186,
812 186, 191, 178, 188, 128, 132, 133, 150, 498 142, 148, 150, 159, 161, 186, 191, 189,
813 151, 153, 154, 189, 190, 191, 128, 134, 499 133, 150, 154, 177, 179, 187, 128, 134,
814 135, 191, 128, 177, 129, 179, 180, 191, 500 129, 176, 178, 179, 132, 138, 141, 165,
815 128, 131, 137, 141, 152, 160, 164, 166, 501 167, 189, 129, 130, 135, 136, 148, 151,
816 172, 177, 189, 129, 132, 133, 134, 135, 502 153, 159, 161, 163, 170, 171, 173, 176,
817 138, 139, 147, 148, 167, 168, 169, 170, 503 178, 179, 134, 128, 132, 156, 159, 128,
818 179, 180, 191, 133, 128, 134, 135, 155, 504 128, 135, 137, 172, 136, 140, 128, 129,
819 156, 159, 160, 191, 128, 129, 191, 136, 505 130, 131, 137, 138, 139, 140, 141, 142,
820 128, 172, 173, 191, 128, 135, 136, 140, 506 143, 144, 153, 154, 155, 156, 157, 158,
821 141, 191, 191, 128, 170, 171, 190, 161, 507 159, 160, 161, 162, 163, 164, 165, 166,
822 128, 143, 144, 149, 150, 153, 154, 157, 508 167, 168, 169, 170, 172, 173, 174, 175,
823 158, 164, 165, 166, 167, 173, 174, 176, 509 176, 177, 178, 179, 180, 181, 182, 184,
824 177, 180, 181, 255, 130, 141, 143, 159, 510 188, 189, 190, 191, 132, 152, 185, 187,
825 134, 187, 136, 140, 142, 143, 137, 151, 511 191, 128, 170, 161, 144, 149, 154, 157,
826 153, 142, 143, 158, 159, 137, 177, 191, 512 165, 166, 174, 176, 181, 255, 130, 141,
827 142, 143, 182, 183, 192, 255, 129, 151, 513 143, 159, 155, 255, 128, 140, 142, 145,
828 128, 133, 134, 135, 136, 255, 145, 150, 514 160, 177, 128, 145, 160, 172, 174, 176,
829 151, 155, 191, 192, 255, 128, 143, 144, 515 151, 156, 170, 128, 168, 176, 255, 138,
830 159, 160, 255, 182, 183, 190, 191, 192, 516 255, 128, 150, 160, 255, 149, 255, 167,
831 255, 128, 129, 255, 173, 174, 192, 255, 517 133, 179, 133, 139, 131, 160, 174, 175,
832 128, 129, 154, 155, 159, 160, 255, 171, 518 186, 255, 166, 255, 128, 163, 141, 143,
833 173, 185, 191, 192, 255, 141, 128, 145, 519 154, 189, 169, 172, 174, 177, 181, 182,
834 146, 159, 160, 177, 178, 191, 173, 128, 520 129, 130, 132, 133, 134, 176, 177, 178,
835 145, 146, 159, 160, 176, 177, 191, 128, 521 179, 180, 181, 182, 183, 177, 191, 165,
836 179, 180, 191, 151, 156, 128, 191, 128, 522 170, 175, 177, 180, 255, 168, 174, 176,
837 159, 160, 255, 184, 191, 192, 255, 169, 523 255, 128, 134, 136, 142, 144, 150, 152,
838 128, 170, 171, 175, 176, 255, 182, 191, 524 158, 128, 129, 130, 131, 132, 133, 134,
839 192, 255, 128, 158, 159, 191, 128, 143, 525 135, 144, 145, 255, 133, 135, 161, 169,
840 144, 173, 174, 175, 176, 180, 181, 191, 526 177, 181, 184, 188, 160, 151, 154, 128,
841 128, 171, 172, 175, 176, 255, 138, 191,
842 192, 255, 128, 150, 151, 159, 160, 255,
843 149, 191, 192, 255, 167, 128, 191, 128,
844 132, 133, 179, 180, 191, 128, 132, 133,
845 139, 140, 191, 128, 130, 131, 160, 161,
846 173, 174, 175, 176, 185, 186, 255, 166,
847 191, 192, 255, 128, 163, 164, 191, 128,
848 140, 141, 143, 144, 153, 154, 189, 190,
849 191, 128, 136, 137, 191, 173, 128, 168,
850 169, 177, 178, 180, 181, 182, 183, 191,
851 0, 127, 192, 255, 150, 151, 158, 159,
852 152, 154, 156, 158, 134, 135, 142, 143,
853 190, 191, 192, 255, 181, 189, 191, 128,
854 190, 133, 181, 128, 129, 130, 140, 141,
855 143, 144, 147, 148, 149, 150, 155, 156,
856 159, 160, 172, 173, 177, 178, 188, 189,
857 191, 177, 191, 128, 190, 128, 143, 144,
858 156, 157, 191, 130, 135, 148, 164, 166,
859 168, 128, 137, 138, 149, 150, 151, 152,
860 157, 158, 169, 170, 185, 186, 187, 188,
861 191, 142, 128, 132, 133, 137, 138, 159,
862 160, 255, 137, 191, 192, 255, 175, 128,
863 255, 159, 165, 170, 175, 177, 180, 191,
864 192, 255, 166, 173, 128, 167, 168, 175,
865 176, 255, 168, 174, 176, 191, 192, 255,
866 167, 175, 183, 191, 128, 150, 151, 159,
867 160, 190, 135, 143, 151, 128, 158, 159,
868 191, 128, 132, 133, 135, 136, 160, 161,
869 169, 170, 176, 177, 181, 182, 183, 184,
870 188, 189, 191, 160, 151, 154, 187, 192,
871 255, 128, 132, 133, 173, 174, 176, 177,
872 255, 143, 159, 187, 191, 192, 255, 128,
873 175, 176, 191, 150, 191, 192, 255, 141,
874 191, 192, 255, 128, 143, 144, 189, 190,
875 191, 141, 143, 160, 169, 172, 191, 192,
876 255, 191, 128, 174, 175, 190, 128, 157,
877 158, 159, 160, 255, 176, 191, 192, 255,
878 128, 150, 151, 159, 160, 161, 162, 255,
879 175, 137, 138, 184, 191, 192, 255, 128,
880 182, 183, 255, 130, 134, 139, 163, 191,
881 192, 255, 128, 129, 130, 179, 180, 191,
882 187, 189, 128, 177, 178, 183, 184, 191,
883 128, 137, 138, 165, 166, 175, 176, 255,
884 135, 159, 189, 191, 192, 255, 128, 131,
885 132, 178, 179, 191, 143, 165, 191, 128,
886 159, 160, 175, 176, 185, 186, 190, 128,
887 168, 169, 191, 131, 186, 128, 139, 140,
888 159, 160, 182, 183, 189, 190, 255, 176,
889 178, 180, 183, 184, 190, 191, 192, 255,
890 129, 128, 130, 131, 154, 155, 157, 158,
891 159, 160, 170, 171, 177, 178, 180, 181,
892 191, 128, 167, 175, 129, 134, 135, 136,
893 137, 142, 143, 144, 145, 150, 151, 159,
894 160, 255, 155, 166, 175, 128, 162, 163,
895 191, 164, 175, 135, 138, 188, 191, 192,
896 255, 174, 175, 154, 191, 192, 255, 157,
897 169, 183, 189, 191, 128, 134, 135, 146,
898 147, 151, 152, 158, 159, 190, 130, 133,
899 128, 255, 178, 191, 192, 255, 128, 146,
900 147, 255, 190, 191, 192, 255, 128, 143,
901 144, 255, 144, 145, 136, 175, 188, 191,
902 192, 255, 181, 128, 175, 176, 255, 189,
903 191, 192, 255, 128, 160, 161, 186, 187,
904 191, 128, 129, 154, 155, 165, 166, 255,
905 191, 192, 255, 128, 129, 130, 135, 136,
906 137, 138, 143, 144, 145, 146, 151, 152,
907 153, 154, 156, 157, 191, 128, 191, 128,
908 129, 130, 131, 133, 138, 139, 140, 141,
909 142, 143, 144, 145, 146, 147, 148, 149,
910 152, 156, 157, 160, 161, 162, 163, 164,
911 166, 168, 169, 170, 171, 172, 173, 174,
912 176, 177, 132, 151, 153, 155, 158, 175,
913 178, 179, 180, 191, 140, 167, 187, 190,
914 128, 255, 142, 143, 158, 191, 192, 255,
915 187, 191, 192, 255, 128, 180, 181, 191,
916 128, 156, 157, 159, 160, 255, 145, 191,
917 192, 255, 128, 159, 160, 175, 176, 255,
918 139, 143, 182, 191, 192, 255, 144, 132,
919 135, 150, 191, 192, 255, 158, 175, 148,
920 151, 188, 191, 192, 255, 128, 167, 168,
921 175, 176, 255, 164, 191, 192, 255, 183,
922 191, 192, 255, 128, 149, 150, 159, 160,
923 167, 168, 191, 136, 182, 188, 128, 133,
924 134, 137, 138, 184, 185, 190, 191, 255,
925 150, 159, 183, 191, 192, 255, 179, 128,
926 159, 160, 181, 182, 191, 128, 149, 150,
927 159, 160, 185, 186, 191, 128, 183, 184,
928 189, 190, 191, 128, 148, 152, 129, 143,
929 144, 179, 180, 191, 128, 159, 160, 188,
930 189, 191, 128, 156, 157, 191, 136, 128,
931 164, 165, 191, 128, 181, 182, 191, 128,
932 149, 150, 159, 160, 178, 179, 191, 128,
933 145, 146, 191, 128, 178, 179, 191, 128,
934 130, 131, 132, 133, 134, 135, 136, 138,
935 139, 140, 141, 144, 145, 146, 147, 150,
936 151, 152, 153, 154, 156, 162, 163, 171,
937 176, 177, 178, 129, 191, 128, 130, 131,
938 183, 184, 191, 128, 130, 131, 175, 176,
939 191, 128, 143, 144, 168, 169, 191, 128,
940 130, 131, 166, 167, 191, 182, 128, 143,
941 144, 178, 179, 191, 128, 130, 131, 178,
942 179, 191, 128, 154, 156, 129, 132, 133,
943 191, 146, 128, 171, 172, 191, 135, 137,
944 142, 158, 128, 168, 169, 175, 176, 255,
945 159, 191, 192, 255, 144, 128, 156, 157,
946 161, 162, 191, 128, 134, 135, 138, 139,
947 191, 128, 175, 176, 191, 134, 128, 131,
948 132, 135, 136, 191, 128, 174, 175, 191,
949 128, 151, 152, 155, 156, 191, 132, 128,
950 191, 128, 170, 171, 191, 128, 153, 154,
951 191, 160, 190, 192, 255, 128, 184, 185,
952 191, 137, 128, 174, 175, 191, 128, 129,
953 177, 178, 255, 144, 191, 192, 255, 128,
954 142, 143, 144, 145, 146, 149, 129, 148,
955 150, 191, 175, 191, 192, 255, 132, 191,
956 192, 255, 128, 144, 129, 143, 145, 191,
957 144, 153, 128, 143, 145, 152, 154, 191,
958 135, 191, 192, 255, 160, 168, 169, 171,
959 172, 173, 174, 188, 189, 190, 191, 128,
960 159, 161, 167, 170, 187, 185, 191, 192,
961 255, 128, 143, 144, 173, 174, 191, 128,
962 131, 132, 162, 163, 183, 184, 188, 189,
963 255, 133, 143, 145, 191, 192, 255, 128,
964 146, 147, 159, 160, 191, 160, 128, 191,
965 128, 129, 191, 192, 255, 159, 160, 171,
966 128, 170, 172, 191, 192, 255, 173, 191,
967 192, 255, 179, 191, 192, 255, 128, 176,
968 177, 178, 129, 191, 128, 129, 130, 191,
969 171, 175, 189, 191, 192, 255, 128, 136,
970 137, 143, 144, 153, 154, 191, 144, 145,
971 146, 147, 148, 149, 154, 155, 156, 157,
972 158, 159, 128, 143, 150, 153, 160, 191,
973 149, 157, 173, 186, 188, 160, 161, 163,
974 164, 167, 168, 132, 134, 149, 157, 186,
975 191, 139, 140, 192, 255, 133, 145, 128,
976 134, 135, 137, 138, 255, 166, 167, 129,
977 155, 187, 149, 181, 143, 175, 137, 169,
978 131, 140, 191, 192, 255, 160, 163, 164,
979 165, 184, 185, 186, 128, 159, 161, 162,
980 166, 191, 133, 191, 192, 255, 132, 160,
981 163, 167, 179, 184, 186, 128, 164, 165,
982 168, 169, 187, 188, 191, 130, 135, 137,
983 139, 144, 147, 151, 153, 155, 157, 159,
984 163, 171, 179, 184, 189, 191, 128, 140,
985 141, 148, 149, 160, 161, 164, 165, 166,
986 167, 190, 138, 164, 170, 128, 155, 156,
987 160, 161, 187, 188, 191, 128, 191, 155,
988 156, 128, 191, 151, 191, 192, 255, 156,
989 157, 160, 128, 191, 181, 191, 192, 255,
990 158, 159, 186, 128, 185, 187, 191, 192,
991 255, 162, 191, 192, 255, 160, 168, 128,
992 159, 161, 167, 169, 191, 158, 191, 192,
993 255, 123, 123, 128, 191, 128, 191, 128,
994 191, 128, 191, 128, 191, 10, 123, 123,
995 128, 191, 128, 191, 128, 191, 123, 123,
996 10, 123, 128, 191, 128, 191, 128, 191,
997 123, 123, 170, 181, 183, 186, 128, 150,
998 152, 182, 184, 255, 192, 255, 128, 255,
999 173, 130, 133, 146, 159, 165, 171, 175,
1000 255, 181, 190, 184, 185, 192, 255, 140,
1001 134, 138, 142, 161, 163, 255, 182, 130,
1002 136, 137, 176, 151, 152, 154, 160, 190,
1003 136, 144, 192, 255, 135, 129, 130, 132,
1004 133, 144, 170, 176, 178, 144, 154, 160,
1005 191, 128, 169, 174, 255, 148, 169, 157,
1006 158, 189, 190, 192, 255, 144, 255, 139,
1007 140, 178, 255, 186, 128, 181, 160, 161,
1008 162, 163, 164, 165, 166, 167, 168, 169,
1009 170, 171, 172, 173, 174, 175, 176, 177,
1010 178, 179, 180, 181, 182, 183, 184, 185,
1011 186, 187, 188, 189, 190, 191, 128, 173,
1012 128, 155, 160, 180, 182, 189, 148, 161,
1013 163, 255, 176, 164, 165, 132, 169, 177,
1014 141, 142, 145, 146, 179, 181, 186, 187,
1015 158, 133, 134, 137, 138, 143, 150, 152,
1016 155, 164, 165, 178, 255, 188, 129, 131,
1017 133, 138, 143, 144, 147, 168, 170, 176,
1018 178, 179, 181, 182, 184, 185, 190, 255,
1019 157, 131, 134, 137, 138, 142, 144, 146,
1020 152, 159, 165, 182, 255, 129, 131, 133,
1021 141, 143, 145, 147, 168, 170, 176, 178,
1022 179, 181, 185, 188, 255, 134, 138, 142,
1023 143, 145, 159, 164, 165, 176, 184, 186,
1024 255, 129, 131, 133, 140, 143, 144, 147,
1025 168, 170, 176, 178, 179, 181, 185, 188,
1026 191, 177, 128, 132, 135, 136, 139, 141,
1027 150, 151, 156, 157, 159, 163, 166, 175,
1028 156, 130, 131, 133, 138, 142, 144, 146,
1029 149, 153, 154, 158, 159, 163, 164, 168,
1030 170, 174, 185, 190, 191, 144, 151, 128,
1031 130, 134, 136, 138, 141, 166, 175, 128,
1032 131, 133, 140, 142, 144, 146, 168, 170,
1033 185, 189, 255, 133, 137, 151, 142, 148,
1034 155, 159, 164, 165, 176, 255, 128, 131,
1035 133, 140, 142, 144, 146, 168, 170, 179,
1036 181, 185, 188, 191, 158, 128, 132, 134,
1037 136, 138, 141, 149, 150, 160, 163, 166,
1038 175, 177, 178, 129, 131, 133, 140, 142,
1039 144, 146, 186, 189, 255, 133, 137, 143,
1040 147, 152, 158, 164, 165, 176, 185, 192,
1041 255, 189, 130, 131, 133, 150, 154, 177,
1042 179, 187, 138, 150, 128, 134, 143, 148,
1043 152, 159, 166, 175, 178, 179, 129, 186,
1044 128, 142, 144, 153, 132, 138, 141, 165,
1045 167, 129, 130, 135, 136, 148, 151, 153,
1046 159, 161, 163, 170, 171, 173, 185, 187,
1047 189, 134, 128, 132, 136, 141, 144, 153,
1048 156, 159, 128, 181, 183, 185, 152, 153,
1049 160, 169, 190, 191, 128, 135, 137, 172,
1050 177, 191, 128, 132, 134, 151, 153, 188,
1051 134, 128, 129, 130, 131, 137, 138, 139,
1052 140, 141, 142, 143, 144, 153, 154, 155,
1053 156, 157, 158, 159, 160, 161, 162, 163,
1054 164, 165, 166, 167, 168, 169, 170, 173,
1055 175, 176, 177, 178, 179, 181, 182, 183,
1056 188, 189, 190, 191, 132, 152, 172, 184,
1057 185, 187, 128, 191, 128, 137, 144, 255,
1058 158, 159, 134, 187, 136, 140, 142, 143,
1059 137, 151, 153, 142, 143, 158, 159, 137,
1060 177, 142, 143, 182, 183, 191, 255, 128,
1061 130, 133, 136, 150, 152, 255, 145, 150,
1062 151, 155, 156, 160, 168, 178, 255, 128,
1063 143, 160, 255, 182, 183, 190, 255, 129,
1064 255, 173, 174, 192, 255, 129, 154, 160,
1065 255, 171, 173, 185, 255, 128, 140, 142,
1066 148, 160, 180, 128, 147, 160, 172, 174,
1067 176, 178, 179, 148, 150, 152, 155, 158,
1068 159, 170, 255, 139, 141, 144, 153, 160,
1069 255, 184, 255, 128, 170, 176, 255, 182,
1070 255, 128, 158, 160, 171, 176, 187, 134,
1071 173, 176, 180, 128, 171, 176, 255, 138,
1072 143, 155, 255, 128, 155, 160, 255, 159,
1073 189, 190, 192, 255, 167, 128, 137, 144,
1074 153, 176, 189, 140, 143, 154, 170, 180,
1075 255, 180, 255, 128, 183, 128, 137, 141,
1076 189, 128, 136, 144, 146, 148, 182, 184,
1077 185, 128, 181, 187, 191, 150, 151, 158,
1078 159, 152, 154, 156, 158, 134, 135, 142,
1079 143, 190, 255, 190, 128, 180, 182, 188,
1080 130, 132, 134, 140, 144, 147, 150, 155,
1081 160, 172, 178, 180, 182, 188, 128, 129,
1082 130, 131, 132, 133, 134, 176, 177, 178,
1083 179, 180, 181, 182, 183, 191, 255, 129,
1084 147, 149, 176, 178, 190, 192, 255, 144,
1085 156, 161, 144, 156, 165, 176, 130, 135,
1086 149, 164, 166, 168, 138, 147, 152, 157,
1087 170, 185, 188, 191, 142, 133, 137, 160,
1088 255, 137, 255, 128, 174, 176, 255, 159,
1089 165, 170, 180, 255, 167, 173, 128, 165,
1090 176, 255, 168, 174, 176, 190, 192, 255,
1091 128, 150, 160, 166, 168, 174, 176, 182,
1092 184, 190, 128, 134, 136, 142, 144, 150,
1093 152, 158, 160, 191, 128, 129, 130, 131,
1094 132, 133, 134, 135, 144, 145, 255, 133,
1095 135, 161, 175, 177, 181, 184, 188, 160,
1096 151, 152, 187, 192, 255, 133, 173, 177,
1097 255, 143, 159, 187, 255, 176, 191, 182,
1098 183, 184, 191, 192, 255, 150, 255, 128,
1099 146, 147, 148, 152, 153, 154, 155, 156, 527 146, 147, 148, 152, 153, 154, 155, 156,
1100 158, 159, 160, 161, 162, 163, 164, 165, 528 158, 159, 160, 161, 162, 163, 164, 165,
1101 166, 167, 168, 169, 170, 171, 172, 173, 529 166, 167, 168, 169, 170, 171, 172, 173,
1102 174, 175, 176, 129, 255, 141, 255, 144, 530 174, 175, 176, 129, 255, 141, 143, 160,
1103 189, 141, 143, 172, 255, 191, 128, 175, 531 169, 172, 255, 191, 128, 174, 130, 134,
1104 180, 189, 151, 159, 162, 255, 175, 137, 532 139, 163, 255, 130, 179, 187, 189, 178,
1105 138, 184, 255, 183, 255, 168, 255, 128, 533 183, 138, 165, 176, 255, 135, 159, 189,
1106 179, 188, 134, 143, 154, 159, 184, 186, 534 255, 132, 178, 143, 160, 164, 166, 175,
1107 190, 255, 128, 173, 176, 255, 148, 159, 535 186, 190, 128, 168, 186, 128, 130, 132,
1108 189, 255, 129, 142, 154, 159, 191, 255, 536 139, 160, 182, 190, 255, 176, 178, 180,
1109 128, 182, 128, 141, 144, 153, 160, 182, 537 183, 184, 190, 255, 128, 130, 155, 157,
1110 186, 255, 128, 130, 155, 157, 160, 175, 538 160, 170, 178, 180, 128, 162, 164, 169,
1111 178, 182, 129, 134, 137, 142, 145, 150, 539 171, 172, 173, 174, 175, 180, 181, 182,
1112 160, 166, 168, 174, 176, 255, 155, 166, 540 183, 185, 186, 187, 188, 189, 190, 191,
1113 175, 128, 170, 172, 173, 176, 185, 158, 541 165, 179, 157, 190, 128, 134, 147, 151,
1114 159, 160, 255, 164, 175, 135, 138, 188, 542 159, 168, 170, 182, 184, 188, 176, 180,
1115 255, 164, 169, 171, 172, 173, 174, 175, 543 182, 255, 161, 186, 144, 145, 146, 147,
1116 180, 181, 182, 183, 184, 185, 187, 188, 544 148, 150, 151, 152, 155, 157, 158, 160,
1117 189, 190, 191, 165, 186, 174, 175, 154, 545 170, 171, 172, 175, 161, 169, 128, 129,
1118 255, 190, 128, 134, 147, 151, 157, 168, 546 130, 131, 133, 138, 139, 140, 141, 142,
1119 170, 182, 184, 188, 128, 129, 131, 132, 547 143, 144, 145, 146, 147, 148, 149, 152,
1120 134, 255, 147, 255, 190, 255, 144, 145, 548 156, 157, 160, 161, 162, 163, 164, 166,
1121 136, 175, 188, 255, 128, 143, 160, 175, 549 168, 169, 170, 171, 172, 173, 174, 176,
1122 179, 180, 141, 143, 176, 180, 182, 255, 550 177, 153, 155, 178, 179, 145, 255, 139,
1123 189, 255, 191, 144, 153, 161, 186, 129, 551 143, 182, 255, 158, 175, 128, 144, 147,
1124 154, 166, 255, 191, 255, 130, 135, 138, 552 149, 151, 153, 179, 128, 135, 137, 164,
1125 143, 146, 151, 154, 156, 144, 145, 146,
1126 147, 148, 150, 151, 152, 155, 157, 158,
1127 160, 170, 171, 172, 175, 161, 169, 128,
1128 129, 130, 131, 133, 135, 138, 139, 140,
1129 141, 142, 143, 144, 145, 146, 147, 148,
1130 149, 152, 156, 157, 160, 161, 162, 163,
1131 164, 166, 168, 169, 170, 171, 172, 173,
1132 174, 176, 177, 153, 155, 178, 179, 128,
1133 139, 141, 166, 168, 186, 188, 189, 191,
1134 255, 142, 143, 158, 255, 187, 255, 128,
1135 180, 189, 128, 156, 160, 255, 145, 159,
1136 161, 255, 128, 159, 176, 255, 139, 143,
1137 187, 255, 128, 157, 160, 255, 144, 132,
1138 135, 150, 255, 158, 159, 170, 175, 148,
1139 151, 188, 255, 128, 167, 176, 255, 164,
1140 255, 183, 255, 128, 149, 160, 167, 136,
1141 188, 128, 133, 138, 181, 183, 184, 191,
1142 255, 150, 159, 183, 255, 128, 158, 160,
1143 178, 180, 181, 128, 149, 160, 185, 128,
1144 183, 190, 191, 191, 128, 131, 133, 134,
1145 140, 147, 149, 151, 153, 179, 184, 186,
1146 160, 188, 128, 156, 128, 135, 137, 166,
1147 128, 181, 128, 149, 160, 178, 128, 145,
1148 128, 178, 129, 130, 131, 132, 133, 135,
1149 136, 138, 139, 140, 141, 144, 145, 146,
1150 147, 150, 151, 152, 153, 154, 155, 156,
1151 162, 163, 171, 176, 177, 178, 128, 134,
1152 135, 165, 176, 190, 144, 168, 176, 185,
1153 128, 180, 182, 191, 182, 144, 179, 155,
1154 133, 137, 141, 143, 157, 255, 190, 128,
1155 145, 147, 183, 136, 128, 134, 138, 141,
1156 143, 157, 159, 168, 176, 255, 171, 175,
1157 186, 255, 128, 131, 133, 140, 143, 144,
1158 147, 168, 170, 176, 178, 179, 181, 185,
1159 188, 191, 144, 151, 128, 132, 135, 136,
1160 139, 141, 157, 163, 166, 172, 176, 180,
1161 128, 138, 144, 153, 134, 136, 143, 154,
1162 255, 128, 181, 184, 255, 129, 151, 158,
1163 255, 129, 131, 133, 143, 154, 255, 128,
1164 137, 128, 153, 157, 171, 176, 185, 160,
1165 255, 170, 190, 192, 255, 128, 184, 128,
1166 136, 138, 182, 184, 191, 128, 144, 153,
1167 178, 255, 168, 144, 145, 183, 255, 128,
1168 142, 145, 149, 129, 141, 144, 146, 147,
1169 148, 175, 255, 132, 255, 128, 144, 129,
1170 143, 144, 153, 145, 152, 135, 255, 160,
1171 168, 169, 171, 172, 173, 174, 188, 189,
1172 190, 191, 161, 167, 185, 255, 128, 158,
1173 160, 169, 144, 173, 176, 180, 128, 131,
1174 144, 153, 163, 183, 189, 255, 144, 255,
1175 133, 143, 191, 255, 143, 159, 160, 128,
1176 129, 255, 159, 160, 171, 172, 255, 173,
1177 255, 179, 255, 128, 176, 177, 178, 128,
1178 129, 171, 175, 189, 255, 128, 136, 144,
1179 153, 157, 158, 133, 134, 137, 144, 145,
1180 146, 147, 148, 149, 154, 155, 156, 157,
1181 158, 159, 168, 169, 170, 150, 153, 165,
1182 169, 173, 178, 187, 255, 131, 132, 140,
1183 169, 174, 255, 130, 132, 149, 157, 173,
1184 186, 188, 160, 161, 163, 164, 167, 168,
1185 132, 134, 149, 157, 186, 139, 140, 191,
1186 255, 134, 128, 132, 138, 144, 146, 255,
1187 166, 167, 129, 155, 187, 149, 181, 143,
1188 175, 137, 169, 131, 140, 141, 192, 255,
1189 128, 182, 187, 255, 173, 180, 182, 255,
1190 132, 155, 159, 161, 175, 128, 160, 163,
1191 164, 165, 184, 185, 186, 161, 162, 128,
1192 134, 136, 152, 155, 161, 163, 164, 166,
1193 170, 133, 143, 151, 255, 139, 143, 154,
1194 255, 164, 167, 185, 187, 128, 131, 133,
1195 159, 161, 162, 169, 178, 180, 183, 130,
1196 135, 137, 139, 148, 151, 153, 155, 157,
1197 159, 164, 190, 141, 143, 145, 146, 161,
1198 162, 167, 170, 172, 178, 180, 183, 185,
1199 188, 128, 137, 139, 155, 161, 163, 165,
1200 169, 171, 187, 155, 156, 151, 255, 156,
1201 157, 160, 181, 255, 186, 187, 255, 162,
1202 255, 160, 168, 161, 167, 158, 255, 160,
1203 132, 135, 133, 134, 176, 255, 128, 191,
1204 154, 164, 168, 128, 149, 150, 191, 128,
1205 152, 153, 191, 181, 128, 159, 160, 189,
1206 190, 191, 189, 128, 131, 132, 185, 186,
1207 191, 144, 128, 151, 152, 161, 162, 176,
1208 177, 255, 169, 177, 129, 132, 141, 142,
1209 145, 146, 179, 181, 186, 188, 190, 191,
1210 192, 255, 142, 158, 128, 155, 156, 161,
1211 162, 175, 176, 177, 178, 191, 169, 177,
1212 180, 183, 128, 132, 133, 138, 139, 142,
1213 143, 144, 145, 146, 147, 185, 186, 191,
1214 157, 128, 152, 153, 158, 159, 177, 178,
1215 180, 181, 191, 142, 146, 169, 177, 180,
1216 189, 128, 132, 133, 185, 186, 191, 144,
1217 185, 128, 159, 160, 161, 162, 191, 169,
1218 177, 180, 189, 128, 132, 133, 140, 141,
1219 142, 143, 144, 145, 146, 147, 185, 186,
1220 191, 158, 177, 128, 155, 156, 161, 162,
1221 191, 131, 145, 155, 157, 128, 132, 133,
1222 138, 139, 141, 142, 149, 150, 152, 153,
1223 159, 160, 162, 163, 164, 165, 167, 168,
1224 170, 171, 173, 174, 185, 186, 191, 144,
1225 128, 191, 141, 145, 169, 189, 128, 132,
1226 133, 185, 186, 191, 128, 151, 152, 154,
1227 155, 159, 160, 161, 162, 191, 128, 141,
1228 145, 169, 180, 189, 129, 132, 133, 185,
1229 186, 191, 158, 128, 159, 160, 161, 162,
1230 176, 177, 178, 179, 191, 141, 145, 189,
1231 128, 132, 133, 186, 187, 191, 142, 128,
1232 147, 148, 150, 151, 158, 159, 161, 162,
1233 185, 186, 191, 178, 188, 128, 132, 133,
1234 150, 151, 153, 154, 189, 190, 191, 128,
1235 134, 135, 191, 128, 177, 129, 179, 180,
1236 191, 128, 131, 137, 141, 152, 160, 164,
1237 166, 172, 177, 189, 129, 132, 133, 134,
1238 135, 138, 139, 147, 148, 167, 168, 169,
1239 170, 179, 180, 191, 133, 128, 134, 135,
1240 155, 156, 159, 160, 191, 128, 129, 191,
1241 136, 128, 172, 173, 191, 128, 135, 136,
1242 140, 141, 191, 191, 128, 170, 171, 190,
1243 161, 128, 143, 144, 149, 150, 153, 154,
1244 157, 158, 164, 165, 166, 167, 173, 174,
1245 176, 177, 180, 181, 255, 130, 141, 143,
1246 159, 134, 187, 136, 140, 142, 143, 137,
1247 151, 153, 142, 143, 158, 159, 137, 177,
1248 191, 142, 143, 182, 183, 192, 255, 129,
1249 151, 128, 133, 134, 135, 136, 255, 145,
1250 150, 151, 155, 191, 192, 255, 128, 143,
1251 144, 159, 160, 255, 182, 183, 190, 191,
1252 192, 255, 128, 129, 255, 173, 174, 192,
1253 255, 128, 129, 154, 155, 159, 160, 255,
1254 171, 173, 185, 191, 192, 255, 141, 128,
1255 145, 146, 159, 160, 177, 178, 191, 173,
1256 128, 145, 146, 159, 160, 176, 177, 191,
1257 128, 179, 180, 191, 151, 156, 128, 191,
1258 128, 159, 160, 255, 184, 191, 192, 255,
1259 169, 128, 170, 171, 175, 176, 255, 182,
1260 191, 192, 255, 128, 158, 159, 191, 128,
1261 143, 144, 173, 174, 175, 176, 180, 181,
1262 191, 128, 171, 172, 175, 176, 255, 138,
1263 191, 192, 255, 128, 150, 151, 159, 160,
1264 255, 149, 191, 192, 255, 167, 128, 191,
1265 128, 132, 133, 179, 180, 191, 128, 132,
1266 133, 139, 140, 191, 128, 130, 131, 160,
1267 161, 173, 174, 175, 176, 185, 186, 255,
1268 166, 191, 192, 255, 128, 163, 164, 191,
1269 128, 140, 141, 143, 144, 153, 154, 189,
1270 190, 191, 128, 136, 137, 191, 173, 128,
1271 168, 169, 177, 178, 180, 181, 182, 183,
1272 191, 0, 127, 192, 255, 150, 151, 158,
1273 159, 152, 154, 156, 158, 134, 135, 142,
1274 143, 190, 191, 192, 255, 181, 189, 191,
1275 128, 190, 133, 181, 128, 129, 130, 140,
1276 141, 143, 144, 147, 148, 149, 150, 155,
1277 156, 159, 160, 172, 173, 177, 178, 188,
1278 189, 191, 177, 191, 128, 190, 128, 143,
1279 144, 156, 157, 191, 130, 135, 148, 164,
1280 166, 168, 128, 137, 138, 149, 150, 151,
1281 152, 157, 158, 169, 170, 185, 186, 187,
1282 188, 191, 142, 128, 132, 133, 137, 138,
1283 159, 160, 255, 137, 191, 192, 255, 175,
1284 128, 255, 159, 165, 170, 175, 177, 180,
1285 191, 192, 255, 166, 173, 128, 167, 168,
1286 175, 176, 255, 168, 174, 176, 191, 192,
1287 255, 167, 175, 183, 191, 128, 150, 151,
1288 159, 160, 190, 135, 143, 151, 128, 158,
1289 159, 191, 128, 132, 133, 135, 136, 160,
1290 161, 169, 170, 176, 177, 181, 182, 183,
1291 184, 188, 189, 191, 160, 151, 154, 187,
1292 192, 255, 128, 132, 133, 173, 174, 176,
1293 177, 255, 143, 159, 187, 191, 192, 255,
1294 128, 175, 176, 191, 150, 191, 192, 255,
1295 141, 191, 192, 255, 128, 143, 144, 189,
1296 190, 191, 141, 143, 160, 169, 172, 191,
1297 192, 255, 191, 128, 174, 175, 190, 128,
1298 157, 158, 159, 160, 255, 176, 191, 192,
1299 255, 128, 150, 151, 159, 160, 161, 162,
1300 255, 175, 137, 138, 184, 191, 192, 255,
1301 128, 182, 183, 255, 130, 134, 139, 163,
1302 191, 192, 255, 128, 129, 130, 179, 180,
1303 191, 187, 189, 128, 177, 178, 183, 184,
1304 191, 128, 137, 138, 165, 166, 175, 176,
1305 255, 135, 159, 189, 191, 192, 255, 128,
1306 131, 132, 178, 179, 191, 143, 165, 191,
1307 128, 159, 160, 175, 176, 185, 186, 190,
1308 128, 168, 169, 191, 131, 186, 128, 139,
1309 140, 159, 160, 182, 183, 189, 190, 255,
1310 176, 178, 180, 183, 184, 190, 191, 192,
1311 255, 129, 128, 130, 131, 154, 155, 157,
1312 158, 159, 160, 170, 171, 177, 178, 180,
1313 181, 191, 128, 167, 175, 129, 134, 135,
1314 136, 137, 142, 143, 144, 145, 150, 151,
1315 159, 160, 255, 155, 166, 175, 128, 162,
1316 163, 191, 164, 175, 135, 138, 188, 191,
1317 192, 255, 174, 175, 154, 191, 192, 255,
1318 157, 169, 183, 189, 191, 128, 134, 135,
1319 146, 147, 151, 152, 158, 159, 190, 130,
1320 133, 128, 255, 178, 191, 192, 255, 128,
1321 146, 147, 255, 190, 191, 192, 255, 128,
1322 143, 144, 255, 144, 145, 136, 175, 188,
1323 191, 192, 255, 181, 128, 175, 176, 255,
1324 189, 191, 192, 255, 128, 160, 161, 186,
1325 187, 191, 128, 129, 154, 155, 165, 166,
1326 255, 191, 192, 255, 128, 129, 130, 135,
1327 136, 137, 138, 143, 144, 145, 146, 151,
1328 152, 153, 154, 156, 157, 191, 128, 191,
1329 128, 129, 130, 131, 133, 138, 139, 140,
1330 141, 142, 143, 144, 145, 146, 147, 148,
1331 149, 152, 156, 157, 160, 161, 162, 163,
1332 164, 166, 168, 169, 170, 171, 172, 173,
1333 174, 176, 177, 132, 151, 153, 155, 158,
1334 175, 178, 179, 180, 191, 140, 167, 187,
1335 190, 128, 255, 142, 143, 158, 191, 192,
1336 255, 187, 191, 192, 255, 128, 180, 181,
1337 191, 128, 156, 157, 159, 160, 255, 145,
1338 191, 192, 255, 128, 159, 160, 175, 176,
1339 255, 139, 143, 182, 191, 192, 255, 144,
1340 132, 135, 150, 191, 192, 255, 158, 175,
1341 148, 151, 188, 191, 192, 255, 128, 167,
1342 168, 175, 176, 255, 164, 191, 192, 255,
1343 183, 191, 192, 255, 128, 149, 150, 159,
1344 160, 167, 168, 191, 136, 182, 188, 128,
1345 133, 134, 137, 138, 184, 185, 190, 191,
1346 255, 150, 159, 183, 191, 192, 255, 179,
1347 128, 159, 160, 181, 182, 191, 128, 149,
1348 150, 159, 160, 185, 186, 191, 128, 183,
1349 184, 189, 190, 191, 128, 148, 152, 129,
1350 143, 144, 179, 180, 191, 128, 159, 160,
1351 188, 189, 191, 128, 156, 157, 191, 136,
1352 128, 164, 165, 191, 128, 181, 182, 191,
1353 128, 149, 150, 159, 160, 178, 179, 191,
1354 128, 145, 146, 191, 128, 178, 179, 191,
1355 128, 130, 131, 132, 133, 134, 135, 136, 553 128, 130, 131, 132, 133, 134, 135, 136,
1356 138, 139, 140, 141, 144, 145, 146, 147, 554 138, 139, 140, 141, 144, 145, 146, 147,
1357 150, 151, 152, 153, 154, 156, 162, 163, 555 150, 151, 152, 153, 154, 156, 162, 163,
1358 171, 176, 177, 178, 129, 191, 128, 130, 556 171, 176, 177, 178, 131, 183, 131, 175,
1359 131, 183, 184, 191, 128, 130, 131, 175, 557 144, 168, 131, 166, 182, 144, 178, 131,
1360 176, 191, 128, 143, 144, 168, 169, 191, 558 178, 154, 156, 129, 132, 128, 145, 147,
1361 128, 130, 131, 166, 167, 191, 182, 128, 559 171, 159, 255, 144, 157, 161, 135, 138,
1362 143, 144, 178, 179, 191, 128, 130, 131, 560 128, 175, 135, 132, 133, 128, 174, 152,
1363 178, 179, 191, 128, 154, 156, 129, 132, 561 155, 132, 128, 170, 128, 153, 160, 190,
1364 133, 191, 146, 128, 171, 172, 191, 135, 562 192, 255, 128, 136, 138, 174, 128, 178,
1365 137, 142, 158, 128, 168, 169, 175, 176, 563 255, 160, 168, 169, 171, 172, 173, 174,
1366 255, 159, 191, 192, 255, 144, 128, 156, 564 188, 189, 190, 191, 161, 167, 144, 173,
1367 157, 161, 162, 191, 128, 134, 135, 138, 565 128, 131, 163, 183, 189, 255, 133, 143,
1368 139, 191, 128, 175, 176, 191, 134, 128, 566 145, 255, 147, 159, 128, 176, 177, 178,
1369 131, 132, 135, 136, 191, 128, 174, 175, 567 128, 136, 144, 153, 144, 145, 146, 147,
1370 191, 128, 151, 152, 155, 156, 191, 132, 568 148, 149, 154, 155, 156, 157, 158, 159,
1371 128, 191, 128, 170, 171, 191, 128, 153, 569 150, 153, 131, 140, 255, 160, 163, 164,
1372 154, 191, 160, 190, 192, 255, 128, 184, 570 165, 184, 185, 186, 161, 162, 133, 255,
1373 185, 191, 137, 128, 174, 175, 191, 128, 571 170, 181, 183, 186, 128, 150, 152, 182,
1374 129, 177, 178, 255, 144, 191, 192, 255, 572 184, 255, 192, 255, 128, 255, 173, 130,
1375 128, 142, 143, 144, 145, 146, 149, 129, 573 133, 146, 159, 165, 171, 175, 255, 181,
1376 148, 150, 191, 175, 191, 192, 255, 132, 574 190, 184, 185, 192, 255, 140, 134, 138,
1377 191, 192, 255, 128, 144, 129, 143, 145, 575 142, 161, 163, 255, 182, 130, 136, 137,
1378 191, 144, 153, 128, 143, 145, 152, 154, 576 176, 151, 152, 154, 160, 190, 136, 144,
1379 191, 135, 191, 192, 255, 160, 168, 169, 577 192, 255, 135, 129, 130, 132, 133, 144,
578 170, 176, 178, 144, 154, 160, 191, 128,
579 169, 174, 255, 148, 169, 157, 158, 189,
580 190, 192, 255, 144, 255, 139, 140, 178,
581 255, 186, 128, 181, 160, 161, 162, 163,
582 164, 165, 166, 167, 168, 169, 170, 171,
583 172, 173, 174, 175, 176, 177, 178, 179,
584 180, 181, 182, 183, 184, 185, 186, 187,
585 188, 189, 190, 191, 128, 173, 128, 155,
586 160, 180, 182, 189, 148, 161, 163, 255,
587 176, 164, 165, 132, 169, 177, 141, 142,
588 145, 146, 179, 181, 186, 187, 158, 133,
589 134, 137, 138, 143, 150, 152, 155, 164,
590 165, 178, 255, 188, 129, 131, 133, 138,
591 143, 144, 147, 168, 170, 176, 178, 179,
592 181, 182, 184, 185, 190, 255, 157, 131,
593 134, 137, 138, 142, 144, 146, 152, 159,
594 165, 182, 255, 129, 131, 133, 141, 143,
595 145, 147, 168, 170, 176, 178, 179, 181,
596 185, 188, 255, 134, 138, 142, 143, 145,
597 159, 164, 165, 176, 184, 186, 255, 129,
598 131, 133, 140, 143, 144, 147, 168, 170,
599 176, 178, 179, 181, 185, 188, 191, 177,
600 128, 132, 135, 136, 139, 141, 150, 151,
601 156, 157, 159, 163, 166, 175, 156, 130,
602 131, 133, 138, 142, 144, 146, 149, 153,
603 154, 158, 159, 163, 164, 168, 170, 174,
604 185, 190, 191, 144, 151, 128, 130, 134,
605 136, 138, 141, 166, 175, 128, 131, 133,
606 140, 142, 144, 146, 168, 170, 185, 189,
607 255, 133, 137, 151, 142, 148, 155, 159,
608 164, 165, 176, 255, 128, 131, 133, 140,
609 142, 144, 146, 168, 170, 179, 181, 185,
610 188, 191, 158, 128, 132, 134, 136, 138,
611 141, 149, 150, 160, 163, 166, 175, 177,
612 178, 129, 131, 133, 140, 142, 144, 146,
613 186, 189, 255, 133, 137, 143, 147, 152,
614 158, 164, 165, 176, 185, 192, 255, 189,
615 130, 131, 133, 150, 154, 177, 179, 187,
616 138, 150, 128, 134, 143, 148, 152, 159,
617 166, 175, 178, 179, 129, 186, 128, 142,
618 144, 153, 132, 138, 141, 165, 167, 129,
619 130, 135, 136, 148, 151, 153, 159, 161,
620 163, 170, 171, 173, 185, 187, 189, 134,
621 128, 132, 136, 141, 144, 153, 156, 159,
622 128, 181, 183, 185, 152, 153, 160, 169,
623 190, 191, 128, 135, 137, 172, 177, 191,
624 128, 132, 134, 151, 153, 188, 134, 128,
625 129, 130, 131, 137, 138, 139, 140, 141,
626 142, 143, 144, 153, 154, 155, 156, 157,
627 158, 159, 160, 161, 162, 163, 164, 165,
628 166, 167, 168, 169, 170, 173, 175, 176,
629 177, 178, 179, 181, 182, 183, 188, 189,
630 190, 191, 132, 152, 172, 184, 185, 187,
631 128, 191, 128, 137, 144, 255, 158, 159,
632 134, 187, 136, 140, 142, 143, 137, 151,
633 153, 142, 143, 158, 159, 137, 177, 142,
634 143, 182, 183, 191, 255, 128, 130, 133,
635 136, 150, 152, 255, 145, 150, 151, 155,
636 156, 160, 168, 178, 255, 128, 143, 160,
637 255, 182, 183, 190, 255, 129, 255, 173,
638 174, 192, 255, 129, 154, 160, 255, 171,
639 173, 185, 255, 128, 140, 142, 148, 160,
640 180, 128, 147, 160, 172, 174, 176, 178,
641 179, 148, 150, 152, 155, 158, 159, 170,
642 255, 139, 141, 144, 153, 160, 255, 184,
643 255, 128, 170, 176, 255, 182, 255, 128,
644 158, 160, 171, 176, 187, 134, 173, 176,
645 180, 128, 171, 176, 255, 138, 143, 155,
646 255, 128, 155, 160, 255, 159, 189, 190,
647 192, 255, 167, 128, 137, 144, 153, 176,
648 189, 140, 143, 154, 170, 180, 255, 180,
649 255, 128, 183, 128, 137, 141, 189, 128,
650 136, 144, 146, 148, 182, 184, 185, 128,
651 181, 187, 191, 150, 151, 158, 159, 152,
652 154, 156, 158, 134, 135, 142, 143, 190,
653 255, 190, 128, 180, 182, 188, 130, 132,
654 134, 140, 144, 147, 150, 155, 160, 172,
655 178, 180, 182, 188, 128, 129, 130, 131,
656 132, 133, 134, 176, 177, 178, 179, 180,
657 181, 182, 183, 191, 255, 129, 147, 149,
658 176, 178, 190, 192, 255, 144, 156, 161,
659 144, 156, 165, 176, 130, 135, 149, 164,
660 166, 168, 138, 147, 152, 157, 170, 185,
661 188, 191, 142, 133, 137, 160, 255, 137,
662 255, 128, 174, 176, 255, 159, 165, 170,
663 180, 255, 167, 173, 128, 165, 176, 255,
664 168, 174, 176, 190, 192, 255, 128, 150,
665 160, 166, 168, 174, 176, 182, 184, 190,
666 128, 134, 136, 142, 144, 150, 152, 158,
667 160, 191, 128, 129, 130, 131, 132, 133,
668 134, 135, 144, 145, 255, 133, 135, 161,
669 175, 177, 181, 184, 188, 160, 151, 152,
670 187, 192, 255, 133, 173, 177, 255, 143,
671 159, 187, 255, 176, 191, 182, 183, 184,
672 191, 192, 255, 150, 255, 128, 146, 147,
673 148, 152, 153, 154, 155, 156, 158, 159,
674 160, 161, 162, 163, 164, 165, 166, 167,
675 168, 169, 170, 171, 172, 173, 174, 175,
676 176, 129, 255, 141, 255, 144, 189, 141,
677 143, 172, 255, 191, 128, 175, 180, 189,
678 151, 159, 162, 255, 175, 137, 138, 184,
679 255, 183, 255, 168, 255, 128, 179, 188,
680 134, 143, 154, 159, 184, 186, 190, 255,
681 128, 173, 176, 255, 148, 159, 189, 255,
682 129, 142, 154, 159, 191, 255, 128, 182,
683 128, 141, 144, 153, 160, 182, 186, 255,
684 128, 130, 155, 157, 160, 175, 178, 182,
685 129, 134, 137, 142, 145, 150, 160, 166,
686 168, 174, 176, 255, 155, 166, 175, 128,
687 170, 172, 173, 176, 185, 158, 159, 160,
688 255, 164, 175, 135, 138, 188, 255, 164,
689 169, 171, 172, 173, 174, 175, 180, 181,
690 182, 183, 184, 185, 187, 188, 189, 190,
691 191, 165, 186, 174, 175, 154, 255, 190,
692 128, 134, 147, 151, 157, 168, 170, 182,
693 184, 188, 128, 129, 131, 132, 134, 255,
694 147, 255, 190, 255, 144, 145, 136, 175,
695 188, 255, 128, 143, 160, 175, 179, 180,
696 141, 143, 176, 180, 182, 255, 189, 255,
697 191, 144, 153, 161, 186, 129, 154, 166,
698 255, 191, 255, 130, 135, 138, 143, 146,
699 151, 154, 156, 144, 145, 146, 147, 148,
700 150, 151, 152, 155, 157, 158, 160, 170,
701 171, 172, 175, 161, 169, 128, 129, 130,
702 131, 133, 135, 138, 139, 140, 141, 142,
703 143, 144, 145, 146, 147, 148, 149, 152,
704 156, 157, 160, 161, 162, 163, 164, 166,
705 168, 169, 170, 171, 172, 173, 174, 176,
706 177, 153, 155, 178, 179, 128, 139, 141,
707 166, 168, 186, 188, 189, 191, 255, 142,
708 143, 158, 255, 187, 255, 128, 180, 189,
709 128, 156, 160, 255, 145, 159, 161, 255,
710 128, 159, 176, 255, 139, 143, 187, 255,
711 128, 157, 160, 255, 144, 132, 135, 150,
712 255, 158, 159, 170, 175, 148, 151, 188,
713 255, 128, 167, 176, 255, 164, 255, 183,
714 255, 128, 149, 160, 167, 136, 188, 128,
715 133, 138, 181, 183, 184, 191, 255, 150,
716 159, 183, 255, 128, 158, 160, 178, 180,
717 181, 128, 149, 160, 185, 128, 183, 190,
718 191, 191, 128, 131, 133, 134, 140, 147,
719 149, 151, 153, 179, 184, 186, 160, 188,
720 128, 156, 128, 135, 137, 166, 128, 181,
721 128, 149, 160, 178, 128, 145, 128, 178,
722 129, 130, 131, 132, 133, 135, 136, 138,
723 139, 140, 141, 144, 145, 146, 147, 150,
724 151, 152, 153, 154, 155, 156, 162, 163,
725 171, 176, 177, 178, 128, 134, 135, 165,
726 176, 190, 144, 168, 176, 185, 128, 180,
727 182, 191, 182, 144, 179, 155, 133, 137,
728 141, 143, 157, 255, 190, 128, 145, 147,
729 183, 136, 128, 134, 138, 141, 143, 157,
730 159, 168, 176, 255, 171, 175, 186, 255,
731 128, 131, 133, 140, 143, 144, 147, 168,
732 170, 176, 178, 179, 181, 185, 188, 191,
733 144, 151, 128, 132, 135, 136, 139, 141,
734 157, 163, 166, 172, 176, 180, 128, 138,
735 144, 153, 134, 136, 143, 154, 255, 128,
736 181, 184, 255, 129, 151, 158, 255, 129,
737 131, 133, 143, 154, 255, 128, 137, 128,
738 153, 157, 171, 176, 185, 160, 255, 170,
739 190, 192, 255, 128, 184, 128, 136, 138,
740 182, 184, 191, 128, 144, 153, 178, 255,
741 168, 144, 145, 183, 255, 128, 142, 145,
742 149, 129, 141, 144, 146, 147, 148, 175,
743 255, 132, 255, 128, 144, 129, 143, 144,
744 153, 145, 152, 135, 255, 160, 168, 169,
1380 171, 172, 173, 174, 188, 189, 190, 191, 745 171, 172, 173, 174, 188, 189, 190, 191,
1381 128, 159, 161, 167, 170, 187, 185, 191, 746 161, 167, 185, 255, 128, 158, 160, 169,
1382 192, 255, 128, 143, 144, 173, 174, 191, 747 144, 173, 176, 180, 128, 131, 144, 153,
1383 128, 131, 132, 162, 163, 183, 184, 188, 748 163, 183, 189, 255, 144, 255, 133, 143,
1384 189, 255, 133, 143, 145, 191, 192, 255, 749 191, 255, 143, 159, 160, 128, 129, 255,
1385 128, 146, 147, 159, 160, 191, 160, 128, 750 159, 160, 171, 172, 255, 173, 255, 179,
1386 191, 128, 129, 191, 192, 255, 159, 160, 751 255, 128, 176, 177, 178, 128, 129, 171,
1387 171, 128, 170, 172, 191, 192, 255, 173, 752 175, 189, 255, 128, 136, 144, 153, 157,
1388 191, 192, 255, 179, 191, 192, 255, 128, 753 158, 133, 134, 137, 144, 145, 146, 147,
1389 176, 177, 178, 129, 191, 128, 129, 130, 754 148, 149, 154, 155, 156, 157, 158, 159,
1390 191, 171, 175, 189, 191, 192, 255, 128, 755 168, 169, 170, 150, 153, 165, 169, 173,
1391 136, 137, 143, 144, 153, 154, 191, 144, 756 178, 187, 255, 131, 132, 140, 169, 174,
1392 145, 146, 147, 148, 149, 154, 155, 156, 757 255, 130, 132, 149, 157, 173, 186, 188,
1393 157, 158, 159, 128, 143, 150, 153, 160, 758 160, 161, 163, 164, 167, 168, 132, 134,
1394 191, 149, 157, 173, 186, 188, 160, 161, 759 149, 157, 186, 139, 140, 191, 255, 134,
1395 163, 164, 167, 168, 132, 134, 149, 157, 760 128, 132, 138, 144, 146, 255, 166, 167,
1396 186, 191, 139, 140, 192, 255, 133, 145,
1397 128, 134, 135, 137, 138, 255, 166, 167,
1398 129, 155, 187, 149, 181, 143, 175, 137, 761 129, 155, 187, 149, 181, 143, 175, 137,
1399 169, 131, 140, 191, 192, 255, 160, 163, 762 169, 131, 140, 141, 192, 255, 128, 182,
1400 164, 165, 184, 185, 186, 128, 159, 161, 763 187, 255, 173, 180, 182, 255, 132, 155,
1401 162, 166, 191, 133, 191, 192, 255, 132, 764 159, 161, 175, 128, 160, 163, 164, 165,
1402 160, 163, 167, 179, 184, 186, 128, 164, 765 184, 185, 186, 161, 162, 128, 134, 136,
1403 165, 168, 169, 187, 188, 191, 130, 135, 766 152, 155, 161, 163, 164, 166, 170, 133,
1404 137, 139, 144, 147, 151, 153, 155, 157, 767 143, 151, 255, 139, 143, 154, 255, 164,
1405 159, 163, 171, 179, 184, 189, 191, 128, 768 167, 185, 187, 128, 131, 133, 159, 161,
1406 140, 141, 148, 149, 160, 161, 164, 165, 769 162, 169, 178, 180, 183, 130, 135, 137,
1407 166, 167, 190, 138, 164, 170, 128, 155, 770 139, 148, 151, 153, 155, 157, 159, 164,
1408 156, 160, 161, 187, 188, 191, 128, 191, 771 190, 141, 143, 145, 146, 161, 162, 167,
1409 155, 156, 128, 191, 151, 191, 192, 255, 772 170, 172, 178, 180, 183, 185, 188, 128,
1410 156, 157, 160, 128, 191, 181, 191, 192, 773 137, 139, 155, 161, 163, 165, 169, 171,
1411 255, 158, 159, 186, 128, 185, 187, 191, 774 187, 155, 156, 151, 255, 156, 157, 160,
1412 192, 255, 162, 191, 192, 255, 160, 168, 775 181, 255, 186, 187, 255, 162, 255, 160,
1413 128, 159, 161, 167, 169, 191, 158, 191, 776 168, 161, 167, 158, 255, 160, 132, 135,
1414 192, 255, 9, 10, 13, 32, 33, 34, 777 133, 134, 176, 255, 128, 191, 154, 164,
1415 35, 37, 38, 46, 47, 60, 61, 62, 778 168, 128, 149, 150, 191, 128, 152, 153,
1416 64, 92, 95, 123, 124, 125, 126, 127, 779 191, 181, 128, 159, 160, 189, 190, 191,
1417 194, 195, 198, 199, 203, 204, 205, 206, 780 189, 128, 131, 132, 185, 186, 191, 144,
1418 207, 210, 212, 213, 214, 215, 216, 217, 781 128, 151, 152, 161, 162, 176, 177, 255,
1419 219, 220, 221, 222, 223, 224, 225, 226, 782 169, 177, 129, 132, 141, 142, 145, 146,
1420 227, 228, 233, 234, 237, 238, 239, 240, 783 179, 181, 186, 188, 190, 191, 192, 255,
1421 0, 39, 40, 45, 48, 57, 58, 63, 784 142, 158, 128, 155, 156, 161, 162, 175,
1422 65, 90, 91, 96, 97, 122, 192, 193, 785 176, 177, 178, 191, 169, 177, 180, 183,
1423 196, 218, 229, 236, 241, 247, 9, 32, 786 128, 132, 133, 138, 139, 142, 143, 144,
1424 10, 61, 10, 38, 46, 42, 47, 42, 787 145, 146, 147, 185, 186, 191, 157, 128,
788 152, 153, 158, 159, 177, 178, 180, 181,
789 191, 142, 146, 169, 177, 180, 189, 128,
790 132, 133, 185, 186, 191, 144, 185, 128,
791 159, 160, 161, 162, 191, 169, 177, 180,
792 189, 128, 132, 133, 140, 141, 142, 143,
793 144, 145, 146, 147, 185, 186, 191, 158,
794 177, 128, 155, 156, 161, 162, 191, 131,
795 145, 155, 157, 128, 132, 133, 138, 139,
796 141, 142, 149, 150, 152, 153, 159, 160,
797 162, 163, 164, 165, 167, 168, 170, 171,
798 173, 174, 185, 186, 191, 144, 128, 191,
799 141, 145, 169, 189, 128, 132, 133, 185,
800 186, 191, 128, 151, 152, 154, 155, 159,
801 160, 161, 162, 191, 128, 141, 145, 169,
802 180, 189, 129, 132, 133, 185, 186, 191,
803 158, 128, 159, 160, 161, 162, 176, 177,
804 178, 179, 191, 141, 145, 189, 128, 132,
805 133, 186, 187, 191, 142, 128, 147, 148,
806 150, 151, 158, 159, 161, 162, 185, 186,
807 191, 178, 188, 128, 132, 133, 150, 151,
808 153, 154, 189, 190, 191, 128, 134, 135,
809 191, 128, 177, 129, 179, 180, 191, 128,
810 131, 137, 141, 152, 160, 164, 166, 172,
811 177, 189, 129, 132, 133, 134, 135, 138,
812 139, 147, 148, 167, 168, 169, 170, 179,
813 180, 191, 133, 128, 134, 135, 155, 156,
814 159, 160, 191, 128, 129, 191, 136, 128,
815 172, 173, 191, 128, 135, 136, 140, 141,
816 191, 191, 128, 170, 171, 190, 161, 128,
817 143, 144, 149, 150, 153, 154, 157, 158,
818 164, 165, 166, 167, 173, 174, 176, 177,
819 180, 181, 255, 130, 141, 143, 159, 134,
820 187, 136, 140, 142, 143, 137, 151, 153,
821 142, 143, 158, 159, 137, 177, 191, 142,
822 143, 182, 183, 192, 255, 129, 151, 128,
823 133, 134, 135, 136, 255, 145, 150, 151,
824 155, 191, 192, 255, 128, 143, 144, 159,
825 160, 255, 182, 183, 190, 191, 192, 255,
826 128, 129, 255, 173, 174, 192, 255, 128,
827 129, 154, 155, 159, 160, 255, 171, 173,
828 185, 191, 192, 255, 141, 128, 145, 146,
829 159, 160, 177, 178, 191, 173, 128, 145,
830 146, 159, 160, 176, 177, 191, 128, 179,
831 180, 191, 151, 156, 128, 191, 128, 159,
832 160, 255, 184, 191, 192, 255, 169, 128,
833 170, 171, 175, 176, 255, 182, 191, 192,
834 255, 128, 158, 159, 191, 128, 143, 144,
835 173, 174, 175, 176, 180, 181, 191, 128,
836 171, 172, 175, 176, 255, 138, 191, 192,
837 255, 128, 150, 151, 159, 160, 255, 149,
838 191, 192, 255, 167, 128, 191, 128, 132,
839 133, 179, 180, 191, 128, 132, 133, 139,
840 140, 191, 128, 130, 131, 160, 161, 173,
841 174, 175, 176, 185, 186, 255, 166, 191,
842 192, 255, 128, 163, 164, 191, 128, 140,
843 141, 143, 144, 153, 154, 189, 190, 191,
844 128, 136, 137, 191, 173, 128, 168, 169,
845 177, 178, 180, 181, 182, 183, 191, 0,
846 127, 192, 255, 150, 151, 158, 159, 152,
847 154, 156, 158, 134, 135, 142, 143, 190,
848 191, 192, 255, 181, 189, 191, 128, 190,
849 133, 181, 128, 129, 130, 140, 141, 143,
850 144, 147, 148, 149, 150, 155, 156, 159,
851 160, 172, 173, 177, 178, 188, 189, 191,
852 177, 191, 128, 190, 128, 143, 144, 156,
853 157, 191, 130, 135, 148, 164, 166, 168,
854 128, 137, 138, 149, 150, 151, 152, 157,
855 158, 169, 170, 185, 186, 187, 188, 191,
856 142, 128, 132, 133, 137, 138, 159, 160,
857 255, 137, 191, 192, 255, 175, 128, 255,
858 159, 165, 170, 175, 177, 180, 191, 192,
859 255, 166, 173, 128, 167, 168, 175, 176,
860 255, 168, 174, 176, 191, 192, 255, 167,
861 175, 183, 191, 128, 150, 151, 159, 160,
862 190, 135, 143, 151, 128, 158, 159, 191,
863 128, 132, 133, 135, 136, 160, 161, 169,
864 170, 176, 177, 181, 182, 183, 184, 188,
865 189, 191, 160, 151, 154, 187, 192, 255,
866 128, 132, 133, 173, 174, 176, 177, 255,
867 143, 159, 187, 191, 192, 255, 128, 175,
868 176, 191, 150, 191, 192, 255, 141, 191,
869 192, 255, 128, 143, 144, 189, 190, 191,
870 141, 143, 160, 169, 172, 191, 192, 255,
871 191, 128, 174, 175, 190, 128, 157, 158,
872 159, 160, 255, 176, 191, 192, 255, 128,
873 150, 151, 159, 160, 161, 162, 255, 175,
874 137, 138, 184, 191, 192, 255, 128, 182,
875 183, 255, 130, 134, 139, 163, 191, 192,
876 255, 128, 129, 130, 179, 180, 191, 187,
877 189, 128, 177, 178, 183, 184, 191, 128,
878 137, 138, 165, 166, 175, 176, 255, 135,
879 159, 189, 191, 192, 255, 128, 131, 132,
880 178, 179, 191, 143, 165, 191, 128, 159,
881 160, 175, 176, 185, 186, 190, 128, 168,
882 169, 191, 131, 186, 128, 139, 140, 159,
883 160, 182, 183, 189, 190, 255, 176, 178,
884 180, 183, 184, 190, 191, 192, 255, 129,
885 128, 130, 131, 154, 155, 157, 158, 159,
886 160, 170, 171, 177, 178, 180, 181, 191,
887 128, 167, 175, 129, 134, 135, 136, 137,
888 142, 143, 144, 145, 150, 151, 159, 160,
889 255, 155, 166, 175, 128, 162, 163, 191,
890 164, 175, 135, 138, 188, 191, 192, 255,
891 174, 175, 154, 191, 192, 255, 157, 169,
892 183, 189, 191, 128, 134, 135, 146, 147,
893 151, 152, 158, 159, 190, 130, 133, 128,
894 255, 178, 191, 192, 255, 128, 146, 147,
895 255, 190, 191, 192, 255, 128, 143, 144,
896 255, 144, 145, 136, 175, 188, 191, 192,
897 255, 181, 128, 175, 176, 255, 189, 191,
898 192, 255, 128, 160, 161, 186, 187, 191,
899 128, 129, 154, 155, 165, 166, 255, 191,
900 192, 255, 128, 129, 130, 135, 136, 137,
901 138, 143, 144, 145, 146, 151, 152, 153,
902 154, 156, 157, 191, 128, 191, 128, 129,
903 130, 131, 133, 138, 139, 140, 141, 142,
904 143, 144, 145, 146, 147, 148, 149, 152,
905 156, 157, 160, 161, 162, 163, 164, 166,
906 168, 169, 170, 171, 172, 173, 174, 176,
907 177, 132, 151, 153, 155, 158, 175, 178,
908 179, 180, 191, 140, 167, 187, 190, 128,
909 255, 142, 143, 158, 191, 192, 255, 187,
910 191, 192, 255, 128, 180, 181, 191, 128,
911 156, 157, 159, 160, 255, 145, 191, 192,
912 255, 128, 159, 160, 175, 176, 255, 139,
913 143, 182, 191, 192, 255, 144, 132, 135,
914 150, 191, 192, 255, 158, 175, 148, 151,
915 188, 191, 192, 255, 128, 167, 168, 175,
916 176, 255, 164, 191, 192, 255, 183, 191,
917 192, 255, 128, 149, 150, 159, 160, 167,
918 168, 191, 136, 182, 188, 128, 133, 134,
919 137, 138, 184, 185, 190, 191, 255, 150,
920 159, 183, 191, 192, 255, 179, 128, 159,
921 160, 181, 182, 191, 128, 149, 150, 159,
922 160, 185, 186, 191, 128, 183, 184, 189,
923 190, 191, 128, 148, 152, 129, 143, 144,
924 179, 180, 191, 128, 159, 160, 188, 189,
925 191, 128, 156, 157, 191, 136, 128, 164,
926 165, 191, 128, 181, 182, 191, 128, 149,
927 150, 159, 160, 178, 179, 191, 128, 145,
928 146, 191, 128, 178, 179, 191, 128, 130,
929 131, 132, 133, 134, 135, 136, 138, 139,
930 140, 141, 144, 145, 146, 147, 150, 151,
931 152, 153, 154, 156, 162, 163, 171, 176,
932 177, 178, 129, 191, 128, 130, 131, 183,
933 184, 191, 128, 130, 131, 175, 176, 191,
934 128, 143, 144, 168, 169, 191, 128, 130,
935 131, 166, 167, 191, 182, 128, 143, 144,
936 178, 179, 191, 128, 130, 131, 178, 179,
937 191, 128, 154, 156, 129, 132, 133, 191,
938 146, 128, 171, 172, 191, 135, 137, 142,
939 158, 128, 168, 169, 175, 176, 255, 159,
940 191, 192, 255, 144, 128, 156, 157, 161,
941 162, 191, 128, 134, 135, 138, 139, 191,
942 128, 175, 176, 191, 134, 128, 131, 132,
943 135, 136, 191, 128, 174, 175, 191, 128,
944 151, 152, 155, 156, 191, 132, 128, 191,
945 128, 170, 171, 191, 128, 153, 154, 191,
946 160, 190, 192, 255, 128, 184, 185, 191,
947 137, 128, 174, 175, 191, 128, 129, 177,
948 178, 255, 144, 191, 192, 255, 128, 142,
949 143, 144, 145, 146, 149, 129, 148, 150,
950 191, 175, 191, 192, 255, 132, 191, 192,
951 255, 128, 144, 129, 143, 145, 191, 144,
952 153, 128, 143, 145, 152, 154, 191, 135,
953 191, 192, 255, 160, 168, 169, 171, 172,
954 173, 174, 188, 189, 190, 191, 128, 159,
955 161, 167, 170, 187, 185, 191, 192, 255,
956 128, 143, 144, 173, 174, 191, 128, 131,
957 132, 162, 163, 183, 184, 188, 189, 255,
958 133, 143, 145, 191, 192, 255, 128, 146,
959 147, 159, 160, 191, 160, 128, 191, 128,
960 129, 191, 192, 255, 159, 160, 171, 128,
961 170, 172, 191, 192, 255, 173, 191, 192,
962 255, 179, 191, 192, 255, 128, 176, 177,
963 178, 129, 191, 128, 129, 130, 191, 171,
964 175, 189, 191, 192, 255, 128, 136, 137,
965 143, 144, 153, 154, 191, 144, 145, 146,
966 147, 148, 149, 154, 155, 156, 157, 158,
967 159, 128, 143, 150, 153, 160, 191, 149,
968 157, 173, 186, 188, 160, 161, 163, 164,
969 167, 168, 132, 134, 149, 157, 186, 191,
970 139, 140, 192, 255, 133, 145, 128, 134,
971 135, 137, 138, 255, 166, 167, 129, 155,
972 187, 149, 181, 143, 175, 137, 169, 131,
973 140, 191, 192, 255, 160, 163, 164, 165,
974 184, 185, 186, 128, 159, 161, 162, 166,
975 191, 133, 191, 192, 255, 132, 160, 163,
976 167, 179, 184, 186, 128, 164, 165, 168,
977 169, 187, 188, 191, 130, 135, 137, 139,
978 144, 147, 151, 153, 155, 157, 159, 163,
979 171, 179, 184, 189, 191, 128, 140, 141,
980 148, 149, 160, 161, 164, 165, 166, 167,
981 190, 138, 164, 170, 128, 155, 156, 160,
982 161, 187, 188, 191, 128, 191, 155, 156,
983 128, 191, 151, 191, 192, 255, 156, 157,
984 160, 128, 191, 181, 191, 192, 255, 158,
985 159, 186, 128, 185, 187, 191, 192, 255,
986 162, 191, 192, 255, 160, 168, 128, 159,
987 161, 167, 169, 191, 158, 191, 192, 255,
988 10, 13, 128, 191, 192, 223, 224, 239,
989 240, 247, 248, 255, 128, 191, 128, 191,
990 128, 191, 128, 191, 128, 191, 10, 128,
991 191, 128, 191, 128, 191, 36, 123, 37,
992 123, 10, 128, 191, 128, 191, 128, 191,
993 36, 123, 37, 123, 170, 181, 183, 186,
994 128, 150, 152, 182, 184, 255, 192, 255,
995 128, 255, 173, 130, 133, 146, 159, 165,
996 171, 175, 255, 181, 190, 184, 185, 192,
997 255, 140, 134, 138, 142, 161, 163, 255,
998 182, 130, 136, 137, 176, 151, 152, 154,
999 160, 190, 136, 144, 192, 255, 135, 129,
1000 130, 132, 133, 144, 170, 176, 178, 144,
1001 154, 160, 191, 128, 169, 174, 255, 148,
1002 169, 157, 158, 189, 190, 192, 255, 144,
1003 255, 139, 140, 178, 255, 186, 128, 181,
1004 160, 161, 162, 163, 164, 165, 166, 167,
1005 168, 169, 170, 171, 172, 173, 174, 175,
1006 176, 177, 178, 179, 180, 181, 182, 183,
1007 184, 185, 186, 187, 188, 189, 190, 191,
1008 128, 173, 128, 155, 160, 180, 182, 189,
1009 148, 161, 163, 255, 176, 164, 165, 132,
1010 169, 177, 141, 142, 145, 146, 179, 181,
1011 186, 187, 158, 133, 134, 137, 138, 143,
1012 150, 152, 155, 164, 165, 178, 255, 188,
1013 129, 131, 133, 138, 143, 144, 147, 168,
1014 170, 176, 178, 179, 181, 182, 184, 185,
1015 190, 255, 157, 131, 134, 137, 138, 142,
1016 144, 146, 152, 159, 165, 182, 255, 129,
1017 131, 133, 141, 143, 145, 147, 168, 170,
1018 176, 178, 179, 181, 185, 188, 255, 134,
1019 138, 142, 143, 145, 159, 164, 165, 176,
1020 184, 186, 255, 129, 131, 133, 140, 143,
1021 144, 147, 168, 170, 176, 178, 179, 181,
1022 185, 188, 191, 177, 128, 132, 135, 136,
1023 139, 141, 150, 151, 156, 157, 159, 163,
1024 166, 175, 156, 130, 131, 133, 138, 142,
1025 144, 146, 149, 153, 154, 158, 159, 163,
1026 164, 168, 170, 174, 185, 190, 191, 144,
1027 151, 128, 130, 134, 136, 138, 141, 166,
1028 175, 128, 131, 133, 140, 142, 144, 146,
1029 168, 170, 185, 189, 255, 133, 137, 151,
1030 142, 148, 155, 159, 164, 165, 176, 255,
1031 128, 131, 133, 140, 142, 144, 146, 168,
1032 170, 179, 181, 185, 188, 191, 158, 128,
1033 132, 134, 136, 138, 141, 149, 150, 160,
1034 163, 166, 175, 177, 178, 129, 131, 133,
1035 140, 142, 144, 146, 186, 189, 255, 133,
1036 137, 143, 147, 152, 158, 164, 165, 176,
1037 185, 192, 255, 189, 130, 131, 133, 150,
1038 154, 177, 179, 187, 138, 150, 128, 134,
1039 143, 148, 152, 159, 166, 175, 178, 179,
1040 129, 186, 128, 142, 144, 153, 132, 138,
1041 141, 165, 167, 129, 130, 135, 136, 148,
1042 151, 153, 159, 161, 163, 170, 171, 173,
1043 185, 187, 189, 134, 128, 132, 136, 141,
1044 144, 153, 156, 159, 128, 181, 183, 185,
1045 152, 153, 160, 169, 190, 191, 128, 135,
1046 137, 172, 177, 191, 128, 132, 134, 151,
1047 153, 188, 134, 128, 129, 130, 131, 137,
1048 138, 139, 140, 141, 142, 143, 144, 153,
1049 154, 155, 156, 157, 158, 159, 160, 161,
1050 162, 163, 164, 165, 166, 167, 168, 169,
1051 170, 173, 175, 176, 177, 178, 179, 181,
1052 182, 183, 188, 189, 190, 191, 132, 152,
1053 172, 184, 185, 187, 128, 191, 128, 137,
1054 144, 255, 158, 159, 134, 187, 136, 140,
1055 142, 143, 137, 151, 153, 142, 143, 158,
1056 159, 137, 177, 142, 143, 182, 183, 191,
1057 255, 128, 130, 133, 136, 150, 152, 255,
1058 145, 150, 151, 155, 156, 160, 168, 178,
1059 255, 128, 143, 160, 255, 182, 183, 190,
1060 255, 129, 255, 173, 174, 192, 255, 129,
1061 154, 160, 255, 171, 173, 185, 255, 128,
1062 140, 142, 148, 160, 180, 128, 147, 160,
1063 172, 174, 176, 178, 179, 148, 150, 152,
1064 155, 158, 159, 170, 255, 139, 141, 144,
1065 153, 160, 255, 184, 255, 128, 170, 176,
1066 255, 182, 255, 128, 158, 160, 171, 176,
1067 187, 134, 173, 176, 180, 128, 171, 176,
1068 255, 138, 143, 155, 255, 128, 155, 160,
1069 255, 159, 189, 190, 192, 255, 167, 128,
1070 137, 144, 153, 176, 189, 140, 143, 154,
1071 170, 180, 255, 180, 255, 128, 183, 128,
1072 137, 141, 189, 128, 136, 144, 146, 148,
1073 182, 184, 185, 128, 181, 187, 191, 150,
1074 151, 158, 159, 152, 154, 156, 158, 134,
1075 135, 142, 143, 190, 255, 190, 128, 180,
1076 182, 188, 130, 132, 134, 140, 144, 147,
1077 150, 155, 160, 172, 178, 180, 182, 188,
1078 128, 129, 130, 131, 132, 133, 134, 176,
1079 177, 178, 179, 180, 181, 182, 183, 191,
1080 255, 129, 147, 149, 176, 178, 190, 192,
1081 255, 144, 156, 161, 144, 156, 165, 176,
1082 130, 135, 149, 164, 166, 168, 138, 147,
1083 152, 157, 170, 185, 188, 191, 142, 133,
1084 137, 160, 255, 137, 255, 128, 174, 176,
1085 255, 159, 165, 170, 180, 255, 167, 173,
1086 128, 165, 176, 255, 168, 174, 176, 190,
1087 192, 255, 128, 150, 160, 166, 168, 174,
1088 176, 182, 184, 190, 128, 134, 136, 142,
1089 144, 150, 152, 158, 160, 191, 128, 129,
1090 130, 131, 132, 133, 134, 135, 144, 145,
1091 255, 133, 135, 161, 175, 177, 181, 184,
1092 188, 160, 151, 152, 187, 192, 255, 133,
1093 173, 177, 255, 143, 159, 187, 255, 176,
1094 191, 182, 183, 184, 191, 192, 255, 150,
1095 255, 128, 146, 147, 148, 152, 153, 154,
1096 155, 156, 158, 159, 160, 161, 162, 163,
1097 164, 165, 166, 167, 168, 169, 170, 171,
1098 172, 173, 174, 175, 176, 129, 255, 141,
1099 255, 144, 189, 141, 143, 172, 255, 191,
1100 128, 175, 180, 189, 151, 159, 162, 255,
1101 175, 137, 138, 184, 255, 183, 255, 168,
1102 255, 128, 179, 188, 134, 143, 154, 159,
1103 184, 186, 190, 255, 128, 173, 176, 255,
1104 148, 159, 189, 255, 129, 142, 154, 159,
1105 191, 255, 128, 182, 128, 141, 144, 153,
1106 160, 182, 186, 255, 128, 130, 155, 157,
1107 160, 175, 178, 182, 129, 134, 137, 142,
1108 145, 150, 160, 166, 168, 174, 176, 255,
1109 155, 166, 175, 128, 170, 172, 173, 176,
1110 185, 158, 159, 160, 255, 164, 175, 135,
1111 138, 188, 255, 164, 169, 171, 172, 173,
1112 174, 175, 180, 181, 182, 183, 184, 185,
1113 187, 188, 189, 190, 191, 165, 186, 174,
1114 175, 154, 255, 190, 128, 134, 147, 151,
1115 157, 168, 170, 182, 184, 188, 128, 129,
1116 131, 132, 134, 255, 147, 255, 190, 255,
1117 144, 145, 136, 175, 188, 255, 128, 143,
1118 160, 175, 179, 180, 141, 143, 176, 180,
1119 182, 255, 189, 255, 191, 144, 153, 161,
1120 186, 129, 154, 166, 255, 191, 255, 130,
1121 135, 138, 143, 146, 151, 154, 156, 144,
1122 145, 146, 147, 148, 150, 151, 152, 155,
1123 157, 158, 160, 170, 171, 172, 175, 161,
1124 169, 128, 129, 130, 131, 133, 135, 138,
1125 139, 140, 141, 142, 143, 144, 145, 146,
1126 147, 148, 149, 152, 156, 157, 160, 161,
1127 162, 163, 164, 166, 168, 169, 170, 171,
1128 172, 173, 174, 176, 177, 153, 155, 178,
1129 179, 128, 139, 141, 166, 168, 186, 188,
1130 189, 191, 255, 142, 143, 158, 255, 187,
1131 255, 128, 180, 189, 128, 156, 160, 255,
1132 145, 159, 161, 255, 128, 159, 176, 255,
1133 139, 143, 187, 255, 128, 157, 160, 255,
1134 144, 132, 135, 150, 255, 158, 159, 170,
1135 175, 148, 151, 188, 255, 128, 167, 176,
1136 255, 164, 255, 183, 255, 128, 149, 160,
1137 167, 136, 188, 128, 133, 138, 181, 183,
1138 184, 191, 255, 150, 159, 183, 255, 128,
1139 158, 160, 178, 180, 181, 128, 149, 160,
1140 185, 128, 183, 190, 191, 191, 128, 131,
1141 133, 134, 140, 147, 149, 151, 153, 179,
1142 184, 186, 160, 188, 128, 156, 128, 135,
1143 137, 166, 128, 181, 128, 149, 160, 178,
1144 128, 145, 128, 178, 129, 130, 131, 132,
1145 133, 135, 136, 138, 139, 140, 141, 144,
1146 145, 146, 147, 150, 151, 152, 153, 154,
1147 155, 156, 162, 163, 171, 176, 177, 178,
1148 128, 134, 135, 165, 176, 190, 144, 168,
1149 176, 185, 128, 180, 182, 191, 182, 144,
1150 179, 155, 133, 137, 141, 143, 157, 255,
1151 190, 128, 145, 147, 183, 136, 128, 134,
1152 138, 141, 143, 157, 159, 168, 176, 255,
1153 171, 175, 186, 255, 128, 131, 133, 140,
1154 143, 144, 147, 168, 170, 176, 178, 179,
1155 181, 185, 188, 191, 144, 151, 128, 132,
1156 135, 136, 139, 141, 157, 163, 166, 172,
1157 176, 180, 128, 138, 144, 153, 134, 136,
1158 143, 154, 255, 128, 181, 184, 255, 129,
1159 151, 158, 255, 129, 131, 133, 143, 154,
1160 255, 128, 137, 128, 153, 157, 171, 176,
1161 185, 160, 255, 170, 190, 192, 255, 128,
1162 184, 128, 136, 138, 182, 184, 191, 128,
1163 144, 153, 178, 255, 168, 144, 145, 183,
1164 255, 128, 142, 145, 149, 129, 141, 144,
1165 146, 147, 148, 175, 255, 132, 255, 128,
1166 144, 129, 143, 144, 153, 145, 152, 135,
1167 255, 160, 168, 169, 171, 172, 173, 174,
1168 188, 189, 190, 191, 161, 167, 185, 255,
1169 128, 158, 160, 169, 144, 173, 176, 180,
1170 128, 131, 144, 153, 163, 183, 189, 255,
1171 144, 255, 133, 143, 191, 255, 143, 159,
1172 160, 128, 129, 255, 159, 160, 171, 172,
1173 255, 173, 255, 179, 255, 128, 176, 177,
1174 178, 128, 129, 171, 175, 189, 255, 128,
1175 136, 144, 153, 157, 158, 133, 134, 137,
1176 144, 145, 146, 147, 148, 149, 154, 155,
1177 156, 157, 158, 159, 168, 169, 170, 150,
1178 153, 165, 169, 173, 178, 187, 255, 131,
1179 132, 140, 169, 174, 255, 130, 132, 149,
1180 157, 173, 186, 188, 160, 161, 163, 164,
1181 167, 168, 132, 134, 149, 157, 186, 139,
1182 140, 191, 255, 134, 128, 132, 138, 144,
1183 146, 255, 166, 167, 129, 155, 187, 149,
1184 181, 143, 175, 137, 169, 131, 140, 141,
1185 192, 255, 128, 182, 187, 255, 173, 180,
1186 182, 255, 132, 155, 159, 161, 175, 128,
1187 160, 163, 164, 165, 184, 185, 186, 161,
1188 162, 128, 134, 136, 152, 155, 161, 163,
1189 164, 166, 170, 133, 143, 151, 255, 139,
1190 143, 154, 255, 164, 167, 185, 187, 128,
1191 131, 133, 159, 161, 162, 169, 178, 180,
1192 183, 130, 135, 137, 139, 148, 151, 153,
1193 155, 157, 159, 164, 190, 141, 143, 145,
1194 146, 161, 162, 167, 170, 172, 178, 180,
1195 183, 185, 188, 128, 137, 139, 155, 161,
1196 163, 165, 169, 171, 187, 155, 156, 151,
1197 255, 156, 157, 160, 181, 255, 186, 187,
1198 255, 162, 255, 160, 168, 161, 167, 158,
1199 255, 160, 132, 135, 133, 134, 176, 255,
1200 128, 191, 154, 164, 168, 128, 149, 150,
1201 191, 128, 152, 153, 191, 181, 128, 159,
1202 160, 189, 190, 191, 189, 128, 131, 132,
1203 185, 186, 191, 144, 128, 151, 152, 161,
1204 162, 176, 177, 255, 169, 177, 129, 132,
1205 141, 142, 145, 146, 179, 181, 186, 188,
1206 190, 191, 192, 255, 142, 158, 128, 155,
1207 156, 161, 162, 175, 176, 177, 178, 191,
1208 169, 177, 180, 183, 128, 132, 133, 138,
1209 139, 142, 143, 144, 145, 146, 147, 185,
1210 186, 191, 157, 128, 152, 153, 158, 159,
1211 177, 178, 180, 181, 191, 142, 146, 169,
1212 177, 180, 189, 128, 132, 133, 185, 186,
1213 191, 144, 185, 128, 159, 160, 161, 162,
1214 191, 169, 177, 180, 189, 128, 132, 133,
1215 140, 141, 142, 143, 144, 145, 146, 147,
1216 185, 186, 191, 158, 177, 128, 155, 156,
1217 161, 162, 191, 131, 145, 155, 157, 128,
1218 132, 133, 138, 139, 141, 142, 149, 150,
1219 152, 153, 159, 160, 162, 163, 164, 165,
1220 167, 168, 170, 171, 173, 174, 185, 186,
1221 191, 144, 128, 191, 141, 145, 169, 189,
1222 128, 132, 133, 185, 186, 191, 128, 151,
1223 152, 154, 155, 159, 160, 161, 162, 191,
1224 128, 141, 145, 169, 180, 189, 129, 132,
1225 133, 185, 186, 191, 158, 128, 159, 160,
1226 161, 162, 176, 177, 178, 179, 191, 141,
1227 145, 189, 128, 132, 133, 186, 187, 191,
1228 142, 128, 147, 148, 150, 151, 158, 159,
1229 161, 162, 185, 186, 191, 178, 188, 128,
1230 132, 133, 150, 151, 153, 154, 189, 190,
1231 191, 128, 134, 135, 191, 128, 177, 129,
1232 179, 180, 191, 128, 131, 137, 141, 152,
1233 160, 164, 166, 172, 177, 189, 129, 132,
1234 133, 134, 135, 138, 139, 147, 148, 167,
1235 168, 169, 170, 179, 180, 191, 133, 128,
1236 134, 135, 155, 156, 159, 160, 191, 128,
1237 129, 191, 136, 128, 172, 173, 191, 128,
1238 135, 136, 140, 141, 191, 191, 128, 170,
1239 171, 190, 161, 128, 143, 144, 149, 150,
1240 153, 154, 157, 158, 164, 165, 166, 167,
1241 173, 174, 176, 177, 180, 181, 255, 130,
1242 141, 143, 159, 134, 187, 136, 140, 142,
1243 143, 137, 151, 153, 142, 143, 158, 159,
1244 137, 177, 191, 142, 143, 182, 183, 192,
1245 255, 129, 151, 128, 133, 134, 135, 136,
1246 255, 145, 150, 151, 155, 191, 192, 255,
1247 128, 143, 144, 159, 160, 255, 182, 183,
1248 190, 191, 192, 255, 128, 129, 255, 173,
1249 174, 192, 255, 128, 129, 154, 155, 159,
1250 160, 255, 171, 173, 185, 191, 192, 255,
1251 141, 128, 145, 146, 159, 160, 177, 178,
1252 191, 173, 128, 145, 146, 159, 160, 176,
1253 177, 191, 128, 179, 180, 191, 151, 156,
1254 128, 191, 128, 159, 160, 255, 184, 191,
1255 192, 255, 169, 128, 170, 171, 175, 176,
1256 255, 182, 191, 192, 255, 128, 158, 159,
1257 191, 128, 143, 144, 173, 174, 175, 176,
1258 180, 181, 191, 128, 171, 172, 175, 176,
1259 255, 138, 191, 192, 255, 128, 150, 151,
1260 159, 160, 255, 149, 191, 192, 255, 167,
1261 128, 191, 128, 132, 133, 179, 180, 191,
1262 128, 132, 133, 139, 140, 191, 128, 130,
1263 131, 160, 161, 173, 174, 175, 176, 185,
1264 186, 255, 166, 191, 192, 255, 128, 163,
1265 164, 191, 128, 140, 141, 143, 144, 153,
1266 154, 189, 190, 191, 128, 136, 137, 191,
1267 173, 128, 168, 169, 177, 178, 180, 181,
1268 182, 183, 191, 0, 127, 192, 255, 150,
1269 151, 158, 159, 152, 154, 156, 158, 134,
1270 135, 142, 143, 190, 191, 192, 255, 181,
1271 189, 191, 128, 190, 133, 181, 128, 129,
1272 130, 140, 141, 143, 144, 147, 148, 149,
1273 150, 155, 156, 159, 160, 172, 173, 177,
1274 178, 188, 189, 191, 177, 191, 128, 190,
1275 128, 143, 144, 156, 157, 191, 130, 135,
1276 148, 164, 166, 168, 128, 137, 138, 149,
1277 150, 151, 152, 157, 158, 169, 170, 185,
1278 186, 187, 188, 191, 142, 128, 132, 133,
1279 137, 138, 159, 160, 255, 137, 191, 192,
1280 255, 175, 128, 255, 159, 165, 170, 175,
1281 177, 180, 191, 192, 255, 166, 173, 128,
1282 167, 168, 175, 176, 255, 168, 174, 176,
1283 191, 192, 255, 167, 175, 183, 191, 128,
1284 150, 151, 159, 160, 190, 135, 143, 151,
1285 128, 158, 159, 191, 128, 132, 133, 135,
1286 136, 160, 161, 169, 170, 176, 177, 181,
1287 182, 183, 184, 188, 189, 191, 160, 151,
1288 154, 187, 192, 255, 128, 132, 133, 173,
1289 174, 176, 177, 255, 143, 159, 187, 191,
1290 192, 255, 128, 175, 176, 191, 150, 191,
1291 192, 255, 141, 191, 192, 255, 128, 143,
1292 144, 189, 190, 191, 141, 143, 160, 169,
1293 172, 191, 192, 255, 191, 128, 174, 175,
1294 190, 128, 157, 158, 159, 160, 255, 176,
1295 191, 192, 255, 128, 150, 151, 159, 160,
1296 161, 162, 255, 175, 137, 138, 184, 191,
1297 192, 255, 128, 182, 183, 255, 130, 134,
1298 139, 163, 191, 192, 255, 128, 129, 130,
1299 179, 180, 191, 187, 189, 128, 177, 178,
1300 183, 184, 191, 128, 137, 138, 165, 166,
1301 175, 176, 255, 135, 159, 189, 191, 192,
1302 255, 128, 131, 132, 178, 179, 191, 143,
1303 165, 191, 128, 159, 160, 175, 176, 185,
1304 186, 190, 128, 168, 169, 191, 131, 186,
1305 128, 139, 140, 159, 160, 182, 183, 189,
1306 190, 255, 176, 178, 180, 183, 184, 190,
1307 191, 192, 255, 129, 128, 130, 131, 154,
1308 155, 157, 158, 159, 160, 170, 171, 177,
1309 178, 180, 181, 191, 128, 167, 175, 129,
1310 134, 135, 136, 137, 142, 143, 144, 145,
1311 150, 151, 159, 160, 255, 155, 166, 175,
1312 128, 162, 163, 191, 164, 175, 135, 138,
1313 188, 191, 192, 255, 174, 175, 154, 191,
1314 192, 255, 157, 169, 183, 189, 191, 128,
1315 134, 135, 146, 147, 151, 152, 158, 159,
1316 190, 130, 133, 128, 255, 178, 191, 192,
1317 255, 128, 146, 147, 255, 190, 191, 192,
1318 255, 128, 143, 144, 255, 144, 145, 136,
1319 175, 188, 191, 192, 255, 181, 128, 175,
1320 176, 255, 189, 191, 192, 255, 128, 160,
1321 161, 186, 187, 191, 128, 129, 154, 155,
1322 165, 166, 255, 191, 192, 255, 128, 129,
1323 130, 135, 136, 137, 138, 143, 144, 145,
1324 146, 151, 152, 153, 154, 156, 157, 191,
1325 128, 191, 128, 129, 130, 131, 133, 138,
1326 139, 140, 141, 142, 143, 144, 145, 146,
1327 147, 148, 149, 152, 156, 157, 160, 161,
1328 162, 163, 164, 166, 168, 169, 170, 171,
1329 172, 173, 174, 176, 177, 132, 151, 153,
1330 155, 158, 175, 178, 179, 180, 191, 140,
1331 167, 187, 190, 128, 255, 142, 143, 158,
1332 191, 192, 255, 187, 191, 192, 255, 128,
1333 180, 181, 191, 128, 156, 157, 159, 160,
1334 255, 145, 191, 192, 255, 128, 159, 160,
1335 175, 176, 255, 139, 143, 182, 191, 192,
1336 255, 144, 132, 135, 150, 191, 192, 255,
1337 158, 175, 148, 151, 188, 191, 192, 255,
1338 128, 167, 168, 175, 176, 255, 164, 191,
1339 192, 255, 183, 191, 192, 255, 128, 149,
1340 150, 159, 160, 167, 168, 191, 136, 182,
1341 188, 128, 133, 134, 137, 138, 184, 185,
1342 190, 191, 255, 150, 159, 183, 191, 192,
1343 255, 179, 128, 159, 160, 181, 182, 191,
1344 128, 149, 150, 159, 160, 185, 186, 191,
1345 128, 183, 184, 189, 190, 191, 128, 148,
1346 152, 129, 143, 144, 179, 180, 191, 128,
1347 159, 160, 188, 189, 191, 128, 156, 157,
1348 191, 136, 128, 164, 165, 191, 128, 181,
1349 182, 191, 128, 149, 150, 159, 160, 178,
1350 179, 191, 128, 145, 146, 191, 128, 178,
1351 179, 191, 128, 130, 131, 132, 133, 134,
1352 135, 136, 138, 139, 140, 141, 144, 145,
1353 146, 147, 150, 151, 152, 153, 154, 156,
1354 162, 163, 171, 176, 177, 178, 129, 191,
1355 128, 130, 131, 183, 184, 191, 128, 130,
1356 131, 175, 176, 191, 128, 143, 144, 168,
1357 169, 191, 128, 130, 131, 166, 167, 191,
1358 182, 128, 143, 144, 178, 179, 191, 128,
1359 130, 131, 178, 179, 191, 128, 154, 156,
1360 129, 132, 133, 191, 146, 128, 171, 172,
1361 191, 135, 137, 142, 158, 128, 168, 169,
1362 175, 176, 255, 159, 191, 192, 255, 144,
1363 128, 156, 157, 161, 162, 191, 128, 134,
1364 135, 138, 139, 191, 128, 175, 176, 191,
1365 134, 128, 131, 132, 135, 136, 191, 128,
1366 174, 175, 191, 128, 151, 152, 155, 156,
1367 191, 132, 128, 191, 128, 170, 171, 191,
1368 128, 153, 154, 191, 160, 190, 192, 255,
1369 128, 184, 185, 191, 137, 128, 174, 175,
1370 191, 128, 129, 177, 178, 255, 144, 191,
1371 192, 255, 128, 142, 143, 144, 145, 146,
1372 149, 129, 148, 150, 191, 175, 191, 192,
1373 255, 132, 191, 192, 255, 128, 144, 129,
1374 143, 145, 191, 144, 153, 128, 143, 145,
1375 152, 154, 191, 135, 191, 192, 255, 160,
1376 168, 169, 171, 172, 173, 174, 188, 189,
1377 190, 191, 128, 159, 161, 167, 170, 187,
1378 185, 191, 192, 255, 128, 143, 144, 173,
1379 174, 191, 128, 131, 132, 162, 163, 183,
1380 184, 188, 189, 255, 133, 143, 145, 191,
1381 192, 255, 128, 146, 147, 159, 160, 191,
1382 160, 128, 191, 128, 129, 191, 192, 255,
1383 159, 160, 171, 128, 170, 172, 191, 192,
1384 255, 173, 191, 192, 255, 179, 191, 192,
1385 255, 128, 176, 177, 178, 129, 191, 128,
1386 129, 130, 191, 171, 175, 189, 191, 192,
1387 255, 128, 136, 137, 143, 144, 153, 154,
1388 191, 144, 145, 146, 147, 148, 149, 154,
1389 155, 156, 157, 158, 159, 128, 143, 150,
1390 153, 160, 191, 149, 157, 173, 186, 188,
1391 160, 161, 163, 164, 167, 168, 132, 134,
1392 149, 157, 186, 191, 139, 140, 192, 255,
1393 133, 145, 128, 134, 135, 137, 138, 255,
1394 166, 167, 129, 155, 187, 149, 181, 143,
1395 175, 137, 169, 131, 140, 191, 192, 255,
1396 160, 163, 164, 165, 184, 185, 186, 128,
1397 159, 161, 162, 166, 191, 133, 191, 192,
1398 255, 132, 160, 163, 167, 179, 184, 186,
1399 128, 164, 165, 168, 169, 187, 188, 191,
1400 130, 135, 137, 139, 144, 147, 151, 153,
1401 155, 157, 159, 163, 171, 179, 184, 189,
1402 191, 128, 140, 141, 148, 149, 160, 161,
1403 164, 165, 166, 167, 190, 138, 164, 170,
1404 128, 155, 156, 160, 161, 187, 188, 191,
1405 128, 191, 155, 156, 128, 191, 151, 191,
1406 192, 255, 156, 157, 160, 128, 191, 181,
1407 191, 192, 255, 158, 159, 186, 128, 185,
1408 187, 191, 192, 255, 162, 191, 192, 255,
1409 160, 168, 128, 159, 161, 167, 169, 191,
1410 158, 191, 192, 255, 9, 10, 13, 32,
1411 33, 34, 35, 38, 46, 47, 60, 61,
1412 62, 64, 92, 95, 123, 124, 125, 126,
1413 127, 194, 195, 198, 199, 203, 204, 205,
1414 206, 207, 210, 212, 213, 214, 215, 216,
1415 217, 219, 220, 221, 222, 223, 224, 225,
1416 226, 227, 228, 233, 234, 237, 238, 239,
1417 240, 0, 36, 37, 45, 48, 57, 58,
1418 63, 65, 90, 91, 96, 97, 122, 192,
1419 193, 196, 218, 229, 236, 241, 247, 9,
1420 32, 10, 61, 10, 38, 46, 42, 47,
1425 46, 69, 101, 48, 57, 60, 61, 61, 1421 46, 69, 101, 48, 57, 60, 61, 61,
1426 62, 61, 45, 95, 194, 195, 198, 199, 1422 62, 61, 45, 95, 194, 195, 198, 199,
1427 203, 204, 205, 206, 207, 210, 212, 213, 1423 203, 204, 205, 206, 207, 210, 212, 213,
@@ -1472,229 +1468,131 @@ var _hcltok_trans_keys []byte = []byte{
1472 159, 161, 169, 173, 191, 128, 191, 10, 1468 159, 161, 169, 173, 191, 128, 191, 10,
1473 13, 34, 36, 37, 92, 128, 191, 192, 1469 13, 34, 36, 37, 92, 128, 191, 192,
1474 223, 224, 239, 240, 247, 248, 255, 10, 1470 223, 224, 239, 240, 247, 248, 255, 10,
1475 13, 34, 36, 37, 92, 128, 191, 192, 1471 13, 34, 92, 36, 37, 128, 191, 192,
1476 223, 224, 239, 240, 247, 248, 255, 10, 1472 223, 224, 239, 240, 247, 248, 255, 10,
1477 13, 34, 36, 37, 92, 128, 191, 192, 1473 13, 36, 123, 123, 126, 126, 37, 123,
1474 126, 10, 13, 128, 191, 192, 223, 224,
1475 239, 240, 247, 248, 255, 128, 191, 128,
1476 191, 128, 191, 10, 13, 36, 37, 128,
1477 191, 192, 223, 224, 239, 240, 247, 248,
1478 255, 10, 13, 36, 37, 128, 191, 192,
1478 223, 224, 239, 240, 247, 248, 255, 10, 1479 223, 224, 239, 240, 247, 248, 255, 10,
1479 13, 34, 36, 37, 92, 128, 191, 192, 1480 13, 10, 13, 123, 10, 13, 126, 10,
1481 13, 126, 126, 128, 191, 128, 191, 128,
1482 191, 10, 13, 36, 37, 128, 191, 192,
1480 223, 224, 239, 240, 247, 248, 255, 10, 1483 223, 224, 239, 240, 247, 248, 255, 10,
1481 13, 36, 37, 92, 128, 191, 192, 223, 1484 13, 36, 37, 128, 191, 192, 223, 224,
1482 224, 239, 240, 247, 248, 255, 36, 37, 1485 239, 240, 247, 248, 255, 10, 13, 10,
1483 92, 123, 192, 223, 224, 239, 240, 247, 1486 13, 123, 10, 13, 126, 10, 13, 126,
1484 10, 13, 34, 36, 37, 92, 123, 128, 1487 126, 128, 191, 128, 191, 128, 191, 95,
1485 191, 192, 223, 224, 239, 240, 247, 248, 1488 194, 195, 198, 199, 203, 204, 205, 206,
1486 255, 10, 13, 34, 36, 37, 92, 123, 1489 207, 210, 212, 213, 214, 215, 216, 217,
1487 128, 191, 192, 223, 224, 239, 240, 247, 1490 219, 220, 221, 222, 223, 224, 225, 226,
1488 248, 255, 10, 13, 34, 36, 37, 92, 1491 227, 228, 233, 234, 237, 238, 239, 240,
1489 123, 128, 191, 192, 223, 224, 239, 240, 1492 65, 90, 97, 122, 128, 191, 192, 193,
1490 247, 248, 255, 10, 13, 34, 36, 37, 1493 196, 218, 229, 236, 241, 247, 248, 255,
1491 92, 128, 191, 192, 223, 224, 239, 240, 1494 45, 95, 194, 195, 198, 199, 203, 204,
1492 247, 248, 255, 36, 37, 92, 123, 192, 1495 205, 206, 207, 210, 212, 213, 214, 215,
1493 223, 224, 239, 240, 247, 10, 13, 34, 1496 216, 217, 219, 220, 221, 222, 223, 224,
1494 36, 37, 92, 123, 128, 191, 192, 223, 1497 225, 226, 227, 228, 233, 234, 237, 239,
1495 224, 239, 240, 247, 248, 255, 10, 13, 1498 240, 243, 48, 57, 65, 90, 97, 122,
1496 34, 36, 37, 92, 128, 191, 192, 223, 1499 196, 218, 229, 236, 128, 191, 170, 181,
1497 224, 239, 240, 247, 248, 255, 10, 13, 1500 186, 128, 191, 151, 183, 128, 255, 192,
1498 34, 36, 37, 92, 128, 191, 192, 223, 1501 255, 0, 127, 173, 130, 133, 146, 159,
1499 224, 239, 240, 247, 248, 255, 10, 13, 1502 165, 171, 175, 191, 192, 255, 181, 190,
1500 34, 36, 37, 92, 128, 191, 192, 223, 1503 128, 175, 176, 183, 184, 185, 186, 191,
1501 224, 239, 240, 247, 248, 255, 10, 13, 1504 134, 139, 141, 162, 128, 135, 136, 255,
1502 34, 36, 37, 92, 128, 191, 192, 223, 1505 182, 130, 137, 176, 151, 152, 154, 160,
1503 224, 239, 240, 247, 248, 255, 10, 13, 1506 136, 191, 192, 255, 128, 143, 144, 170,
1504 34, 36, 37, 92, 128, 191, 192, 223, 1507 171, 175, 176, 178, 179, 191, 128, 159,
1505 224, 239, 240, 247, 248, 255, 10, 13, 1508 160, 191, 176, 128, 138, 139, 173, 174,
1506 34, 36, 37, 92, 128, 191, 192, 223, 1509 255, 148, 150, 164, 167, 173, 176, 185,
1507 224, 239, 240, 247, 248, 255, 10, 13, 1510 189, 190, 192, 255, 144, 128, 145, 146,
1508 34, 36, 37, 92, 128, 191, 192, 223, 1511 175, 176, 191, 128, 140, 141, 255, 166,
1509 224, 239, 240, 247, 248, 255, 123, 126, 1512 176, 178, 191, 192, 255, 186, 128, 137,
1510 123, 126, 128, 191, 128, 191, 128, 191, 1513 138, 170, 171, 179, 180, 181, 182, 191,
1511 10, 13, 36, 37, 128, 191, 192, 223, 1514 160, 161, 162, 164, 165, 166, 167, 168,
1512 224, 239, 240, 247, 248, 255, 10, 13, 1515 169, 170, 171, 172, 173, 174, 175, 176,
1513 36, 37, 128, 191, 192, 223, 224, 239, 1516 177, 178, 179, 180, 181, 182, 183, 184,
1514 240, 247, 248, 255, 10, 13, 36, 37, 1517 185, 186, 187, 188, 189, 190, 128, 191,
1515 128, 191, 192, 223, 224, 239, 240, 247, 1518 128, 129, 130, 131, 137, 138, 139, 140,
1516 248, 255, 10, 13, 36, 37, 128, 191, 1519 141, 142, 143, 144, 153, 154, 155, 156,
1517 192, 223, 224, 239, 240, 247, 248, 255, 1520 157, 158, 159, 160, 161, 162, 163, 164,
1518 126, 126, 128, 191, 128, 191, 128, 191,
1519 10, 13, 36, 37, 128, 191, 192, 223,
1520 224, 239, 240, 247, 248, 255, 10, 13,
1521 36, 37, 128, 191, 192, 223, 224, 239,
1522 240, 247, 248, 255, 126, 126, 128, 191,
1523 128, 191, 128, 191, 95, 194, 195, 198,
1524 199, 203, 204, 205, 206, 207, 210, 212,
1525 213, 214, 215, 216, 217, 219, 220, 221,
1526 222, 223, 224, 225, 226, 227, 228, 233,
1527 234, 237, 238, 239, 240, 65, 90, 97,
1528 122, 128, 191, 192, 193, 196, 218, 229,
1529 236, 241, 247, 248, 255, 45, 95, 194,
1530 195, 198, 199, 203, 204, 205, 206, 207,
1531 210, 212, 213, 214, 215, 216, 217, 219,
1532 220, 221, 222, 223, 224, 225, 226, 227,
1533 228, 233, 234, 237, 239, 240, 243, 48,
1534 57, 65, 90, 97, 122, 196, 218, 229,
1535 236, 128, 191, 170, 181, 186, 128, 191,
1536 151, 183, 128, 255, 192, 255, 0, 127,
1537 173, 130, 133, 146, 159, 165, 171, 175,
1538 191, 192, 255, 181, 190, 128, 175, 176,
1539 183, 184, 185, 186, 191, 134, 139, 141,
1540 162, 128, 135, 136, 255, 182, 130, 137,
1541 176, 151, 152, 154, 160, 136, 191, 192,
1542 255, 128, 143, 144, 170, 171, 175, 176,
1543 178, 179, 191, 128, 159, 160, 191, 176,
1544 128, 138, 139, 173, 174, 255, 148, 150,
1545 164, 167, 173, 176, 185, 189, 190, 192,
1546 255, 144, 128, 145, 146, 175, 176, 191,
1547 128, 140, 141, 255, 166, 176, 178, 191,
1548 192, 255, 186, 128, 137, 138, 170, 171,
1549 179, 180, 181, 182, 191, 160, 161, 162,
1550 164, 165, 166, 167, 168, 169, 170, 171,
1551 172, 173, 174, 175, 176, 177, 178, 179,
1552 180, 181, 182, 183, 184, 185, 186, 187,
1553 188, 189, 190, 128, 191, 128, 129, 130,
1554 131, 137, 138, 139, 140, 141, 142, 143,
1555 144, 153, 154, 155, 156, 157, 158, 159,
1556 160, 161, 162, 163, 164, 165, 166, 167,
1557 168, 169, 170, 171, 172, 173, 174, 175,
1558 176, 177, 178, 179, 180, 182, 183, 184,
1559 188, 189, 190, 191, 132, 187, 129, 130,
1560 132, 133, 134, 176, 177, 178, 179, 180,
1561 181, 182, 183, 128, 191, 128, 129, 130,
1562 131, 132, 133, 134, 135, 144, 136, 143,
1563 145, 191, 192, 255, 182, 183, 184, 128,
1564 191, 128, 191, 191, 128, 190, 192, 255,
1565 128, 146, 147, 148, 152, 153, 154, 155,
1566 156, 158, 159, 160, 161, 162, 163, 164,
1567 165, 166, 167, 168, 169, 170, 171, 172, 1521 165, 166, 167, 168, 169, 170, 171, 172,
1568 173, 174, 175, 176, 129, 191, 192, 255, 1522 173, 174, 175, 176, 177, 178, 179, 180,
1569 158, 159, 128, 157, 160, 191, 192, 255, 1523 182, 183, 184, 188, 189, 190, 191, 132,
1570 128, 191, 164, 169, 171, 172, 173, 174, 1524 187, 129, 130, 132, 133, 134, 176, 177,
1571 175, 180, 181, 182, 183, 184, 185, 187, 1525 178, 179, 180, 181, 182, 183, 128, 191,
1572 188, 189, 190, 191, 128, 163, 165, 186, 1526 128, 129, 130, 131, 132, 133, 134, 135,
1573 144, 145, 146, 147, 148, 150, 151, 152, 1527 144, 136, 143, 145, 191, 192, 255, 182,
1574 155, 157, 158, 160, 170, 171, 172, 175, 1528 183, 184, 128, 191, 128, 191, 191, 128,
1575 128, 159, 161, 169, 173, 191, 128, 191, 1529 190, 192, 255, 128, 146, 147, 148, 152,
1530 153, 154, 155, 156, 158, 159, 160, 161,
1531 162, 163, 164, 165, 166, 167, 168, 169,
1532 170, 171, 172, 173, 174, 175, 176, 129,
1533 191, 192, 255, 158, 159, 128, 157, 160,
1534 191, 192, 255, 128, 191, 164, 169, 171,
1535 172, 173, 174, 175, 180, 181, 182, 183,
1536 184, 185, 187, 188, 189, 190, 191, 128,
1537 163, 165, 186, 144, 145, 146, 147, 148,
1538 150, 151, 152, 155, 157, 158, 160, 170,
1539 171, 172, 175, 128, 159, 161, 169, 173,
1540 191, 128, 191,
1576} 1541}
1577 1542
1578var _hcltok_single_lengths []byte = []byte{ 1543var _hcltok_single_lengths []byte = []byte{
1579 0, 1, 1, 1, 2, 3, 2, 0, 1544 0, 1, 1, 2, 3, 2, 0, 32,
1580 32, 31, 36, 1, 4, 0, 0, 0, 1545 31, 36, 1, 4, 0, 0, 0, 0,
1581 0, 1, 2, 1, 1, 1, 1, 0, 1546 1, 2, 1, 1, 1, 1, 0, 1,
1582 1, 1, 0, 0, 2, 0, 0, 0, 1547 1, 0, 0, 2, 0, 0, 0, 1,
1583 1, 32, 0, 0, 0, 0, 1, 3, 1548 32, 0, 0, 0, 0, 1, 3, 1,
1584 1, 1, 1, 0, 2, 0, 1, 1, 1549 1, 1, 0, 2, 0, 1, 1, 2,
1585 2, 0, 3, 0, 1, 0, 2, 1, 1550 0, 3, 0, 1, 0, 2, 1, 2,
1586 2, 0, 0, 5, 1, 4, 0, 0, 1551 0, 0, 5, 1, 4, 0, 0, 1,
1587 1, 43, 0, 0, 0, 2, 3, 2, 1552 43, 0, 0, 0, 2, 3, 2, 1,
1588 1, 1, 0, 0, 0, 0, 0, 0, 1553 1, 0, 0, 0, 0, 0, 0, 0,
1589 0, 0, 0, 0, 0, 0, 0, 0, 1554 0, 0, 0, 0, 0, 0, 0, 0,
1590 0, 0, 0, 0, 0, 1, 1, 0, 1555 0, 0, 0, 0, 1, 1, 0, 0,
1591 0, 0, 0, 0, 0, 0, 0, 4, 1556 0, 0, 0, 0, 0, 0, 4, 1,
1592 1, 0, 15, 0, 0, 0, 1, 6, 1557 0, 15, 0, 0, 0, 1, 6, 1,
1593 1, 0, 0, 1, 0, 2, 0, 0, 1558 0, 0, 1, 0, 2, 0, 0, 0,
1594 0, 9, 0, 1, 1, 0, 0, 0, 1559 9, 0, 1, 1, 0, 0, 0, 3,
1595 3, 0, 1, 0, 28, 0, 0, 0, 1560 0, 1, 0, 28, 0, 0, 0, 1,
1596 1, 0, 1, 0, 0, 0, 1, 0, 1561 0, 1, 0, 0, 0, 1, 0, 0,
1562 0, 0, 0, 0, 0, 1, 0, 2,
1563 0, 0, 18, 0, 0, 1, 0, 0,
1597 0, 0, 0, 0, 0, 0, 1, 0, 1564 0, 0, 0, 0, 0, 0, 1, 0,
1598 2, 0, 0, 18, 0, 0, 1, 0, 1565 0, 0, 16, 36, 0, 0, 0, 0,
1599 0, 0, 0, 0, 0, 0, 0, 1, 1566 1, 0, 0, 0, 0, 0, 1, 0,
1600 0, 0, 0, 16, 36, 0, 0, 0, 1567 0, 0, 0, 0, 0, 2, 0, 0,
1601 0, 1, 0, 0, 0, 0, 0, 1,
1602 0, 0, 0, 0, 0, 0, 2, 0,
1603 0, 0, 0, 0, 1, 0, 0, 0,
1604 0, 0, 0, 0, 28, 0, 0, 0,
1605 1, 1, 1, 1, 0, 0, 2, 0,
1606 1, 0, 0, 0, 0, 0, 0, 0,
1607 0, 0, 1, 1, 4, 0, 0, 2,
1608 2, 0, 11, 0, 0, 0, 0, 0,
1609 0, 0, 1, 1, 3, 0, 0, 4,
1610 0, 0, 0, 18, 0, 0, 0, 1,
1611 4, 1, 4, 1, 0, 3, 2, 2,
1612 2, 1, 0, 0, 1, 8, 0, 0,
1613 0, 4, 12, 0, 2, 0, 3, 0,
1614 1, 0, 2, 0, 1, 2, 0, 3,
1615 1, 2, 0, 0, 0, 0, 0, 1,
1616 1, 0, 0, 1, 28, 3, 0, 1,
1617 1, 2, 1, 0, 1, 1, 2, 1,
1618 1, 2, 1, 1, 0, 2, 1, 1,
1619 1, 1, 0, 0, 6, 1, 1, 0,
1620 0, 46, 1, 1, 0, 0, 0, 0,
1621 2, 1, 0, 0, 0, 1, 0, 0,
1622 0, 0, 0, 0, 0, 13, 2, 0,
1623 0, 0, 9, 0, 1, 28, 0, 1,
1624 3, 0, 2, 0, 0, 0, 1, 0,
1625 1, 1, 2, 0, 18, 2, 0, 0,
1626 16, 35, 0, 0, 0, 1, 0, 28,
1627 0, 0, 0, 0, 1, 0, 2, 0,
1628 0, 1, 0, 0, 1, 0, 0, 1,
1629 0, 0, 0, 0, 1, 11, 0, 0,
1630 0, 0, 4, 0, 12, 1, 7, 0,
1631 4, 0, 0, 0, 0, 1, 2, 1,
1632 1, 1, 1, 0, 1, 1, 0, 0,
1633 2, 0, 0, 0, 1, 32, 0, 0,
1634 0, 0, 1, 3, 1, 1, 1, 0,
1635 2, 0, 1, 1, 2, 0, 3, 0,
1636 1, 0, 2, 1, 2, 0, 0, 5,
1637 1, 4, 0, 0, 1, 43, 0, 0,
1638 0, 2, 3, 2, 1, 1, 0, 0,
1639 0, 0, 0, 0, 0, 0, 0, 0,
1640 0, 0, 0, 0, 0, 0, 0, 0,
1641 0, 1, 1, 0, 0, 0, 0, 0,
1642 0, 0, 0, 4, 1, 0, 15, 0,
1643 0, 0, 1, 6, 1, 0, 0, 1,
1644 0, 2, 0, 0, 0, 9, 0, 1,
1645 1, 0, 0, 0, 3, 0, 1, 0,
1646 28, 0, 0, 0, 1, 0, 1, 0,
1647 0, 0, 1, 0, 0, 0, 0, 0,
1648 0, 0, 1, 0, 2, 0, 0, 18,
1649 0, 0, 1, 0, 0, 0, 0, 0,
1650 0, 0, 0, 1, 0, 0, 0, 16,
1651 36, 0, 0, 0, 0, 1, 0, 0,
1652 0, 0, 0, 1, 0, 0, 0, 0, 1568 0, 0, 0, 1, 0, 0, 0, 0,
1653 0, 0, 2, 0, 0, 0, 0, 0, 1569 0, 0, 0, 28, 0, 0, 0, 1,
1654 1, 0, 0, 0, 0, 0, 0, 0, 1570 1, 1, 1, 0, 0, 2, 0, 1,
1655 28, 0, 0, 0, 1, 1, 1, 1, 1571 0, 0, 0, 0, 0, 0, 0, 0,
1656 0, 0, 2, 0, 1, 0, 0, 0, 1572 0, 1, 1, 4, 0, 0, 2, 2,
1657 0, 0, 0, 0, 0, 0, 1, 1, 1573 0, 11, 0, 0, 0, 0, 0, 0,
1658 4, 0, 0, 2, 2, 0, 11, 0, 1574 0, 1, 1, 3, 0, 0, 4, 0,
1659 0, 0, 0, 0, 0, 0, 1, 1, 1575 0, 0, 18, 0, 0, 0, 1, 4,
1660 3, 0, 0, 4, 0, 0, 0, 18, 1576 1, 4, 1, 0, 3, 2, 2, 2,
1661 0, 0, 0, 1, 4, 1, 4, 1, 1577 1, 0, 0, 1, 8, 0, 0, 0,
1662 0, 3, 2, 2, 2, 1, 0, 0, 1578 4, 12, 0, 2, 0, 3, 0, 1,
1663 1, 8, 0, 0, 0, 4, 12, 0, 1579 0, 2, 0, 1, 2, 0, 3, 1,
1664 2, 0, 3, 0, 1, 0, 2, 0, 1580 2, 0, 0, 0, 0, 0, 1, 1,
1665 1, 2, 0, 0, 3, 0, 1, 1, 1581 0, 0, 1, 28, 3, 0, 1, 1,
1666 1, 2, 2, 4, 1, 6, 2, 4, 1582 2, 1, 0, 1, 1, 2, 1, 1,
1667 2, 4, 1, 4, 0, 6, 1, 3, 1583 2, 1, 1, 0, 2, 1, 1, 1,
1668 1, 2, 0, 2, 11, 1, 1, 1, 1584 1, 0, 0, 6, 1, 1, 0, 0,
1669 0, 1, 1, 0, 2, 0, 3, 3, 1585 46, 1, 1, 0, 0, 0, 0, 2,
1670 2, 1, 0, 0, 0, 1, 0, 1, 1586 1, 0, 0, 0, 1, 0, 0, 0,
1671 0, 1, 1, 0, 2, 0, 0, 1, 1587 0, 0, 0, 0, 13, 2, 0, 0,
1672 0, 0, 0, 0, 0, 0, 0, 1, 1588 0, 9, 0, 1, 28, 0, 1, 3,
1673 0, 0, 0, 0, 0, 0, 0, 1, 1589 0, 2, 0, 0, 0, 1, 0, 1,
1674 0, 0, 0, 4, 3, 2, 2, 0, 1590 1, 2, 0, 18, 2, 0, 0, 16,
1675 6, 1, 0, 1, 1, 0, 2, 0, 1591 35, 0, 0, 0, 1, 0, 28, 0,
1676 4, 3, 0, 1, 1, 0, 0, 0, 1592 0, 0, 0, 1, 0, 2, 0, 0,
1677 0, 0, 0, 0, 1, 0, 0, 0, 1593 1, 0, 0, 1, 0, 0, 1, 0,
1678 1, 0, 3, 0, 2, 0, 0, 0, 1594 0, 0, 0, 1, 11, 0, 0, 0,
1679 3, 0, 2, 1, 1, 3, 1, 0, 1595 0, 4, 0, 12, 1, 7, 0, 4,
1680 0, 0, 0, 0, 5, 2, 0, 0,
1681 0, 0, 0, 0, 1, 0, 0, 1,
1682 1, 0, 0, 35, 4, 0, 0, 0,
1683 0, 0, 0, 0, 1, 0, 0, 0,
1684 0, 0, 0, 3, 0, 1, 0, 0,
1685 3, 0, 0, 1, 0, 0, 0, 0,
1686 28, 0, 0, 0, 0, 1, 0, 3,
1687 1, 4, 0, 1, 0, 0, 1, 0,
1688 0, 1, 0, 0, 0, 0, 1, 1,
1689 0, 7, 0, 0, 2, 2, 0, 11,
1690 0, 0, 0, 0, 0, 1, 1, 3,
1691 0, 0, 4, 0, 0, 0, 12, 1,
1692 4, 1, 5, 2, 0, 3, 2, 2,
1693 2, 1, 7, 0, 7, 17, 3, 0,
1694 2, 0, 3, 0, 0, 1, 0, 2,
1695 0, 1, 1, 0, 0, 0, 0, 0,
1696 1, 1, 1, 0, 0, 0, 1, 1,
1697 1, 1, 0, 0, 0, 1, 1, 4,
1698 0, 0, 0, 0, 1, 2, 1, 1, 1596 0, 0, 0, 0, 1, 2, 1, 1,
1699 1, 1, 0, 1, 1, 0, 0, 2, 1597 1, 1, 0, 1, 1, 0, 0, 2,
1700 0, 0, 0, 1, 32, 0, 0, 0, 1598 0, 0, 0, 1, 32, 0, 0, 0,
@@ -1759,145 +1657,143 @@ var _hcltok_single_lengths []byte = []byte{
1759 1, 5, 2, 0, 3, 2, 2, 2, 1657 1, 5, 2, 0, 3, 2, 2, 2,
1760 1, 7, 0, 7, 17, 3, 0, 2, 1658 1, 7, 0, 7, 17, 3, 0, 2,
1761 0, 3, 0, 0, 1, 0, 2, 0, 1659 0, 3, 0, 0, 1, 0, 2, 0,
1762 54, 2, 1, 1, 1, 1, 1, 2, 1660 2, 0, 0, 0, 0, 0, 1, 0,
1763 1, 3, 2, 2, 1, 34, 1, 1, 1661 0, 0, 2, 2, 1, 0, 0, 0,
1764 0, 3, 2, 0, 0, 0, 1, 2, 1662 2, 2, 4, 0, 0, 0, 0, 1,
1765 4, 1, 0, 1, 0, 0, 0, 0, 1663 2, 1, 1, 1, 1, 0, 1, 1,
1766 1, 1, 1, 0, 0, 1, 30, 47, 1664 0, 0, 2, 0, 0, 0, 1, 32,
1767 13, 9, 3, 0, 1, 28, 2, 0, 1665 0, 0, 0, 0, 1, 3, 1, 1,
1768 18, 16, 0, 6, 6, 6, 6, 5, 1666 1, 0, 2, 0, 1, 1, 2, 0,
1769 4, 7, 7, 7, 6, 4, 7, 6, 1667 3, 0, 1, 0, 2, 1, 2, 0,
1770 6, 6, 6, 6, 6, 6, 1, 1, 1668 0, 5, 1, 4, 0, 0, 1, 43,
1771 1, 1, 0, 0, 0, 4, 4, 4, 1669 0, 0, 0, 2, 3, 2, 1, 1,
1772 4, 1, 1, 0, 0, 0, 4, 2, 1670 0, 0, 0, 0, 0, 0, 0, 0,
1773 1, 1, 0, 0, 0, 33, 34, 0, 1671 0, 0, 0, 0, 0, 0, 0, 0,
1774 3, 2, 0, 0, 0, 1, 2, 4, 1672 0, 0, 0, 1, 1, 0, 0, 0,
1775 1, 0, 1, 0, 0, 0, 0, 1, 1673 0, 0, 0, 0, 0, 4, 1, 0,
1776 1, 1, 0, 0, 1, 30, 47, 13, 1674 15, 0, 0, 0, 1, 6, 1, 0,
1777 9, 3, 0, 1, 28, 2, 0, 18, 1675 0, 1, 0, 2, 0, 0, 0, 9,
1778 16, 0, 1676 0, 1, 1, 0, 0, 0, 3, 0,
1677 1, 0, 28, 0, 0, 0, 1, 0,
1678 1, 0, 0, 0, 1, 0, 0, 0,
1679 0, 0, 0, 0, 1, 0, 2, 0,
1680 0, 18, 0, 0, 1, 0, 0, 0,
1681 0, 0, 0, 0, 0, 1, 0, 0,
1682 0, 16, 36, 0, 0, 0, 0, 1,
1683 0, 0, 0, 0, 0, 1, 0, 0,
1684 0, 0, 0, 0, 2, 0, 0, 0,
1685 0, 0, 1, 0, 0, 0, 0, 0,
1686 0, 0, 28, 0, 0, 0, 1, 1,
1687 1, 1, 0, 0, 2, 0, 1, 0,
1688 0, 0, 0, 0, 0, 0, 0, 0,
1689 1, 1, 4, 0, 0, 2, 2, 0,
1690 11, 0, 0, 0, 0, 0, 0, 0,
1691 1, 1, 3, 0, 0, 4, 0, 0,
1692 0, 18, 0, 0, 0, 1, 4, 1,
1693 4, 1, 0, 3, 2, 2, 2, 1,
1694 0, 0, 1, 8, 0, 0, 0, 4,
1695 12, 0, 2, 0, 3, 0, 1, 0,
1696 2, 0, 1, 2, 0, 0, 3, 0,
1697 1, 1, 1, 2, 2, 4, 1, 6,
1698 2, 4, 2, 4, 1, 4, 0, 6,
1699 1, 3, 1, 2, 0, 2, 11, 1,
1700 1, 1, 0, 1, 1, 0, 2, 0,
1701 3, 3, 2, 1, 0, 0, 0, 1,
1702 0, 1, 0, 1, 1, 0, 2, 0,
1703 0, 1, 0, 0, 0, 0, 0, 0,
1704 0, 1, 0, 0, 0, 0, 0, 0,
1705 0, 1, 0, 0, 0, 4, 3, 2,
1706 2, 0, 6, 1, 0, 1, 1, 0,
1707 2, 0, 4, 3, 0, 1, 1, 0,
1708 0, 0, 0, 0, 0, 0, 1, 0,
1709 0, 0, 1, 0, 3, 0, 2, 0,
1710 0, 0, 3, 0, 2, 1, 1, 3,
1711 1, 0, 0, 0, 0, 0, 5, 2,
1712 0, 0, 0, 0, 0, 0, 1, 0,
1713 0, 1, 1, 0, 0, 35, 4, 0,
1714 0, 0, 0, 0, 0, 0, 1, 0,
1715 0, 0, 0, 0, 0, 3, 0, 1,
1716 0, 0, 3, 0, 0, 1, 0, 0,
1717 0, 0, 28, 0, 0, 0, 0, 1,
1718 0, 3, 1, 4, 0, 1, 0, 0,
1719 1, 0, 0, 1, 0, 0, 0, 0,
1720 1, 1, 0, 7, 0, 0, 2, 2,
1721 0, 11, 0, 0, 0, 0, 0, 1,
1722 1, 3, 0, 0, 4, 0, 0, 0,
1723 12, 1, 4, 1, 5, 2, 0, 3,
1724 2, 2, 2, 1, 7, 0, 7, 17,
1725 3, 0, 2, 0, 3, 0, 0, 1,
1726 0, 2, 0, 53, 2, 1, 1, 1,
1727 1, 1, 2, 3, 2, 2, 1, 34,
1728 1, 1, 0, 3, 2, 0, 0, 0,
1729 1, 2, 4, 1, 0, 1, 0, 0,
1730 0, 0, 1, 1, 1, 0, 0, 1,
1731 30, 47, 13, 9, 3, 0, 1, 28,
1732 2, 0, 18, 16, 0, 6, 4, 2,
1733 2, 0, 1, 1, 1, 2, 1, 2,
1734 0, 0, 0, 4, 2, 2, 3, 3,
1735 2, 1, 1, 0, 0, 0, 4, 2,
1736 2, 3, 3, 2, 1, 1, 0, 0,
1737 0, 33, 34, 0, 3, 2, 0, 0,
1738 0, 1, 2, 4, 1, 0, 1, 0,
1739 0, 0, 0, 1, 1, 1, 0, 0,
1740 1, 30, 47, 13, 9, 3, 0, 1,
1741 28, 2, 0, 18, 16, 0,
1779} 1742}
1780 1743
1781var _hcltok_range_lengths []byte = []byte{ 1744var _hcltok_range_lengths []byte = []byte{
1782 0, 0, 0, 0, 0, 1, 1, 1, 1745 0, 0, 0, 0, 1, 1, 1, 5,
1783 5, 5, 5, 0, 0, 3, 0, 1, 1746 5, 5, 0, 0, 3, 0, 1, 1,
1784 1, 4, 2, 3, 0, 1, 0, 2, 1747 4, 2, 3, 0, 1, 0, 2, 2,
1785 2, 4, 2, 2, 3, 1, 1, 1, 1748 4, 2, 2, 3, 1, 1, 1, 1,
1786 1, 0, 1, 1, 2, 2, 1, 4, 1749 0, 1, 1, 2, 2, 1, 4, 6,
1787 6, 9, 6, 8, 5, 8, 7, 10, 1750 9, 6, 8, 5, 8, 7, 10, 4,
1788 4, 6, 4, 7, 7, 5, 5, 4, 1751 6, 4, 7, 7, 5, 5, 4, 5,
1789 5, 1, 2, 8, 4, 3, 3, 3, 1752 1, 2, 8, 4, 3, 3, 3, 0,
1790 0, 3, 1, 2, 1, 2, 2, 3, 1753 3, 1, 2, 1, 2, 2, 3, 3,
1791 3, 1, 3, 2, 2, 1, 2, 2, 1754 1, 3, 2, 2, 1, 2, 2, 2,
1792 2, 3, 4, 4, 3, 1, 2, 1, 1755 3, 4, 4, 3, 1, 2, 1, 3,
1793 3, 2, 2, 2, 2, 2, 3, 3, 1756 2, 2, 2, 2, 2, 3, 3, 1,
1794 1, 1, 2, 1, 3, 2, 2, 3, 1757 1, 2, 1, 3, 2, 2, 3, 2,
1795 2, 7, 0, 1, 4, 1, 2, 4, 1758 7, 0, 1, 4, 1, 2, 4, 2,
1796 2, 1, 2, 0, 2, 2, 3, 5, 1759 1, 2, 0, 2, 2, 3, 5, 5,
1797 5, 1, 4, 1, 1, 2, 2, 1, 1760 1, 4, 1, 1, 2, 2, 1, 0,
1798 0, 0, 1, 1, 1, 1, 1, 2, 1761 0, 1, 1, 1, 1, 1, 2, 2,
1799 2, 2, 2, 1, 1, 1, 4, 2, 1762 2, 2, 1, 1, 1, 4, 2, 2,
1800 2, 3, 1, 4, 4, 6, 1, 3, 1763 3, 1, 4, 4, 6, 1, 3, 1,
1801 1, 1, 2, 1, 1, 1, 5, 3, 1764 1, 2, 1, 1, 1, 5, 3, 1,
1802 1, 1, 1, 2, 3, 3, 1, 2, 1765 1, 1, 2, 3, 3, 1, 2, 2,
1803 2, 1, 4, 1, 2, 5, 2, 1, 1766 1, 4, 1, 2, 5, 2, 1, 1,
1804 1, 0, 2, 2, 2, 2, 2, 2, 1767 0, 2, 2, 2, 2, 2, 2, 2,
1805 2, 2, 2, 1, 1, 2, 4, 2, 1768 2, 2, 1, 1, 2, 4, 2, 1,
1806 1, 2, 2, 2, 6, 1, 1, 2, 1769 2, 2, 2, 6, 1, 1, 2, 1,
1807 1, 2, 1, 1, 1, 2, 2, 2, 1770 2, 1, 1, 1, 2, 2, 2, 1,
1808 1, 3, 2, 5, 2, 8, 6, 2, 1771 3, 2, 5, 2, 8, 6, 2, 2,
1809 2, 2, 2, 3, 1, 3, 1, 2, 1772 2, 2, 3, 1, 3, 1, 2, 1,
1810 1, 3, 2, 2, 3, 1, 1, 1, 1773 3, 2, 2, 3, 1, 1, 1, 1,
1811 1, 1, 1, 1, 2, 2, 4, 1, 1774 1, 1, 1, 2, 2, 4, 1, 2,
1812 2, 1, 0, 1, 1, 1, 1, 0, 1775 1, 0, 1, 1, 1, 1, 0, 1,
1813 1, 2, 3, 1, 3, 3, 1, 0, 1776 2, 3, 1, 3, 3, 1, 0, 3,
1814 3, 0, 2, 3, 1, 0, 0, 0, 1777 0, 2, 3, 1, 0, 0, 0, 0,
1815 0, 2, 2, 2, 2, 1, 5, 2, 1778 2, 2, 2, 2, 1, 5, 2, 2,
1816 2, 5, 7, 5, 0, 1, 0, 1, 1779 5, 7, 5, 0, 1, 0, 1, 1,
1817 1, 1, 1, 1, 0, 1, 1, 0, 1780 1, 1, 1, 0, 1, 1, 0, 3,
1818 3, 3, 1, 1, 2, 1, 3, 5, 1781 3, 1, 1, 2, 1, 3, 5, 1,
1819 1, 1, 2, 2, 1, 1, 1, 1, 1782 1, 2, 2, 1, 1, 1, 1, 2,
1820 2, 6, 3, 7, 2, 6, 1, 6, 1783 6, 3, 7, 2, 6, 1, 6, 2,
1821 2, 8, 0, 4, 2, 5, 2, 3, 1784 8, 0, 4, 2, 5, 2, 3, 3,
1822 3, 3, 1, 2, 8, 2, 0, 2, 1785 3, 1, 2, 8, 2, 0, 2, 1,
1823 1, 2, 1, 5, 2, 1, 3, 3, 1786 2, 1, 5, 2, 1, 3, 3, 0,
1824 0, 2, 1, 2, 1, 0, 1, 1, 1787 2, 1, 2, 1, 0, 1, 1, 3,
1825 3, 1, 1, 2, 3, 0, 0, 3, 1788 1, 1, 2, 3, 0, 0, 3, 2,
1826 2, 4, 1, 4, 1, 1, 3, 1, 1789 4, 1, 4, 1, 1, 3, 1, 1,
1827 1, 1, 1, 2, 2, 1, 3, 1, 1790 1, 1, 2, 2, 1, 3, 1, 4,
1828 4, 3, 3, 1, 1, 5, 2, 1, 1791 3, 3, 1, 1, 5, 2, 1, 1,
1829 1, 2, 1, 2, 1, 3, 2, 0, 1792 2, 1, 2, 1, 3, 2, 0, 1,
1830 1, 1, 1, 1, 1, 1, 1, 2, 1793 1, 1, 1, 1, 1, 1, 2, 1,
1831 1, 1, 1, 1, 1, 1, 1, 0, 1794 1, 1, 1, 1, 1, 1, 0, 1,
1832 1, 1, 2, 2, 1, 1, 1, 3, 1795 1, 2, 2, 1, 1, 1, 3, 2,
1833 2, 1, 0, 2, 1, 1, 1, 1, 1796 1, 0, 2, 1, 1, 1, 1, 0,
1834 0, 3, 0, 1, 1, 4, 2, 3,
1835 0, 1, 0, 2, 2, 4, 2, 2,
1836 3, 1, 1, 1, 1, 0, 1, 1,
1837 2, 2, 1, 4, 6, 9, 6, 8,
1838 5, 8, 7, 10, 4, 6, 4, 7,
1839 7, 5, 5, 4, 5, 1, 2, 8,
1840 4, 3, 3, 3, 0, 3, 1, 2,
1841 1, 2, 2, 3, 3, 1, 3, 2,
1842 2, 1, 2, 2, 2, 3, 4, 4,
1843 3, 1, 2, 1, 3, 2, 2, 2,
1844 2, 2, 3, 3, 1, 1, 2, 1,
1845 3, 2, 2, 3, 2, 7, 0, 1,
1846 4, 1, 2, 4, 2, 1, 2, 0,
1847 2, 2, 3, 5, 5, 1, 4, 1,
1848 1, 2, 2, 1, 0, 0, 1, 1,
1849 1, 1, 1, 2, 2, 2, 2, 1,
1850 1, 1, 4, 2, 2, 3, 1, 4,
1851 4, 6, 1, 3, 1, 1, 2, 1,
1852 1, 1, 5, 3, 1, 1, 1, 2,
1853 3, 3, 1, 2, 2, 1, 4, 1,
1854 2, 5, 2, 1, 1, 0, 2, 2,
1855 2, 2, 2, 2, 2, 2, 2, 1,
1856 1, 2, 4, 2, 1, 2, 2, 2,
1857 6, 1, 1, 2, 1, 2, 1, 1,
1858 1, 2, 2, 2, 1, 3, 2, 5,
1859 2, 8, 6, 2, 2, 2, 2, 3,
1860 1, 3, 1, 2, 1, 3, 2, 2,
1861 3, 1, 1, 1, 1, 1, 1, 1,
1862 2, 2, 4, 1, 2, 1, 0, 1,
1863 1, 1, 1, 0, 1, 2, 3, 1,
1864 3, 3, 1, 0, 3, 0, 2, 3,
1865 1, 0, 0, 0, 0, 2, 2, 2,
1866 2, 1, 5, 2, 2, 5, 7, 5,
1867 0, 1, 0, 1, 1, 1, 1, 1,
1868 0, 1, 1, 1, 2, 2, 3, 3,
1869 4, 7, 5, 7, 5, 3, 3, 7,
1870 3, 13, 1, 3, 5, 3, 5, 3,
1871 6, 5, 2, 2, 8, 4, 1, 2,
1872 3, 2, 10, 2, 2, 0, 2, 3,
1873 3, 1, 2, 3, 3, 1, 2, 3,
1874 3, 4, 4, 2, 1, 2, 2, 3,
1875 2, 2, 5, 3, 2, 3, 2, 1,
1876 3, 3, 6, 2, 2, 5, 2, 5,
1877 1, 1, 2, 4, 1, 11, 1, 3,
1878 8, 4, 2, 1, 0, 4, 3, 3,
1879 3, 2, 9, 1, 1, 4, 3, 2,
1880 2, 2, 3, 4, 2, 3, 2, 4,
1881 3, 2, 2, 3, 3, 4, 3, 3,
1882 4, 2, 5, 4, 8, 7, 1, 2,
1883 1, 3, 1, 2, 5, 1, 2, 2,
1884 2, 2, 1, 3, 2, 2, 3, 3,
1885 1, 9, 1, 5, 1, 3, 2, 2,
1886 3, 2, 3, 3, 3, 1, 3, 3,
1887 2, 2, 4, 5, 3, 3, 4, 3,
1888 3, 3, 2, 2, 2, 4, 2, 2,
1889 1, 3, 3, 3, 3, 3, 3, 2,
1890 2, 3, 2, 3, 3, 2, 3, 2,
1891 3, 1, 2, 2, 2, 2, 2, 2,
1892 2, 2, 2, 2, 2, 3, 2, 3,
1893 2, 3, 5, 3, 3, 1, 2, 3,
1894 2, 2, 1, 2, 3, 4, 3, 0,
1895 3, 0, 2, 3, 1, 0, 0, 0,
1896 0, 2, 3, 2, 4, 6, 4, 1,
1897 1, 2, 1, 2, 1, 3, 2, 3,
1898 2, 0, 0, 1, 1, 1, 1, 1,
1899 0, 0, 0, 1, 1, 1, 0, 0,
1900 0, 0, 1, 1, 1, 0, 0, 0,
1901 3, 0, 1, 1, 4, 2, 3, 0, 1797 3, 0, 1, 1, 4, 2, 3, 0,
1902 1, 0, 2, 2, 4, 2, 2, 3, 1798 1, 0, 2, 2, 4, 2, 2, 3,
1903 1, 1, 1, 1, 0, 1, 1, 2, 1799 1, 1, 1, 1, 0, 1, 1, 2,
@@ -1962,826 +1858,889 @@ var _hcltok_range_lengths []byte = []byte{
1962 0, 2, 3, 1, 0, 0, 0, 0, 1858 0, 2, 3, 1, 0, 0, 0, 0,
1963 2, 3, 2, 4, 6, 4, 1, 1, 1859 2, 3, 2, 4, 6, 4, 1, 1,
1964 2, 1, 2, 1, 3, 2, 3, 2, 1860 2, 1, 2, 1, 3, 2, 3, 2,
1965 11, 0, 0, 0, 0, 0, 0, 0, 1861 5, 1, 1, 1, 1, 1, 0, 1,
1966 0, 1, 0, 0, 0, 5, 0, 0, 1862 1, 1, 0, 0, 0, 1, 1, 1,
1967 1, 1, 1, 0, 1, 1, 5, 4, 1863 0, 0, 0, 3, 0, 1, 1, 4,
1968 2, 0, 1, 0, 2, 2, 5, 2, 1864 2, 3, 0, 1, 0, 2, 2, 4,
1969 3, 5, 3, 2, 3, 5, 1, 1, 1865 2, 2, 3, 1, 1, 1, 1, 0,
1970 1, 3, 1, 1, 2, 2, 3, 1, 1866 1, 1, 2, 2, 1, 4, 6, 9,
1971 2, 3, 1, 5, 5, 5, 5, 5, 1867 6, 8, 5, 8, 7, 10, 4, 6,
1972 3, 5, 5, 5, 5, 3, 5, 5, 1868 4, 7, 7, 5, 5, 4, 5, 1,
1973 5, 5, 5, 5, 5, 5, 0, 0, 1869 2, 8, 4, 3, 3, 3, 0, 3,
1974 0, 0, 1, 1, 1, 5, 5, 5, 1870 1, 2, 1, 2, 2, 3, 3, 1,
1975 5, 0, 0, 1, 1, 1, 5, 6, 1871 3, 2, 2, 1, 2, 2, 2, 3,
1976 0, 0, 1, 1, 1, 8, 5, 1, 1872 4, 4, 3, 1, 2, 1, 3, 2,
1977 1, 1, 0, 1, 1, 5, 4, 2, 1873 2, 2, 2, 2, 3, 3, 1, 1,
1978 0, 1, 0, 2, 2, 5, 2, 3, 1874 2, 1, 3, 2, 2, 3, 2, 7,
1979 5, 3, 2, 3, 5, 1, 1, 1, 1875 0, 1, 4, 1, 2, 4, 2, 1,
1980 3, 1, 1, 2, 2, 3, 1, 2, 1876 2, 0, 2, 2, 3, 5, 5, 1,
1981 3, 1, 1877 4, 1, 1, 2, 2, 1, 0, 0,
1878 1, 1, 1, 1, 1, 2, 2, 2,
1879 2, 1, 1, 1, 4, 2, 2, 3,
1880 1, 4, 4, 6, 1, 3, 1, 1,
1881 2, 1, 1, 1, 5, 3, 1, 1,
1882 1, 2, 3, 3, 1, 2, 2, 1,
1883 4, 1, 2, 5, 2, 1, 1, 0,
1884 2, 2, 2, 2, 2, 2, 2, 2,
1885 2, 1, 1, 2, 4, 2, 1, 2,
1886 2, 2, 6, 1, 1, 2, 1, 2,
1887 1, 1, 1, 2, 2, 2, 1, 3,
1888 2, 5, 2, 8, 6, 2, 2, 2,
1889 2, 3, 1, 3, 1, 2, 1, 3,
1890 2, 2, 3, 1, 1, 1, 1, 1,
1891 1, 1, 2, 2, 4, 1, 2, 1,
1892 0, 1, 1, 1, 1, 0, 1, 2,
1893 3, 1, 3, 3, 1, 0, 3, 0,
1894 2, 3, 1, 0, 0, 0, 0, 2,
1895 2, 2, 2, 1, 5, 2, 2, 5,
1896 7, 5, 0, 1, 0, 1, 1, 1,
1897 1, 1, 0, 1, 1, 1, 2, 2,
1898 3, 3, 4, 7, 5, 7, 5, 3,
1899 3, 7, 3, 13, 1, 3, 5, 3,
1900 5, 3, 6, 5, 2, 2, 8, 4,
1901 1, 2, 3, 2, 10, 2, 2, 0,
1902 2, 3, 3, 1, 2, 3, 3, 1,
1903 2, 3, 3, 4, 4, 2, 1, 2,
1904 2, 3, 2, 2, 5, 3, 2, 3,
1905 2, 1, 3, 3, 6, 2, 2, 5,
1906 2, 5, 1, 1, 2, 4, 1, 11,
1907 1, 3, 8, 4, 2, 1, 0, 4,
1908 3, 3, 3, 2, 9, 1, 1, 4,
1909 3, 2, 2, 2, 3, 4, 2, 3,
1910 2, 4, 3, 2, 2, 3, 3, 4,
1911 3, 3, 4, 2, 5, 4, 8, 7,
1912 1, 2, 1, 3, 1, 2, 5, 1,
1913 2, 2, 2, 2, 1, 3, 2, 2,
1914 3, 3, 1, 9, 1, 5, 1, 3,
1915 2, 2, 3, 2, 3, 3, 3, 1,
1916 3, 3, 2, 2, 4, 5, 3, 3,
1917 4, 3, 3, 3, 2, 2, 2, 4,
1918 2, 2, 1, 3, 3, 3, 3, 3,
1919 3, 2, 2, 3, 2, 3, 3, 2,
1920 3, 2, 3, 1, 2, 2, 2, 2,
1921 2, 2, 2, 2, 2, 2, 2, 3,
1922 2, 3, 2, 3, 5, 3, 3, 1,
1923 2, 3, 2, 2, 1, 2, 3, 4,
1924 3, 0, 3, 0, 2, 3, 1, 0,
1925 0, 0, 0, 2, 3, 2, 4, 6,
1926 4, 1, 1, 2, 1, 2, 1, 3,
1927 2, 3, 2, 11, 0, 0, 0, 0,
1928 0, 0, 0, 1, 0, 0, 0, 5,
1929 0, 0, 1, 1, 1, 0, 1, 1,
1930 5, 4, 2, 0, 1, 0, 2, 2,
1931 5, 2, 3, 5, 3, 2, 3, 5,
1932 1, 1, 1, 3, 1, 1, 2, 2,
1933 3, 1, 2, 3, 1, 5, 6, 0,
1934 0, 0, 0, 0, 0, 0, 0, 5,
1935 1, 1, 1, 5, 6, 0, 0, 0,
1936 0, 0, 0, 1, 1, 1, 5, 6,
1937 0, 0, 0, 0, 0, 0, 1, 1,
1938 1, 8, 5, 1, 1, 1, 0, 1,
1939 1, 5, 4, 2, 0, 1, 0, 2,
1940 2, 5, 2, 3, 5, 3, 2, 3,
1941 5, 1, 1, 1, 3, 1, 1, 2,
1942 2, 3, 1, 2, 3, 1,
1982} 1943}
1983 1944
1984var _hcltok_index_offsets []int16 = []int16{ 1945var _hcltok_index_offsets []int16 = []int16{
1985 0, 0, 2, 4, 6, 9, 14, 18, 1946 0, 0, 2, 4, 7, 12, 16, 18,
1986 20, 58, 95, 137, 139, 144, 148, 149, 1947 56, 93, 135, 137, 142, 146, 147, 149,
1987 151, 153, 159, 164, 169, 171, 174, 176, 1948 151, 157, 162, 167, 169, 172, 174, 177,
1988 179, 183, 189, 192, 195, 201, 203, 205, 1949 181, 187, 190, 193, 199, 201, 203, 205,
1989 207, 210, 243, 245, 247, 250, 253, 256, 1950 208, 241, 243, 245, 248, 251, 254, 262,
1990 264, 272, 283, 291, 300, 308, 317, 326, 1951 270, 281, 289, 298, 306, 315, 324, 336,
1991 338, 345, 352, 360, 368, 377, 383, 391, 1952 343, 350, 358, 366, 375, 381, 389, 395,
1992 397, 405, 407, 410, 424, 430, 438, 442, 1953 403, 405, 408, 422, 428, 436, 440, 444,
1993 446, 448, 495, 497, 500, 502, 507, 513, 1954 446, 493, 495, 498, 500, 505, 511, 517,
1994 519, 524, 527, 531, 534, 537, 539, 542, 1955 522, 525, 529, 532, 535, 537, 540, 543,
1995 545, 548, 552, 557, 562, 566, 568, 571, 1956 546, 550, 555, 560, 564, 566, 569, 571,
1996 573, 577, 580, 583, 586, 589, 593, 598, 1957 575, 578, 581, 584, 587, 591, 596, 600,
1997 602, 604, 606, 609, 611, 615, 618, 621, 1958 602, 604, 607, 609, 613, 616, 619, 627,
1998 629, 633, 641, 657, 659, 664, 666, 670, 1959 631, 639, 655, 657, 662, 664, 668, 679,
1999 681, 685, 687, 690, 692, 695, 700, 704, 1960 683, 685, 688, 690, 693, 698, 702, 708,
2000 710, 716, 727, 732, 735, 738, 741, 744, 1961 714, 725, 730, 733, 736, 739, 742, 744,
2001 746, 750, 751, 754, 756, 786, 788, 790, 1962 748, 749, 752, 754, 784, 786, 788, 791,
2002 793, 797, 800, 804, 806, 808, 810, 816, 1963 795, 798, 802, 804, 806, 808, 814, 817,
2003 819, 822, 826, 828, 833, 838, 845, 848, 1964 820, 824, 826, 831, 836, 843, 846, 850,
2004 852, 856, 858, 861, 881, 883, 885, 892, 1965 854, 856, 859, 879, 881, 883, 890, 894,
2005 896, 898, 900, 902, 905, 909, 913, 915, 1966 896, 898, 900, 903, 907, 911, 913, 917,
2006 919, 922, 924, 929, 947, 986, 992, 995, 1967 920, 922, 927, 945, 984, 990, 993, 995,
2007 997, 999, 1001, 1004, 1007, 1010, 1013, 1016, 1968 997, 999, 1002, 1005, 1008, 1011, 1014, 1018,
2008 1020, 1023, 1026, 1029, 1031, 1033, 1036, 1043, 1969 1021, 1024, 1027, 1029, 1031, 1034, 1041, 1044,
2009 1046, 1048, 1051, 1054, 1057, 1065, 1067, 1069, 1970 1046, 1049, 1052, 1055, 1063, 1065, 1067, 1070,
2010 1072, 1074, 1077, 1079, 1081, 1111, 1114, 1117, 1971 1072, 1075, 1077, 1079, 1109, 1112, 1115, 1118,
2011 1120, 1123, 1128, 1132, 1139, 1142, 1151, 1160, 1972 1121, 1126, 1130, 1137, 1140, 1149, 1158, 1161,
2012 1163, 1167, 1170, 1173, 1177, 1179, 1183, 1185, 1973 1165, 1168, 1171, 1175, 1177, 1181, 1183, 1186,
2013 1188, 1190, 1194, 1198, 1202, 1210, 1212, 1214, 1974 1188, 1192, 1196, 1200, 1208, 1210, 1212, 1216,
2014 1218, 1222, 1224, 1237, 1239, 1242, 1245, 1250, 1975 1220, 1222, 1235, 1237, 1240, 1243, 1248, 1250,
2015 1252, 1255, 1257, 1259, 1262, 1267, 1269, 1271, 1976 1253, 1255, 1257, 1260, 1265, 1267, 1269, 1274,
2016 1276, 1278, 1281, 1285, 1305, 1309, 1313, 1315, 1977 1276, 1279, 1283, 1303, 1307, 1311, 1313, 1315,
2017 1317, 1325, 1327, 1334, 1339, 1341, 1345, 1348, 1978 1323, 1325, 1332, 1337, 1339, 1343, 1346, 1349,
2018 1351, 1354, 1358, 1361, 1364, 1368, 1378, 1384, 1979 1352, 1356, 1359, 1362, 1366, 1376, 1382, 1385,
2019 1387, 1390, 1400, 1420, 1426, 1429, 1431, 1435, 1980 1388, 1398, 1418, 1424, 1427, 1429, 1433, 1435,
2020 1437, 1440, 1442, 1446, 1448, 1450, 1454, 1456, 1981 1438, 1440, 1444, 1446, 1448, 1452, 1454, 1458,
2021 1460, 1465, 1471, 1473, 1475, 1478, 1480, 1484, 1982 1463, 1469, 1471, 1473, 1476, 1478, 1482, 1489,
2022 1491, 1494, 1496, 1499, 1503, 1533, 1538, 1540, 1983 1492, 1494, 1497, 1501, 1531, 1536, 1538, 1541,
2023 1543, 1547, 1556, 1561, 1569, 1573, 1581, 1585, 1984 1545, 1554, 1559, 1567, 1571, 1579, 1583, 1591,
2024 1593, 1597, 1608, 1610, 1616, 1619, 1627, 1631, 1985 1595, 1606, 1608, 1614, 1617, 1625, 1629, 1634,
2025 1636, 1641, 1646, 1648, 1651, 1666, 1670, 1672, 1986 1639, 1644, 1646, 1649, 1664, 1668, 1670, 1673,
2026 1675, 1677, 1726, 1729, 1736, 1739, 1741, 1745, 1987 1675, 1724, 1727, 1734, 1737, 1739, 1743, 1747,
2027 1749, 1752, 1756, 1758, 1761, 1763, 1765, 1767, 1988 1750, 1754, 1756, 1759, 1761, 1763, 1765, 1767,
2028 1769, 1773, 1775, 1777, 1780, 1784, 1798, 1801, 1989 1771, 1773, 1775, 1778, 1782, 1796, 1799, 1803,
2029 1805, 1808, 1813, 1824, 1829, 1832, 1862, 1866, 1990 1806, 1811, 1822, 1827, 1830, 1860, 1864, 1867,
2030 1869, 1874, 1876, 1880, 1883, 1886, 1888, 1893, 1991 1872, 1874, 1878, 1881, 1884, 1886, 1891, 1893,
2031 1895, 1901, 1906, 1912, 1914, 1934, 1942, 1945, 1992 1899, 1904, 1910, 1912, 1932, 1940, 1943, 1945,
2032 1947, 1965, 2003, 2005, 2008, 2010, 2015, 2018, 1993 1963, 2001, 2003, 2006, 2008, 2013, 2016, 2045,
2033 2047, 2049, 2051, 2053, 2055, 2058, 2060, 2064, 1994 2047, 2049, 2051, 2053, 2056, 2058, 2062, 2065,
2034 2067, 2069, 2072, 2074, 2076, 2079, 2081, 2083, 1995 2067, 2070, 2072, 2074, 2077, 2079, 2081, 2083,
2035 2085, 2087, 2089, 2092, 2095, 2098, 2111, 2113, 1996 2085, 2087, 2090, 2093, 2096, 2109, 2111, 2115,
2036 2117, 2120, 2122, 2127, 2130, 2144, 2147, 2156, 1997 2118, 2120, 2125, 2128, 2142, 2145, 2154, 2156,
2037 2158, 2163, 2167, 2168, 2170, 2172, 2178, 2183, 1998 2161, 2165, 2166, 2168, 2170, 2176, 2181, 2186,
2038 2188, 2190, 2193, 2195, 2198, 2202, 2208, 2211, 1999 2188, 2191, 2193, 2196, 2200, 2206, 2209, 2212,
2039 2214, 2220, 2222, 2224, 2226, 2229, 2262, 2264, 2000 2218, 2220, 2222, 2224, 2227, 2260, 2262, 2264,
2040 2266, 2269, 2272, 2275, 2283, 2291, 2302, 2310, 2001 2267, 2270, 2273, 2281, 2289, 2300, 2308, 2317,
2041 2319, 2327, 2336, 2345, 2357, 2364, 2371, 2379, 2002 2325, 2334, 2343, 2355, 2362, 2369, 2377, 2385,
2042 2387, 2396, 2402, 2410, 2416, 2424, 2426, 2429, 2003 2394, 2400, 2408, 2414, 2422, 2424, 2427, 2441,
2043 2443, 2449, 2457, 2461, 2465, 2467, 2514, 2516, 2004 2447, 2455, 2459, 2463, 2465, 2512, 2514, 2517,
2044 2519, 2521, 2526, 2532, 2538, 2543, 2546, 2550, 2005 2519, 2524, 2530, 2536, 2541, 2544, 2548, 2551,
2045 2553, 2556, 2558, 2561, 2564, 2567, 2571, 2576, 2006 2554, 2556, 2559, 2562, 2565, 2569, 2574, 2579,
2046 2581, 2585, 2587, 2590, 2592, 2596, 2599, 2602, 2007 2583, 2585, 2588, 2590, 2594, 2597, 2600, 2603,
2047 2605, 2608, 2612, 2617, 2621, 2623, 2625, 2628, 2008 2606, 2610, 2615, 2619, 2621, 2623, 2626, 2628,
2048 2630, 2634, 2637, 2640, 2648, 2652, 2660, 2676, 2009 2632, 2635, 2638, 2646, 2650, 2658, 2674, 2676,
2049 2678, 2683, 2685, 2689, 2700, 2704, 2706, 2709, 2010 2681, 2683, 2687, 2698, 2702, 2704, 2707, 2709,
2050 2711, 2714, 2719, 2723, 2729, 2735, 2746, 2751, 2011 2712, 2717, 2721, 2727, 2733, 2744, 2749, 2752,
2051 2754, 2757, 2760, 2763, 2765, 2769, 2770, 2773, 2012 2755, 2758, 2761, 2763, 2767, 2768, 2771, 2773,
2052 2775, 2805, 2807, 2809, 2812, 2816, 2819, 2823, 2013 2803, 2805, 2807, 2810, 2814, 2817, 2821, 2823,
2053 2825, 2827, 2829, 2835, 2838, 2841, 2845, 2847, 2014 2825, 2827, 2833, 2836, 2839, 2843, 2845, 2850,
2054 2852, 2857, 2864, 2867, 2871, 2875, 2877, 2880, 2015 2855, 2862, 2865, 2869, 2873, 2875, 2878, 2898,
2055 2900, 2902, 2904, 2911, 2915, 2917, 2919, 2921, 2016 2900, 2902, 2909, 2913, 2915, 2917, 2919, 2922,
2056 2924, 2928, 2932, 2934, 2938, 2941, 2943, 2948, 2017 2926, 2930, 2932, 2936, 2939, 2941, 2946, 2964,
2057 2966, 3005, 3011, 3014, 3016, 3018, 3020, 3023, 2018 3003, 3009, 3012, 3014, 3016, 3018, 3021, 3024,
2058 3026, 3029, 3032, 3035, 3039, 3042, 3045, 3048, 2019 3027, 3030, 3033, 3037, 3040, 3043, 3046, 3048,
2059 3050, 3052, 3055, 3062, 3065, 3067, 3070, 3073, 2020 3050, 3053, 3060, 3063, 3065, 3068, 3071, 3074,
2060 3076, 3084, 3086, 3088, 3091, 3093, 3096, 3098, 2021 3082, 3084, 3086, 3089, 3091, 3094, 3096, 3098,
2061 3100, 3130, 3133, 3136, 3139, 3142, 3147, 3151, 2022 3128, 3131, 3134, 3137, 3140, 3145, 3149, 3156,
2062 3158, 3161, 3170, 3179, 3182, 3186, 3189, 3192, 2023 3159, 3168, 3177, 3180, 3184, 3187, 3190, 3194,
2063 3196, 3198, 3202, 3204, 3207, 3209, 3213, 3217, 2024 3196, 3200, 3202, 3205, 3207, 3211, 3215, 3219,
2064 3221, 3229, 3231, 3233, 3237, 3241, 3243, 3256, 2025 3227, 3229, 3231, 3235, 3239, 3241, 3254, 3256,
2065 3258, 3261, 3264, 3269, 3271, 3274, 3276, 3278, 2026 3259, 3262, 3267, 3269, 3272, 3274, 3276, 3279,
2066 3281, 3286, 3288, 3290, 3295, 3297, 3300, 3304, 2027 3284, 3286, 3288, 3293, 3295, 3298, 3302, 3322,
2067 3324, 3328, 3332, 3334, 3336, 3344, 3346, 3353, 2028 3326, 3330, 3332, 3334, 3342, 3344, 3351, 3356,
2068 3358, 3360, 3364, 3367, 3370, 3373, 3377, 3380, 2029 3358, 3362, 3365, 3368, 3371, 3375, 3378, 3381,
2069 3383, 3387, 3397, 3403, 3406, 3409, 3419, 3439, 2030 3385, 3395, 3401, 3404, 3407, 3417, 3437, 3443,
2070 3445, 3448, 3450, 3454, 3456, 3459, 3461, 3465, 2031 3446, 3448, 3452, 3454, 3457, 3459, 3463, 3465,
2071 3467, 3469, 3473, 3475, 3477, 3483, 3486, 3491, 2032 3467, 3471, 3473, 3475, 3481, 3484, 3489, 3494,
2072 3496, 3502, 3512, 3520, 3532, 3539, 3549, 3555, 2033 3500, 3510, 3518, 3530, 3537, 3547, 3553, 3565,
2073 3567, 3573, 3591, 3594, 3602, 3608, 3618, 3625, 2034 3571, 3589, 3592, 3600, 3606, 3616, 3623, 3630,
2074 3632, 3640, 3648, 3651, 3656, 3676, 3682, 3685, 2035 3638, 3646, 3649, 3654, 3674, 3680, 3683, 3687,
2075 3689, 3693, 3697, 3709, 3712, 3717, 3718, 3724, 2036 3691, 3695, 3707, 3710, 3715, 3716, 3722, 3729,
2076 3731, 3737, 3740, 3743, 3747, 3751, 3754, 3757, 2037 3735, 3738, 3741, 3745, 3749, 3752, 3755, 3760,
2077 3762, 3766, 3772, 3778, 3781, 3785, 3788, 3791, 2038 3764, 3770, 3776, 3779, 3783, 3786, 3789, 3794,
2078 3796, 3799, 3802, 3808, 3812, 3815, 3819, 3822, 2039 3797, 3800, 3806, 3810, 3813, 3817, 3820, 3823,
2079 3825, 3829, 3833, 3840, 3843, 3846, 3852, 3855, 2040 3827, 3831, 3838, 3841, 3844, 3850, 3853, 3860,
2080 3862, 3864, 3866, 3869, 3878, 3883, 3897, 3901, 2041 3862, 3864, 3867, 3876, 3881, 3895, 3899, 3903,
2081 3905, 3920, 3926, 3929, 3932, 3934, 3939, 3945, 2042 3918, 3924, 3927, 3930, 3932, 3937, 3943, 3947,
2082 3949, 3957, 3963, 3973, 3976, 3979, 3984, 3988, 2043 3955, 3961, 3971, 3974, 3977, 3982, 3986, 3989,
2083 3991, 3994, 3997, 4001, 4006, 4010, 4014, 4017, 2044 3992, 3995, 3999, 4004, 4008, 4012, 4015, 4020,
2084 4022, 4027, 4030, 4036, 4040, 4046, 4051, 4055, 2045 4025, 4028, 4034, 4038, 4044, 4049, 4053, 4057,
2085 4059, 4067, 4070, 4078, 4084, 4094, 4105, 4108, 2046 4065, 4068, 4076, 4082, 4092, 4103, 4106, 4109,
2086 4111, 4113, 4117, 4119, 4122, 4133, 4137, 4140, 2047 4111, 4115, 4117, 4120, 4131, 4135, 4138, 4141,
2087 4143, 4146, 4149, 4151, 4155, 4159, 4162, 4166, 2048 4144, 4147, 4149, 4153, 4157, 4160, 4164, 4169,
2088 4171, 4174, 4184, 4186, 4227, 4233, 4237, 4240, 2049 4172, 4182, 4184, 4225, 4231, 4235, 4238, 4241,
2089 4243, 4247, 4250, 4254, 4258, 4263, 4265, 4269, 2050 4245, 4248, 4252, 4256, 4261, 4263, 4267, 4271,
2090 4273, 4276, 4279, 4284, 4293, 4297, 4302, 4307, 2051 4274, 4277, 4282, 4291, 4295, 4300, 4305, 4309,
2091 4311, 4318, 4322, 4325, 4329, 4332, 4337, 4340, 2052 4316, 4320, 4323, 4327, 4330, 4335, 4338, 4341,
2092 4343, 4373, 4377, 4381, 4385, 4389, 4394, 4398, 2053 4371, 4375, 4379, 4383, 4387, 4392, 4396, 4402,
2093 4404, 4408, 4416, 4419, 4424, 4428, 4431, 4436, 2054 4406, 4414, 4417, 4422, 4426, 4429, 4434, 4437,
2094 4439, 4443, 4446, 4449, 4452, 4455, 4458, 4462, 2055 4441, 4444, 4447, 4450, 4453, 4456, 4460, 4464,
2095 4466, 4469, 4479, 4482, 4485, 4490, 4496, 4499, 2056 4467, 4477, 4480, 4483, 4488, 4494, 4497, 4512,
2096 4514, 4517, 4521, 4527, 4531, 4535, 4538, 4542, 2057 4515, 4519, 4525, 4529, 4533, 4536, 4540, 4547,
2097 4549, 4552, 4555, 4561, 4564, 4568, 4573, 4589, 2058 4550, 4553, 4559, 4562, 4566, 4571, 4587, 4589,
2098 4591, 4599, 4601, 4609, 4615, 4617, 4621, 4624, 2059 4597, 4599, 4607, 4613, 4615, 4619, 4622, 4625,
2099 4627, 4630, 4634, 4645, 4648, 4660, 4684, 4692, 2060 4628, 4632, 4643, 4646, 4658, 4682, 4690, 4692,
2100 4694, 4698, 4701, 4706, 4709, 4711, 4716, 4719, 2061 4696, 4699, 4704, 4707, 4709, 4714, 4717, 4723,
2101 4725, 4728, 4730, 4732, 4734, 4736, 4738, 4740, 2062 4726, 4734, 4736, 4738, 4740, 4742, 4744, 4746,
2102 4742, 4744, 4746, 4748, 4750, 4752, 4754, 4756, 2063 4748, 4750, 4752, 4755, 4758, 4760, 4762, 4764,
2103 4758, 4760, 4762, 4764, 4766, 4768, 4770, 4772, 2064 4766, 4769, 4772, 4777, 4781, 4782, 4784, 4786,
2104 4777, 4781, 4782, 4784, 4786, 4792, 4797, 4802, 2065 4792, 4797, 4802, 4804, 4807, 4809, 4812, 4816,
2105 4804, 4807, 4809, 4812, 4816, 4822, 4825, 4828, 2066 4822, 4825, 4828, 4834, 4836, 4838, 4840, 4843,
2106 4834, 4836, 4838, 4840, 4843, 4876, 4878, 4880, 2067 4876, 4878, 4880, 4883, 4886, 4889, 4897, 4905,
2107 4883, 4886, 4889, 4897, 4905, 4916, 4924, 4933, 2068 4916, 4924, 4933, 4941, 4950, 4959, 4971, 4978,
2108 4941, 4950, 4959, 4971, 4978, 4985, 4993, 5001, 2069 4985, 4993, 5001, 5010, 5016, 5024, 5030, 5038,
2109 5010, 5016, 5024, 5030, 5038, 5040, 5043, 5057, 2070 5040, 5043, 5057, 5063, 5071, 5075, 5079, 5081,
2110 5063, 5071, 5075, 5079, 5081, 5128, 5130, 5133, 2071 5128, 5130, 5133, 5135, 5140, 5146, 5152, 5157,
2111 5135, 5140, 5146, 5152, 5157, 5160, 5164, 5167, 2072 5160, 5164, 5167, 5170, 5172, 5175, 5178, 5181,
2112 5170, 5172, 5175, 5178, 5181, 5185, 5190, 5195, 2073 5185, 5190, 5195, 5199, 5201, 5204, 5206, 5210,
2113 5199, 5201, 5204, 5206, 5210, 5213, 5216, 5219, 2074 5213, 5216, 5219, 5222, 5226, 5231, 5235, 5237,
2114 5222, 5226, 5231, 5235, 5237, 5239, 5242, 5244, 2075 5239, 5242, 5244, 5248, 5251, 5254, 5262, 5266,
2115 5248, 5251, 5254, 5262, 5266, 5274, 5290, 5292, 2076 5274, 5290, 5292, 5297, 5299, 5303, 5314, 5318,
2116 5297, 5299, 5303, 5314, 5318, 5320, 5323, 5325, 2077 5320, 5323, 5325, 5328, 5333, 5337, 5343, 5349,
2117 5328, 5333, 5337, 5343, 5349, 5360, 5365, 5368, 2078 5360, 5365, 5368, 5371, 5374, 5377, 5379, 5383,
2118 5371, 5374, 5377, 5379, 5383, 5384, 5387, 5389, 2079 5384, 5387, 5389, 5419, 5421, 5423, 5426, 5430,
2119 5419, 5421, 5423, 5426, 5430, 5433, 5437, 5439, 2080 5433, 5437, 5439, 5441, 5443, 5449, 5452, 5455,
2120 5441, 5443, 5449, 5452, 5455, 5459, 5461, 5466, 2081 5459, 5461, 5466, 5471, 5478, 5481, 5485, 5489,
2121 5471, 5478, 5481, 5485, 5489, 5491, 5494, 5514, 2082 5491, 5494, 5514, 5516, 5518, 5525, 5529, 5531,
2122 5516, 5518, 5525, 5529, 5531, 5533, 5535, 5538, 2083 5533, 5535, 5538, 5542, 5546, 5548, 5552, 5555,
2123 5542, 5546, 5548, 5552, 5555, 5557, 5562, 5580, 2084 5557, 5562, 5580, 5619, 5625, 5628, 5630, 5632,
2124 5619, 5625, 5628, 5630, 5632, 5634, 5637, 5640, 2085 5634, 5637, 5640, 5643, 5646, 5649, 5653, 5656,
2125 5643, 5646, 5649, 5653, 5656, 5659, 5662, 5664, 2086 5659, 5662, 5664, 5666, 5669, 5676, 5679, 5681,
2126 5666, 5669, 5676, 5679, 5681, 5684, 5687, 5690, 2087 5684, 5687, 5690, 5698, 5700, 5702, 5705, 5707,
2127 5698, 5700, 5702, 5705, 5707, 5710, 5712, 5714, 2088 5710, 5712, 5714, 5744, 5747, 5750, 5753, 5756,
2128 5744, 5747, 5750, 5753, 5756, 5761, 5765, 5772, 2089 5761, 5765, 5772, 5775, 5784, 5793, 5796, 5800,
2129 5775, 5784, 5793, 5796, 5800, 5803, 5806, 5810, 2090 5803, 5806, 5810, 5812, 5816, 5818, 5821, 5823,
2130 5812, 5816, 5818, 5821, 5823, 5827, 5831, 5835, 2091 5827, 5831, 5835, 5843, 5845, 5847, 5851, 5855,
2131 5843, 5845, 5847, 5851, 5855, 5857, 5870, 5872, 2092 5857, 5870, 5872, 5875, 5878, 5883, 5885, 5888,
2132 5875, 5878, 5883, 5885, 5888, 5890, 5892, 5895, 2093 5890, 5892, 5895, 5900, 5902, 5904, 5909, 5911,
2133 5900, 5902, 5904, 5909, 5911, 5914, 5918, 5938, 2094 5914, 5918, 5938, 5942, 5946, 5948, 5950, 5958,
2134 5942, 5946, 5948, 5950, 5958, 5960, 5967, 5972, 2095 5960, 5967, 5972, 5974, 5978, 5981, 5984, 5987,
2135 5974, 5978, 5981, 5984, 5987, 5991, 5994, 5997, 2096 5991, 5994, 5997, 6001, 6011, 6017, 6020, 6023,
2136 6001, 6011, 6017, 6020, 6023, 6033, 6053, 6059, 2097 6033, 6053, 6059, 6062, 6064, 6068, 6070, 6073,
2137 6062, 6064, 6068, 6070, 6073, 6075, 6079, 6081, 2098 6075, 6079, 6081, 6083, 6087, 6089, 6091, 6097,
2138 6083, 6087, 6089, 6091, 6097, 6100, 6105, 6110, 2099 6100, 6105, 6110, 6116, 6126, 6134, 6146, 6153,
2139 6116, 6126, 6134, 6146, 6153, 6163, 6169, 6181, 2100 6163, 6169, 6181, 6187, 6205, 6208, 6216, 6222,
2140 6187, 6205, 6208, 6216, 6222, 6232, 6239, 6246, 2101 6232, 6239, 6246, 6254, 6262, 6265, 6270, 6290,
2141 6254, 6262, 6265, 6270, 6290, 6296, 6299, 6303, 2102 6296, 6299, 6303, 6307, 6311, 6323, 6326, 6331,
2142 6307, 6311, 6323, 6326, 6331, 6332, 6338, 6345, 2103 6332, 6338, 6345, 6351, 6354, 6357, 6361, 6365,
2143 6351, 6354, 6357, 6361, 6365, 6368, 6371, 6376, 2104 6368, 6371, 6376, 6380, 6386, 6392, 6395, 6399,
2144 6380, 6386, 6392, 6395, 6399, 6402, 6405, 6410, 2105 6402, 6405, 6410, 6413, 6416, 6422, 6426, 6429,
2145 6413, 6416, 6422, 6426, 6429, 6433, 6436, 6439, 2106 6433, 6436, 6439, 6443, 6447, 6454, 6457, 6460,
2146 6443, 6447, 6454, 6457, 6460, 6466, 6469, 6476, 2107 6466, 6469, 6476, 6478, 6480, 6483, 6492, 6497,
2147 6478, 6480, 6483, 6492, 6497, 6511, 6515, 6519, 2108 6511, 6515, 6519, 6534, 6540, 6543, 6546, 6548,
2148 6534, 6540, 6543, 6546, 6548, 6553, 6559, 6563, 2109 6553, 6559, 6563, 6571, 6577, 6587, 6590, 6593,
2149 6571, 6577, 6587, 6590, 6593, 6598, 6602, 6605, 2110 6598, 6602, 6605, 6608, 6611, 6615, 6620, 6624,
2150 6608, 6611, 6615, 6620, 6624, 6628, 6631, 6636, 2111 6628, 6631, 6636, 6641, 6644, 6650, 6654, 6660,
2151 6641, 6644, 6650, 6654, 6660, 6665, 6669, 6673, 2112 6665, 6669, 6673, 6681, 6684, 6692, 6698, 6708,
2152 6681, 6684, 6692, 6698, 6708, 6719, 6722, 6725, 2113 6719, 6722, 6725, 6727, 6731, 6733, 6736, 6747,
2153 6727, 6731, 6733, 6736, 6747, 6751, 6754, 6757, 2114 6751, 6754, 6757, 6760, 6763, 6765, 6769, 6773,
2154 6760, 6763, 6765, 6769, 6773, 6776, 6780, 6785, 2115 6776, 6780, 6785, 6788, 6798, 6800, 6841, 6847,
2155 6788, 6798, 6800, 6841, 6847, 6851, 6854, 6857, 2116 6851, 6854, 6857, 6861, 6864, 6868, 6872, 6877,
2156 6861, 6864, 6868, 6872, 6877, 6879, 6883, 6887, 2117 6879, 6883, 6887, 6890, 6893, 6898, 6907, 6911,
2157 6890, 6893, 6898, 6907, 6911, 6916, 6921, 6925, 2118 6916, 6921, 6925, 6932, 6936, 6939, 6943, 6946,
2158 6932, 6936, 6939, 6943, 6946, 6951, 6954, 6957, 2119 6951, 6954, 6957, 6987, 6991, 6995, 6999, 7003,
2159 6987, 6991, 6995, 6999, 7003, 7008, 7012, 7018, 2120 7008, 7012, 7018, 7022, 7030, 7033, 7038, 7042,
2160 7022, 7030, 7033, 7038, 7042, 7045, 7050, 7053, 2121 7045, 7050, 7053, 7057, 7060, 7063, 7066, 7069,
2161 7057, 7060, 7063, 7066, 7069, 7072, 7076, 7080, 2122 7072, 7076, 7080, 7083, 7093, 7096, 7099, 7104,
2162 7083, 7093, 7096, 7099, 7104, 7110, 7113, 7128, 2123 7110, 7113, 7128, 7131, 7135, 7141, 7145, 7149,
2163 7131, 7135, 7141, 7145, 7149, 7152, 7156, 7163, 2124 7152, 7156, 7163, 7166, 7169, 7175, 7178, 7182,
2164 7166, 7169, 7175, 7178, 7182, 7187, 7203, 7205, 2125 7187, 7203, 7205, 7213, 7215, 7223, 7229, 7231,
2165 7213, 7215, 7223, 7229, 7231, 7235, 7238, 7241, 2126 7235, 7238, 7241, 7244, 7248, 7259, 7262, 7274,
2166 7244, 7248, 7259, 7262, 7274, 7298, 7306, 7308, 2127 7298, 7306, 7308, 7312, 7315, 7320, 7323, 7325,
2167 7312, 7315, 7320, 7323, 7325, 7330, 7333, 7339, 2128 7330, 7333, 7339, 7342, 7407, 7410, 7412, 7414,
2168 7342, 7408, 7411, 7413, 7415, 7417, 7419, 7421, 2129 7416, 7418, 7420, 7423, 7428, 7431, 7434, 7436,
2169 7424, 7426, 7431, 7434, 7437, 7439, 7479, 7481, 2130 7476, 7478, 7480, 7482, 7487, 7491, 7492, 7494,
2170 7483, 7485, 7490, 7494, 7495, 7497, 7499, 7506, 2131 7496, 7503, 7510, 7517, 7519, 7521, 7523, 7526,
2171 7513, 7520, 7522, 7524, 7526, 7529, 7532, 7538, 2132 7529, 7535, 7538, 7543, 7550, 7555, 7558, 7562,
2172 7541, 7546, 7553, 7558, 7561, 7565, 7572, 7604, 2133 7569, 7601, 7650, 7665, 7678, 7683, 7685, 7689,
2173 7653, 7668, 7681, 7686, 7688, 7692, 7723, 7729, 2134 7720, 7726, 7728, 7749, 7769, 7771, 7783, 7794,
2174 7731, 7752, 7772, 7774, 7786, 7798, 7810, 7822, 2135 7797, 7800, 7801, 7803, 7805, 7807, 7810, 7812,
2175 7833, 7841, 7854, 7867, 7880, 7892, 7900, 7913, 2136 7820, 7822, 7824, 7826, 7836, 7845, 7848, 7852,
2176 7925, 7937, 7949, 7961, 7973, 7985, 7997, 7999, 2137 7856, 7859, 7861, 7863, 7865, 7867, 7869, 7879,
2177 8001, 8003, 8005, 8007, 8009, 8011, 8021, 8031, 2138 7888, 7891, 7895, 7899, 7902, 7904, 7906, 7908,
2178 8041, 8051, 8053, 8055, 8057, 8059, 8061, 8071, 2139 7910, 7912, 7954, 7994, 7996, 8001, 8005, 8006,
2179 8080, 8082, 8084, 8086, 8088, 8090, 8132, 8172, 2140 8008, 8010, 8017, 8024, 8031, 8033, 8035, 8037,
2180 8174, 8179, 8183, 8184, 8186, 8188, 8195, 8202, 2141 8040, 8043, 8049, 8052, 8057, 8064, 8069, 8072,
2181 8209, 8211, 8213, 8215, 8218, 8221, 8227, 8230, 2142 8076, 8083, 8115, 8164, 8179, 8192, 8197, 8199,
2182 8235, 8242, 8247, 8250, 8254, 8261, 8293, 8342, 2143 8203, 8234, 8240, 8242, 8263, 8283,
2183 8357, 8370, 8375, 8377, 8381, 8412, 8418, 8420,
2184 8441, 8461,
2185} 2144}
2186 2145
2187var _hcltok_indicies []int16 = []int16{ 2146var _hcltok_indicies []int16 = []int16{
2188 2, 1, 4, 3, 6, 5, 6, 7, 2147 1, 0, 3, 2, 3, 4, 2, 6,
2189 5, 9, 11, 11, 10, 8, 12, 12, 2148 8, 8, 7, 5, 9, 9, 7, 5,
2190 10, 8, 10, 8, 13, 14, 15, 16, 2149 7, 5, 10, 11, 12, 13, 15, 16,
2191 18, 19, 20, 21, 22, 23, 24, 25, 2150 17, 18, 19, 20, 21, 22, 23, 24,
2192 26, 27, 28, 29, 30, 31, 32, 33, 2151 25, 26, 27, 28, 29, 30, 31, 32,
2193 34, 35, 36, 37, 38, 39, 40, 42, 2152 33, 34, 35, 36, 37, 39, 40, 41,
2194 43, 44, 45, 46, 14, 14, 17, 17, 2153 42, 43, 11, 11, 14, 14, 38, 0,
2195 41, 3, 14, 15, 16, 18, 19, 20, 2154 11, 12, 13, 15, 16, 17, 18, 19,
2196 21, 22, 23, 24, 25, 26, 27, 28, 2155 20, 21, 22, 23, 24, 25, 26, 27,
2197 29, 30, 31, 32, 33, 34, 35, 36, 2156 28, 29, 30, 31, 32, 33, 34, 35,
2198 37, 38, 39, 40, 42, 43, 44, 45, 2157 36, 37, 39, 40, 41, 42, 43, 11,
2199 46, 14, 14, 17, 17, 41, 3, 47, 2158 11, 14, 14, 38, 0, 44, 45, 11,
2200 48, 14, 14, 49, 16, 18, 19, 20, 2159 11, 46, 13, 15, 16, 17, 16, 47,
2201 19, 50, 51, 23, 52, 25, 26, 53, 2160 48, 20, 49, 22, 23, 50, 51, 52,
2202 54, 55, 56, 57, 58, 59, 60, 61, 2161 53, 54, 55, 56, 57, 58, 59, 60,
2203 62, 63, 64, 65, 40, 42, 66, 44, 2162 61, 62, 37, 39, 63, 41, 64, 65,
2204 67, 68, 69, 14, 14, 14, 17, 41, 2163 66, 11, 11, 11, 14, 38, 0, 44,
2205 3, 47, 3, 14, 14, 14, 14, 3, 2164 0, 11, 11, 11, 11, 0, 11, 11,
2206 14, 14, 14, 3, 14, 3, 14, 3, 2165 11, 0, 11, 0, 11, 11, 0, 0,
2207 14, 3, 3, 3, 3, 3, 14, 3, 2166 0, 0, 0, 0, 11, 0, 0, 0,
2208 3, 3, 3, 14, 14, 14, 14, 14, 2167 0, 11, 11, 11, 11, 11, 0, 0,
2209 3, 3, 14, 3, 3, 14, 3, 14, 2168 11, 0, 0, 11, 0, 11, 0, 0,
2210 3, 3, 14, 3, 3, 3, 14, 14, 2169 11, 0, 0, 0, 11, 11, 11, 11,
2211 14, 14, 14, 14, 3, 14, 14, 3, 2170 11, 11, 0, 11, 11, 0, 11, 11,
2212 14, 14, 3, 3, 3, 3, 3, 3, 2171 0, 0, 0, 0, 0, 0, 11, 11,
2213 14, 14, 3, 3, 14, 3, 14, 14, 2172 0, 0, 11, 0, 11, 11, 11, 0,
2214 14, 3, 70, 71, 72, 73, 17, 74, 2173 67, 68, 69, 70, 14, 71, 72, 73,
2215 75, 76, 77, 78, 79, 80, 81, 82, 2174 74, 75, 76, 77, 78, 79, 80, 81,
2216 83, 84, 85, 86, 87, 88, 89, 90, 2175 82, 83, 84, 85, 86, 87, 88, 89,
2217 91, 92, 93, 94, 95, 96, 97, 98, 2176 90, 91, 92, 93, 94, 95, 96, 97,
2218 99, 100, 3, 14, 3, 14, 3, 14, 2177 0, 11, 0, 11, 0, 11, 11, 0,
2219 14, 3, 14, 14, 3, 3, 3, 14, 2178 11, 11, 0, 0, 0, 11, 0, 0,
2220 3, 3, 3, 3, 3, 3, 3, 14, 2179 0, 0, 0, 0, 0, 11, 0, 0,
2221 3, 3, 3, 3, 3, 3, 3, 14, 2180 0, 0, 0, 0, 0, 11, 11, 11,
2222 14, 14, 14, 14, 14, 14, 14, 14, 2181 11, 11, 11, 11, 11, 11, 11, 11,
2223 14, 14, 3, 3, 3, 3, 3, 3, 2182 0, 0, 0, 0, 0, 0, 0, 0,
2224 3, 3, 14, 14, 14, 14, 14, 14, 2183 11, 11, 11, 11, 11, 11, 11, 11,
2225 14, 14, 14, 3, 3, 3, 3, 3, 2184 11, 0, 0, 0, 0, 0, 0, 0,
2226 3, 3, 3, 14, 14, 14, 14, 14, 2185 0, 11, 11, 11, 11, 11, 11, 11,
2227 14, 14, 14, 14, 3, 14, 14, 14, 2186 11, 11, 0, 11, 11, 11, 11, 11,
2228 14, 14, 14, 14, 14, 3, 14, 14, 2187 11, 11, 11, 0, 11, 11, 11, 11,
2229 14, 14, 14, 14, 14, 14, 14, 14, 2188 11, 11, 11, 11, 11, 11, 11, 0,
2230 14, 3, 14, 14, 14, 14, 14, 14, 2189 11, 11, 11, 11, 11, 11, 0, 11,
2231 3, 14, 14, 14, 14, 14, 14, 3, 2190 11, 11, 11, 11, 11, 0, 0, 0,
2232 3, 3, 3, 3, 3, 3, 3, 14, 2191 0, 0, 0, 0, 0, 11, 11, 11,
2233 14, 14, 14, 14, 14, 14, 14, 3, 2192 11, 11, 11, 11, 11, 0, 11, 11,
2234 14, 14, 14, 14, 14, 14, 14, 14, 2193 11, 11, 11, 11, 11, 11, 0, 11,
2235 3, 14, 14, 14, 14, 14, 3, 3, 2194 11, 11, 11, 11, 0, 0, 0, 0,
2236 3, 3, 3, 3, 3, 3, 14, 14, 2195 0, 0, 0, 0, 11, 11, 11, 11,
2237 14, 14, 14, 14, 3, 14, 14, 14, 2196 11, 11, 0, 11, 11, 11, 11, 11,
2238 14, 14, 14, 14, 3, 14, 3, 14, 2197 11, 11, 0, 11, 0, 11, 11, 0,
2239 14, 3, 14, 14, 14, 14, 14, 14, 2198 11, 11, 11, 11, 11, 11, 11, 11,
2240 14, 14, 14, 14, 14, 14, 14, 3, 2199 11, 11, 11, 11, 11, 0, 11, 11,
2241 14, 14, 14, 14, 14, 3, 14, 14, 2200 11, 11, 11, 0, 11, 11, 11, 11,
2242 14, 14, 14, 14, 14, 3, 14, 14, 2201 11, 11, 11, 0, 11, 11, 11, 0,
2243 14, 3, 14, 14, 14, 3, 14, 3, 2202 11, 11, 11, 0, 11, 0, 98, 99,
2244 101, 102, 103, 104, 105, 106, 107, 108, 2203 100, 101, 102, 103, 104, 105, 106, 107,
2245 109, 110, 111, 112, 113, 114, 115, 116, 2204 108, 109, 110, 111, 112, 113, 114, 16,
2246 117, 19, 118, 119, 120, 121, 122, 123, 2205 115, 116, 117, 118, 119, 120, 121, 122,
2247 124, 125, 126, 127, 128, 129, 130, 131, 2206 123, 124, 125, 126, 127, 128, 129, 130,
2248 132, 133, 134, 135, 17, 18, 136, 137, 2207 131, 132, 14, 15, 133, 134, 135, 136,
2249 138, 139, 140, 17, 19, 17, 3, 14, 2208 137, 14, 16, 14, 0, 11, 0, 11,
2250 3, 14, 14, 3, 3, 14, 3, 3, 2209 11, 0, 0, 11, 0, 0, 0, 0,
2251 3, 3, 14, 3, 3, 3, 3, 3, 2210 11, 0, 0, 0, 0, 0, 11, 0,
2252 14, 3, 3, 3, 3, 3, 14, 14, 2211 0, 0, 0, 0, 11, 11, 11, 11,
2253 14, 14, 14, 3, 3, 3, 14, 3, 2212 11, 0, 0, 0, 11, 0, 0, 0,
2254 3, 3, 14, 14, 14, 3, 3, 3, 2213 11, 11, 11, 0, 0, 0, 11, 11,
2255 14, 14, 3, 3, 3, 14, 14, 14, 2214 0, 0, 0, 11, 11, 11, 0, 0,
2256 3, 3, 3, 14, 14, 14, 14, 3, 2215 0, 11, 11, 11, 11, 0, 11, 11,
2257 14, 14, 14, 14, 3, 3, 3, 3, 2216 11, 11, 0, 0, 0, 0, 0, 11,
2258 3, 14, 14, 14, 14, 3, 3, 14, 2217 11, 11, 11, 0, 0, 11, 11, 11,
2259 14, 14, 3, 3, 14, 14, 14, 14, 2218 0, 0, 11, 11, 11, 11, 0, 11,
2260 3, 14, 14, 3, 14, 14, 3, 3, 2219 11, 0, 11, 11, 0, 0, 0, 11,
2261 3, 14, 14, 14, 3, 3, 3, 3, 2220 11, 11, 0, 0, 0, 0, 11, 11,
2262 14, 14, 14, 14, 14, 3, 3, 3, 2221 11, 11, 11, 0, 0, 0, 0, 11,
2263 3, 14, 3, 14, 14, 3, 14, 14, 2222 0, 11, 11, 0, 11, 11, 0, 11,
2264 3, 14, 3, 14, 14, 14, 3, 14, 2223 0, 11, 11, 11, 0, 11, 11, 0,
2265 14, 3, 3, 3, 14, 3, 3, 3, 2224 0, 0, 11, 0, 0, 0, 0, 0,
2266 3, 3, 3, 3, 14, 14, 14, 14, 2225 0, 0, 11, 11, 11, 11, 0, 11,
2267 3, 14, 14, 14, 14, 14, 14, 14, 2226 11, 11, 11, 11, 11, 11, 0, 138,
2268 3, 141, 142, 143, 144, 145, 146, 147, 2227 139, 140, 141, 142, 143, 144, 145, 146,
2269 148, 149, 17, 150, 151, 152, 153, 154, 2228 14, 147, 148, 149, 150, 151, 0, 11,
2270 3, 14, 3, 3, 3, 3, 3, 14, 2229 0, 0, 0, 0, 0, 11, 11, 0,
2271 14, 3, 14, 14, 14, 3, 14, 14, 2230 11, 11, 11, 0, 11, 11, 11, 11,
2272 14, 14, 14, 14, 14, 14, 14, 14, 2231 11, 11, 11, 11, 11, 11, 0, 11,
2273 3, 14, 14, 14, 3, 3, 14, 14, 2232 11, 11, 0, 0, 11, 11, 11, 0,
2274 14, 3, 3, 14, 3, 3, 14, 14, 2233 0, 11, 0, 0, 11, 11, 11, 11,
2275 14, 14, 14, 3, 3, 3, 3, 14, 2234 11, 0, 0, 0, 0, 11, 11, 11,
2276 14, 14, 14, 14, 14, 3, 14, 14, 2235 11, 11, 11, 0, 11, 11, 11, 11,
2277 14, 14, 14, 3, 155, 112, 156, 157, 2236 11, 0, 152, 109, 153, 154, 155, 14,
2278 158, 17, 159, 160, 19, 17, 3, 14, 2237 156, 157, 16, 14, 0, 11, 11, 11,
2279 14, 14, 14, 3, 3, 3, 14, 3, 2238 11, 0, 0, 0, 11, 0, 0, 11,
2280 3, 14, 14, 14, 3, 3, 3, 14, 2239 11, 11, 0, 0, 0, 11, 11, 0,
2281 14, 3, 122, 3, 19, 17, 17, 161, 2240 119, 0, 16, 14, 14, 158, 0, 14,
2282 3, 17, 3, 14, 19, 162, 163, 19, 2241 0, 11, 16, 159, 160, 16, 161, 162,
2283 164, 165, 19, 60, 166, 167, 168, 169, 2242 16, 57, 163, 164, 165, 166, 167, 16,
2284 170, 19, 171, 172, 173, 19, 174, 175, 2243 168, 169, 170, 16, 171, 172, 173, 15,
2285 176, 18, 177, 178, 179, 18, 180, 19, 2244 174, 175, 176, 15, 177, 16, 14, 0,
2286 17, 3, 3, 14, 14, 3, 3, 3, 2245 0, 11, 11, 0, 0, 0, 11, 11,
2287 14, 14, 14, 14, 3, 14, 14, 3, 2246 11, 11, 0, 11, 11, 0, 0, 0,
2288 3, 3, 3, 14, 14, 3, 3, 14, 2247 0, 11, 11, 0, 0, 11, 11, 0,
2289 14, 3, 3, 3, 3, 3, 3, 14, 2248 0, 0, 0, 0, 0, 11, 11, 11,
2290 14, 14, 3, 3, 3, 14, 3, 3, 2249 0, 0, 0, 11, 0, 0, 0, 11,
2291 3, 14, 14, 3, 14, 14, 14, 14, 2250 11, 0, 11, 11, 11, 11, 0, 11,
2292 3, 14, 14, 14, 14, 3, 14, 14, 2251 11, 11, 11, 0, 11, 11, 11, 11,
2293 14, 14, 14, 14, 3, 3, 3, 14, 2252 11, 11, 0, 0, 0, 11, 11, 11,
2294 14, 14, 14, 3, 181, 182, 3, 17, 2253 11, 0, 178, 179, 0, 14, 0, 11,
2295 3, 14, 3, 3, 14, 19, 183, 184, 2254 0, 0, 11, 16, 180, 181, 182, 183,
2296 185, 186, 60, 187, 188, 58, 189, 190, 2255 57, 184, 185, 55, 186, 187, 188, 189,
2297 191, 192, 193, 194, 195, 196, 197, 17, 2256 190, 191, 192, 193, 194, 14, 0, 0,
2298 3, 3, 14, 3, 14, 14, 14, 14, 2257 11, 0, 11, 11, 11, 11, 11, 11,
2299 14, 14, 14, 3, 14, 14, 14, 3, 2258 11, 0, 11, 11, 11, 0, 11, 0,
2300 14, 3, 3, 14, 3, 14, 3, 3, 2259 0, 11, 0, 11, 0, 0, 11, 11,
2301 14, 14, 14, 14, 3, 14, 14, 14, 2260 11, 11, 0, 11, 11, 11, 0, 0,
2302 3, 3, 14, 14, 14, 14, 3, 14, 2261 11, 11, 11, 11, 0, 11, 11, 0,
2303 14, 3, 3, 14, 14, 14, 14, 14, 2262 0, 11, 11, 11, 11, 11, 0, 195,
2304 3, 198, 199, 200, 201, 202, 203, 204, 2263 196, 197, 198, 199, 200, 201, 202, 203,
2305 205, 206, 207, 208, 204, 209, 210, 211, 2264 204, 205, 201, 206, 207, 208, 209, 38,
2306 212, 41, 3, 213, 214, 19, 215, 216, 2265 0, 210, 211, 16, 212, 213, 214, 215,
2307 217, 218, 219, 220, 221, 222, 223, 19, 2266 216, 217, 218, 219, 220, 16, 14, 221,
2308 17, 224, 225, 226, 227, 19, 228, 229, 2267 222, 223, 224, 16, 225, 226, 227, 228,
2309 230, 231, 232, 233, 234, 235, 236, 237, 2268 229, 230, 231, 232, 233, 234, 235, 236,
2310 238, 239, 240, 241, 242, 19, 147, 17, 2269 237, 238, 239, 16, 144, 14, 240, 0,
2311 243, 3, 14, 14, 14, 14, 14, 3, 2270 11, 11, 11, 11, 11, 0, 0, 0,
2312 3, 3, 14, 3, 14, 14, 3, 14, 2271 11, 0, 11, 11, 0, 11, 0, 11,
2313 3, 14, 14, 3, 3, 3, 14, 14, 2272 11, 0, 0, 0, 11, 11, 11, 0,
2314 14, 3, 3, 3, 14, 14, 14, 3, 2273 0, 0, 11, 11, 11, 0, 0, 0,
2315 3, 3, 3, 14, 3, 3, 14, 3, 2274 0, 11, 0, 0, 11, 0, 0, 11,
2316 3, 14, 14, 14, 3, 3, 14, 3, 2275 11, 11, 0, 0, 11, 0, 11, 11,
2317 14, 14, 14, 3, 14, 14, 14, 14, 2276 11, 0, 11, 11, 11, 11, 11, 11,
2318 14, 14, 3, 3, 3, 14, 14, 3, 2277 0, 0, 0, 11, 11, 0, 11, 11,
2319 14, 14, 3, 14, 14, 3, 14, 14, 2278 0, 11, 11, 0, 11, 11, 0, 11,
2320 3, 14, 14, 14, 14, 14, 14, 14, 2279 11, 11, 11, 11, 11, 11, 0, 11,
2321 3, 14, 3, 14, 3, 14, 14, 3, 2280 0, 11, 0, 11, 11, 0, 11, 0,
2322 14, 3, 14, 14, 3, 14, 3, 14, 2281 11, 11, 0, 11, 0, 11, 0, 241,
2323 3, 244, 215, 245, 246, 247, 248, 249, 2282 212, 242, 243, 244, 245, 246, 247, 248,
2324 250, 251, 252, 253, 101, 254, 19, 255, 2283 249, 250, 98, 251, 16, 252, 253, 254,
2325 256, 257, 19, 258, 132, 259, 260, 261, 2284 16, 255, 129, 256, 257, 258, 259, 260,
2326 262, 263, 264, 265, 266, 19, 3, 3, 2285 261, 262, 263, 16, 0, 0, 0, 11,
2327 3, 14, 14, 14, 3, 14, 14, 3, 2286 11, 11, 0, 11, 11, 0, 11, 11,
2328 14, 14, 3, 3, 3, 3, 3, 14, 2287 0, 0, 0, 0, 0, 11, 11, 11,
2329 14, 14, 14, 3, 14, 14, 14, 14, 2288 11, 0, 11, 11, 11, 11, 11, 11,
2330 14, 14, 3, 3, 3, 14, 14, 14, 2289 0, 0, 0, 11, 11, 11, 11, 11,
2331 14, 14, 14, 14, 14, 14, 3, 14, 2290 11, 11, 11, 11, 0, 11, 11, 11,
2332 14, 14, 14, 14, 14, 14, 14, 3, 2291 11, 11, 11, 11, 11, 0, 11, 11,
2333 14, 14, 3, 3, 3, 3, 14, 14, 2292 0, 0, 0, 0, 11, 11, 11, 0,
2334 14, 3, 3, 3, 14, 3, 3, 3, 2293 0, 0, 11, 0, 0, 0, 11, 11,
2335 14, 14, 3, 14, 14, 14, 3, 14, 2294 0, 11, 11, 11, 0, 11, 0, 0,
2336 3, 3, 3, 14, 14, 3, 14, 14, 2295 0, 11, 11, 0, 11, 11, 11, 0,
2337 14, 3, 14, 14, 14, 3, 3, 3, 2296 11, 11, 11, 0, 0, 0, 0, 11,
2338 3, 14, 19, 184, 267, 268, 17, 19, 2297 16, 181, 264, 265, 14, 16, 14, 0,
2339 17, 3, 3, 14, 3, 14, 19, 267, 2298 0, 11, 0, 11, 16, 264, 14, 0,
2340 17, 3, 19, 269, 17, 3, 3, 14, 2299 16, 266, 14, 0, 0, 11, 16, 267,
2341 19, 270, 271, 272, 175, 273, 274, 19, 2300 268, 269, 172, 270, 271, 16, 272, 273,
2342 275, 276, 277, 17, 3, 3, 14, 14, 2301 274, 14, 0, 0, 11, 11, 11, 0,
2343 14, 3, 14, 14, 3, 14, 14, 14, 2302 11, 11, 0, 11, 11, 11, 11, 0,
2344 14, 3, 3, 14, 3, 3, 14, 14, 2303 0, 11, 0, 0, 11, 11, 0, 11,
2345 3, 14, 3, 19, 17, 3, 278, 19, 2304 0, 16, 14, 0, 275, 16, 276, 0,
2346 279, 3, 17, 3, 14, 3, 14, 280, 2305 14, 0, 11, 0, 11, 277, 16, 278,
2347 19, 281, 282, 3, 14, 3, 3, 3, 2306 279, 0, 11, 0, 0, 0, 11, 11,
2348 14, 14, 14, 14, 3, 283, 284, 285, 2307 11, 11, 0, 280, 281, 282, 16, 283,
2349 19, 286, 287, 288, 289, 290, 291, 292, 2308 284, 285, 286, 287, 288, 289, 290, 291,
2350 293, 294, 295, 296, 297, 298, 299, 17, 2309 292, 293, 294, 295, 296, 14, 0, 11,
2351 3, 14, 14, 14, 3, 3, 3, 3, 2310 11, 11, 0, 0, 0, 0, 11, 11,
2352 14, 14, 3, 3, 14, 3, 3, 3, 2311 0, 0, 11, 0, 0, 0, 0, 0,
2353 3, 3, 3, 3, 14, 3, 14, 3, 2312 0, 0, 11, 0, 11, 0, 0, 0,
2354 3, 3, 3, 3, 3, 14, 14, 14, 2313 0, 0, 0, 11, 11, 11, 11, 11,
2355 14, 14, 3, 3, 14, 3, 3, 3, 2314 0, 0, 11, 0, 0, 0, 11, 0,
2356 14, 3, 3, 14, 3, 3, 14, 3, 2315 0, 11, 0, 0, 11, 0, 0, 11,
2357 3, 14, 3, 3, 3, 14, 14, 14, 2316 0, 0, 0, 11, 11, 11, 0, 0,
2358 3, 3, 3, 14, 14, 14, 14, 3, 2317 0, 11, 11, 11, 11, 0, 297, 16,
2359 300, 19, 301, 19, 302, 303, 304, 305, 2318 298, 16, 299, 300, 301, 302, 14, 0,
2360 17, 3, 14, 14, 14, 14, 14, 3, 2319 11, 11, 11, 11, 11, 0, 0, 0,
2361 3, 3, 14, 3, 3, 14, 14, 14, 2320 11, 0, 0, 11, 11, 11, 11, 11,
2362 14, 14, 14, 14, 14, 14, 14, 3, 2321 11, 11, 11, 11, 11, 0, 11, 11,
2363 14, 14, 14, 14, 14, 14, 14, 14, 2322 11, 11, 11, 11, 11, 11, 11, 11,
2364 14, 14, 14, 14, 14, 14, 14, 14, 2323 11, 11, 11, 11, 11, 11, 11, 11,
2365 14, 14, 14, 3, 14, 14, 14, 14, 2324 11, 0, 11, 11, 11, 11, 11, 0,
2366 14, 3, 306, 19, 17, 3, 14, 307, 2325 303, 16, 14, 0, 11, 304, 16, 100,
2367 19, 103, 17, 3, 14, 308, 3, 17, 2326 14, 0, 11, 305, 0, 14, 0, 11,
2368 3, 14, 19, 309, 17, 3, 3, 14, 2327 16, 306, 14, 0, 0, 11, 307, 0,
2369 310, 3, 19, 311, 17, 3, 3, 14, 2328 16, 308, 14, 0, 0, 11, 11, 11,
2370 14, 14, 14, 3, 14, 14, 14, 14, 2329 11, 0, 11, 11, 11, 11, 0, 11,
2371 3, 14, 14, 14, 14, 14, 3, 3, 2330 11, 11, 11, 11, 0, 0, 11, 0,
2372 14, 3, 14, 14, 14, 3, 14, 3, 2331 11, 11, 11, 0, 11, 0, 11, 11,
2373 14, 14, 14, 3, 3, 3, 3, 3, 2332 11, 0, 0, 0, 0, 0, 0, 0,
2374 3, 3, 14, 14, 14, 3, 14, 3, 2333 11, 11, 11, 0, 11, 0, 0, 0,
2375 3, 3, 14, 14, 14, 14, 3, 312, 2334 11, 11, 11, 11, 0, 309, 310, 69,
2376 313, 72, 314, 315, 316, 317, 318, 319, 2335 311, 312, 313, 314, 315, 316, 317, 318,
2377 320, 321, 322, 323, 324, 325, 326, 327, 2336 319, 320, 321, 322, 323, 324, 325, 326,
2378 328, 329, 330, 331, 332, 334, 335, 336, 2337 327, 328, 329, 331, 332, 333, 334, 335,
2379 337, 338, 339, 333, 3, 14, 14, 14, 2338 336, 330, 0, 11, 11, 11, 11, 0,
2380 14, 3, 14, 3, 14, 14, 3, 14, 2339 11, 0, 11, 11, 0, 11, 11, 11,
2381 14, 14, 3, 3, 3, 3, 3, 3, 2340 0, 0, 0, 0, 0, 0, 0, 0,
2382 3, 3, 3, 14, 14, 14, 14, 14, 2341 0, 11, 11, 11, 11, 11, 0, 11,
2383 3, 14, 14, 14, 14, 14, 14, 14, 2342 11, 11, 11, 11, 11, 11, 0, 11,
2384 3, 14, 14, 14, 3, 14, 14, 14, 2343 11, 11, 0, 11, 11, 11, 11, 11,
2385 14, 14, 14, 14, 3, 14, 14, 14, 2344 11, 11, 0, 11, 11, 11, 0, 11,
2386 3, 14, 14, 14, 14, 14, 14, 14, 2345 11, 11, 11, 11, 11, 11, 0, 11,
2387 3, 14, 14, 14, 3, 14, 14, 14, 2346 11, 11, 0, 11, 11, 11, 11, 11,
2388 14, 14, 14, 14, 14, 14, 14, 3, 2347 11, 11, 11, 11, 11, 0, 11, 0,
2389 14, 3, 14, 14, 14, 14, 14, 3, 2348 11, 11, 11, 11, 11, 0, 11, 11,
2390 14, 14, 3, 14, 14, 14, 14, 14, 2349 0, 11, 11, 11, 11, 11, 11, 11,
2391 14, 14, 3, 14, 14, 14, 3, 14, 2350 0, 11, 11, 11, 0, 11, 11, 11,
2392 14, 14, 14, 3, 14, 14, 14, 14, 2351 11, 0, 11, 11, 11, 11, 0, 11,
2393 3, 14, 14, 14, 14, 3, 14, 3, 2352 11, 11, 11, 0, 11, 0, 11, 11,
2394 14, 14, 3, 14, 14, 14, 14, 14, 2353 0, 11, 11, 11, 11, 11, 11, 11,
2395 14, 14, 14, 14, 14, 14, 14, 14, 2354 11, 11, 11, 11, 11, 11, 11, 0,
2396 14, 3, 14, 14, 14, 3, 14, 3, 2355 11, 11, 11, 0, 11, 0, 11, 11,
2397 14, 14, 3, 14, 3, 340, 341, 342, 2356 0, 11, 0, 337, 338, 339, 101, 102,
2398 104, 105, 106, 107, 108, 343, 110, 111, 2357 103, 104, 105, 340, 107, 108, 109, 110,
2399 112, 113, 114, 115, 344, 345, 170, 346, 2358 111, 112, 341, 342, 167, 343, 258, 117,
2400 261, 120, 347, 122, 232, 272, 125, 348, 2359 344, 119, 229, 269, 122, 345, 346, 347,
2401 349, 350, 351, 352, 353, 354, 355, 356, 2360 348, 349, 350, 351, 352, 353, 354, 131,
2402 357, 134, 358, 19, 17, 18, 19, 137, 2361 355, 16, 14, 15, 16, 134, 135, 136,
2403 138, 139, 140, 17, 17, 3, 14, 14, 2362 137, 14, 14, 0, 11, 11, 0, 11,
2404 3, 14, 14, 14, 14, 14, 14, 3, 2363 11, 11, 11, 11, 11, 0, 0, 0,
2405 3, 3, 14, 3, 14, 14, 14, 14, 2364 11, 0, 11, 11, 11, 11, 0, 11,
2406 3, 14, 14, 14, 3, 14, 14, 3, 2365 11, 11, 0, 11, 11, 0, 11, 11,
2407 14, 14, 14, 3, 3, 14, 14, 14, 2366 11, 0, 0, 11, 11, 11, 0, 0,
2408 3, 3, 14, 14, 3, 14, 3, 14, 2367 11, 11, 0, 11, 0, 11, 0, 11,
2409 3, 14, 14, 14, 3, 3, 14, 14, 2368 11, 11, 0, 0, 11, 11, 0, 11,
2410 3, 14, 14, 3, 14, 14, 14, 3, 2369 11, 0, 11, 11, 11, 0, 356, 140,
2411 359, 143, 145, 146, 147, 148, 149, 17, 2370 142, 143, 144, 145, 146, 14, 357, 148,
2412 360, 151, 361, 153, 362, 3, 14, 14, 2371 358, 150, 359, 0, 11, 11, 0, 0,
2413 3, 3, 3, 3, 14, 3, 3, 14, 2372 0, 0, 11, 0, 0, 11, 11, 11,
2414 14, 14, 14, 14, 3, 363, 112, 364, 2373 11, 11, 0, 360, 109, 361, 154, 155,
2415 157, 158, 17, 159, 160, 19, 17, 3, 2374 14, 156, 157, 16, 14, 0, 11, 11,
2416 14, 14, 14, 14, 3, 3, 3, 14, 2375 11, 11, 0, 0, 0, 11, 16, 159,
2417 19, 162, 163, 19, 365, 366, 222, 311, 2376 160, 16, 362, 363, 219, 308, 163, 164,
2418 166, 167, 168, 367, 170, 368, 369, 370, 2377 165, 364, 167, 365, 366, 367, 368, 369,
2419 371, 372, 373, 374, 375, 376, 377, 178, 2378 370, 371, 372, 373, 374, 175, 176, 15,
2420 179, 18, 378, 19, 17, 3, 3, 3, 2379 375, 16, 14, 0, 0, 0, 0, 11,
2421 3, 14, 14, 14, 3, 3, 3, 3, 2380 11, 11, 0, 0, 0, 0, 0, 11,
2422 3, 14, 14, 3, 14, 14, 14, 3, 2381 11, 0, 11, 11, 11, 0, 11, 11,
2423 14, 14, 3, 3, 3, 14, 14, 3, 2382 0, 0, 0, 11, 11, 0, 11, 11,
2424 14, 14, 14, 14, 3, 14, 3, 14, 2383 11, 11, 0, 11, 0, 11, 11, 11,
2425 14, 14, 14, 14, 3, 3, 3, 3, 2384 11, 11, 0, 0, 0, 0, 0, 11,
2426 3, 14, 14, 14, 14, 14, 14, 3, 2385 11, 11, 11, 11, 11, 0, 11, 0,
2427 14, 3, 19, 183, 184, 379, 186, 60, 2386 16, 180, 181, 376, 183, 57, 184, 185,
2428 187, 188, 58, 189, 190, 380, 17, 193, 2387 55, 186, 187, 377, 14, 190, 378, 192,
2429 381, 195, 196, 197, 17, 3, 14, 14, 2388 193, 194, 14, 0, 11, 11, 11, 11,
2430 14, 14, 14, 14, 14, 3, 14, 14, 2389 11, 11, 11, 0, 11, 11, 0, 11,
2431 3, 14, 3, 382, 383, 200, 201, 202, 2390 0, 379, 380, 197, 198, 199, 381, 201,
2432 384, 204, 205, 385, 386, 387, 204, 209, 2391 202, 382, 383, 384, 201, 206, 207, 208,
2433 210, 211, 212, 41, 3, 213, 214, 19, 2392 209, 38, 0, 210, 211, 16, 212, 213,
2434 215, 216, 218, 388, 220, 389, 222, 223, 2393 215, 385, 217, 386, 219, 220, 16, 14,
2435 19, 17, 390, 225, 226, 227, 19, 228, 2394 387, 222, 223, 224, 16, 225, 226, 227,
2436 229, 230, 231, 232, 233, 234, 235, 391, 2395 228, 229, 230, 231, 232, 388, 234, 235,
2437 237, 238, 392, 240, 241, 242, 19, 147, 2396 389, 237, 238, 239, 16, 144, 14, 240,
2438 17, 243, 3, 3, 14, 3, 3, 14, 2397 0, 0, 11, 0, 0, 11, 0, 11,
2439 3, 14, 14, 14, 14, 14, 3, 14, 2398 11, 11, 11, 11, 0, 11, 11, 0,
2440 14, 3, 393, 394, 395, 396, 397, 398, 2399 390, 391, 392, 393, 394, 395, 396, 397,
2441 399, 400, 250, 401, 322, 402, 216, 403, 2400 247, 398, 319, 399, 213, 400, 401, 402,
2442 404, 405, 406, 407, 404, 408, 409, 410, 2401 403, 404, 401, 405, 406, 407, 258, 408,
2443 261, 411, 263, 412, 413, 274, 3, 14, 2402 260, 409, 410, 271, 0, 11, 0, 11,
2444 3, 14, 3, 14, 3, 14, 3, 14, 2403 0, 11, 0, 11, 0, 11, 11, 0,
2445 14, 3, 14, 3, 14, 14, 14, 3, 2404 11, 0, 11, 11, 11, 0, 11, 11,
2446 14, 14, 3, 3, 14, 14, 14, 3, 2405 0, 0, 11, 11, 11, 0, 11, 0,
2447 14, 3, 14, 3, 14, 14, 3, 14, 2406 11, 0, 11, 11, 0, 11, 0, 11,
2448 3, 14, 3, 14, 3, 14, 3, 14, 2407 0, 11, 0, 11, 0, 11, 0, 0,
2449 3, 3, 3, 14, 14, 14, 3, 14, 2408 0, 11, 11, 11, 0, 11, 11, 0,
2450 14, 3, 19, 270, 232, 414, 404, 415, 2409 16, 267, 229, 411, 401, 412, 271, 16,
2451 274, 19, 416, 417, 277, 17, 3, 14, 2410 413, 414, 274, 14, 0, 11, 0, 11,
2452 3, 14, 14, 14, 3, 3, 3, 14, 2411 11, 11, 0, 0, 0, 11, 11, 0,
2453 14, 3, 280, 19, 281, 418, 3, 14, 2412 277, 16, 278, 415, 0, 11, 11, 0,
2454 14, 3, 19, 286, 287, 288, 289, 290, 2413 16, 283, 284, 285, 286, 287, 288, 289,
2455 291, 292, 293, 294, 295, 419, 17, 3, 2414 290, 291, 292, 416, 14, 0, 0, 0,
2456 3, 3, 14, 19, 420, 19, 268, 303, 2415 11, 16, 417, 16, 265, 300, 301, 302,
2457 304, 305, 17, 3, 3, 14, 422, 422, 2416 14, 0, 0, 11, 419, 419, 419, 419,
2458 422, 422, 421, 422, 422, 422, 421, 422, 2417 418, 419, 419, 419, 418, 419, 418, 419,
2459 421, 422, 422, 421, 421, 421, 421, 421, 2418 419, 418, 418, 418, 418, 418, 418, 419,
2460 421, 422, 421, 421, 421, 421, 422, 422, 2419 418, 418, 418, 418, 419, 419, 419, 419,
2461 422, 422, 422, 421, 421, 422, 421, 421, 2420 419, 418, 418, 419, 418, 418, 419, 418,
2462 422, 421, 422, 421, 421, 422, 421, 421, 2421 419, 418, 418, 419, 418, 418, 418, 419,
2463 421, 422, 422, 422, 422, 422, 422, 421, 2422 419, 419, 419, 419, 419, 418, 419, 419,
2464 422, 422, 421, 422, 422, 421, 421, 421, 2423 418, 419, 419, 418, 418, 418, 418, 418,
2465 421, 421, 421, 422, 422, 421, 421, 422, 2424 418, 419, 419, 418, 418, 419, 418, 419,
2466 421, 422, 422, 422, 421, 423, 424, 425, 2425 419, 419, 418, 421, 422, 423, 424, 425,
2467 426, 427, 428, 429, 430, 431, 432, 433, 2426 426, 427, 428, 429, 430, 431, 432, 433,
2468 434, 435, 436, 437, 438, 439, 440, 441, 2427 434, 435, 436, 437, 438, 439, 440, 441,
2469 442, 443, 444, 445, 446, 447, 448, 449, 2428 442, 443, 444, 445, 446, 447, 448, 449,
2470 450, 451, 452, 453, 454, 421, 422, 421, 2429 450, 451, 452, 418, 419, 418, 419, 418,
2471 422, 421, 422, 422, 421, 422, 422, 421, 2430 419, 419, 418, 419, 419, 418, 418, 418,
2472 421, 421, 422, 421, 421, 421, 421, 421, 2431 419, 418, 418, 418, 418, 418, 418, 418,
2473 421, 421, 422, 421, 421, 421, 421, 421, 2432 419, 418, 418, 418, 418, 418, 418, 418,
2474 421, 421, 422, 422, 422, 422, 422, 422, 2433 419, 419, 419, 419, 419, 419, 419, 419,
2475 422, 422, 422, 422, 422, 421, 421, 421, 2434 419, 419, 419, 418, 418, 418, 418, 418,
2476 421, 421, 421, 421, 421, 422, 422, 422, 2435 418, 418, 418, 419, 419, 419, 419, 419,
2477 422, 422, 422, 422, 422, 422, 421, 421, 2436 419, 419, 419, 419, 418, 418, 418, 418,
2478 421, 421, 421, 421, 421, 421, 422, 422, 2437 418, 418, 418, 418, 419, 419, 419, 419,
2479 422, 422, 422, 422, 422, 422, 422, 421, 2438 419, 419, 419, 419, 419, 418, 419, 419,
2480 422, 422, 422, 422, 422, 422, 422, 422, 2439 419, 419, 419, 419, 419, 419, 418, 419,
2481 421, 422, 422, 422, 422, 422, 422, 422, 2440 419, 419, 419, 419, 419, 419, 419, 419,
2482 422, 422, 422, 422, 421, 422, 422, 422, 2441 419, 419, 418, 419, 419, 419, 419, 419,
2483 422, 422, 422, 421, 422, 422, 422, 422, 2442 419, 418, 419, 419, 419, 419, 419, 419,
2484 422, 422, 421, 421, 421, 421, 421, 421, 2443 418, 418, 418, 418, 418, 418, 418, 418,
2485 421, 421, 422, 422, 422, 422, 422, 422, 2444 419, 419, 419, 419, 419, 419, 419, 419,
2486 422, 422, 421, 422, 422, 422, 422, 422, 2445 418, 419, 419, 419, 419, 419, 419, 419,
2487 422, 422, 422, 421, 422, 422, 422, 422, 2446 419, 418, 419, 419, 419, 419, 419, 418,
2488 422, 421, 421, 421, 421, 421, 421, 421, 2447 418, 418, 418, 418, 418, 418, 418, 419,
2489 421, 422, 422, 422, 422, 422, 422, 421, 2448 419, 419, 419, 419, 419, 418, 419, 419,
2490 422, 422, 422, 422, 422, 422, 422, 421, 2449 419, 419, 419, 419, 419, 418, 419, 418,
2491 422, 421, 422, 422, 421, 422, 422, 422, 2450 419, 419, 418, 419, 419, 419, 419, 419,
2492 422, 422, 422, 422, 422, 422, 422, 422, 2451 419, 419, 419, 419, 419, 419, 419, 419,
2493 422, 422, 421, 422, 422, 422, 422, 422, 2452 418, 419, 419, 419, 419, 419, 418, 419,
2494 421, 422, 422, 422, 422, 422, 422, 422, 2453 419, 419, 419, 419, 419, 419, 418, 419,
2495 421, 422, 422, 422, 421, 422, 422, 422, 2454 419, 419, 418, 419, 419, 419, 418, 419,
2496 421, 422, 421, 455, 456, 457, 458, 459, 2455 418, 453, 454, 455, 456, 457, 458, 459,
2497 460, 461, 462, 463, 464, 465, 466, 467, 2456 460, 461, 462, 463, 464, 465, 466, 467,
2498 468, 469, 470, 471, 472, 473, 474, 475, 2457 468, 469, 470, 471, 472, 473, 474, 475,
2499 476, 477, 478, 479, 480, 481, 482, 483, 2458 476, 477, 478, 479, 480, 481, 482, 483,
2500 484, 485, 486, 487, 488, 489, 490, 427, 2459 484, 485, 486, 487, 488, 425, 489, 490,
2501 491, 492, 493, 494, 495, 496, 427, 472, 2460 491, 492, 493, 494, 425, 470, 425, 418,
2502 427, 421, 422, 421, 422, 422, 421, 421, 2461 419, 418, 419, 419, 418, 418, 419, 418,
2503 422, 421, 421, 421, 421, 422, 421, 421, 2462 418, 418, 418, 419, 418, 418, 418, 418,
2504 421, 421, 421, 422, 421, 421, 421, 421, 2463 418, 419, 418, 418, 418, 418, 418, 419,
2505 421, 422, 422, 422, 422, 422, 421, 421, 2464 419, 419, 419, 419, 418, 418, 418, 419,
2506 421, 422, 421, 421, 421, 422, 422, 422, 2465 418, 418, 418, 419, 419, 419, 418, 418,
2507 421, 421, 421, 422, 422, 421, 421, 421, 2466 418, 419, 419, 418, 418, 418, 419, 419,
2508 422, 422, 422, 421, 421, 421, 422, 422, 2467 419, 418, 418, 418, 419, 419, 419, 419,
2509 422, 422, 421, 422, 422, 422, 422, 421, 2468 418, 419, 419, 419, 419, 418, 418, 418,
2510 421, 421, 421, 421, 422, 422, 422, 422, 2469 418, 418, 419, 419, 419, 419, 418, 418,
2511 421, 421, 422, 422, 422, 421, 421, 422, 2470 419, 419, 419, 418, 418, 419, 419, 419,
2512 422, 422, 422, 421, 422, 422, 421, 422, 2471 419, 418, 419, 419, 418, 419, 419, 418,
2513 422, 421, 421, 421, 422, 422, 422, 421, 2472 418, 418, 419, 419, 419, 418, 418, 418,
2514 421, 421, 421, 422, 422, 422, 422, 422, 2473 418, 419, 419, 419, 419, 419, 418, 418,
2515 421, 421, 421, 421, 422, 421, 422, 422, 2474 418, 418, 419, 418, 419, 419, 418, 419,
2516 421, 422, 422, 421, 422, 421, 422, 422, 2475 419, 418, 419, 418, 419, 419, 419, 418,
2517 422, 421, 422, 422, 421, 421, 421, 422, 2476 419, 419, 418, 418, 418, 419, 418, 418,
2518 421, 421, 421, 421, 421, 421, 421, 422, 2477 418, 418, 418, 418, 418, 419, 419, 419,
2519 422, 422, 422, 421, 422, 422, 422, 422, 2478 419, 418, 419, 419, 419, 419, 419, 419,
2520 422, 422, 422, 421, 497, 498, 499, 500, 2479 419, 418, 495, 496, 497, 498, 499, 500,
2521 501, 502, 503, 504, 505, 427, 506, 507, 2480 501, 502, 503, 425, 504, 505, 506, 507,
2522 508, 509, 510, 421, 422, 421, 421, 421, 2481 508, 418, 419, 418, 418, 418, 418, 418,
2523 421, 421, 422, 422, 421, 422, 422, 422, 2482 419, 419, 418, 419, 419, 419, 418, 419,
2524 421, 422, 422, 422, 422, 422, 422, 422, 2483 419, 419, 419, 419, 419, 419, 419, 419,
2525 422, 422, 422, 421, 422, 422, 422, 421, 2484 419, 418, 419, 419, 419, 418, 418, 419,
2526 421, 422, 422, 422, 421, 421, 422, 421, 2485 419, 419, 418, 418, 419, 418, 418, 419,
2527 421, 422, 422, 422, 422, 422, 421, 421, 2486 419, 419, 419, 419, 418, 418, 418, 418,
2528 421, 421, 422, 422, 422, 422, 422, 422, 2487 419, 419, 419, 419, 419, 419, 418, 419,
2529 421, 422, 422, 422, 422, 422, 421, 511, 2488 419, 419, 419, 419, 418, 509, 464, 510,
2530 466, 512, 513, 514, 427, 515, 516, 472, 2489 511, 512, 425, 513, 514, 470, 425, 418,
2531 427, 421, 422, 422, 422, 422, 421, 421, 2490 419, 419, 419, 419, 418, 418, 418, 419,
2532 421, 422, 421, 421, 422, 422, 422, 421, 2491 418, 418, 419, 419, 419, 418, 418, 418,
2533 421, 421, 422, 422, 421, 477, 421, 472, 2492 419, 419, 418, 475, 418, 470, 425, 425,
2534 427, 427, 517, 421, 427, 421, 422, 472, 2493 515, 418, 425, 418, 419, 470, 516, 517,
2535 518, 519, 472, 520, 521, 472, 522, 523, 2494 470, 518, 519, 470, 520, 521, 522, 523,
2536 524, 525, 526, 527, 472, 528, 529, 530, 2495 524, 525, 470, 526, 527, 528, 470, 529,
2537 472, 531, 532, 533, 491, 534, 535, 536, 2496 530, 531, 489, 532, 533, 534, 489, 535,
2538 491, 537, 472, 427, 421, 421, 422, 422, 2497 470, 425, 418, 418, 419, 419, 418, 418,
2539 421, 421, 421, 422, 422, 422, 422, 421, 2498 418, 419, 419, 419, 419, 418, 419, 419,
2540 422, 422, 421, 421, 421, 421, 422, 422, 2499 418, 418, 418, 418, 419, 419, 418, 418,
2541 421, 421, 422, 422, 421, 421, 421, 421, 2500 419, 419, 418, 418, 418, 418, 418, 418,
2542 421, 421, 422, 422, 422, 421, 421, 421, 2501 419, 419, 419, 418, 418, 418, 419, 418,
2543 422, 421, 421, 421, 422, 422, 421, 422, 2502 418, 418, 419, 419, 418, 419, 419, 419,
2544 422, 422, 422, 421, 422, 422, 422, 422, 2503 419, 418, 419, 419, 419, 419, 418, 419,
2545 421, 422, 422, 422, 422, 422, 422, 421, 2504 419, 419, 419, 419, 419, 418, 418, 418,
2546 421, 421, 422, 422, 422, 422, 421, 538, 2505 419, 419, 419, 419, 418, 536, 537, 418,
2547 539, 421, 427, 421, 422, 421, 421, 422, 2506 425, 418, 419, 418, 418, 419, 470, 538,
2548 472, 540, 541, 542, 543, 522, 544, 545, 2507 539, 540, 541, 520, 542, 543, 544, 545,
2549 546, 547, 548, 549, 550, 551, 552, 553, 2508 546, 547, 548, 549, 550, 551, 552, 553,
2550 554, 555, 427, 421, 421, 422, 421, 422, 2509 425, 418, 418, 419, 418, 419, 419, 419,
2551 422, 422, 422, 422, 422, 422, 421, 422, 2510 419, 419, 419, 419, 418, 419, 419, 419,
2552 422, 422, 421, 422, 421, 421, 422, 421, 2511 418, 419, 418, 418, 419, 418, 419, 418,
2553 422, 421, 421, 422, 422, 422, 422, 421, 2512 418, 419, 419, 419, 419, 418, 419, 419,
2554 422, 422, 422, 421, 421, 422, 422, 422, 2513 419, 418, 418, 419, 419, 419, 419, 418,
2555 422, 421, 422, 422, 421, 421, 422, 422, 2514 419, 419, 418, 418, 419, 419, 419, 419,
2556 422, 422, 422, 421, 556, 557, 558, 559, 2515 419, 418, 554, 555, 556, 557, 558, 559,
2557 560, 561, 562, 563, 564, 565, 566, 562, 2516 560, 561, 562, 563, 564, 560, 566, 567,
2558 568, 569, 570, 571, 567, 421, 572, 573, 2517 568, 569, 565, 418, 570, 571, 470, 572,
2559 472, 574, 575, 576, 577, 578, 579, 580, 2518 573, 574, 575, 576, 577, 578, 579, 580,
2560 581, 582, 472, 427, 583, 584, 585, 586, 2519 470, 425, 581, 582, 583, 584, 470, 585,
2561 472, 587, 588, 589, 590, 591, 592, 593, 2520 586, 587, 588, 589, 590, 591, 592, 593,
2562 594, 595, 596, 597, 598, 599, 600, 601, 2521 594, 595, 596, 597, 598, 599, 470, 501,
2563 472, 503, 427, 602, 421, 422, 422, 422, 2522 425, 600, 418, 419, 419, 419, 419, 419,
2564 422, 422, 421, 421, 421, 422, 421, 422, 2523 418, 418, 418, 419, 418, 419, 419, 418,
2565 422, 421, 422, 421, 422, 422, 421, 421, 2524 419, 418, 419, 419, 418, 418, 418, 419,
2566 421, 422, 422, 422, 421, 421, 421, 422, 2525 419, 419, 418, 418, 418, 419, 419, 419,
2567 422, 422, 421, 421, 421, 421, 422, 421, 2526 418, 418, 418, 418, 419, 418, 418, 419,
2568 421, 422, 421, 421, 422, 422, 422, 421, 2527 418, 418, 419, 419, 419, 418, 418, 419,
2569 421, 422, 421, 422, 422, 422, 421, 422, 2528 418, 419, 419, 419, 418, 419, 419, 419,
2570 422, 422, 422, 422, 422, 421, 421, 421, 2529 419, 419, 419, 418, 418, 418, 419, 419,
2571 422, 422, 421, 422, 422, 421, 422, 422, 2530 418, 419, 419, 418, 419, 419, 418, 419,
2572 421, 422, 422, 421, 422, 422, 422, 422, 2531 419, 418, 419, 419, 419, 419, 419, 419,
2573 422, 422, 422, 421, 422, 421, 422, 421, 2532 419, 418, 419, 418, 419, 418, 419, 419,
2574 422, 422, 421, 422, 421, 422, 422, 421, 2533 418, 419, 418, 419, 419, 418, 419, 418,
2575 422, 421, 422, 421, 603, 574, 604, 605, 2534 419, 418, 601, 572, 602, 603, 604, 605,
2576 606, 607, 608, 609, 610, 611, 612, 455, 2535 606, 607, 608, 609, 610, 453, 611, 470,
2577 613, 472, 614, 615, 616, 472, 617, 487, 2536 612, 613, 614, 470, 615, 485, 616, 617,
2578 618, 619, 620, 621, 622, 623, 624, 625, 2537 618, 619, 620, 621, 622, 623, 470, 418,
2579 472, 421, 421, 421, 422, 422, 422, 421, 2538 418, 418, 419, 419, 419, 418, 419, 419,
2580 422, 422, 421, 422, 422, 421, 421, 421, 2539 418, 419, 419, 418, 418, 418, 418, 418,
2581 421, 421, 422, 422, 422, 422, 421, 422, 2540 419, 419, 419, 419, 418, 419, 419, 419,
2582 422, 422, 422, 422, 422, 421, 421, 421, 2541 419, 419, 419, 418, 418, 418, 419, 419,
2583 422, 422, 422, 422, 422, 422, 422, 422, 2542 419, 419, 419, 419, 419, 419, 419, 418,
2584 422, 421, 422, 422, 422, 422, 422, 422, 2543 419, 419, 419, 419, 419, 419, 419, 419,
2585 422, 422, 421, 422, 422, 421, 421, 421, 2544 418, 419, 419, 418, 418, 418, 418, 419,
2586 421, 422, 422, 422, 421, 421, 421, 422, 2545 419, 419, 418, 418, 418, 419, 418, 418,
2587 421, 421, 421, 422, 422, 421, 422, 422, 2546 418, 419, 419, 418, 419, 419, 419, 418,
2588 422, 421, 422, 421, 421, 421, 422, 422, 2547 419, 418, 418, 418, 419, 419, 418, 419,
2589 421, 422, 422, 422, 421, 422, 422, 422, 2548 419, 419, 418, 419, 419, 419, 418, 418,
2590 421, 421, 421, 421, 422, 472, 541, 626, 2549 418, 418, 419, 470, 539, 624, 625, 425,
2591 627, 427, 472, 427, 421, 421, 422, 421, 2550 470, 425, 418, 418, 419, 418, 419, 470,
2592 422, 472, 626, 427, 421, 472, 628, 427, 2551 624, 425, 418, 470, 626, 425, 418, 418,
2593 421, 421, 422, 472, 629, 630, 631, 532, 2552 419, 470, 627, 628, 629, 530, 630, 631,
2594 632, 633, 472, 634, 635, 636, 427, 421, 2553 470, 632, 633, 634, 425, 418, 418, 419,
2595 421, 422, 422, 422, 421, 422, 422, 421, 2554 419, 419, 418, 419, 419, 418, 419, 419,
2596 422, 422, 422, 422, 421, 421, 422, 421, 2555 419, 419, 418, 418, 419, 418, 418, 419,
2597 421, 422, 422, 421, 422, 421, 472, 427, 2556 419, 418, 419, 418, 470, 425, 418, 635,
2598 421, 637, 472, 638, 421, 427, 421, 422, 2557 470, 636, 418, 425, 418, 419, 418, 419,
2599 421, 422, 639, 472, 640, 641, 421, 422, 2558 637, 470, 638, 639, 418, 419, 418, 418,
2600 421, 421, 421, 422, 422, 422, 422, 421, 2559 418, 419, 419, 419, 419, 418, 640, 641,
2601 642, 643, 644, 472, 645, 646, 647, 648, 2560 642, 470, 643, 644, 645, 646, 647, 648,
2602 649, 650, 651, 652, 653, 654, 655, 656, 2561 649, 650, 651, 652, 653, 654, 655, 656,
2603 657, 658, 427, 421, 422, 422, 422, 421, 2562 425, 418, 419, 419, 419, 418, 418, 418,
2604 421, 421, 421, 422, 422, 421, 421, 422, 2563 418, 419, 419, 418, 418, 419, 418, 418,
2605 421, 421, 421, 421, 421, 421, 421, 422, 2564 418, 418, 418, 418, 418, 419, 418, 419,
2606 421, 422, 421, 421, 421, 421, 421, 421, 2565 418, 418, 418, 418, 418, 418, 419, 419,
2607 422, 422, 422, 422, 422, 421, 421, 422, 2566 419, 419, 419, 418, 418, 419, 418, 418,
2608 421, 421, 421, 422, 421, 421, 422, 421, 2567 418, 419, 418, 418, 419, 418, 418, 419,
2609 421, 422, 421, 421, 422, 421, 421, 421, 2568 418, 418, 419, 418, 418, 418, 419, 419,
2610 422, 422, 422, 421, 421, 421, 422, 422, 2569 419, 418, 418, 418, 419, 419, 419, 419,
2611 422, 422, 421, 659, 472, 660, 472, 661, 2570 418, 657, 470, 658, 470, 659, 660, 661,
2612 662, 663, 664, 427, 421, 422, 422, 422, 2571 662, 425, 418, 419, 419, 419, 419, 419,
2613 422, 422, 421, 421, 421, 422, 421, 421, 2572 418, 418, 418, 419, 418, 418, 419, 419,
2614 422, 422, 422, 422, 422, 422, 422, 422, 2573 419, 419, 419, 419, 419, 419, 419, 419,
2615 422, 422, 421, 422, 422, 422, 422, 422, 2574 418, 419, 419, 419, 419, 419, 419, 419,
2616 422, 422, 422, 422, 422, 422, 422, 422, 2575 419, 419, 419, 419, 419, 419, 419, 419,
2617 422, 422, 422, 422, 422, 422, 421, 422, 2576 419, 419, 419, 419, 418, 419, 419, 419,
2618 422, 422, 422, 422, 421, 665, 472, 427, 2577 419, 419, 418, 663, 470, 425, 418, 419,
2619 421, 422, 666, 472, 457, 427, 421, 422, 2578 664, 470, 455, 425, 418, 419, 665, 418,
2620 667, 421, 427, 421, 422, 472, 668, 427, 2579 425, 418, 419, 470, 666, 425, 418, 418,
2621 421, 421, 422, 669, 421, 472, 670, 427, 2580 419, 667, 418, 470, 668, 425, 418, 418,
2622 421, 421, 422, 672, 671, 422, 422, 422, 2581 419, 670, 669, 419, 419, 419, 419, 670,
2623 422, 672, 671, 422, 672, 671, 672, 672, 2582 669, 419, 670, 669, 670, 670, 419, 670,
2624 422, 672, 671, 422, 672, 422, 672, 671, 2583 669, 419, 670, 419, 670, 669, 419, 670,
2625 422, 672, 422, 672, 422, 671, 672, 672, 2584 419, 670, 419, 669, 670, 670, 670, 670,
2626 672, 672, 672, 672, 672, 672, 671, 422, 2585 670, 670, 670, 670, 669, 419, 419, 670,
2627 422, 672, 672, 422, 672, 422, 672, 671, 2586 670, 419, 670, 419, 670, 669, 670, 670,
2628 672, 672, 672, 672, 672, 422, 672, 422, 2587 670, 670, 670, 419, 670, 419, 670, 419,
2629 672, 422, 672, 671, 672, 672, 422, 672, 2588 670, 669, 670, 670, 419, 670, 419, 670,
2630 422, 672, 671, 672, 672, 672, 672, 672, 2589 669, 670, 670, 670, 670, 670, 419, 670,
2631 422, 672, 422, 672, 671, 422, 422, 672, 2590 419, 670, 669, 419, 419, 670, 419, 670,
2632 422, 672, 671, 672, 672, 672, 422, 672, 2591 669, 670, 670, 670, 419, 670, 419, 670,
2633 422, 672, 422, 672, 422, 672, 671, 672, 2592 419, 670, 419, 670, 669, 670, 419, 670,
2634 422, 672, 422, 672, 671, 422, 672, 672, 2593 419, 670, 669, 419, 670, 670, 670, 670,
2635 672, 672, 422, 672, 422, 672, 422, 672, 2594 419, 670, 419, 670, 419, 670, 419, 670,
2636 422, 672, 422, 672, 422, 672, 671, 422, 2595 419, 670, 419, 670, 669, 419, 670, 669,
2637 672, 671, 672, 672, 672, 422, 672, 422, 2596 670, 670, 670, 419, 670, 419, 670, 669,
2638 672, 671, 672, 422, 672, 422, 672, 671, 2597 670, 419, 670, 419, 670, 669, 419, 670,
2639 422, 672, 672, 672, 672, 422, 672, 422, 2598 670, 670, 670, 419, 670, 419, 670, 669,
2640 672, 671, 422, 672, 422, 672, 422, 672, 2599 419, 670, 419, 670, 419, 670, 669, 670,
2641 671, 672, 672, 422, 672, 422, 672, 671, 2600 670, 419, 670, 419, 670, 669, 419, 670,
2642 422, 672, 422, 672, 422, 672, 422, 671, 2601 419, 670, 419, 670, 419, 669, 670, 670,
2643 672, 672, 672, 422, 672, 422, 672, 671, 2602 670, 419, 670, 419, 670, 669, 419, 670,
2644 422, 672, 671, 672, 672, 422, 672, 671, 2603 669, 670, 670, 419, 670, 669, 670, 670,
2645 672, 672, 672, 422, 672, 672, 672, 672, 2604 670, 419, 670, 670, 670, 670, 670, 670,
2646 672, 672, 422, 422, 672, 422, 672, 422, 2605 419, 419, 670, 419, 670, 419, 670, 419,
2647 672, 422, 672, 671, 672, 422, 672, 422, 2606 670, 669, 670, 419, 670, 419, 670, 669,
2648 672, 671, 422, 672, 671, 672, 422, 672, 2607 419, 670, 669, 670, 419, 670, 669, 670,
2649 671, 672, 422, 672, 671, 422, 422, 672, 2608 419, 670, 669, 419, 419, 670, 669, 419,
2650 671, 422, 672, 422, 672, 422, 672, 422, 2609 670, 419, 670, 419, 670, 419, 670, 419,
2651 672, 422, 672, 422, 671, 672, 672, 422, 2610 670, 419, 669, 670, 670, 419, 670, 670,
2652 672, 672, 672, 672, 422, 422, 672, 672, 2611 670, 670, 419, 419, 670, 670, 670, 670,
2653 672, 672, 672, 422, 672, 672, 672, 672, 2612 670, 419, 670, 670, 670, 670, 670, 669,
2654 672, 671, 422, 672, 672, 422, 672, 422, 2613 419, 670, 670, 419, 670, 419, 669, 670,
2655 671, 672, 672, 422, 672, 671, 422, 422, 2614 670, 419, 670, 669, 419, 419, 670, 419,
2656 672, 422, 671, 672, 672, 671, 422, 672, 2615 669, 670, 670, 669, 419, 670, 419, 669,
2657 422, 671, 672, 671, 422, 672, 422, 672, 2616 670, 669, 419, 670, 419, 670, 419, 669,
2658 422, 671, 672, 672, 671, 422, 672, 422, 2617 670, 670, 669, 419, 670, 419, 670, 419,
2659 672, 422, 672, 671, 672, 422, 672, 422, 2618 670, 669, 670, 419, 670, 419, 670, 669,
2660 672, 671, 422, 672, 671, 422, 422, 672, 2619 419, 670, 669, 419, 419, 670, 669, 670,
2661 671, 672, 422, 671, 672, 671, 422, 672, 2620 419, 669, 670, 669, 419, 670, 419, 670,
2662 422, 672, 422, 671, 672, 671, 422, 422, 2621 419, 669, 670, 669, 419, 419, 670, 669,
2663 672, 671, 672, 422, 672, 422, 672, 671, 2622 670, 419, 670, 419, 670, 669, 419, 670,
2664 422, 672, 422, 671, 672, 671, 422, 422, 2623 419, 669, 670, 669, 419, 419, 670, 419,
2665 672, 422, 671, 672, 671, 422, 422, 672, 2624 669, 670, 669, 419, 419, 670, 669, 670,
2666 671, 672, 422, 672, 671, 672, 422, 672, 2625 419, 670, 669, 670, 419, 670, 669, 670,
2667 671, 672, 422, 672, 422, 672, 422, 671, 2626 419, 670, 419, 670, 419, 669, 670, 669,
2668 672, 671, 422, 422, 672, 671, 672, 422, 2627 419, 419, 670, 669, 670, 419, 670, 419,
2669 672, 422, 672, 671, 422, 672, 671, 672, 2628 670, 669, 419, 670, 669, 670, 670, 419,
2670 672, 422, 672, 422, 672, 671, 671, 422, 2629 670, 419, 670, 669, 669, 419, 669, 419,
2671 671, 422, 672, 672, 422, 672, 672, 672, 2630 670, 670, 419, 670, 670, 670, 670, 670,
2672 672, 672, 672, 672, 671, 422, 672, 672, 2631 670, 670, 669, 419, 670, 670, 670, 419,
2673 672, 422, 671, 672, 672, 672, 422, 672, 2632 669, 670, 670, 670, 419, 670, 419, 670,
2674 422, 672, 422, 672, 422, 672, 422, 672, 2633 419, 670, 419, 670, 419, 670, 669, 419,
2675 671, 422, 422, 672, 671, 672, 422, 672, 2634 419, 670, 669, 670, 419, 670, 669, 419,
2676 671, 422, 422, 672, 422, 422, 422, 672, 2635 419, 670, 419, 419, 419, 670, 419, 670,
2677 422, 672, 422, 672, 422, 672, 422, 671, 2636 419, 670, 419, 670, 419, 669, 419, 670,
2678 422, 672, 422, 672, 422, 671, 672, 671, 2637 419, 670, 419, 669, 670, 669, 419, 670,
2679 422, 672, 422, 671, 672, 422, 672, 672, 2638 419, 669, 670, 419, 670, 670, 670, 669,
2680 672, 671, 422, 672, 422, 422, 672, 422, 2639 419, 670, 419, 419, 670, 419, 669, 670,
2681 671, 672, 672, 671, 422, 672, 672, 672, 2640 670, 669, 419, 670, 670, 670, 670, 419,
2682 672, 422, 672, 422, 671, 672, 672, 672, 2641 670, 419, 669, 670, 670, 670, 419, 670,
2683 422, 672, 671, 672, 422, 672, 422, 672, 2642 669, 670, 419, 670, 419, 670, 419, 670,
2684 422, 672, 422, 672, 671, 672, 672, 422, 2643 419, 670, 669, 670, 670, 419, 670, 669,
2685 672, 671, 422, 672, 422, 672, 422, 671, 2644 419, 670, 419, 670, 419, 669, 670, 670,
2686 672, 672, 671, 422, 672, 422, 671, 672, 2645 669, 419, 670, 419, 669, 670, 669, 419,
2687 671, 422, 672, 671, 422, 672, 422, 672, 2646 670, 669, 419, 670, 419, 670, 669, 670,
2688 671, 672, 672, 672, 671, 422, 422, 422, 2647 670, 670, 669, 419, 419, 419, 670, 669,
2689 672, 671, 422, 672, 422, 671, 672, 671, 2648 419, 670, 419, 669, 670, 669, 419, 670,
2690 422, 672, 422, 672, 422, 671, 672, 672, 2649 419, 670, 419, 669, 670, 670, 670, 669,
2691 672, 671, 422, 672, 422, 671, 672, 672, 2650 419, 670, 419, 669, 670, 670, 670, 670,
2692 672, 672, 671, 422, 672, 422, 672, 671, 2651 669, 419, 670, 419, 670, 669, 419, 419,
2693 422, 422, 672, 422, 672, 671, 672, 422, 2652 670, 419, 670, 669, 670, 419, 670, 419,
2694 672, 422, 671, 672, 672, 671, 422, 672, 2653 669, 670, 670, 669, 419, 670, 419, 670,
2695 422, 672, 671, 422, 672, 672, 672, 422, 2654 669, 419, 670, 670, 670, 419, 670, 419,
2696 672, 422, 671, 422, 672, 671, 672, 422, 2655 669, 419, 670, 669, 670, 419, 419, 670,
2697 422, 672, 422, 672, 422, 671, 672, 672, 2656 419, 670, 419, 669, 670, 670, 670, 670,
2698 672, 672, 671, 422, 672, 422, 672, 422, 2657 669, 419, 670, 419, 670, 419, 670, 419,
2699 672, 422, 672, 422, 672, 671, 672, 672, 2658 670, 419, 670, 669, 670, 670, 670, 419,
2700 672, 422, 672, 422, 672, 422, 672, 422, 2659 670, 419, 670, 419, 670, 419, 669, 670,
2701 671, 672, 672, 422, 422, 672, 671, 672, 2660 670, 419, 419, 670, 669, 670, 419, 670,
2702 422, 672, 672, 671, 422, 672, 422, 672, 2661 670, 669, 419, 670, 419, 670, 669, 419,
2703 671, 422, 422, 672, 672, 672, 672, 422, 2662 419, 670, 670, 670, 670, 419, 670, 419,
2704 672, 422, 672, 422, 671, 672, 672, 422, 2663 670, 419, 669, 670, 670, 419, 669, 670,
2705 671, 672, 671, 422, 672, 422, 671, 672, 2664 669, 419, 670, 419, 669, 670, 669, 419,
2706 671, 422, 672, 422, 671, 672, 422, 672, 2665 670, 419, 669, 670, 419, 670, 670, 669,
2707 672, 671, 422, 672, 672, 422, 671, 672, 2666 419, 670, 670, 419, 669, 670, 669, 419,
2708 671, 422, 672, 422, 672, 671, 672, 422, 2667 670, 419, 670, 669, 670, 419, 670, 419,
2709 672, 422, 671, 672, 671, 422, 672, 422, 2668 669, 670, 669, 419, 670, 419, 670, 419,
2710 672, 422, 672, 422, 672, 422, 672, 671, 2669 670, 419, 670, 419, 670, 669, 671, 669,
2711 673, 671, 674, 675, 676, 677, 678, 679, 2670 672, 673, 674, 675, 676, 677, 678, 679,
2712 680, 681, 682, 683, 684, 676, 685, 686, 2671 680, 681, 682, 674, 683, 684, 685, 686,
2713 687, 688, 689, 676, 690, 691, 692, 693, 2672 687, 674, 688, 689, 690, 691, 692, 693,
2714 694, 695, 696, 697, 698, 699, 700, 701, 2673 694, 695, 696, 697, 698, 699, 700, 701,
2715 702, 703, 704, 676, 705, 673, 685, 673, 2674 702, 674, 703, 671, 683, 671, 704, 671,
2716 706, 673, 671, 672, 672, 672, 672, 422, 2675 669, 670, 670, 670, 670, 419, 669, 670,
2717 671, 672, 672, 671, 422, 672, 671, 422, 2676 670, 669, 419, 670, 669, 419, 419, 670,
2718 422, 672, 671, 422, 672, 422, 671, 672, 2677 669, 419, 670, 419, 669, 670, 669, 419,
2719 671, 422, 422, 672, 422, 671, 672, 672, 2678 419, 670, 419, 669, 670, 670, 669, 419,
2720 671, 422, 672, 672, 672, 671, 422, 672, 2679 670, 670, 670, 669, 419, 670, 419, 670,
2721 422, 672, 672, 671, 422, 422, 672, 422, 2680 670, 669, 419, 419, 670, 419, 669, 670,
2722 671, 672, 671, 422, 672, 671, 422, 422, 2681 669, 419, 670, 669, 419, 419, 670, 419,
2723 672, 422, 672, 671, 422, 672, 422, 422, 2682 670, 669, 419, 670, 419, 419, 670, 419,
2724 672, 422, 672, 422, 671, 672, 672, 671, 2683 670, 419, 669, 670, 670, 669, 419, 670,
2725 422, 672, 672, 422, 672, 671, 422, 672, 2684 670, 419, 670, 669, 419, 670, 419, 670,
2726 422, 672, 671, 422, 672, 422, 671, 422, 2685 669, 419, 670, 419, 669, 419, 670, 670,
2727 672, 672, 672, 422, 672, 671, 672, 422, 2686 670, 419, 670, 669, 670, 419, 670, 669,
2728 672, 671, 422, 672, 671, 672, 422, 672, 2687 419, 670, 669, 670, 419, 670, 669, 419,
2729 671, 422, 672, 671, 422, 672, 422, 672, 2688 670, 669, 419, 670, 419, 670, 669, 419,
2730 671, 422, 672, 671, 422, 672, 671, 707, 2689 670, 669, 419, 670, 669, 705, 706, 707,
2731 708, 709, 710, 711, 712, 713, 714, 715, 2690 708, 709, 710, 711, 712, 713, 714, 715,
2732 716, 717, 718, 678, 719, 720, 721, 722, 2691 716, 676, 717, 718, 719, 720, 721, 718,
2733 723, 720, 724, 725, 726, 727, 728, 729, 2692 722, 723, 724, 725, 726, 727, 728, 729,
2734 730, 731, 732, 673, 671, 672, 422, 672, 2693 730, 671, 669, 670, 419, 670, 669, 670,
2735 671, 672, 422, 672, 671, 672, 422, 672, 2694 419, 670, 669, 670, 419, 670, 669, 670,
2736 671, 672, 422, 672, 671, 422, 672, 422, 2695 419, 670, 669, 419, 670, 419, 670, 669,
2737 672, 671, 672, 422, 672, 671, 672, 422, 2696 670, 419, 670, 669, 670, 419, 419, 419,
2738 422, 422, 672, 671, 672, 422, 672, 671, 2697 670, 669, 670, 419, 670, 669, 670, 670,
2739 672, 672, 672, 672, 422, 672, 422, 671, 2698 670, 670, 419, 670, 419, 669, 670, 669,
2740 672, 671, 422, 422, 672, 422, 672, 671, 2699 419, 419, 670, 419, 670, 669, 670, 419,
2741 672, 422, 672, 671, 422, 672, 671, 672, 2700 670, 669, 419, 670, 669, 670, 670, 419,
2742 672, 422, 672, 671, 422, 672, 671, 672, 2701 670, 669, 419, 670, 669, 670, 419, 670,
2743 422, 672, 671, 422, 672, 671, 422, 672, 2702 669, 419, 670, 669, 419, 670, 669, 419,
2744 671, 422, 672, 671, 672, 671, 422, 422, 2703 670, 669, 670, 669, 419, 419, 670, 669,
2745 672, 671, 672, 422, 672, 671, 422, 672, 2704 670, 419, 670, 669, 419, 670, 419, 669,
2746 422, 671, 672, 671, 422, 676, 733, 673, 2705 670, 669, 419, 674, 731, 671, 674, 732,
2747 676, 734, 676, 735, 685, 673, 671, 672, 2706 674, 733, 683, 671, 669, 670, 669, 419,
2748 671, 422, 672, 671, 422, 676, 734, 685, 2707 670, 669, 419, 674, 732, 683, 671, 669,
2749 673, 671, 676, 736, 673, 685, 673, 671, 2708 674, 734, 671, 683, 671, 669, 670, 669,
2750 672, 671, 422, 676, 737, 694, 738, 720, 2709 419, 674, 735, 692, 736, 718, 737, 730,
2751 739, 732, 676, 740, 741, 742, 673, 685, 2710 674, 738, 739, 740, 671, 683, 671, 669,
2752 673, 671, 672, 671, 422, 672, 422, 672, 2711 670, 669, 419, 670, 419, 670, 669, 419,
2753 671, 422, 672, 422, 672, 422, 671, 672, 2712 670, 419, 670, 419, 669, 670, 670, 669,
2754 672, 671, 422, 672, 422, 672, 671, 422, 2713 419, 670, 419, 670, 669, 419, 670, 669,
2755 672, 671, 676, 685, 427, 671, 743, 676, 2714 674, 683, 425, 669, 741, 674, 742, 683,
2756 744, 685, 673, 671, 427, 672, 671, 422, 2715 671, 669, 425, 670, 669, 419, 670, 669,
2757 672, 671, 422, 745, 676, 746, 747, 673, 2716 419, 743, 674, 744, 745, 671, 669, 419,
2758 671, 422, 672, 671, 672, 672, 671, 422, 2717 670, 669, 670, 670, 669, 419, 419, 670,
2759 422, 672, 422, 672, 671, 676, 748, 749, 2718 419, 670, 669, 674, 746, 747, 748, 749,
2760 750, 751, 752, 753, 754, 755, 756, 757, 2719 750, 751, 752, 753, 754, 755, 756, 671,
2761 758, 673, 685, 673, 671, 672, 422, 672, 2720 683, 671, 669, 670, 419, 670, 670, 670,
2762 672, 672, 672, 672, 672, 672, 422, 672, 2721 670, 670, 670, 670, 419, 670, 419, 670,
2763 422, 672, 672, 672, 672, 672, 672, 671, 2722 670, 670, 670, 670, 670, 669, 419, 670,
2764 422, 672, 672, 422, 672, 422, 671, 672, 2723 670, 419, 670, 419, 669, 670, 419, 670,
2765 422, 672, 672, 672, 422, 672, 672, 422, 2724 670, 670, 419, 670, 670, 419, 670, 670,
2766 672, 672, 422, 672, 672, 422, 672, 672, 2725 419, 670, 670, 419, 670, 670, 669, 419,
2767 671, 422, 676, 759, 676, 735, 760, 761, 2726 674, 757, 674, 733, 758, 759, 760, 671,
2768 762, 673, 685, 673, 671, 672, 671, 422, 2727 683, 671, 669, 670, 669, 419, 670, 670,
2769 672, 672, 672, 422, 672, 672, 672, 422, 2728 670, 419, 670, 670, 670, 419, 670, 419,
2770 672, 422, 672, 671, 422, 422, 422, 422, 2729 670, 669, 419, 419, 419, 419, 670, 670,
2771 672, 672, 422, 422, 422, 422, 422, 672, 2730 419, 419, 419, 419, 419, 670, 670, 670,
2772 672, 672, 672, 672, 672, 672, 422, 672, 2731 670, 670, 670, 670, 419, 670, 419, 670,
2773 422, 672, 422, 671, 672, 672, 672, 422, 2732 419, 669, 670, 670, 670, 419, 670, 419,
2774 672, 422, 672, 671, 685, 427, 763, 676, 2733 670, 669, 683, 425, 761, 674, 683, 425,
2775 685, 427, 672, 671, 422, 764, 676, 765, 2734 670, 669, 419, 762, 674, 763, 683, 425,
2776 685, 427, 672, 671, 422, 672, 422, 766, 2735 670, 669, 419, 670, 419, 764, 683, 671,
2777 685, 673, 671, 427, 672, 671, 422, 676, 2736 669, 425, 670, 669, 419, 674, 765, 671,
2778 767, 673, 685, 673, 671, 672, 671, 422, 2737 683, 671, 669, 670, 669, 419, 766, 766,
2779 768, 769, 768, 770, 771, 768, 772, 768, 2738 766, 768, 769, 770, 766, 767, 767, 771,
2780 773, 768, 771, 774, 775, 774, 777, 776, 2739 768, 771, 769, 771, 767, 772, 773, 772,
2781 778, 779, 778, 780, 781, 776, 782, 776, 2740 775, 774, 776, 774, 777, 774, 779, 778,
2782 783, 778, 784, 779, 785, 780, 787, 786, 2741 781, 782, 780, 781, 783, 780, 785, 784,
2783 788, 789, 789, 786, 790, 786, 791, 788, 2742 786, 784, 787, 784, 789, 788, 791, 792,
2784 792, 789, 793, 789, 795, 795, 795, 795, 2743 790, 791, 793, 790, 795, 795, 795, 795,
2785 794, 795, 795, 795, 794, 795, 794, 795, 2744 794, 795, 795, 795, 794, 795, 794, 795,
2786 795, 794, 794, 794, 794, 794, 794, 795, 2745 795, 794, 794, 794, 794, 794, 794, 795,
2787 794, 794, 794, 794, 795, 795, 795, 795, 2746 794, 794, 794, 794, 795, 795, 795, 795,
@@ -3103,375 +3062,343 @@ var _hcltok_indicies []int16 = []int16{
3103 1046, 1045, 795, 1046, 795, 1140, 1059, 1047, 3062 1046, 1045, 795, 1046, 795, 1140, 1059, 1047,
3104 1045, 801, 1046, 1045, 795, 1050, 1141, 1047, 3063 1045, 801, 1046, 1045, 795, 1050, 1141, 1047,
3105 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143, 3064 1059, 1047, 1045, 1046, 1045, 795, 1142, 1143,
3106 1144, 1142, 1145, 1146, 1147, 1148, 1149, 1150, 3065 1144, 1142, 1145, 1146, 1147, 1149, 1150, 1151,
3107 1151, 1152, 1153, 1154, 672, 672, 422, 1155, 3066 1152, 1153, 1154, 670, 670, 419, 1155, 1156,
3108 1156, 1157, 1158, 672, 1161, 1162, 1164, 1165, 3067 1157, 1158, 670, 1161, 1162, 1164, 1165, 1166,
3109 1166, 1160, 1167, 1168, 1169, 1170, 1171, 1172, 3068 1160, 1167, 1168, 1169, 1170, 1171, 1172, 1173,
3110 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 3069 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181,
3111 1181, 1182, 1183, 1184, 1185, 1186, 1188, 1189, 3070 1182, 1183, 1184, 1185, 1186, 1188, 1189, 1190,
3112 1190, 1191, 1192, 1193, 672, 1148, 10, 1148, 3071 1191, 1192, 1193, 670, 1148, 7, 1148, 419,
3113 422, 1148, 422, 1160, 1163, 1187, 1194, 1159, 3072 1148, 419, 1160, 1163, 1187, 1194, 1159, 1142,
3114 1142, 1142, 1195, 1143, 1196, 1198, 1197, 2, 3073 1142, 1195, 1143, 1196, 1198, 1197, 4, 1147,
3115 1, 1199, 1197, 1200, 1197, 5, 1, 1197, 3074 1200, 1197, 1201, 1197, 2, 1147, 1197, 6,
3116 6, 5, 9, 11, 11, 10, 1202, 1203, 3075 8, 8, 7, 1202, 1203, 1204, 1197, 1205,
3117 1204, 1197, 1205, 1206, 1197, 1207, 1197, 422, 3076 1206, 1197, 1207, 1197, 419, 419, 1209, 1210,
3118 422, 1209, 1210, 491, 472, 1211, 472, 1212, 3077 489, 470, 1211, 470, 1212, 1213, 1214, 1215,
3119 1213, 1214, 1215, 1216, 1217, 1218, 1219, 1220, 3078 1216, 1217, 1218, 1219, 1220, 1221, 1222, 544,
3120 1221, 1222, 546, 1223, 522, 1224, 1225, 1226, 3079 1223, 520, 1224, 1225, 1226, 1227, 1228, 1229,
3121 1227, 1228, 1229, 1230, 1231, 1232, 1233, 1234, 3080 1230, 1231, 1232, 1233, 1234, 1235, 419, 419,
3122 1235, 422, 422, 422, 427, 567, 1208, 1236, 3081 419, 425, 565, 1208, 1236, 1197, 1237, 1197,
3123 1197, 1237, 1197, 672, 1238, 422, 422, 422, 3082 670, 1238, 419, 419, 419, 670, 1238, 670,
3124 672, 1238, 672, 672, 422, 1238, 422, 1238, 3083 670, 419, 1238, 419, 1238, 419, 1238, 419,
3125 422, 1238, 422, 672, 672, 672, 672, 672, 3084 670, 670, 670, 670, 670, 1238, 419, 670,
3126 1238, 422, 672, 672, 672, 422, 672, 422, 3085 670, 670, 419, 670, 419, 1238, 419, 670,
3127 1238, 422, 672, 672, 672, 672, 422, 1238, 3086 670, 670, 670, 419, 1238, 670, 419, 670,
3128 672, 422, 672, 422, 672, 422, 672, 672, 3087 419, 670, 419, 670, 670, 419, 670, 1238,
3129 422, 672, 1238, 422, 672, 422, 672, 422, 3088 419, 670, 419, 670, 419, 670, 1238, 670,
3130 672, 1238, 672, 422, 1238, 672, 422, 672, 3089 419, 1238, 670, 419, 670, 419, 1238, 670,
3131 422, 1238, 672, 672, 672, 672, 672, 1238, 3090 670, 670, 670, 670, 1238, 419, 419, 670,
3132 422, 422, 672, 422, 672, 1238, 672, 422, 3091 419, 670, 1238, 670, 419, 1238, 670, 670,
3133 1238, 672, 672, 1238, 422, 422, 672, 422, 3092 1238, 419, 419, 670, 419, 670, 419, 670,
3134 672, 422, 672, 1238, 1239, 1240, 1241, 1242, 3093 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1245,
3135 1243, 1244, 1245, 1246, 1247, 1248, 1249, 717, 3094 1246, 1247, 1248, 1249, 715, 1250, 1251, 1252,
3136 1250, 1251, 1252, 1253, 1254, 1255, 1256, 1257, 3095 1253, 1254, 1255, 1256, 1257, 1258, 1259, 1260,
3137 1258, 1259, 1260, 1261, 1260, 1262, 1263, 1264, 3096 1261, 1260, 1262, 1263, 1264, 1265, 1266, 671,
3138 1265, 1266, 673, 1238, 1267, 1268, 1269, 1270, 3097 1238, 1267, 1268, 1269, 1270, 1271, 1272, 1273,
3139 1271, 1272, 1273, 1274, 1275, 1276, 1277, 1278, 3098 1274, 1275, 1276, 1277, 1278, 1279, 1280, 1281,
3140 1279, 1280, 1281, 1282, 1283, 1284, 1285, 727, 3099 1282, 1283, 1284, 1285, 725, 1286, 1287, 1288,
3141 1286, 1287, 1288, 694, 1289, 1290, 1291, 1292, 3100 692, 1289, 1290, 1291, 1292, 1293, 1294, 671,
3142 1293, 1294, 673, 1295, 1296, 1297, 1298, 1299, 3101 1295, 1296, 1297, 1298, 1299, 1300, 1301, 1302,
3143 1300, 1301, 1302, 676, 1303, 673, 676, 1304, 3102 674, 1303, 671, 674, 1304, 1305, 1306, 1307,
3144 1305, 1306, 1307, 685, 1238, 1308, 1309, 1310, 3103 683, 1238, 1308, 1309, 1310, 1311, 703, 1312,
3145 1311, 705, 1312, 1313, 685, 1314, 1315, 1316, 3104 1313, 683, 1314, 1315, 1316, 1317, 1318, 671,
3146 1317, 1318, 673, 1238, 1319, 1278, 1320, 1321, 3105 1238, 1319, 1278, 1320, 1321, 1322, 683, 1323,
3147 1322, 685, 1323, 1324, 676, 673, 685, 427, 3106 1324, 674, 671, 683, 425, 1238, 1288, 671,
3148 1238, 1288, 673, 676, 685, 427, 685, 427, 3107 674, 683, 425, 683, 425, 1325, 683, 1238,
3149 1325, 685, 1238, 427, 676, 1326, 1327, 676, 3108 425, 674, 1326, 1327, 674, 1328, 1329, 681,
3150 1328, 1329, 683, 1330, 1331, 1332, 1333, 1334, 3109 1330, 1331, 1332, 1333, 1334, 1284, 1335, 1336,
3151 1284, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 3110 1337, 1338, 1339, 1340, 1341, 1342, 1343, 1344,
3152 1342, 1343, 1344, 1345, 1346, 1303, 1347, 676, 3111 1345, 1346, 1303, 1347, 674, 683, 425, 1238,
3153 685, 427, 1238, 1348, 1349, 685, 673, 1238, 3112 1348, 1349, 683, 671, 1238, 425, 671, 1238,
3154 427, 673, 1238, 676, 1350, 733, 1351, 1352, 3113 674, 1350, 731, 1351, 1352, 1353, 1354, 1355,
3155 1353, 1354, 1355, 1356, 1357, 1358, 673, 1359, 3114 1356, 1357, 1358, 671, 1359, 1360, 1361, 1362,
3156 1360, 1361, 1362, 1363, 1364, 673, 685, 1238, 3115 1363, 1364, 671, 683, 1238, 1366, 1367, 1368,
3157 1366, 1367, 1368, 1369, 1370, 1371, 1372, 1373, 3116 1369, 1370, 1371, 1372, 1373, 1374, 1375, 1376,
3158 1374, 1375, 1376, 1372, 1378, 1379, 1380, 1381, 3117 1372, 1378, 1379, 1380, 1381, 1365, 1377, 1365,
3159 1365, 1377, 1365, 1238, 1365, 1238, 1382, 1382, 3118 1238, 1365, 1238, 1382, 1382, 1383, 1384, 1385,
3160 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 3119 1386, 1387, 1388, 1389, 1390, 1387, 767, 1391,
3161 1387, 771, 1391, 1391, 1391, 1392, 1393, 1386, 3120 1391, 1391, 1392, 1391, 1391, 768, 769, 770,
3162 1391, 772, 773, 1394, 1391, 771, 1395, 1395, 3121 1391, 767, 1382, 1382, 1393, 1396, 1397, 1395,
3163 1395, 1397, 1398, 1399, 1395, 1400, 1401, 1402, 3122 1398, 1399, 1398, 1400, 1391, 1402, 1401, 1396,
3164 1395, 1396, 1403, 1403, 1403, 1405, 1406, 1407, 3123 1403, 1395, 1405, 1404, 1394, 1394, 1394, 768,
3165 1403, 1408, 1409, 1410, 1403, 1404, 1391, 1391, 3124 769, 770, 1394, 767, 767, 1406, 773, 1406,
3166 1411, 1412, 1386, 1391, 772, 773, 1394, 1391, 3125 1407, 1406, 775, 1408, 1409, 1410, 1411, 1412,
3167 771, 1413, 1414, 1415, 771, 1416, 1417, 1418, 3126 1413, 1414, 1411, 776, 775, 1408, 1415, 1415,
3168 769, 769, 769, 769, 1420, 1421, 1422, 1396, 3127 777, 779, 1416, 1415, 776, 1418, 1419, 1417,
3169 769, 1423, 1424, 1425, 769, 1419, 770, 770, 3128 1418, 1419, 1420, 1417, 775, 1408, 1421, 1415,
3170 770, 1427, 1428, 1429, 1396, 770, 1430, 1431, 3129 775, 1408, 1415, 1423, 1422, 1425, 1424, 776,
3171 1432, 770, 1426, 769, 769, 769, 1434, 1435, 3130 1426, 777, 1426, 779, 1426, 785, 1427, 1428,
3172 1436, 1404, 769, 1437, 1438, 1439, 769, 1433, 3131 1429, 1430, 1431, 1432, 1433, 1430, 786, 785,
3173 1395, 1395, 771, 1440, 1441, 1399, 1395, 1400, 3132 1427, 1434, 1434, 787, 789, 1435, 1434, 786,
3174 1401, 1402, 1395, 1396, 1442, 1443, 1444, 771, 3133 1437, 1438, 1436, 1437, 1438, 1439, 1436, 785,
3175 1445, 1446, 1447, 770, 770, 770, 770, 1449, 3134 1427, 1440, 1434, 785, 1427, 1434, 1442, 1441,
3176 1450, 1451, 1404, 770, 1452, 1453, 1454, 770, 3135 1444, 1443, 786, 1445, 787, 1445, 789, 1445,
3177 1448, 1403, 1403, 771, 1455, 1456, 1407, 1403, 3136 795, 1448, 1449, 1451, 1452, 1453, 1447, 1454,
3178 1408, 1409, 1410, 1403, 1404, 1403, 1403, 1403, 3137 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462,
3179 1405, 1406, 1407, 771, 1408, 1409, 1410, 1403, 3138 1463, 1464, 1465, 1466, 1467, 1468, 1469, 1470,
3180 1404, 1403, 1403, 1403, 1405, 1406, 1407, 772, 3139 1471, 1472, 1473, 1475, 1476, 1477, 1478, 1479,
3181 1408, 1409, 1410, 1403, 1404, 1403, 1403, 1403, 3140 1480, 795, 795, 1446, 1447, 1450, 1474, 1481,
3182 1405, 1406, 1407, 773, 1408, 1409, 1410, 1403, 3141 1446, 1046, 795, 795, 1483, 1484, 865, 846,
3183 1404, 1395, 1395, 1395, 1397, 1398, 1399, 771, 3142 1485, 846, 1486, 1487, 1488, 1489, 1490, 1491,
3184 1400, 1401, 1402, 1395, 1396, 1395, 1395, 1395, 3143 1492, 1493, 1494, 1495, 1496, 920, 1497, 896,
3185 1397, 1398, 1399, 772, 1400, 1401, 1402, 1395, 3144 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505,
3186 1396, 1395, 1395, 1395, 1397, 1398, 1399, 773, 3145 1506, 1507, 1508, 1509, 795, 795, 795, 801,
3187 1400, 1401, 1402, 1395, 1396, 1458, 769, 1460, 3146 941, 1482, 1046, 1510, 795, 795, 795, 1046,
3188 1459, 1461, 770, 1463, 1462, 771, 1464, 775, 3147 1510, 1046, 1046, 795, 1510, 795, 1510, 795,
3189 1464, 1465, 1464, 777, 1466, 1467, 1468, 1469, 3148 1510, 795, 1046, 1046, 1046, 1046, 1046, 1510,
3190 1470, 1471, 1472, 1469, 781, 777, 1466, 1474, 3149 795, 1046, 1046, 1046, 795, 1046, 795, 1510,
3191 1475, 1473, 782, 783, 1476, 1473, 781, 1479, 3150 795, 1046, 1046, 1046, 1046, 795, 1510, 1046,
3192 1480, 1481, 1482, 1477, 1483, 1484, 1485, 1477, 3151 795, 1046, 795, 1046, 795, 1046, 1046, 795,
3193 1478, 1488, 1489, 1490, 1491, 1486, 1492, 1493, 3152 1046, 1510, 795, 1046, 795, 1046, 795, 1046,
3194 1494, 1486, 1487, 1496, 1495, 1498, 1497, 781, 3153 1510, 1046, 795, 1510, 1046, 795, 1046, 795,
3195 1499, 782, 1499, 783, 1499, 787, 1500, 1501, 3154 1510, 1046, 1046, 1046, 1046, 1046, 1510, 795,
3196 1502, 1503, 1504, 1505, 1506, 1503, 789, 787, 3155 795, 1046, 795, 1046, 1510, 1046, 795, 1510,
3197 1500, 1508, 1507, 790, 791, 1509, 1507, 789, 3156 1046, 1046, 1510, 795, 795, 1046, 795, 1046,
3198 1511, 1510, 1513, 1512, 789, 1514, 790, 1514, 3157 795, 1046, 1510, 1511, 1512, 1513, 1514, 1515,
3199 791, 1514, 795, 1517, 1518, 1520, 1521, 1522, 3158 1516, 1517, 1518, 1519, 1520, 1521, 1091, 1522,
3200 1516, 1523, 1524, 1525, 1526, 1527, 1528, 1529, 3159 1523, 1524, 1525, 1526, 1527, 1528, 1529, 1530,
3201 1530, 1531, 1532, 1533, 1534, 1535, 1536, 1537, 3160 1531, 1532, 1533, 1532, 1534, 1535, 1536, 1537,
3202 1538, 1539, 1540, 1541, 1542, 1544, 1545, 1546, 3161 1538, 1047, 1510, 1539, 1540, 1541, 1542, 1543,
3203 1547, 1548, 1549, 795, 795, 1515, 1516, 1519, 3162 1544, 1545, 1546, 1547, 1548, 1549, 1550, 1551,
3204 1543, 1550, 1515, 1046, 795, 795, 1552, 1553, 3163 1552, 1553, 1554, 1555, 1556, 1557, 1101, 1558,
3205 865, 846, 1554, 846, 1555, 1556, 1557, 1558, 3164 1559, 1560, 1068, 1561, 1562, 1563, 1564, 1565,
3206 1559, 1560, 1561, 1562, 1563, 1564, 1565, 920, 3165 1566, 1047, 1567, 1568, 1569, 1570, 1571, 1572,
3207 1566, 896, 1567, 1568, 1569, 1570, 1571, 1572, 3166 1573, 1574, 1050, 1575, 1047, 1050, 1576, 1577,
3208 1573, 1574, 1575, 1576, 1577, 1578, 795, 795, 3167 1578, 1579, 1059, 1510, 1580, 1581, 1582, 1583,
3209 795, 801, 941, 1551, 1046, 1579, 795, 795, 3168 1079, 1584, 1585, 1059, 1586, 1587, 1588, 1589,
3210 795, 1046, 1579, 1046, 1046, 795, 1579, 795, 3169 1590, 1047, 1510, 1591, 1550, 1592, 1593, 1594,
3211 1579, 795, 1579, 795, 1046, 1046, 1046, 1046, 3170 1059, 1595, 1596, 1050, 1047, 1059, 801, 1510,
3212 1046, 1579, 795, 1046, 1046, 1046, 795, 1046, 3171 1560, 1047, 1050, 1059, 801, 1059, 801, 1597,
3213 795, 1579, 795, 1046, 1046, 1046, 1046, 795, 3172 1059, 1510, 801, 1050, 1598, 1599, 1050, 1600,
3214 1579, 1046, 795, 1046, 795, 1046, 795, 1046, 3173 1601, 1057, 1602, 1603, 1604, 1605, 1606, 1556,
3215 1046, 795, 1046, 1579, 795, 1046, 795, 1046, 3174 1607, 1608, 1609, 1610, 1611, 1612, 1613, 1614,
3216 795, 1046, 1579, 1046, 795, 1579, 1046, 795, 3175 1615, 1616, 1617, 1618, 1575, 1619, 1050, 1059,
3217 1046, 795, 1579, 1046, 1046, 1046, 1046, 1046, 3176 801, 1510, 1620, 1621, 1059, 1047, 1510, 801,
3218 1579, 795, 795, 1046, 795, 1046, 1579, 1046, 3177 1047, 1510, 1050, 1622, 1107, 1623, 1624, 1625,
3219 795, 1579, 1046, 1046, 1579, 795, 795, 1046, 3178 1626, 1627, 1628, 1629, 1630, 1047, 1631, 1632,
3220 795, 1046, 795, 1046, 1579, 1580, 1581, 1582, 3179 1633, 1634, 1635, 1636, 1047, 1059, 1510, 1638,
3221 1583, 1584, 1585, 1586, 1587, 1588, 1589, 1590, 3180 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646,
3222 1091, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 3181 1647, 1648, 1644, 1650, 1651, 1652, 1653, 1637,
3223 1598, 1599, 1600, 1601, 1602, 1601, 1603, 1604, 3182 1649, 1637, 1510, 1637, 1510,
3224 1605, 1606, 1607, 1047, 1579, 1608, 1609, 1610,
3225 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618,
3226 1619, 1620, 1621, 1622, 1623, 1624, 1625, 1626,
3227 1101, 1627, 1628, 1629, 1068, 1630, 1631, 1632,
3228 1633, 1634, 1635, 1047, 1636, 1637, 1638, 1639,
3229 1640, 1641, 1642, 1643, 1050, 1644, 1047, 1050,
3230 1645, 1646, 1647, 1648, 1059, 1579, 1649, 1650,
3231 1651, 1652, 1079, 1653, 1654, 1059, 1655, 1656,
3232 1657, 1658, 1659, 1047, 1579, 1660, 1619, 1661,
3233 1662, 1663, 1059, 1664, 1665, 1050, 1047, 1059,
3234 801, 1579, 1629, 1047, 1050, 1059, 801, 1059,
3235 801, 1666, 1059, 1579, 801, 1050, 1667, 1668,
3236 1050, 1669, 1670, 1057, 1671, 1672, 1673, 1674,
3237 1675, 1625, 1676, 1677, 1678, 1679, 1680, 1681,
3238 1682, 1683, 1684, 1685, 1686, 1687, 1644, 1688,
3239 1050, 1059, 801, 1579, 1689, 1690, 1059, 1047,
3240 1579, 801, 1047, 1579, 1050, 1691, 1107, 1692,
3241 1693, 1694, 1695, 1696, 1697, 1698, 1699, 1047,
3242 1700, 1701, 1702, 1703, 1704, 1705, 1047, 1059,
3243 1579, 1707, 1708, 1709, 1710, 1711, 1712, 1713,
3244 1714, 1715, 1716, 1717, 1713, 1719, 1720, 1721,
3245 1722, 1706, 1718, 1706, 1579, 1706, 1579,
3246} 3183}
3247 3184
3248var _hcltok_trans_targs []int16 = []int16{ 3185var _hcltok_trans_targs []int16 = []int16{
3249 1464, 1, 1464, 1464, 1464, 3, 4, 1472, 3186 1459, 1459, 2, 3, 1459, 1459, 4, 1467,
3250 1464, 5, 1473, 6, 7, 9, 10, 287, 3187 5, 6, 8, 9, 286, 12, 13, 14,
3251 13, 14, 15, 16, 17, 288, 289, 20, 3188 15, 16, 287, 288, 19, 289, 21, 22,
3252 290, 22, 23, 291, 292, 293, 294, 295, 3189 290, 291, 292, 293, 294, 295, 296, 297,
3253 296, 297, 298, 299, 300, 329, 349, 354, 3190 298, 299, 328, 348, 353, 127, 128, 129,
3254 128, 129, 130, 357, 152, 372, 376, 1464, 3191 356, 151, 371, 375, 1459, 10, 11, 17,
3255 11, 12, 18, 19, 21, 24, 25, 26, 3192 18, 20, 23, 24, 25, 26, 27, 28,
3256 27, 28, 29, 30, 31, 32, 33, 65, 3193 29, 30, 31, 32, 64, 105, 120, 131,
3257 106, 121, 132, 155, 171, 284, 34, 35, 3194 154, 170, 283, 33, 34, 35, 36, 37,
3258 36, 37, 38, 39, 40, 41, 42, 43, 3195 38, 39, 40, 41, 42, 43, 44, 45,
3259 44, 45, 46, 47, 48, 49, 50, 51, 3196 46, 47, 48, 49, 50, 51, 52, 53,
3260 52, 53, 54, 55, 56, 57, 58, 59, 3197 54, 55, 56, 57, 58, 59, 60, 61,
3261 60, 61, 62, 63, 64, 66, 67, 68, 3198 62, 63, 65, 66, 67, 68, 69, 70,
3262 69, 70, 71, 72, 73, 74, 75, 76, 3199 71, 72, 73, 74, 75, 76, 77, 78,
3263 77, 78, 79, 80, 81, 82, 83, 84, 3200 79, 80, 81, 82, 83, 84, 85, 86,
3264 85, 86, 87, 88, 89, 90, 91, 92, 3201 87, 88, 89, 90, 91, 92, 93, 94,
3265 93, 94, 95, 96, 97, 98, 99, 100, 3202 95, 96, 97, 98, 99, 100, 101, 102,
3266 101, 102, 103, 104, 105, 107, 108, 109, 3203 103, 104, 106, 107, 108, 109, 110, 111,
3267 110, 111, 112, 113, 114, 115, 116, 117, 3204 112, 113, 114, 115, 116, 117, 118, 119,
3268 118, 119, 120, 122, 123, 124, 125, 126, 3205 121, 122, 123, 124, 125, 126, 130, 132,
3269 127, 131, 133, 134, 135, 136, 137, 138, 3206 133, 134, 135, 136, 137, 138, 139, 140,
3270 139, 140, 141, 142, 143, 144, 145, 146, 3207 141, 142, 143, 144, 145, 146, 147, 148,
3271 147, 148, 149, 150, 151, 153, 154, 156, 3208 149, 150, 152, 153, 155, 156, 157, 158,
3272 157, 158, 159, 160, 161, 162, 163, 164, 3209 159, 160, 161, 162, 163, 164, 165, 166,
3273 165, 166, 167, 168, 169, 170, 172, 204, 3210 167, 168, 169, 171, 203, 227, 230, 231,
3274 228, 231, 232, 234, 243, 244, 247, 251, 3211 233, 242, 243, 246, 250, 268, 275, 277,
3275 269, 276, 278, 280, 282, 173, 174, 175, 3212 279, 281, 172, 173, 174, 175, 176, 177,
3276 176, 177, 178, 179, 180, 181, 182, 183, 3213 178, 179, 180, 181, 182, 183, 184, 185,
3277 184, 185, 186, 187, 188, 189, 190, 191, 3214 186, 187, 188, 189, 190, 191, 192, 193,
3278 192, 193, 194, 195, 196, 197, 198, 199, 3215 194, 195, 196, 197, 198, 199, 200, 201,
3279 200, 201, 202, 203, 205, 206, 207, 208, 3216 202, 204, 205, 206, 207, 208, 209, 210,
3280 209, 210, 211, 212, 213, 214, 215, 216, 3217 211, 212, 213, 214, 215, 216, 217, 218,
3281 217, 218, 219, 220, 221, 222, 223, 224, 3218 219, 220, 221, 222, 223, 224, 225, 226,
3282 225, 226, 227, 229, 230, 233, 235, 236, 3219 228, 229, 232, 234, 235, 236, 237, 238,
3283 237, 238, 239, 240, 241, 242, 245, 246, 3220 239, 240, 241, 244, 245, 247, 248, 249,
3284 248, 249, 250, 252, 253, 254, 255, 256, 3221 251, 252, 253, 254, 255, 256, 257, 258,
3285 257, 258, 259, 260, 261, 262, 263, 264, 3222 259, 260, 261, 262, 263, 264, 265, 266,
3286 265, 266, 267, 268, 270, 271, 272, 273, 3223 267, 269, 270, 271, 272, 273, 274, 276,
3287 274, 275, 277, 279, 281, 283, 285, 286, 3224 278, 280, 282, 284, 285, 300, 301, 302,
3288 301, 302, 303, 304, 305, 306, 307, 308, 3225 303, 304, 305, 306, 307, 308, 309, 310,
3289 309, 310, 311, 312, 313, 314, 315, 316, 3226 311, 312, 313, 314, 315, 316, 317, 318,
3290 317, 318, 319, 320, 321, 322, 323, 324, 3227 319, 320, 321, 322, 323, 324, 325, 326,
3291 325, 326, 327, 328, 330, 331, 332, 333, 3228 327, 329, 330, 331, 332, 333, 334, 335,
3292 334, 335, 336, 337, 338, 339, 340, 341, 3229 336, 337, 338, 339, 340, 341, 342, 343,
3293 342, 343, 344, 345, 346, 347, 348, 350, 3230 344, 345, 346, 347, 349, 350, 351, 352,
3294 351, 352, 353, 355, 356, 358, 359, 360, 3231 354, 355, 357, 358, 359, 360, 361, 362,
3295 361, 362, 363, 364, 365, 366, 367, 368, 3232 363, 364, 365, 366, 367, 368, 369, 370,
3296 369, 370, 371, 373, 374, 375, 377, 383, 3233 372, 373, 374, 376, 382, 404, 409, 411,
3297 405, 410, 412, 414, 378, 379, 380, 381, 3234 413, 377, 378, 379, 380, 381, 383, 384,
3298 382, 384, 385, 386, 387, 388, 389, 390, 3235 385, 386, 387, 388, 389, 390, 391, 392,
3299 391, 392, 393, 394, 395, 396, 397, 398, 3236 393, 394, 395, 396, 397, 398, 399, 400,
3300 399, 400, 401, 402, 403, 404, 406, 407, 3237 401, 402, 403, 405, 406, 407, 408, 410,
3301 408, 409, 411, 413, 415, 1464, 1477, 438, 3238 412, 414, 1459, 1471, 1459, 437, 438, 439,
3302 439, 440, 441, 418, 442, 443, 444, 445, 3239 440, 417, 441, 442, 443, 444, 445, 446,
3303 446, 447, 448, 449, 450, 451, 452, 453, 3240 447, 448, 449, 450, 451, 452, 453, 454,
3304 454, 455, 456, 457, 458, 459, 460, 461, 3241 455, 456, 457, 458, 459, 460, 461, 462,
3305 462, 463, 464, 465, 466, 467, 468, 470, 3242 463, 464, 465, 466, 467, 469, 470, 471,
3306 471, 472, 473, 474, 475, 476, 477, 478, 3243 472, 473, 474, 475, 476, 477, 478, 479,
3307 479, 480, 481, 482, 483, 484, 485, 486, 3244 480, 481, 482, 483, 484, 485, 419, 486,
3308 420, 487, 488, 489, 490, 491, 492, 493, 3245 487, 488, 489, 490, 491, 492, 493, 494,
3309 494, 495, 496, 497, 498, 499, 500, 501, 3246 495, 496, 497, 498, 499, 500, 501, 502,
3310 502, 503, 504, 419, 505, 506, 507, 508, 3247 503, 418, 504, 505, 506, 507, 508, 510,
3311 509, 511, 512, 513, 514, 515, 516, 517, 3248 511, 512, 513, 514, 515, 516, 517, 518,
3312 518, 519, 520, 521, 522, 523, 524, 526, 3249 519, 520, 521, 522, 523, 525, 526, 527,
3313 527, 528, 529, 530, 531, 535, 537, 538, 3250 528, 529, 530, 534, 536, 537, 538, 539,
3314 539, 540, 435, 541, 542, 543, 544, 545, 3251 434, 540, 541, 542, 543, 544, 545, 546,
3315 546, 547, 548, 549, 550, 551, 552, 553, 3252 547, 548, 549, 550, 551, 552, 553, 554,
3316 554, 555, 557, 558, 560, 561, 562, 563, 3253 556, 557, 559, 560, 561, 562, 563, 564,
3317 564, 565, 433, 566, 567, 568, 569, 570, 3254 432, 565, 566, 567, 568, 569, 570, 571,
3318 571, 572, 573, 574, 576, 608, 632, 635, 3255 572, 573, 575, 607, 631, 634, 635, 637,
3319 636, 638, 647, 648, 651, 655, 673, 533, 3256 646, 647, 650, 654, 672, 532, 679, 681,
3320 680, 682, 684, 686, 577, 578, 579, 580, 3257 683, 685, 576, 577, 578, 579, 580, 581,
3321 581, 582, 583, 584, 585, 586, 587, 588, 3258 582, 583, 584, 585, 586, 587, 588, 589,
3322 589, 590, 591, 592, 593, 594, 595, 596, 3259 590, 591, 592, 593, 594, 595, 596, 597,
3323 597, 598, 599, 600, 601, 602, 603, 604, 3260 598, 599, 600, 601, 602, 603, 604, 605,
3324 605, 606, 607, 609, 610, 611, 612, 613, 3261 606, 608, 609, 610, 611, 612, 613, 614,
3325 614, 615, 616, 617, 618, 619, 620, 621, 3262 615, 616, 617, 618, 619, 620, 621, 622,
3326 622, 623, 624, 625, 626, 627, 628, 629, 3263 623, 624, 625, 626, 627, 628, 629, 630,
3327 630, 631, 633, 634, 637, 639, 640, 641, 3264 632, 633, 636, 638, 639, 640, 641, 642,
3328 642, 643, 644, 645, 646, 649, 650, 652, 3265 643, 644, 645, 648, 649, 651, 652, 653,
3329 653, 654, 656, 657, 658, 659, 660, 661, 3266 655, 656, 657, 658, 659, 660, 661, 662,
3330 662, 663, 664, 665, 666, 667, 668, 669, 3267 663, 664, 665, 666, 667, 668, 669, 670,
3331 670, 671, 672, 674, 675, 676, 677, 678, 3268 671, 673, 674, 675, 676, 677, 678, 680,
3332 679, 681, 683, 685, 687, 689, 690, 1464, 3269 682, 684, 686, 688, 689, 1459, 1459, 690,
3333 1464, 691, 828, 829, 760, 830, 831, 832, 3270 827, 828, 759, 829, 830, 831, 832, 833,
3334 833, 834, 835, 789, 836, 725, 837, 838, 3271 834, 788, 835, 724, 836, 837, 838, 839,
3335 839, 840, 841, 842, 843, 844, 745, 845, 3272 840, 841, 842, 843, 744, 844, 845, 846,
3336 846, 847, 848, 849, 850, 851, 852, 853, 3273 847, 848, 849, 850, 851, 852, 853, 769,
3337 854, 770, 855, 857, 858, 859, 860, 861, 3274 854, 856, 857, 858, 859, 860, 861, 862,
3338 862, 863, 864, 865, 866, 703, 867, 868, 3275 863, 864, 865, 702, 866, 867, 868, 869,
3339 869, 870, 871, 872, 873, 874, 875, 741, 3276 870, 871, 872, 873, 874, 740, 875, 876,
3340 876, 877, 878, 879, 880, 811, 882, 883, 3277 877, 878, 879, 810, 881, 882, 885, 887,
3341 886, 888, 889, 890, 891, 892, 893, 896, 3278 888, 889, 890, 891, 892, 895, 896, 898,
3342 897, 899, 900, 901, 903, 904, 905, 906, 3279 899, 900, 902, 903, 904, 905, 906, 907,
3343 907, 908, 909, 910, 911, 912, 913, 915, 3280 908, 909, 910, 911, 912, 914, 915, 916,
3344 916, 917, 918, 921, 923, 924, 926, 928, 3281 917, 920, 922, 923, 925, 927, 1509, 1510,
3345 1515, 1517, 1518, 1516, 931, 932, 1515, 934, 3282 929, 930, 931, 1509, 1509, 932, 1523, 1523,
3346 1541, 1541, 1541, 1543, 1544, 1542, 939, 940, 3283 1524, 935, 1523, 936, 1525, 1526, 1529, 1530,
3347 1545, 1546, 1550, 1550, 1550, 1551, 946, 947, 3284 1534, 1534, 1535, 941, 1534, 942, 1536, 1537,
3348 1552, 1553, 1557, 1558, 1557, 973, 974, 975, 3285 1540, 1541, 1545, 1546, 1545, 968, 969, 970,
3349 976, 953, 977, 978, 979, 980, 981, 982, 3286 971, 948, 972, 973, 974, 975, 976, 977,
3350 983, 984, 985, 986, 987, 988, 989, 990, 3287 978, 979, 980, 981, 982, 983, 984, 985,
3351 991, 992, 993, 994, 995, 996, 997, 998, 3288 986, 987, 988, 989, 990, 991, 992, 993,
3352 999, 1000, 1001, 1002, 1003, 1005, 1006, 1007, 3289 994, 995, 996, 997, 998, 1000, 1001, 1002,
3353 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 3290 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010,
3354 1016, 1017, 1018, 1019, 1020, 1021, 955, 1022, 3291 1011, 1012, 1013, 1014, 1015, 1016, 950, 1017,
3355 1023, 1024, 1025, 1026, 1027, 1028, 1029, 1030, 3292 1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025,
3356 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 3293 1026, 1027, 1028, 1029, 1030, 1031, 1032, 1033,
3357 1039, 954, 1040, 1041, 1042, 1043, 1044, 1046, 3294 1034, 949, 1035, 1036, 1037, 1038, 1039, 1041,
3358 1047, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 3295 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049,
3359 1055, 1056, 1057, 1058, 1059, 1061, 1062, 1063, 3296 1050, 1051, 1052, 1053, 1054, 1056, 1057, 1058,
3360 1064, 1065, 1066, 1070, 1072, 1073, 1074, 1075, 3297 1059, 1060, 1061, 1065, 1067, 1068, 1069, 1070,
3361 970, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 3298 965, 1071, 1072, 1073, 1074, 1075, 1076, 1077,
3362 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 3299 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085,
3363 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100, 3300 1087, 1088, 1090, 1091, 1092, 1093, 1094, 1095,
3364 968, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 3301 963, 1096, 1097, 1098, 1099, 1100, 1101, 1102,
3365 1108, 1109, 1111, 1143, 1167, 1170, 1171, 1173, 3302 1103, 1104, 1106, 1138, 1162, 1165, 1166, 1168,
3366 1182, 1183, 1186, 1190, 1208, 1068, 1215, 1217, 3303 1177, 1178, 1181, 1185, 1203, 1063, 1210, 1212,
3367 1219, 1221, 1112, 1113, 1114, 1115, 1116, 1117, 3304 1214, 1216, 1107, 1108, 1109, 1110, 1111, 1112,
3368 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 3305 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120,
3369 1126, 1127, 1128, 1129, 1130, 1131, 1132, 1133, 3306 1121, 1122, 1123, 1124, 1125, 1126, 1127, 1128,
3370 1134, 1135, 1136, 1137, 1138, 1139, 1140, 1141, 3307 1129, 1130, 1131, 1132, 1133, 1134, 1135, 1136,
3371 1142, 1144, 1145, 1146, 1147, 1148, 1149, 1150, 3308 1137, 1139, 1140, 1141, 1142, 1143, 1144, 1145,
3372 1151, 1152, 1153, 1154, 1155, 1156, 1157, 1158, 3309 1146, 1147, 1148, 1149, 1150, 1151, 1152, 1153,
3373 1159, 1160, 1161, 1162, 1163, 1164, 1165, 1166, 3310 1154, 1155, 1156, 1157, 1158, 1159, 1160, 1161,
3374 1168, 1169, 1172, 1174, 1175, 1176, 1177, 1178, 3311 1163, 1164, 1167, 1169, 1170, 1171, 1172, 1173,
3375 1179, 1180, 1181, 1184, 1185, 1187, 1188, 1189, 3312 1174, 1175, 1176, 1179, 1180, 1182, 1183, 1184,
3376 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 3313 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193,
3377 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 3314 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201,
3378 1207, 1209, 1210, 1211, 1212, 1213, 1214, 1216, 3315 1202, 1204, 1205, 1206, 1207, 1208, 1209, 1211,
3379 1218, 1220, 1222, 1224, 1225, 1557, 1557, 1226, 3316 1213, 1215, 1217, 1219, 1220, 1545, 1545, 1221,
3380 1363, 1364, 1295, 1365, 1366, 1367, 1368, 1369, 3317 1358, 1359, 1290, 1360, 1361, 1362, 1363, 1364,
3381 1370, 1324, 1371, 1260, 1372, 1373, 1374, 1375, 3318 1365, 1319, 1366, 1255, 1367, 1368, 1369, 1370,
3382 1376, 1377, 1378, 1379, 1280, 1380, 1381, 1382, 3319 1371, 1372, 1373, 1374, 1275, 1375, 1376, 1377,
3383 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1305, 3320 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1300,
3384 1390, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 3321 1385, 1387, 1388, 1389, 1390, 1391, 1392, 1393,
3385 1399, 1400, 1401, 1238, 1402, 1403, 1404, 1405, 3322 1394, 1395, 1396, 1233, 1397, 1398, 1399, 1400,
3386 1406, 1407, 1408, 1409, 1410, 1276, 1411, 1412, 3323 1401, 1402, 1403, 1404, 1405, 1271, 1406, 1407,
3387 1413, 1414, 1415, 1346, 1417, 1418, 1421, 1423, 3324 1408, 1409, 1410, 1341, 1412, 1413, 1416, 1418,
3388 1424, 1425, 1426, 1427, 1428, 1431, 1432, 1434, 3325 1419, 1420, 1421, 1422, 1423, 1426, 1427, 1429,
3389 1435, 1436, 1438, 1439, 1440, 1441, 1442, 1443, 3326 1430, 1431, 1433, 1434, 1435, 1436, 1437, 1438,
3390 1444, 1445, 1446, 1447, 1448, 1450, 1451, 1452, 3327 1439, 1440, 1441, 1442, 1443, 1445, 1446, 1447,
3391 1453, 1456, 1458, 1459, 1461, 1463, 1465, 1464, 3328 1448, 1451, 1453, 1454, 1456, 1458, 1460, 1459,
3392 1466, 1467, 1464, 1468, 1464, 1469, 1470, 1471, 3329 1461, 1462, 1459, 1463, 1459, 1464, 1465, 1466,
3393 1474, 1475, 1476, 1464, 1478, 1464, 1479, 1464, 3330 1468, 1469, 1470, 1459, 1472, 1459, 1473, 1459,
3394 1480, 1481, 1482, 1483, 1484, 1485, 1486, 1487, 3331 1474, 1475, 1476, 1477, 1478, 1479, 1480, 1481,
3395 1488, 1489, 1490, 1491, 1492, 1493, 1494, 1495, 3332 1482, 1483, 1484, 1485, 1486, 1487, 1488, 1489,
3396 1496, 1497, 1498, 1499, 1500, 1501, 1502, 1503, 3333 1490, 1491, 1492, 1493, 1494, 1495, 1496, 1497,
3397 1504, 1505, 1506, 1507, 1508, 1509, 1510, 1511, 3334 1498, 1499, 1500, 1501, 1502, 1503, 1504, 1505,
3398 1512, 1513, 1514, 1464, 1464, 1464, 1464, 1464, 3335 1506, 1507, 1508, 1459, 1459, 1459, 1459, 1459,
3399 2, 1464, 1464, 8, 1464, 1464, 1464, 1464, 3336 1459, 1, 1459, 7, 1459, 1459, 1459, 1459,
3400 1464, 416, 417, 421, 422, 423, 424, 425, 3337 1459, 415, 416, 420, 421, 422, 423, 424,
3401 426, 427, 428, 429, 430, 431, 432, 434, 3338 425, 426, 427, 428, 429, 430, 431, 433,
3402 436, 437, 469, 510, 525, 532, 534, 536, 3339 435, 436, 468, 509, 524, 531, 533, 535,
3403 556, 559, 575, 688, 1464, 1464, 1464, 692, 3340 555, 558, 574, 687, 1459, 1459, 1459, 691,
3404 693, 694, 695, 696, 697, 698, 699, 700, 3341 692, 693, 694, 695, 696, 697, 698, 699,
3405 701, 702, 704, 705, 706, 707, 708, 709, 3342 700, 701, 703, 704, 705, 706, 707, 708,
3406 710, 711, 712, 713, 714, 715, 716, 717, 3343 709, 710, 711, 712, 713, 714, 715, 716,
3407 718, 719, 720, 721, 722, 723, 724, 726, 3344 717, 718, 719, 720, 721, 722, 723, 725,
3408 727, 728, 729, 730, 731, 732, 733, 734, 3345 726, 727, 728, 729, 730, 731, 732, 733,
3409 735, 736, 737, 738, 739, 740, 742, 743, 3346 734, 735, 736, 737, 738, 739, 741, 742,
3410 744, 746, 747, 748, 749, 750, 751, 752, 3347 743, 745, 746, 747, 748, 749, 750, 751,
3411 753, 754, 755, 756, 757, 758, 759, 761, 3348 752, 753, 754, 755, 756, 757, 758, 760,
3412 762, 763, 764, 765, 766, 767, 768, 769, 3349 761, 762, 763, 764, 765, 766, 767, 768,
3413 771, 772, 773, 774, 775, 776, 777, 778, 3350 770, 771, 772, 773, 774, 775, 776, 777,
3414 779, 780, 781, 782, 783, 784, 785, 786, 3351 778, 779, 780, 781, 782, 783, 784, 785,
3415 787, 788, 790, 791, 792, 793, 794, 795, 3352 786, 787, 789, 790, 791, 792, 793, 794,
3416 796, 797, 798, 799, 800, 801, 802, 803, 3353 795, 796, 797, 798, 799, 800, 801, 802,
3417 804, 805, 806, 807, 808, 809, 810, 812, 3354 803, 804, 805, 806, 807, 808, 809, 811,
3418 813, 814, 815, 816, 817, 818, 819, 820, 3355 812, 813, 814, 815, 816, 817, 818, 819,
3419 821, 822, 823, 824, 825, 826, 827, 856, 3356 820, 821, 822, 823, 824, 825, 826, 855,
3420 881, 884, 885, 887, 894, 895, 898, 902, 3357 880, 883, 884, 886, 893, 894, 897, 901,
3421 914, 919, 920, 922, 925, 927, 1515, 1515, 3358 913, 918, 919, 921, 924, 926, 1511, 1509,
3422 1534, 1536, 1519, 1515, 1538, 1539, 1540, 1515, 3359 1512, 1517, 1519, 1509, 1520, 1521, 1522, 1509,
3423 929, 930, 933, 1515, 1516, 929, 930, 1519, 3360 928, 1509, 1509, 1513, 1514, 1516, 1509, 1515,
3424 931, 932, 933, 1515, 1516, 929, 930, 1519, 3361 1509, 1509, 1509, 1518, 1509, 1509, 1509, 933,
3425 931, 932, 933, 1520, 1525, 1521, 1522, 1524, 3362 934, 938, 939, 1523, 1531, 1532, 1533, 1523,
3426 1531, 1532, 1533, 1517, 1521, 1522, 1524, 1531, 3363 937, 1523, 1523, 934, 1527, 1528, 1523, 1523,
3427 1532, 1533, 1518, 1523, 1526, 1527, 1528, 1529, 3364 1523, 1523, 1523, 940, 944, 945, 1534, 1542,
3428 1530, 1517, 1521, 1522, 1524, 1531, 1532, 1533, 3365 1543, 1544, 1534, 943, 1534, 1534, 940, 1538,
3429 1520, 1525, 1523, 1526, 1527, 1528, 1529, 1530, 3366 1539, 1534, 1534, 1534, 1534, 1534, 1545, 1547,
3430 1518, 1523, 1526, 1527, 1528, 1529, 1530, 1520, 3367 1548, 1549, 1550, 1551, 1552, 1553, 1554, 1555,
3431 1525, 1515, 1535, 1515, 1515, 1537, 1515, 1515, 3368 1556, 1557, 1558, 1559, 1560, 1561, 1562, 1563,
3432 1515, 935, 936, 942, 943, 1541, 1547, 1548, 3369 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571,
3433 1549, 1541, 937, 938, 941, 1541, 1542, 1541, 3370 1572, 1573, 1574, 1575, 1576, 1577, 1578, 1579,
3434 936, 937, 938, 939, 940, 941, 1541, 1542, 3371 1580, 1581, 1545, 946, 947, 951, 952, 953,
3435 1541, 936, 937, 938, 939, 940, 941, 1541, 3372 954, 955, 956, 957, 958, 959, 960, 961,
3436 1541, 1541, 1541, 1541, 944, 949, 950, 1550, 3373 962, 964, 966, 967, 999, 1040, 1055, 1062,
3437 1554, 1555, 1556, 1550, 945, 948, 1550, 1550, 3374 1064, 1066, 1086, 1089, 1105, 1218, 1545, 1222,
3438 1550, 1550, 1550, 1557, 1559, 1560, 1561, 1562, 3375 1223, 1224, 1225, 1226, 1227, 1228, 1229, 1230,
3439 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 3376 1231, 1232, 1234, 1235, 1236, 1237, 1238, 1239,
3440 1571, 1572, 1573, 1574, 1575, 1576, 1577, 1578,
3441 1579, 1580, 1581, 1582, 1583, 1584, 1585, 1586,
3442 1587, 1588, 1589, 1590, 1591, 1592, 1593, 1557,
3443 951, 952, 956, 957, 958, 959, 960, 961,
3444 962, 963, 964, 965, 966, 967, 969, 971,
3445 972, 1004, 1045, 1060, 1067, 1069, 1071, 1091,
3446 1094, 1110, 1223, 1557, 1227, 1228, 1229, 1230,
3447 1231, 1232, 1233, 1234, 1235, 1236, 1237, 1239,
3448 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247, 3377 1240, 1241, 1242, 1243, 1244, 1245, 1246, 1247,
3449 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 3378 1248, 1249, 1250, 1251, 1252, 1253, 1254, 1256,
3450 1256, 1257, 1258, 1259, 1261, 1262, 1263, 1264, 3379 1257, 1258, 1259, 1260, 1261, 1262, 1263, 1264,
3451 1265, 1266, 1267, 1268, 1269, 1270, 1271, 1272, 3380 1265, 1266, 1267, 1268, 1269, 1270, 1272, 1273,
3452 1273, 1274, 1275, 1277, 1278, 1279, 1281, 1282, 3381 1274, 1276, 1277, 1278, 1279, 1280, 1281, 1282,
3453 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1290, 3382 1283, 1284, 1285, 1286, 1287, 1288, 1289, 1291,
3454 1291, 1292, 1293, 1294, 1296, 1297, 1298, 1299, 3383 1292, 1293, 1294, 1295, 1296, 1297, 1298, 1299,
3455 1300, 1301, 1302, 1303, 1304, 1306, 1307, 1308, 3384 1301, 1302, 1303, 1304, 1305, 1306, 1307, 1308,
3456 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316, 3385 1309, 1310, 1311, 1312, 1313, 1314, 1315, 1316,
3457 1317, 1318, 1319, 1320, 1321, 1322, 1323, 1325, 3386 1317, 1318, 1320, 1321, 1322, 1323, 1324, 1325,
3458 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333, 3387 1326, 1327, 1328, 1329, 1330, 1331, 1332, 1333,
3459 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1341, 3388 1334, 1335, 1336, 1337, 1338, 1339, 1340, 1342,
3460 1342, 1343, 1344, 1345, 1347, 1348, 1349, 1350, 3389 1343, 1344, 1345, 1346, 1347, 1348, 1349, 1350,
3461 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1358, 3390 1351, 1352, 1353, 1354, 1355, 1356, 1357, 1386,
3462 1359, 1360, 1361, 1362, 1391, 1416, 1419, 1420, 3391 1411, 1414, 1415, 1417, 1424, 1425, 1428, 1432,
3463 1422, 1429, 1430, 1433, 1437, 1449, 1454, 1455, 3392 1444, 1449, 1450, 1452, 1455, 1457,
3464 1457, 1460, 1462,
3465} 3393}
3466 3394
3467var _hcltok_trans_actions []byte = []byte{ 3395var _hcltok_trans_actions []byte = []byte{
3468 151, 0, 93, 147, 109, 0, 0, 201, 3396 145, 107, 0, 0, 91, 141, 0, 7,
3469 143, 0, 13, 0, 0, 0, 0, 0,
3470 0, 0, 0, 0, 0, 0, 0, 0, 3397 0, 0, 0, 0, 0, 0, 0, 0,
3471 0, 0, 0, 0, 0, 0, 0, 0, 3398 0, 0, 0, 0, 0, 0, 0, 0,
3472 0, 0, 0, 0, 0, 0, 0, 0, 3399 0, 0, 0, 0, 0, 0, 0, 0,
3473 0, 0, 0, 0, 0, 0, 0, 123,
3474 0, 0, 0, 0, 0, 0, 0, 0, 3400 0, 0, 0, 0, 0, 0, 0, 0,
3401 0, 0, 0, 0, 121, 0, 0, 0,
3475 0, 0, 0, 0, 0, 0, 0, 0, 3402 0, 0, 0, 0, 0, 0, 0, 0,
3476 0, 0, 0, 0, 0, 0, 0, 0, 3403 0, 0, 0, 0, 0, 0, 0, 0,
3477 0, 0, 0, 0, 0, 0, 0, 0, 3404 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3517,8 +3444,8 @@ var _hcltok_trans_actions []byte = []byte{
3517 0, 0, 0, 0, 0, 0, 0, 0, 3444 0, 0, 0, 0, 0, 0, 0, 0,
3518 0, 0, 0, 0, 0, 0, 0, 0, 3445 0, 0, 0, 0, 0, 0, 0, 0,
3519 0, 0, 0, 0, 0, 0, 0, 0, 3446 0, 0, 0, 0, 0, 0, 0, 0,
3520 0, 0, 0, 0, 0, 145, 198, 0,
3521 0, 0, 0, 0, 0, 0, 0, 0, 3447 0, 0, 0, 0, 0, 0, 0, 0,
3448 0, 0, 143, 193, 149, 0, 0, 0,
3522 0, 0, 0, 0, 0, 0, 0, 0, 3449 0, 0, 0, 0, 0, 0, 0, 0,
3523 0, 0, 0, 0, 0, 0, 0, 0, 3450 0, 0, 0, 0, 0, 0, 0, 0,
3524 0, 0, 0, 0, 0, 0, 0, 0, 3451 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3548,9 +3475,8 @@ var _hcltok_trans_actions []byte = []byte{
3548 0, 0, 0, 0, 0, 0, 0, 0, 3475 0, 0, 0, 0, 0, 0, 0, 0,
3549 0, 0, 0, 0, 0, 0, 0, 0, 3476 0, 0, 0, 0, 0, 0, 0, 0,
3550 0, 0, 0, 0, 0, 0, 0, 0, 3477 0, 0, 0, 0, 0, 0, 0, 0,
3551 0, 0, 0, 0, 0, 0, 0, 149,
3552 127, 0, 0, 0, 0, 0, 0, 0,
3553 0, 0, 0, 0, 0, 0, 0, 0, 3478 0, 0, 0, 0, 0, 0, 0, 0,
3479 0, 0, 0, 0, 0, 147, 125, 0,
3554 0, 0, 0, 0, 0, 0, 0, 0, 3480 0, 0, 0, 0, 0, 0, 0, 0,
3555 0, 0, 0, 0, 0, 0, 0, 0, 3481 0, 0, 0, 0, 0, 0, 0, 0,
3556 0, 0, 0, 0, 0, 0, 0, 0, 3482 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3561,11 +3487,12 @@ var _hcltok_trans_actions []byte = []byte{
3561 0, 0, 0, 0, 0, 0, 0, 0, 3487 0, 0, 0, 0, 0, 0, 0, 0,
3562 0, 0, 0, 0, 0, 0, 0, 0, 3488 0, 0, 0, 0, 0, 0, 0, 0,
3563 0, 0, 0, 0, 0, 0, 0, 0, 3489 0, 0, 0, 0, 0, 0, 0, 0,
3564 35, 13, 13, 13, 0, 0, 37, 0,
3565 57, 43, 55, 180, 180, 180, 0, 0,
3566 0, 0, 77, 63, 75, 186, 0, 0,
3567 0, 0, 87, 192, 91, 0, 0, 0,
3568 0, 0, 0, 0, 0, 0, 0, 0, 3490 0, 0, 0, 0, 0, 0, 0, 0,
3491 0, 0, 0, 0, 0, 0, 31, 169,
3492 0, 0, 0, 35, 33, 0, 55, 41,
3493 175, 0, 53, 0, 175, 175, 0, 0,
3494 75, 61, 181, 0, 73, 0, 181, 181,
3495 0, 0, 85, 187, 89, 0, 0, 0,
3569 0, 0, 0, 0, 0, 0, 0, 0, 3496 0, 0, 0, 0, 0, 0, 0, 0,
3570 0, 0, 0, 0, 0, 0, 0, 0, 3497 0, 0, 0, 0, 0, 0, 0, 0,
3571 0, 0, 0, 0, 0, 0, 0, 0, 3498 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3595,8 +3522,8 @@ var _hcltok_trans_actions []byte = []byte{
3595 0, 0, 0, 0, 0, 0, 0, 0, 3522 0, 0, 0, 0, 0, 0, 0, 0,
3596 0, 0, 0, 0, 0, 0, 0, 0, 3523 0, 0, 0, 0, 0, 0, 0, 0,
3597 0, 0, 0, 0, 0, 0, 0, 0, 3524 0, 0, 0, 0, 0, 0, 0, 0,
3598 0, 0, 0, 0, 0, 89, 81, 0,
3599 0, 0, 0, 0, 0, 0, 0, 0, 3525 0, 0, 0, 0, 0, 0, 0, 0,
3526 0, 0, 0, 0, 0, 87, 79, 0,
3600 0, 0, 0, 0, 0, 0, 0, 0, 3527 0, 0, 0, 0, 0, 0, 0, 0,
3601 0, 0, 0, 0, 0, 0, 0, 0, 3528 0, 0, 0, 0, 0, 0, 0, 0,
3602 0, 0, 0, 0, 0, 0, 0, 0, 3529 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3607,20 +3534,20 @@ var _hcltok_trans_actions []byte = []byte{
3607 0, 0, 0, 0, 0, 0, 0, 0, 3534 0, 0, 0, 0, 0, 0, 0, 0,
3608 0, 0, 0, 0, 0, 0, 0, 0, 3535 0, 0, 0, 0, 0, 0, 0, 0,
3609 0, 0, 0, 0, 0, 0, 0, 0, 3536 0, 0, 0, 0, 0, 0, 0, 0,
3610 0, 0, 0, 0, 0, 0, 0, 95,
3611 0, 0, 121, 210, 113, 0, 13, 204,
3612 13, 0, 0, 115, 0, 117, 0, 125,
3613 0, 0, 0, 0, 0, 0, 0, 0, 3537 0, 0, 0, 0, 0, 0, 0, 0,
3538 0, 0, 0, 0, 0, 0, 0, 93,
3539 0, 0, 119, 0, 111, 0, 7, 7,
3540 7, 0, 0, 113, 0, 115, 0, 123,
3614 0, 0, 0, 0, 0, 0, 0, 0, 3541 0, 0, 0, 0, 0, 0, 0, 0,
3615 0, 0, 0, 0, 0, 0, 13, 13,
3616 13, 207, 207, 207, 207, 207, 207, 13,
3617 13, 207, 13, 129, 141, 137, 99, 105,
3618 0, 135, 131, 0, 103, 97, 111, 101,
3619 133, 0, 0, 0, 0, 0, 0, 0,
3620 0, 0, 0, 0, 0, 0, 0, 0, 3542 0, 0, 0, 0, 0, 0, 0, 0,
3543 0, 0, 0, 0, 0, 0, 7, 7,
3544 7, 196, 196, 196, 196, 196, 196, 7,
3545 7, 196, 7, 127, 139, 135, 97, 133,
3546 103, 0, 129, 0, 101, 95, 109, 99,
3547 131, 0, 0, 0, 0, 0, 0, 0,
3621 0, 0, 0, 0, 0, 0, 0, 0, 3548 0, 0, 0, 0, 0, 0, 0, 0,
3622 0, 0, 0, 0, 107, 119, 139, 0,
3623 0, 0, 0, 0, 0, 0, 0, 0, 3549 0, 0, 0, 0, 0, 0, 0, 0,
3550 0, 0, 0, 0, 105, 117, 137, 0,
3624 0, 0, 0, 0, 0, 0, 0, 0, 3551 0, 0, 0, 0, 0, 0, 0, 0,
3625 0, 0, 0, 0, 0, 0, 0, 0, 3552 0, 0, 0, 0, 0, 0, 0, 0,
3626 0, 0, 0, 0, 0, 0, 0, 0, 3553 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3637,32 +3564,24 @@ var _hcltok_trans_actions []byte = []byte{
3637 0, 0, 0, 0, 0, 0, 0, 0, 3564 0, 0, 0, 0, 0, 0, 0, 0,
3638 0, 0, 0, 0, 0, 0, 0, 0, 3565 0, 0, 0, 0, 0, 0, 0, 0,
3639 0, 0, 0, 0, 0, 0, 0, 0, 3566 0, 0, 0, 0, 0, 0, 0, 0,
3640 0, 0, 0, 0, 0, 0, 21, 19,
3641 0, 0, 13, 23, 0, 13, 13, 29,
3642 0, 0, 0, 153, 174, 1, 1, 174,
3643 1, 1, 1, 156, 177, 3, 3, 177,
3644 3, 3, 3, 0, 0, 0, 0, 13,
3645 13, 13, 13, 174, 1, 1, 174, 174,
3646 174, 174, 174, 1, 1, 174, 174, 174,
3647 174, 177, 3, 3, 177, 177, 177, 177,
3648 1, 1, 0, 0, 13, 13, 13, 13,
3649 177, 3, 3, 177, 177, 177, 177, 3,
3650 3, 31, 0, 25, 15, 0, 27, 17,
3651 33, 0, 0, 0, 0, 45, 0, 183,
3652 183, 51, 0, 0, 0, 162, 213, 159,
3653 5, 5, 5, 5, 5, 5, 168, 217,
3654 165, 7, 7, 7, 7, 7, 7, 47,
3655 39, 49, 41, 53, 0, 0, 0, 65,
3656 0, 189, 189, 71, 0, 0, 67, 59,
3657 69, 61, 73, 79, 0, 0, 0, 0,
3658 0, 0, 0, 0, 0, 0, 0, 0, 3567 0, 0, 0, 0, 0, 0, 0, 0,
3568 0, 0, 0, 0, 0, 0, 0, 13,
3569 0, 0, 172, 17, 0, 7, 7, 23,
3570 0, 25, 27, 0, 0, 0, 151, 0,
3571 15, 19, 9, 0, 21, 11, 29, 0,
3572 0, 0, 0, 43, 0, 178, 178, 49,
3573 0, 157, 154, 1, 175, 175, 45, 37,
3574 47, 39, 51, 0, 0, 0, 63, 0,
3575 184, 184, 69, 0, 163, 160, 1, 181,
3576 181, 65, 57, 67, 59, 71, 77, 0,
3659 0, 0, 0, 0, 0, 0, 0, 0, 3577 0, 0, 0, 0, 0, 0, 0, 0,
3660 0, 0, 13, 13, 13, 195, 195, 195,
3661 195, 195, 195, 13, 13, 195, 13, 83,
3662 0, 0, 0, 0, 0, 0, 0, 0, 3578 0, 0, 0, 0, 0, 0, 0, 0,
3579 0, 0, 0, 0, 0, 7, 7, 7,
3580 190, 190, 190, 190, 190, 190, 7, 7,
3581 190, 7, 81, 0, 0, 0, 0, 0,
3663 0, 0, 0, 0, 0, 0, 0, 0, 3582 0, 0, 0, 0, 0, 0, 0, 0,
3664 0, 0, 0, 0, 0, 0, 0, 0, 3583 0, 0, 0, 0, 0, 0, 0, 0,
3665 0, 0, 0, 85, 0, 0, 0, 0, 3584 0, 0, 0, 0, 0, 0, 83, 0,
3666 0, 0, 0, 0, 0, 0, 0, 0, 3585 0, 0, 0, 0, 0, 0, 0, 0,
3667 0, 0, 0, 0, 0, 0, 0, 0, 3586 0, 0, 0, 0, 0, 0, 0, 0,
3668 0, 0, 0, 0, 0, 0, 0, 0, 3587 0, 0, 0, 0, 0, 0, 0, 0,
@@ -3680,7 +3599,7 @@ var _hcltok_trans_actions []byte = []byte{
3680 0, 0, 0, 0, 0, 0, 0, 0, 3599 0, 0, 0, 0, 0, 0, 0, 0,
3681 0, 0, 0, 0, 0, 0, 0, 0, 3600 0, 0, 0, 0, 0, 0, 0, 0,
3682 0, 0, 0, 0, 0, 0, 0, 0, 3601 0, 0, 0, 0, 0, 0, 0, 0,
3683 0, 0, 0, 3602 0, 0, 0, 0, 0, 0,
3684} 3603}
3685 3604
3686var _hcltok_to_state_actions []byte = []byte{ 3605var _hcltok_to_state_actions []byte = []byte{
@@ -3866,24 +3785,22 @@ var _hcltok_to_state_actions []byte = []byte{
3866 0, 0, 0, 0, 0, 0, 0, 0, 3785 0, 0, 0, 0, 0, 0, 0, 0,
3867 0, 0, 0, 0, 0, 0, 0, 0, 3786 0, 0, 0, 0, 0, 0, 0, 0,
3868 0, 0, 0, 0, 0, 0, 0, 0, 3787 0, 0, 0, 0, 0, 0, 0, 0,
3869 0, 0, 0, 0, 0, 0, 0, 0, 3788 0, 0, 0, 3, 0, 0, 0, 0,
3870 9, 0, 0, 0, 0, 0, 0, 0,
3871 0, 0, 0, 0, 0, 0, 0, 0,
3872 0, 0, 0, 0, 0, 0, 0, 0, 3789 0, 0, 0, 0, 0, 0, 0, 0,
3873 0, 0, 0, 0, 0, 0, 0, 0, 3790 0, 0, 0, 0, 0, 0, 0, 0,
3874 0, 0, 0, 0, 0, 0, 0, 0, 3791 0, 0, 0, 0, 0, 0, 0, 0,
3875 0, 0, 0, 0, 0, 0, 0, 0, 3792 0, 0, 0, 0, 0, 0, 0, 0,
3876 0, 0, 0, 9, 0, 0, 0, 0,
3877 0, 0, 0, 0, 0, 0, 0, 0, 3793 0, 0, 0, 0, 0, 0, 0, 0,
3794 0, 0, 0, 0, 0, 3, 0, 0,
3878 0, 0, 0, 0, 0, 0, 0, 0, 3795 0, 0, 0, 0, 0, 0, 0, 0,
3879 0, 0, 0, 0, 0, 171, 0, 0, 3796 0, 0, 0, 166, 0, 0, 0, 0,
3880 0, 0, 0, 0, 0, 0, 171, 0, 3797 0, 0, 0, 0, 0, 0, 166, 0,
3881 0, 0, 0, 0, 0, 9, 0, 0,
3882 0, 0, 0, 0, 0, 0, 0, 0, 3798 0, 0, 0, 0, 0, 0, 0, 0,
3799 0, 3, 0, 0, 0, 0, 0, 0,
3883 0, 0, 0, 0, 0, 0, 0, 0, 3800 0, 0, 0, 0, 0, 0, 0, 0,
3884 0, 0, 0, 0, 0, 0, 0, 0, 3801 0, 0, 0, 0, 0, 0, 0, 0,
3885 0, 0, 0, 0, 0, 0, 0, 0, 3802 0, 0, 0, 0, 0, 0, 0, 0,
3886 0, 0, 3803 0, 0, 0, 0, 0, 0,
3887} 3804}
3888 3805
3889var _hcltok_from_state_actions []byte = []byte{ 3806var _hcltok_from_state_actions []byte = []byte{
@@ -4069,147 +3986,144 @@ var _hcltok_from_state_actions []byte = []byte{
4069 0, 0, 0, 0, 0, 0, 0, 0, 3986 0, 0, 0, 0, 0, 0, 0, 0,
4070 0, 0, 0, 0, 0, 0, 0, 0, 3987 0, 0, 0, 0, 0, 0, 0, 0,
4071 0, 0, 0, 0, 0, 0, 0, 0, 3988 0, 0, 0, 0, 0, 0, 0, 0,
3989 0, 0, 0, 5, 0, 0, 0, 0,
4072 0, 0, 0, 0, 0, 0, 0, 0, 3990 0, 0, 0, 0, 0, 0, 0, 0,
4073 11, 0, 0, 0, 0, 0, 0, 0,
4074 0, 0, 0, 0, 0, 0, 0, 0, 3991 0, 0, 0, 0, 0, 0, 0, 0,
4075 0, 0, 0, 0, 0, 0, 0, 0, 3992 0, 0, 0, 0, 0, 0, 0, 0,
4076 0, 0, 0, 0, 0, 0, 0, 0, 3993 0, 0, 0, 0, 0, 0, 0, 0,
4077 0, 0, 0, 0, 0, 0, 0, 0, 3994 0, 0, 0, 0, 0, 0, 0, 0,
3995 0, 0, 0, 0, 0, 5, 0, 0,
4078 0, 0, 0, 0, 0, 0, 0, 0, 3996 0, 0, 0, 0, 0, 0, 0, 0,
4079 0, 0, 0, 11, 0, 0, 0, 0, 3997 0, 0, 0, 5, 0, 0, 0, 0,
3998 0, 0, 0, 0, 0, 0, 5, 0,
4080 0, 0, 0, 0, 0, 0, 0, 0, 3999 0, 0, 0, 0, 0, 0, 0, 0,
4000 0, 5, 0, 0, 0, 0, 0, 0,
4081 0, 0, 0, 0, 0, 0, 0, 0, 4001 0, 0, 0, 0, 0, 0, 0, 0,
4082 0, 0, 0, 0, 0, 11, 0, 0,
4083 0, 0, 0, 0, 0, 0, 11, 0,
4084 0, 0, 0, 0, 0, 11, 0, 0,
4085 0, 0, 0, 0, 0, 0, 0, 0, 4002 0, 0, 0, 0, 0, 0, 0, 0,
4086 0, 0, 0, 0, 0, 0, 0, 0, 4003 0, 0, 0, 0, 0, 0, 0, 0,
4087 0, 0, 0, 0, 0, 0, 0, 0, 4004 0, 0, 0, 0, 0, 0,
4088 0, 0, 0, 0, 0, 0, 0, 0,
4089 0, 0,
4090} 4005}
4091 4006
4092var _hcltok_eof_trans []int16 = []int16{ 4007var _hcltok_eof_trans []int16 = []int16{
4093 0, 1, 4, 1, 1, 9, 9, 9, 4008 0, 1, 1, 1, 6, 6, 6, 1,
4094 4, 4, 4, 4, 4, 4, 4, 4, 4009 1, 1, 1, 1, 1, 1, 1, 1,
4095 4, 4, 4, 4, 4, 4, 4, 4, 4010 1, 1, 1, 1, 1, 1, 1, 1,
4096 4, 4, 4, 4, 4, 4, 4, 4, 4011 1, 1, 1, 1, 1, 1, 1, 1,
4097 4, 4, 4, 4, 4, 4, 4, 4, 4012 1, 1, 1, 1, 1, 1, 1, 1,
4098 4, 4, 4, 4, 4, 4, 4, 4, 4013 1, 1, 1, 1, 1, 1, 1, 1,
4099 4, 4, 4, 4, 4, 4, 4, 4, 4014 1, 1, 1, 1, 1, 1, 1, 1,
4100 4, 4, 4, 4, 4, 4, 4, 4, 4015 1, 1, 1, 1, 1, 1, 1, 1,
4101 4, 4, 4, 4, 4, 4, 4, 4, 4016 1, 1, 1, 1, 1, 1, 1, 1,
4102 4, 4, 4, 4, 4, 4, 4, 4, 4017 1, 1, 1, 1, 1, 1, 1, 1,
4103 4, 4, 4, 4, 4, 4, 4, 4, 4018 1, 1, 1, 1, 1, 1, 1, 1,
4104 4, 4, 4, 4, 4, 4, 4, 4, 4019 1, 1, 1, 1, 1, 1, 1, 1,
4105 4, 4, 4, 4, 4, 4, 4, 4, 4020 1, 1, 1, 1, 1, 1, 1, 1,
4106 4, 4, 4, 4, 4, 4, 4, 4, 4021 1, 1, 1, 1, 1, 1, 1, 1,
4107 4, 4, 4, 4, 4, 4, 4, 4, 4022 1, 1, 1, 1, 1, 1, 1, 1,
4108 4, 4, 4, 4, 4, 4, 4, 4, 4023 1, 1, 1, 1, 1, 1, 1, 1,
4109 4, 4, 4, 4, 4, 4, 4, 4, 4024 1, 1, 1, 1, 1, 1, 1, 1,
4110 4, 4, 4, 4, 4, 4, 4, 4, 4025 1, 1, 1, 1, 1, 1, 1, 1,
4111 4, 4, 4, 4, 4, 4, 4, 4, 4026 1, 1, 1, 1, 1, 1, 1, 1,
4112 4, 4, 4, 4, 4, 4, 4, 4, 4027 1, 1, 1, 1, 1, 1, 1, 1,
4113 4, 4, 4, 4, 4, 4, 4, 4, 4028 1, 1, 1, 1, 1, 1, 1, 1,
4114 4, 4, 4, 4, 4, 4, 4, 4, 4029 1, 1, 1, 1, 1, 1, 1, 1,
4115 4, 4, 4, 4, 4, 4, 4, 4, 4030 1, 1, 1, 1, 1, 1, 1, 1,
4116 4, 4, 4, 4, 4, 4, 4, 4, 4031 1, 1, 1, 1, 1, 1, 1, 1,
4117 4, 4, 4, 4, 4, 4, 4, 4, 4032 1, 1, 1, 1, 1, 1, 1, 1,
4118 4, 4, 4, 4, 4, 4, 4, 4, 4033 1, 1, 1, 1, 1, 1, 1, 1,
4119 4, 4, 4, 4, 4, 4, 4, 4, 4034 1, 1, 1, 1, 1, 1, 1, 1,
4120 4, 4, 4, 4, 4, 4, 4, 4, 4035 1, 1, 1, 1, 1, 1, 1, 1,
4121 4, 4, 4, 4, 4, 4, 4, 4, 4036 1, 1, 1, 1, 1, 1, 1, 1,
4122 4, 4, 4, 4, 4, 4, 4, 4, 4037 1, 1, 1, 1, 1, 1, 1, 1,
4123 4, 4, 4, 4, 4, 4, 4, 4, 4038 1, 1, 1, 1, 1, 1, 1, 1,
4124 4, 4, 4, 4, 4, 4, 4, 4, 4039 1, 1, 1, 1, 1, 1, 1, 1,
4125 4, 4, 4, 4, 4, 4, 4, 4, 4040 1, 1, 1, 1, 1, 1, 1, 1,
4126 4, 4, 4, 4, 4, 4, 4, 4, 4041 1, 1, 1, 1, 1, 1, 1, 1,
4127 4, 4, 4, 4, 4, 4, 4, 4, 4042 1, 1, 1, 1, 1, 1, 1, 1,
4128 4, 4, 4, 4, 4, 4, 4, 4, 4043 1, 1, 1, 1, 1, 1, 1, 1,
4129 4, 4, 4, 4, 4, 4, 4, 4, 4044 1, 1, 1, 1, 1, 1, 1, 1,
4130 4, 4, 4, 4, 4, 4, 4, 4, 4045 1, 1, 1, 1, 1, 1, 1, 1,
4131 4, 4, 4, 4, 4, 4, 4, 4, 4046 1, 1, 1, 1, 1, 1, 1, 1,
4132 4, 4, 4, 4, 4, 4, 4, 4, 4047 1, 1, 1, 1, 1, 1, 1, 1,
4133 4, 4, 4, 4, 4, 4, 4, 4, 4048 1, 1, 1, 1, 1, 1, 1, 1,
4134 4, 4, 4, 4, 4, 4, 4, 4, 4049 1, 1, 1, 1, 1, 1, 1, 1,
4135 4, 4, 4, 4, 4, 4, 4, 4, 4050 1, 1, 1, 1, 1, 1, 1, 1,
4136 4, 4, 4, 4, 4, 4, 4, 4, 4051 1, 1, 1, 1, 1, 1, 1, 1,
4137 4, 4, 4, 4, 4, 4, 4, 4, 4052 1, 1, 1, 1, 1, 1, 1, 1,
4138 4, 4, 4, 4, 4, 4, 4, 4, 4053 1, 1, 1, 1, 1, 1, 1, 1,
4139 4, 4, 4, 4, 4, 4, 4, 4, 4054 1, 1, 1, 1, 1, 1, 1, 1,
4140 4, 4, 4, 4, 4, 4, 4, 4, 4055 1, 1, 1, 1, 1, 1, 1, 1,
4141 4, 4, 4, 4, 4, 4, 4, 4, 4056 1, 1, 1, 1, 1, 1, 1, 1,
4142 4, 4, 4, 4, 4, 4, 4, 4, 4057 1, 1, 1, 1, 1, 1, 1, 1,
4143 4, 4, 4, 4, 4, 4, 4, 4, 4058 1, 1, 1, 1, 1, 1, 1, 1,
4144 4, 4, 4, 4, 4, 4, 4, 4, 4059 1, 1, 1, 1, 1, 1, 1, 419,
4145 422, 422, 1, 422, 422, 422, 422, 422, 4060 419, 421, 419, 419, 419, 419, 419, 419,
4146 422, 422, 422, 422, 422, 422, 422, 422, 4061 419, 419, 419, 419, 419, 419, 419, 419,
4147 422, 422, 422, 422, 422, 422, 422, 422, 4062 419, 419, 419, 419, 419, 419, 419, 419,
4148 422, 422, 422, 422, 422, 422, 422, 422, 4063 419, 419, 419, 419, 419, 419, 419, 419,
4149 422, 422, 422, 422, 422, 422, 422, 422, 4064 419, 419, 419, 419, 419, 419, 419, 419,
4150 422, 422, 422, 422, 422, 422, 422, 422, 4065 419, 419, 419, 419, 419, 419, 419, 419,
4151 422, 422, 422, 422, 422, 422, 422, 422, 4066 419, 419, 419, 419, 419, 419, 419, 419,
4152 422, 422, 422, 422, 422, 422, 422, 422, 4067 419, 419, 419, 419, 419, 419, 419, 419,
4153 422, 422, 422, 422, 422, 422, 422, 422, 4068 419, 419, 419, 419, 419, 419, 419, 419,
4154 422, 422, 422, 422, 422, 422, 422, 422, 4069 419, 419, 419, 419, 419, 419, 419, 419,
4155 422, 422, 422, 422, 422, 422, 422, 422, 4070 419, 419, 419, 419, 419, 419, 419, 419,
4156 422, 422, 422, 422, 422, 422, 422, 422, 4071 419, 419, 419, 419, 419, 419, 419, 419,
4157 422, 422, 422, 422, 422, 422, 422, 422, 4072 419, 419, 419, 419, 419, 419, 419, 419,
4158 422, 422, 422, 422, 422, 422, 422, 422, 4073 419, 419, 419, 419, 419, 419, 419, 419,
4159 422, 422, 422, 422, 422, 422, 422, 422, 4074 419, 419, 419, 419, 419, 419, 419, 419,
4160 422, 422, 422, 422, 422, 422, 422, 422, 4075 419, 419, 419, 419, 419, 419, 419, 419,
4161 422, 422, 422, 422, 422, 422, 422, 422, 4076 419, 419, 419, 419, 419, 419, 419, 419,
4162 422, 422, 422, 422, 422, 422, 422, 422, 4077 419, 419, 419, 419, 419, 419, 419, 419,
4163 422, 422, 422, 422, 422, 422, 422, 422, 4078 419, 419, 419, 419, 419, 419, 419, 419,
4164 422, 422, 422, 422, 422, 422, 422, 422, 4079 419, 419, 419, 419, 419, 419, 419, 419,
4165 422, 422, 422, 422, 422, 422, 422, 422, 4080 419, 419, 419, 419, 419, 419, 419, 419,
4166 422, 422, 422, 422, 422, 422, 422, 422, 4081 419, 419, 419, 419, 419, 419, 419, 419,
4167 422, 422, 422, 422, 422, 422, 422, 422, 4082 419, 419, 419, 419, 419, 419, 419, 419,
4168 422, 422, 422, 422, 422, 422, 422, 422, 4083 419, 419, 419, 419, 419, 419, 419, 419,
4169 422, 422, 422, 422, 422, 422, 422, 422, 4084 419, 419, 419, 419, 419, 419, 419, 419,
4170 422, 422, 422, 422, 422, 422, 422, 422, 4085 419, 419, 419, 419, 419, 419, 419, 419,
4171 422, 422, 422, 422, 422, 422, 422, 422, 4086 419, 419, 419, 419, 419, 419, 419, 419,
4172 422, 422, 422, 422, 422, 422, 422, 422, 4087 419, 419, 419, 419, 419, 419, 419, 419,
4173 422, 422, 422, 422, 422, 422, 422, 422, 4088 419, 419, 419, 419, 419, 419, 419, 419,
4174 422, 422, 422, 422, 422, 422, 422, 422, 4089 419, 419, 419, 419, 419, 419, 419, 419,
4175 422, 422, 422, 422, 422, 422, 422, 422, 4090 419, 419, 419, 419, 419, 419, 419, 419,
4176 422, 422, 422, 422, 422, 422, 422, 422, 4091 419, 419, 419, 419, 419, 419, 419, 419,
4177 422, 422, 422, 422, 422, 422, 422, 422, 4092 419, 419, 419, 419, 419, 419, 419, 419,
4178 422, 422, 422, 422, 422, 422, 422, 422, 4093 419, 419, 419, 419, 419, 419, 419, 419,
4179 422, 422, 422, 672, 672, 672, 672, 672, 4094 419, 419, 670, 670, 670, 670, 670, 670,
4180 672, 672, 672, 672, 672, 672, 672, 672, 4095 670, 670, 670, 670, 670, 670, 670, 670,
4181 672, 672, 672, 672, 672, 672, 672, 672, 4096 670, 670, 670, 670, 670, 670, 670, 670,
4182 672, 672, 672, 672, 672, 672, 672, 672, 4097 670, 670, 670, 670, 670, 670, 670, 670,
4183 672, 672, 672, 672, 672, 672, 672, 672, 4098 670, 670, 670, 670, 670, 670, 670, 670,
4184 672, 672, 672, 672, 672, 672, 672, 672, 4099 670, 670, 670, 670, 670, 670, 670, 670,
4185 672, 672, 672, 672, 672, 672, 672, 672, 4100 670, 670, 670, 670, 670, 670, 670, 670,
4186 672, 672, 672, 672, 672, 672, 672, 672, 4101 670, 670, 670, 670, 670, 670, 670, 670,
4187 672, 672, 672, 672, 672, 672, 672, 672, 4102 670, 670, 670, 670, 670, 670, 670, 670,
4188 672, 672, 672, 672, 672, 672, 672, 672, 4103 670, 670, 670, 670, 670, 670, 670, 670,
4189 672, 672, 672, 672, 672, 672, 672, 672, 4104 670, 670, 670, 670, 670, 670, 670, 670,
4190 672, 672, 672, 672, 672, 672, 672, 672, 4105 670, 670, 670, 670, 670, 670, 670, 670,
4191 672, 672, 672, 672, 672, 672, 672, 672, 4106 670, 670, 670, 670, 670, 670, 670, 670,
4192 672, 672, 672, 672, 672, 672, 672, 672, 4107 670, 670, 670, 670, 670, 670, 670, 670,
4193 672, 672, 672, 672, 672, 672, 672, 672, 4108 670, 670, 670, 670, 670, 670, 670, 670,
4194 672, 672, 672, 672, 672, 672, 672, 672, 4109 670, 670, 670, 670, 670, 670, 670, 670,
4195 672, 672, 672, 672, 672, 672, 672, 672, 4110 670, 670, 670, 670, 670, 670, 670, 670,
4196 672, 672, 672, 672, 672, 672, 672, 672, 4111 670, 670, 670, 670, 670, 670, 670, 670,
4197 672, 672, 672, 672, 672, 672, 672, 672, 4112 670, 670, 670, 670, 670, 670, 670, 670,
4198 672, 672, 672, 672, 672, 672, 672, 672, 4113 670, 670, 670, 670, 670, 670, 670, 670,
4199 672, 672, 672, 672, 672, 672, 672, 672, 4114 670, 670, 670, 670, 670, 670, 670, 670,
4200 672, 672, 672, 672, 672, 672, 672, 672, 4115 670, 670, 670, 670, 670, 670, 670, 670,
4201 672, 672, 672, 672, 672, 672, 672, 672, 4116 670, 670, 670, 670, 670, 670, 670, 670,
4202 672, 672, 672, 672, 672, 672, 672, 672, 4117 670, 670, 670, 670, 670, 670, 670, 670,
4203 672, 672, 672, 672, 672, 672, 672, 672, 4118 670, 670, 670, 670, 670, 670, 670, 670,
4204 672, 672, 672, 672, 672, 672, 672, 672, 4119 670, 670, 670, 670, 670, 670, 670, 670,
4205 672, 672, 672, 672, 672, 672, 672, 672, 4120 670, 670, 670, 670, 670, 670, 670, 670,
4206 672, 672, 672, 672, 672, 672, 672, 672, 4121 670, 670, 670, 670, 670, 670, 670, 670,
4207 672, 672, 672, 672, 672, 672, 672, 672, 4122 670, 670, 670, 670, 670, 670, 670, 670,
4208 672, 672, 672, 672, 672, 672, 672, 672, 4123 670, 670, 670, 670, 670, 670, 670, 670,
4209 672, 769, 769, 769, 769, 769, 775, 775, 4124 767, 772, 772, 772, 773, 773, 775, 775,
4210 777, 779, 779, 777, 777, 779, 0, 0, 4125 775, 779, 0, 0, 785, 785, 785, 789,
4211 787, 789, 787, 787, 789, 0, 0, 795, 4126 0, 0, 795, 795, 797, 795, 795, 795,
4212 795, 797, 795, 795, 795, 795, 795, 795,
4213 795, 795, 795, 795, 795, 795, 795, 795, 4127 795, 795, 795, 795, 795, 795, 795, 795,
4214 795, 795, 795, 795, 795, 795, 795, 795, 4128 795, 795, 795, 795, 795, 795, 795, 795,
4215 795, 795, 795, 795, 795, 795, 795, 795, 4129 795, 795, 795, 795, 795, 795, 795, 795,
@@ -4243,7 +4157,7 @@ var _hcltok_eof_trans []int16 = []int16{
4243 795, 795, 795, 795, 795, 795, 795, 795, 4157 795, 795, 795, 795, 795, 795, 795, 795,
4244 795, 795, 795, 795, 795, 795, 795, 795, 4158 795, 795, 795, 795, 795, 795, 795, 795,
4245 795, 795, 795, 795, 795, 795, 795, 795, 4159 795, 795, 795, 795, 795, 795, 795, 795,
4246 795, 795, 1046, 1046, 1046, 1046, 1046, 1046, 4160 795, 795, 795, 795, 795, 1046, 1046, 1046,
4247 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 4161 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4248 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 4162 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4249 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 4163 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
@@ -4273,45 +4187,49 @@ var _hcltok_eof_trans []int16 = []int16{
4273 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 4187 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4274 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 4188 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4275 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046, 4189 1046, 1046, 1046, 1046, 1046, 1046, 1046, 1046,
4276 0, 1196, 1197, 1198, 1197, 1198, 1198, 1198, 4190 1046, 1046, 1046, 0, 1196, 1197, 1198, 1200,
4277 1202, 1203, 1198, 1198, 1198, 1209, 1198, 1198, 4191 1198, 1198, 1198, 1203, 1198, 1198, 1198, 1209,
4192 1198, 1198, 1239, 1239, 1239, 1239, 1239, 1239,
4278 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, 4193 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239,
4279 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, 4194 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239,
4280 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, 4195 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239,
4281 1239, 1239, 1239, 1239, 1239, 1239, 1239, 1239, 4196 1239, 1239, 1239, 1239, 1239, 0, 1392, 1394,
4282 1239, 1239, 1239, 0, 1392, 1396, 1404, 1392, 4197 1395, 1399, 1399, 1392, 1402, 1395, 1405, 1395,
4283 1392, 1396, 1396, 1404, 1396, 1392, 1404, 1404, 4198 1407, 1407, 1407, 0, 1416, 1418, 1418, 1416,
4284 1404, 1404, 1404, 1396, 1396, 1396, 1458, 1460, 4199 1416, 1423, 1425, 1427, 1427, 1427, 0, 1435,
4285 1458, 1463, 1465, 1465, 1465, 0, 1474, 1478, 4200 1437, 1437, 1435, 1435, 1442, 1444, 1446, 1446,
4286 1487, 1496, 1498, 1500, 1500, 1500, 0, 1508, 4201 1446, 0, 1483, 1511, 1511, 1511, 1511, 1511,
4287 1511, 1513, 1515, 1515, 1515, 0, 1552, 1580, 4202 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511,
4288 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, 4203 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511,
4289 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, 4204 1511, 1511, 1511, 1511, 1511, 1511, 1511, 1511,
4290 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580, 4205 1511, 1511, 1511, 1511, 1511, 1511,
4291 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1580,
4292 1580, 1580,
4293} 4206}
4294 4207
4295const hcltok_start int = 1464 4208const hcltok_start int = 1459
4296const hcltok_first_final int = 1464 4209const hcltok_first_final int = 1459
4297const hcltok_error int = 0 4210const hcltok_error int = 0
4298 4211
4299const hcltok_en_stringTemplate int = 1515 4212const hcltok_en_stringTemplate int = 1509
4300const hcltok_en_heredocTemplate int = 1541 4213const hcltok_en_heredocTemplate int = 1523
4301const hcltok_en_bareTemplate int = 1550 4214const hcltok_en_bareTemplate int = 1534
4302const hcltok_en_identOnly int = 1557 4215const hcltok_en_identOnly int = 1545
4303const hcltok_en_main int = 1464 4216const hcltok_en_main int = 1459
4304 4217
4305// line 16 "scan_tokens.rl" 4218//line scan_tokens.rl:16
4306 4219
4307func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { 4220func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
4221 stripData := stripUTF8BOM(data)
4222 start.Byte += len(data) - len(stripData)
4223 data = stripData
4224
4308 f := &tokenAccum{ 4225 f := &tokenAccum{
4309 Filename: filename, 4226 Filename: filename,
4310 Bytes: data, 4227 Bytes: data,
4311 Pos: start, 4228 Pos: start,
4229 StartByte: start.Byte,
4312 } 4230 }
4313 4231
4314 // line 294 "scan_tokens.rl" 4232//line scan_tokens.rl:305
4315 4233
4316 // Ragel state 4234 // Ragel state
4317 p := 0 // "Pointer" into data 4235 p := 0 // "Pointer" into data
@@ -4339,7 +4257,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4339 var retBraces []int // stack of brace levels that cause us to use fret 4257 var retBraces []int // stack of brace levels that cause us to use fret
4340 var heredocs []heredocInProgress // stack of heredocs we're currently processing 4258 var heredocs []heredocInProgress // stack of heredocs we're currently processing
4341 4259
4342 // line 329 "scan_tokens.rl" 4260//line scan_tokens.rl:340
4343 4261
4344 // Make Go compiler happy 4262 // Make Go compiler happy
4345 _ = ts 4263 _ = ts
@@ -4359,7 +4277,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4359 f.emitToken(TokenType(b[0]), ts, te) 4277 f.emitToken(TokenType(b[0]), ts, te)
4360 } 4278 }
4361 4279
4362 // line 4372 "scan_tokens.go" 4280//line scan_tokens.go:4289
4363 { 4281 {
4364 top = 0 4282 top = 0
4365 ts = 0 4283 ts = 0
@@ -4367,7 +4285,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4367 act = 0 4285 act = 0
4368 } 4286 }
4369 4287
4370 // line 4380 "scan_tokens.go" 4288//line scan_tokens.go:4297
4371 { 4289 {
4372 var _klen int 4290 var _klen int
4373 var _trans int 4291 var _trans int
@@ -4387,12 +4305,11 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4387 for ; _nacts > 0; _nacts-- { 4305 for ; _nacts > 0; _nacts-- {
4388 _acts++ 4306 _acts++
4389 switch _hcltok_actions[_acts-1] { 4307 switch _hcltok_actions[_acts-1] {
4390 case 6: 4308 case 3:
4391 // line 1 "NONE" 4309//line NONE:1
4392
4393 ts = p 4310 ts = p
4394 4311
4395 // line 4404 "scan_tokens.go" 4312//line scan_tokens.go:4320
4396 } 4313 }
4397 } 4314 }
4398 4315
@@ -4464,33 +4381,21 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4464 _acts++ 4381 _acts++
4465 switch _hcltok_actions[_acts-1] { 4382 switch _hcltok_actions[_acts-1] {
4466 case 0: 4383 case 0:
4467 // line 218 "scan_tokens.rl" 4384//line scan_tokens.rl:224
4468
4469 p--
4470
4471 case 1:
4472 // line 219 "scan_tokens.rl"
4473
4474 p--
4475
4476 case 2:
4477 // line 224 "scan_tokens.rl"
4478
4479 p--
4480
4481 case 3:
4482 // line 225 "scan_tokens.rl"
4483
4484 p-- 4385 p--
4485 4386
4486 case 7: 4387 case 4:
4487 // line 1 "NONE" 4388//line NONE:1
4488
4489 te = p + 1 4389 te = p + 1
4490 4390
4491 case 8: 4391 case 5:
4492 // line 155 "scan_tokens.rl" 4392//line scan_tokens.rl:248
4493 4393 act = 4
4394 case 6:
4395//line scan_tokens.rl:250
4396 act = 6
4397 case 7:
4398//line scan_tokens.rl:160
4494 te = p + 1 4399 te = p + 1
4495 { 4400 {
4496 token(TokenTemplateInterp) 4401 token(TokenTemplateInterp)
@@ -4503,13 +4408,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4503 stack = append(stack, 0) 4408 stack = append(stack, 0)
4504 stack[top] = cs 4409 stack[top] = cs
4505 top++ 4410 top++
4506 cs = 1464 4411 cs = 1459
4507 goto _again 4412 goto _again
4508 } 4413 }
4509 } 4414 }
4510 case 9: 4415 case 8:
4511 // line 165 "scan_tokens.rl" 4416//line scan_tokens.rl:170
4512
4513 te = p + 1 4417 te = p + 1
4514 { 4418 {
4515 token(TokenTemplateControl) 4419 token(TokenTemplateControl)
@@ -4522,13 +4426,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4522 stack = append(stack, 0) 4426 stack = append(stack, 0)
4523 stack[top] = cs 4427 stack[top] = cs
4524 top++ 4428 top++
4525 cs = 1464 4429 cs = 1459
4526 goto _again 4430 goto _again
4527 } 4431 }
4528 } 4432 }
4529 case 10: 4433 case 9:
4530 // line 79 "scan_tokens.rl" 4434//line scan_tokens.rl:84
4531
4532 te = p + 1 4435 te = p + 1
4533 { 4436 {
4534 token(TokenCQuote) 4437 token(TokenCQuote)
@@ -4540,23 +4443,20 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4540 goto _again 4443 goto _again
4541 4444
4542 } 4445 }
4543 case 11: 4446 case 10:
4544 // line 239 "scan_tokens.rl" 4447//line scan_tokens.rl:248
4545
4546 te = p + 1 4448 te = p + 1
4547 { 4449 {
4548 token(TokenInvalid) 4450 token(TokenQuotedLit)
4549 } 4451 }
4550 case 12: 4452 case 11:
4551 // line 240 "scan_tokens.rl" 4453//line scan_tokens.rl:251
4552
4553 te = p + 1 4454 te = p + 1
4554 { 4455 {
4555 token(TokenBadUTF8) 4456 token(TokenBadUTF8)
4556 } 4457 }
4557 case 13: 4458 case 12:
4558 // line 155 "scan_tokens.rl" 4459//line scan_tokens.rl:160
4559
4560 te = p 4460 te = p
4561 p-- 4461 p--
4562 { 4462 {
@@ -4570,13 +4470,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4570 stack = append(stack, 0) 4470 stack = append(stack, 0)
4571 stack[top] = cs 4471 stack[top] = cs
4572 top++ 4472 top++
4573 cs = 1464 4473 cs = 1459
4574 goto _again 4474 goto _again
4575 } 4475 }
4576 } 4476 }
4577 case 14: 4477 case 13:
4578 // line 165 "scan_tokens.rl" 4478//line scan_tokens.rl:170
4579
4580 te = p 4479 te = p
4581 p-- 4480 p--
4582 { 4481 {
@@ -4590,59 +4489,73 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4590 stack = append(stack, 0) 4489 stack = append(stack, 0)
4591 stack[top] = cs 4490 stack[top] = cs
4592 top++ 4491 top++
4593 cs = 1464 4492 cs = 1459
4594 goto _again 4493 goto _again
4595 } 4494 }
4596 } 4495 }
4597 case 15: 4496 case 14:
4598 // line 238 "scan_tokens.rl" 4497//line scan_tokens.rl:248
4599
4600 te = p 4498 te = p
4601 p-- 4499 p--
4602 { 4500 {
4603 token(TokenQuotedLit) 4501 token(TokenQuotedLit)
4604 } 4502 }
4503 case 15:
4504//line scan_tokens.rl:249
4505 te = p
4506 p--
4507 {
4508 token(TokenQuotedNewline)
4509 }
4605 case 16: 4510 case 16:
4606 // line 239 "scan_tokens.rl" 4511//line scan_tokens.rl:250
4607
4608 te = p 4512 te = p
4609 p-- 4513 p--
4610 { 4514 {
4611 token(TokenInvalid) 4515 token(TokenInvalid)
4612 } 4516 }
4613 case 17: 4517 case 17:
4614 // line 240 "scan_tokens.rl" 4518//line scan_tokens.rl:251
4615
4616 te = p 4519 te = p
4617 p-- 4520 p--
4618 { 4521 {
4619 token(TokenBadUTF8) 4522 token(TokenBadUTF8)
4620 } 4523 }
4621 case 18: 4524 case 18:
4622 // line 238 "scan_tokens.rl" 4525//line scan_tokens.rl:248
4623
4624 p = (te) - 1 4526 p = (te) - 1
4625 { 4527 {
4626 token(TokenQuotedLit) 4528 token(TokenQuotedLit)
4627 } 4529 }
4628 case 19: 4530 case 19:
4629 // line 240 "scan_tokens.rl" 4531//line scan_tokens.rl:251
4630
4631 p = (te) - 1 4532 p = (te) - 1
4632 { 4533 {
4633 token(TokenBadUTF8) 4534 token(TokenBadUTF8)
4634 } 4535 }
4635 case 20: 4536 case 20:
4636 // line 143 "scan_tokens.rl" 4537//line NONE:1
4538 switch act {
4539 case 4:
4540 {
4541 p = (te) - 1
4542 token(TokenQuotedLit)
4543 }
4544 case 6:
4545 {
4546 p = (te) - 1
4547 token(TokenInvalid)
4548 }
4549 }
4637 4550
4638 act = 10
4639 case 21: 4551 case 21:
4640 // line 248 "scan_tokens.rl" 4552//line scan_tokens.rl:148
4641
4642 act = 11 4553 act = 11
4643 case 22: 4554 case 22:
4644 // line 155 "scan_tokens.rl" 4555//line scan_tokens.rl:259
4645 4556 act = 12
4557 case 23:
4558//line scan_tokens.rl:160
4646 te = p + 1 4559 te = p + 1
4647 { 4560 {
4648 token(TokenTemplateInterp) 4561 token(TokenTemplateInterp)
@@ -4655,13 +4568,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4655 stack = append(stack, 0) 4568 stack = append(stack, 0)
4656 stack[top] = cs 4569 stack[top] = cs
4657 top++ 4570 top++
4658 cs = 1464 4571 cs = 1459
4659 goto _again 4572 goto _again
4660 } 4573 }
4661 } 4574 }
4662 case 23: 4575 case 24:
4663 // line 165 "scan_tokens.rl" 4576//line scan_tokens.rl:170
4664
4665 te = p + 1 4577 te = p + 1
4666 { 4578 {
4667 token(TokenTemplateControl) 4579 token(TokenTemplateControl)
@@ -4674,13 +4586,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4674 stack = append(stack, 0) 4586 stack = append(stack, 0)
4675 stack[top] = cs 4587 stack[top] = cs
4676 top++ 4588 top++
4677 cs = 1464 4589 cs = 1459
4678 goto _again 4590 goto _again
4679 } 4591 }
4680 } 4592 }
4681 case 24: 4593 case 25:
4682 // line 106 "scan_tokens.rl" 4594//line scan_tokens.rl:111
4683
4684 te = p + 1 4595 te = p + 1
4685 { 4596 {
4686 // This action is called specificially when a heredoc literal 4597 // This action is called specificially when a heredoc literal
@@ -4724,16 +4635,14 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4724 topdoc.StartOfLine = true 4635 topdoc.StartOfLine = true
4725 token(TokenStringLit) 4636 token(TokenStringLit)
4726 } 4637 }
4727 case 25: 4638 case 26:
4728 // line 248 "scan_tokens.rl" 4639//line scan_tokens.rl:259
4729
4730 te = p + 1 4640 te = p + 1
4731 { 4641 {
4732 token(TokenBadUTF8) 4642 token(TokenBadUTF8)
4733 } 4643 }
4734 case 26: 4644 case 27:
4735 // line 155 "scan_tokens.rl" 4645//line scan_tokens.rl:160
4736
4737 te = p 4646 te = p
4738 p-- 4647 p--
4739 { 4648 {
@@ -4747,13 +4656,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4747 stack = append(stack, 0) 4656 stack = append(stack, 0)
4748 stack[top] = cs 4657 stack[top] = cs
4749 top++ 4658 top++
4750 cs = 1464 4659 cs = 1459
4751 goto _again 4660 goto _again
4752 } 4661 }
4753 } 4662 }
4754 case 27: 4663 case 28:
4755 // line 165 "scan_tokens.rl" 4664//line scan_tokens.rl:170
4756
4757 te = p 4665 te = p
4758 p-- 4666 p--
4759 { 4667 {
@@ -4767,13 +4675,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4767 stack = append(stack, 0) 4675 stack = append(stack, 0)
4768 stack[top] = cs 4676 stack[top] = cs
4769 top++ 4677 top++
4770 cs = 1464 4678 cs = 1459
4771 goto _again 4679 goto _again
4772 } 4680 }
4773 } 4681 }
4774 case 28: 4682 case 29:
4775 // line 143 "scan_tokens.rl" 4683//line scan_tokens.rl:148
4776
4777 te = p 4684 te = p
4778 p-- 4685 p--
4779 { 4686 {
@@ -4783,17 +4690,15 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4783 heredocs[len(heredocs)-1].StartOfLine = false 4690 heredocs[len(heredocs)-1].StartOfLine = false
4784 token(TokenStringLit) 4691 token(TokenStringLit)
4785 } 4692 }
4786 case 29: 4693 case 30:
4787 // line 248 "scan_tokens.rl" 4694//line scan_tokens.rl:259
4788
4789 te = p 4695 te = p
4790 p-- 4696 p--
4791 { 4697 {
4792 token(TokenBadUTF8) 4698 token(TokenBadUTF8)
4793 } 4699 }
4794 case 30: 4700 case 31:
4795 // line 143 "scan_tokens.rl" 4701//line scan_tokens.rl:148
4796
4797 p = (te) - 1 4702 p = (te) - 1
4798 { 4703 {
4799 // This action is called when a heredoc literal _doesn't_ end 4704 // This action is called when a heredoc literal _doesn't_ end
@@ -4802,16 +4707,15 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4802 heredocs[len(heredocs)-1].StartOfLine = false 4707 heredocs[len(heredocs)-1].StartOfLine = false
4803 token(TokenStringLit) 4708 token(TokenStringLit)
4804 } 4709 }
4805 case 31: 4710 case 32:
4806 // line 1 "NONE" 4711//line NONE:1
4807
4808 switch act { 4712 switch act {
4809 case 0: 4713 case 0:
4810 { 4714 {
4811 cs = 0 4715 cs = 0
4812 goto _again 4716 goto _again
4813 } 4717 }
4814 case 10: 4718 case 11:
4815 { 4719 {
4816 p = (te) - 1 4720 p = (te) - 1
4817 4721
@@ -4821,24 +4725,21 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4821 heredocs[len(heredocs)-1].StartOfLine = false 4725 heredocs[len(heredocs)-1].StartOfLine = false
4822 token(TokenStringLit) 4726 token(TokenStringLit)
4823 } 4727 }
4824 case 11: 4728 case 12:
4825 { 4729 {
4826 p = (te) - 1 4730 p = (te) - 1
4827 token(TokenBadUTF8) 4731 token(TokenBadUTF8)
4828 } 4732 }
4829 } 4733 }
4830 4734
4831 case 32:
4832 // line 151 "scan_tokens.rl"
4833
4834 act = 14
4835 case 33: 4735 case 33:
4836 // line 255 "scan_tokens.rl" 4736//line scan_tokens.rl:156
4837
4838 act = 15 4737 act = 15
4839 case 34: 4738 case 34:
4840 // line 155 "scan_tokens.rl" 4739//line scan_tokens.rl:266
4841 4740 act = 16
4741 case 35:
4742//line scan_tokens.rl:160
4842 te = p + 1 4743 te = p + 1
4843 { 4744 {
4844 token(TokenTemplateInterp) 4745 token(TokenTemplateInterp)
@@ -4851,13 +4752,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4851 stack = append(stack, 0) 4752 stack = append(stack, 0)
4852 stack[top] = cs 4753 stack[top] = cs
4853 top++ 4754 top++
4854 cs = 1464 4755 cs = 1459
4855 goto _again 4756 goto _again
4856 } 4757 }
4857 } 4758 }
4858 case 35: 4759 case 36:
4859 // line 165 "scan_tokens.rl" 4760//line scan_tokens.rl:170
4860
4861 te = p + 1 4761 te = p + 1
4862 { 4762 {
4863 token(TokenTemplateControl) 4763 token(TokenTemplateControl)
@@ -4870,27 +4770,24 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4870 stack = append(stack, 0) 4770 stack = append(stack, 0)
4871 stack[top] = cs 4771 stack[top] = cs
4872 top++ 4772 top++
4873 cs = 1464 4773 cs = 1459
4874 goto _again 4774 goto _again
4875 } 4775 }
4876 } 4776 }
4877 case 36: 4777 case 37:
4878 // line 151 "scan_tokens.rl" 4778//line scan_tokens.rl:156
4879
4880 te = p + 1 4779 te = p + 1
4881 { 4780 {
4882 token(TokenStringLit) 4781 token(TokenStringLit)
4883 } 4782 }
4884 case 37: 4783 case 38:
4885 // line 255 "scan_tokens.rl" 4784//line scan_tokens.rl:266
4886
4887 te = p + 1 4785 te = p + 1
4888 { 4786 {
4889 token(TokenBadUTF8) 4787 token(TokenBadUTF8)
4890 } 4788 }
4891 case 38: 4789 case 39:
4892 // line 155 "scan_tokens.rl" 4790//line scan_tokens.rl:160
4893
4894 te = p 4791 te = p
4895 p-- 4792 p--
4896 { 4793 {
@@ -4904,13 +4801,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4904 stack = append(stack, 0) 4801 stack = append(stack, 0)
4905 stack[top] = cs 4802 stack[top] = cs
4906 top++ 4803 top++
4907 cs = 1464 4804 cs = 1459
4908 goto _again 4805 goto _again
4909 } 4806 }
4910 } 4807 }
4911 case 39: 4808 case 40:
4912 // line 165 "scan_tokens.rl" 4809//line scan_tokens.rl:170
4913
4914 te = p 4810 te = p
4915 p-- 4811 p--
4916 { 4812 {
@@ -4924,231 +4820,191 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
4924 stack = append(stack, 0) 4820 stack = append(stack, 0)
4925 stack[top] = cs 4821 stack[top] = cs
4926 top++ 4822 top++
4927 cs = 1464 4823 cs = 1459
4928 goto _again 4824 goto _again
4929 } 4825 }
4930 } 4826 }
4931 case 40: 4827 case 41:
4932 // line 151 "scan_tokens.rl" 4828//line scan_tokens.rl:156
4933
4934 te = p 4829 te = p
4935 p-- 4830 p--
4936 { 4831 {
4937 token(TokenStringLit) 4832 token(TokenStringLit)
4938 } 4833 }
4939 case 41: 4834 case 42:
4940 // line 255 "scan_tokens.rl" 4835//line scan_tokens.rl:266
4941
4942 te = p 4836 te = p
4943 p-- 4837 p--
4944 { 4838 {
4945 token(TokenBadUTF8) 4839 token(TokenBadUTF8)
4946 } 4840 }
4947 case 42: 4841 case 43:
4948 // line 151 "scan_tokens.rl" 4842//line scan_tokens.rl:156
4949
4950 p = (te) - 1 4843 p = (te) - 1
4951 { 4844 {
4952 token(TokenStringLit) 4845 token(TokenStringLit)
4953 } 4846 }
4954 case 43: 4847 case 44:
4955 // line 1 "NONE" 4848//line NONE:1
4956
4957 switch act { 4849 switch act {
4958 case 0: 4850 case 0:
4959 { 4851 {
4960 cs = 0 4852 cs = 0
4961 goto _again 4853 goto _again
4962 } 4854 }
4963 case 14: 4855 case 15:
4964 { 4856 {
4965 p = (te) - 1 4857 p = (te) - 1
4966 4858
4967 token(TokenStringLit) 4859 token(TokenStringLit)
4968 } 4860 }
4969 case 15: 4861 case 16:
4970 { 4862 {
4971 p = (te) - 1 4863 p = (te) - 1
4972 token(TokenBadUTF8) 4864 token(TokenBadUTF8)
4973 } 4865 }
4974 } 4866 }
4975 4867
4976 case 44:
4977 // line 259 "scan_tokens.rl"
4978
4979 act = 16
4980 case 45: 4868 case 45:
4981 // line 260 "scan_tokens.rl" 4869//line scan_tokens.rl:270
4982
4983 act = 17 4870 act = 17
4984 case 46: 4871 case 46:
4985 // line 260 "scan_tokens.rl" 4872//line scan_tokens.rl:271
4986 4873 act = 18
4874 case 47:
4875//line scan_tokens.rl:271
4987 te = p + 1 4876 te = p + 1
4988 { 4877 {
4989 token(TokenBadUTF8) 4878 token(TokenBadUTF8)
4990 } 4879 }
4991 case 47: 4880 case 48:
4992 // line 261 "scan_tokens.rl" 4881//line scan_tokens.rl:272
4993
4994 te = p + 1 4882 te = p + 1
4995 { 4883 {
4996 token(TokenInvalid) 4884 token(TokenInvalid)
4997 } 4885 }
4998 case 48: 4886 case 49:
4999 // line 259 "scan_tokens.rl" 4887//line scan_tokens.rl:270
5000
5001 te = p 4888 te = p
5002 p-- 4889 p--
5003 { 4890 {
5004 token(TokenIdent) 4891 token(TokenIdent)
5005 } 4892 }
5006 case 49: 4893 case 50:
5007 // line 260 "scan_tokens.rl" 4894//line scan_tokens.rl:271
5008
5009 te = p 4895 te = p
5010 p-- 4896 p--
5011 { 4897 {
5012 token(TokenBadUTF8) 4898 token(TokenBadUTF8)
5013 } 4899 }
5014 case 50: 4900 case 51:
5015 // line 259 "scan_tokens.rl" 4901//line scan_tokens.rl:270
5016
5017 p = (te) - 1 4902 p = (te) - 1
5018 { 4903 {
5019 token(TokenIdent) 4904 token(TokenIdent)
5020 } 4905 }
5021 case 51: 4906 case 52:
5022 // line 260 "scan_tokens.rl" 4907//line scan_tokens.rl:271
5023
5024 p = (te) - 1 4908 p = (te) - 1
5025 { 4909 {
5026 token(TokenBadUTF8) 4910 token(TokenBadUTF8)
5027 } 4911 }
5028 case 52: 4912 case 53:
5029 // line 1 "NONE" 4913//line NONE:1
5030
5031 switch act { 4914 switch act {
5032 case 16: 4915 case 17:
5033 { 4916 {
5034 p = (te) - 1 4917 p = (te) - 1
5035 token(TokenIdent) 4918 token(TokenIdent)
5036 } 4919 }
5037 case 17: 4920 case 18:
5038 { 4921 {
5039 p = (te) - 1 4922 p = (te) - 1
5040 token(TokenBadUTF8) 4923 token(TokenBadUTF8)
5041 } 4924 }
5042 } 4925 }
5043 4926
5044 case 53:
5045 // line 267 "scan_tokens.rl"
5046
5047 act = 21
5048 case 54: 4927 case 54:
5049 // line 269 "scan_tokens.rl" 4928//line scan_tokens.rl:278
5050
5051 act = 22 4929 act = 22
5052 case 55: 4930 case 55:
5053 // line 280 "scan_tokens.rl" 4931//line scan_tokens.rl:301
5054
5055 act = 32
5056 case 56:
5057 // line 290 "scan_tokens.rl"
5058
5059 act = 38
5060 case 57:
5061 // line 291 "scan_tokens.rl"
5062
5063 act = 39 4932 act = 39
5064 case 58: 4933 case 56:
5065 // line 269 "scan_tokens.rl" 4934//line scan_tokens.rl:280
5066
5067 te = p + 1 4935 te = p + 1
5068 { 4936 {
5069 token(TokenComment) 4937 token(TokenComment)
5070 } 4938 }
5071 case 59: 4939 case 57:
5072 // line 270 "scan_tokens.rl" 4940//line scan_tokens.rl:281
5073
5074 te = p + 1 4941 te = p + 1
5075 { 4942 {
5076 token(TokenNewline) 4943 token(TokenNewline)
5077 } 4944 }
5078 case 60: 4945 case 58:
5079 // line 272 "scan_tokens.rl" 4946//line scan_tokens.rl:283
5080
5081 te = p + 1 4947 te = p + 1
5082 { 4948 {
5083 token(TokenEqualOp) 4949 token(TokenEqualOp)
5084 } 4950 }
5085 case 61: 4951 case 59:
5086 // line 273 "scan_tokens.rl" 4952//line scan_tokens.rl:284
5087
5088 te = p + 1 4953 te = p + 1
5089 { 4954 {
5090 token(TokenNotEqual) 4955 token(TokenNotEqual)
5091 } 4956 }
5092 case 62: 4957 case 60:
5093 // line 274 "scan_tokens.rl" 4958//line scan_tokens.rl:285
5094
5095 te = p + 1 4959 te = p + 1
5096 { 4960 {
5097 token(TokenGreaterThanEq) 4961 token(TokenGreaterThanEq)
5098 } 4962 }
5099 case 63: 4963 case 61:
5100 // line 275 "scan_tokens.rl" 4964//line scan_tokens.rl:286
5101
5102 te = p + 1 4965 te = p + 1
5103 { 4966 {
5104 token(TokenLessThanEq) 4967 token(TokenLessThanEq)
5105 } 4968 }
5106 case 64: 4969 case 62:
5107 // line 276 "scan_tokens.rl" 4970//line scan_tokens.rl:287
5108
5109 te = p + 1 4971 te = p + 1
5110 { 4972 {
5111 token(TokenAnd) 4973 token(TokenAnd)
5112 } 4974 }
5113 case 65: 4975 case 63:
5114 // line 277 "scan_tokens.rl" 4976//line scan_tokens.rl:288
5115
5116 te = p + 1 4977 te = p + 1
5117 { 4978 {
5118 token(TokenOr) 4979 token(TokenOr)
5119 } 4980 }
5120 case 66: 4981 case 64:
5121 // line 278 "scan_tokens.rl" 4982//line scan_tokens.rl:289
5122
5123 te = p + 1 4983 te = p + 1
5124 { 4984 {
5125 token(TokenEllipsis) 4985 token(TokenEllipsis)
5126 } 4986 }
5127 case 67: 4987 case 65:
5128 // line 279 "scan_tokens.rl" 4988//line scan_tokens.rl:290
5129
5130 te = p + 1 4989 te = p + 1
5131 { 4990 {
5132 token(TokenFatArrow) 4991 token(TokenFatArrow)
5133 } 4992 }
5134 case 68: 4993 case 66:
5135 // line 280 "scan_tokens.rl" 4994//line scan_tokens.rl:291
5136
5137 te = p + 1 4995 te = p + 1
5138 { 4996 {
5139 selfToken() 4997 selfToken()
5140 } 4998 }
5141 case 69: 4999 case 67:
5142 // line 175 "scan_tokens.rl" 5000//line scan_tokens.rl:180
5143
5144 te = p + 1 5001 te = p + 1
5145 { 5002 {
5146 token(TokenOBrace) 5003 token(TokenOBrace)
5147 braces++ 5004 braces++
5148 } 5005 }
5149 case 70: 5006 case 68:
5150 // line 180 "scan_tokens.rl" 5007//line scan_tokens.rl:185
5151
5152 te = p + 1 5008 te = p + 1
5153 { 5009 {
5154 if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces { 5010 if len(retBraces) > 0 && retBraces[len(retBraces)-1] == braces {
@@ -5167,9 +5023,8 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
5167 braces-- 5023 braces--
5168 } 5024 }
5169 } 5025 }
5170 case 71: 5026 case 69:
5171 // line 192 "scan_tokens.rl" 5027//line scan_tokens.rl:197
5172
5173 te = p + 1 5028 te = p + 1
5174 { 5029 {
5175 // Only consume from the retBraces stack and return if we are at 5030 // Only consume from the retBraces stack and return if we are at
@@ -5197,9 +5052,8 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
5197 braces-- 5052 braces--
5198 } 5053 }
5199 } 5054 }
5200 case 72: 5055 case 70:
5201 // line 74 "scan_tokens.rl" 5056//line scan_tokens.rl:79
5202
5203 te = p + 1 5057 te = p + 1
5204 { 5058 {
5205 token(TokenOQuote) 5059 token(TokenOQuote)
@@ -5207,13 +5061,12 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
5207 stack = append(stack, 0) 5061 stack = append(stack, 0)
5208 stack[top] = cs 5062 stack[top] = cs
5209 top++ 5063 top++
5210 cs = 1515 5064 cs = 1509
5211 goto _again 5065 goto _again
5212 } 5066 }
5213 } 5067 }
5214 case 73: 5068 case 71:
5215 // line 84 "scan_tokens.rl" 5069//line scan_tokens.rl:89
5216
5217 te = p + 1 5070 te = p + 1
5218 { 5071 {
5219 token(TokenOHeredoc) 5072 token(TokenOHeredoc)
@@ -5238,138 +5091,109 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
5238 stack = append(stack, 0) 5091 stack = append(stack, 0)
5239 stack[top] = cs 5092 stack[top] = cs
5240 top++ 5093 top++
5241 cs = 1541 5094 cs = 1523
5242 goto _again 5095 goto _again
5243 } 5096 }
5244 } 5097 }
5245 case 74: 5098 case 72:
5246 // line 290 "scan_tokens.rl" 5099//line scan_tokens.rl:301
5247
5248 te = p + 1 5100 te = p + 1
5249 { 5101 {
5250 token(TokenBadUTF8) 5102 token(TokenBadUTF8)
5251 } 5103 }
5252 case 75: 5104 case 73:
5253 // line 291 "scan_tokens.rl" 5105//line scan_tokens.rl:302
5254
5255 te = p + 1 5106 te = p + 1
5256 { 5107 {
5257 token(TokenInvalid) 5108 token(TokenInvalid)
5258 } 5109 }
5259 case 76: 5110 case 74:
5260 // line 265 "scan_tokens.rl" 5111//line scan_tokens.rl:276
5261
5262 te = p 5112 te = p
5263 p-- 5113 p--
5264 5114
5265 case 77: 5115 case 75:
5266 // line 266 "scan_tokens.rl" 5116//line scan_tokens.rl:277
5267
5268 te = p 5117 te = p
5269 p-- 5118 p--
5270 { 5119 {
5271 token(TokenNumberLit) 5120 token(TokenNumberLit)
5272 } 5121 }
5273 case 78: 5122 case 76:
5274 // line 267 "scan_tokens.rl" 5123//line scan_tokens.rl:278
5275
5276 te = p 5124 te = p
5277 p-- 5125 p--
5278 { 5126 {
5279 token(TokenIdent) 5127 token(TokenIdent)
5280 } 5128 }
5281 case 79: 5129 case 77:
5282 // line 269 "scan_tokens.rl" 5130//line scan_tokens.rl:280
5283
5284 te = p 5131 te = p
5285 p-- 5132 p--
5286 { 5133 {
5287 token(TokenComment) 5134 token(TokenComment)
5288 } 5135 }
5289 case 80: 5136 case 78:
5290 // line 280 "scan_tokens.rl" 5137//line scan_tokens.rl:291
5291
5292 te = p 5138 te = p
5293 p-- 5139 p--
5294 { 5140 {
5295 selfToken() 5141 selfToken()
5296 } 5142 }
5297 case 81: 5143 case 79:
5298 // line 290 "scan_tokens.rl" 5144//line scan_tokens.rl:301
5299
5300 te = p 5145 te = p
5301 p-- 5146 p--
5302 { 5147 {
5303 token(TokenBadUTF8) 5148 token(TokenBadUTF8)
5304 } 5149 }
5305 case 82: 5150 case 80:
5306 // line 291 "scan_tokens.rl" 5151//line scan_tokens.rl:302
5307
5308 te = p 5152 te = p
5309 p-- 5153 p--
5310 { 5154 {
5311 token(TokenInvalid) 5155 token(TokenInvalid)
5312 } 5156 }
5313 case 83: 5157 case 81:
5314 // line 266 "scan_tokens.rl" 5158//line scan_tokens.rl:277
5315
5316 p = (te) - 1 5159 p = (te) - 1
5317 { 5160 {
5318 token(TokenNumberLit) 5161 token(TokenNumberLit)
5319 } 5162 }
5320 case 84: 5163 case 82:
5321 // line 267 "scan_tokens.rl" 5164//line scan_tokens.rl:278
5322
5323 p = (te) - 1 5165 p = (te) - 1
5324 { 5166 {
5325 token(TokenIdent) 5167 token(TokenIdent)
5326 } 5168 }
5327 case 85: 5169 case 83:
5328 // line 280 "scan_tokens.rl" 5170//line scan_tokens.rl:291
5329
5330 p = (te) - 1 5171 p = (te) - 1
5331 { 5172 {
5332 selfToken() 5173 selfToken()
5333 } 5174 }
5334 case 86: 5175 case 84:
5335 // line 290 "scan_tokens.rl" 5176//line scan_tokens.rl:301
5336
5337 p = (te) - 1 5177 p = (te) - 1
5338 { 5178 {
5339 token(TokenBadUTF8) 5179 token(TokenBadUTF8)
5340 } 5180 }
5341 case 87: 5181 case 85:
5342 // line 1 "NONE" 5182//line NONE:1
5343
5344 switch act { 5183 switch act {
5345 case 21:
5346 {
5347 p = (te) - 1
5348 token(TokenIdent)
5349 }
5350 case 22: 5184 case 22:
5351 { 5185 {
5352 p = (te) - 1 5186 p = (te) - 1
5353 token(TokenComment) 5187 token(TokenIdent)
5354 }
5355 case 32:
5356 {
5357 p = (te) - 1
5358 selfToken()
5359 }
5360 case 38:
5361 {
5362 p = (te) - 1
5363 token(TokenBadUTF8)
5364 } 5188 }
5365 case 39: 5189 case 39:
5366 { 5190 {
5367 p = (te) - 1 5191 p = (te) - 1
5368 token(TokenInvalid) 5192 token(TokenBadUTF8)
5369 } 5193 }
5370 } 5194 }
5371 5195
5372 // line 5232 "scan_tokens.go" 5196//line scan_tokens.go:5055
5373 } 5197 }
5374 } 5198 }
5375 5199
@@ -5380,17 +5204,15 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
5380 for ; _nacts > 0; _nacts-- { 5204 for ; _nacts > 0; _nacts-- {
5381 _acts++ 5205 _acts++
5382 switch _hcltok_actions[_acts-1] { 5206 switch _hcltok_actions[_acts-1] {
5383 case 4: 5207 case 1:
5384 // line 1 "NONE" 5208//line NONE:1
5385
5386 ts = 0 5209 ts = 0
5387 5210
5388 case 5: 5211 case 2:
5389 // line 1 "NONE" 5212//line NONE:1
5390
5391 act = 0 5213 act = 0
5392 5214
5393 // line 5252 "scan_tokens.go" 5215//line scan_tokens.go:5073
5394 } 5216 }
5395 } 5217 }
5396 5218
@@ -5416,7 +5238,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
5416 } 5238 }
5417 } 5239 }
5418 5240
5419 // line 352 "scan_tokens.rl" 5241//line scan_tokens.rl:363
5420 5242
5421 // If we fall out here without being in a final state then we've 5243 // If we fall out here without being in a final state then we've
5422 // encountered something that the scanner can't match, which we'll 5244 // encountered something that the scanner can't match, which we'll
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl
index 83ef65b..4443dc4 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/scan_tokens.rl
@@ -9,17 +9,22 @@ import (
9 9
10// This file is generated from scan_tokens.rl. DO NOT EDIT. 10// This file is generated from scan_tokens.rl. DO NOT EDIT.
11%%{ 11%%{
12 # (except you are actually in scan_tokens.rl here, so edit away!) 12 # (except when you are actually in scan_tokens.rl here, so edit away!)
13 13
14 machine hcltok; 14 machine hcltok;
15 write data; 15 write data;
16}%% 16}%%
17 17
18func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token { 18func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []Token {
19 stripData := stripUTF8BOM(data)
20 start.Byte += len(data) - len(stripData)
21 data = stripData
22
19 f := &tokenAccum{ 23 f := &tokenAccum{
20 Filename: filename, 24 Filename: filename,
21 Bytes: data, 25 Bytes: data,
22 Pos: start, 26 Pos: start,
27 StartByte: start.Byte,
23 } 28 }
24 29
25 %%{ 30 %%{
@@ -39,7 +44,7 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
39 Ident = (ID_Start | '_') (ID_Continue | '-')*; 44 Ident = (ID_Start | '_') (ID_Continue | '-')*;
40 45
41 # Symbols that just represent themselves are handled as a single rule. 46 # Symbols that just represent themselves are handled as a single rule.
42 SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`"; 47 SelfToken = "[" | "]" | "(" | ")" | "." | "," | "*" | "/" | "%" | "+" | "-" | "=" | "<" | ">" | "!" | "?" | ":" | "\n" | "&" | "|" | "~" | "^" | ";" | "`" | "'";
43 48
44 EqualOp = "=="; 49 EqualOp = "==";
45 NotEqual = "!="; 50 NotEqual = "!=";
@@ -58,9 +63,17 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
58 BeginHeredocTmpl = '<<' ('-')? Ident Newline; 63 BeginHeredocTmpl = '<<' ('-')? Ident Newline;
59 64
60 Comment = ( 65 Comment = (
61 ("#" (any - EndOfLine)* EndOfLine) | 66 # The :>> operator in these is a "finish-guarded concatenation",
62 ("//" (any - EndOfLine)* EndOfLine) | 67 # which terminates the sequence on its left when it completes
63 ("/*" any* "*/") 68 # the sequence on its right.
69 # In the single-line comment cases this is allowing us to make
70 # the trailing EndOfLine optional while still having the overall
71 # pattern terminate. In the multi-line case it ensures that
72 # the first comment in the file ends at the first */, rather than
73 # gobbling up all of the "any*" until the _final_ */ in the file.
74 ("#" (any - EndOfLine)* :>> EndOfLine?) |
75 ("//" (any - EndOfLine)* :>> EndOfLine?) |
76 ("/*" any* :>> "*/")
64 ); 77 );
65 78
66 # Note: hclwrite assumes that only ASCII spaces appear between tokens, 79 # Note: hclwrite assumes that only ASCII spaces appear between tokens,
@@ -213,29 +226,35 @@ func scanTokens(data []byte, filename string, start hcl.Pos, mode scanMode) []To
213 TemplateInterp = "${" ("~")?; 226 TemplateInterp = "${" ("~")?;
214 TemplateControl = "%{" ("~")?; 227 TemplateControl = "%{" ("~")?;
215 EndStringTmpl = '"'; 228 EndStringTmpl = '"';
216 StringLiteralChars = (AnyUTF8 - ("\r"|"\n")); 229 NewlineChars = ("\r"|"\n");
230 NewlineCharsSeq = NewlineChars+;
231 StringLiteralChars = (AnyUTF8 - NewlineChars);
232 TemplateIgnoredNonBrace = (^'{' %{ fhold; });
233 TemplateNotInterp = '$' (TemplateIgnoredNonBrace | TemplateInterp);
234 TemplateNotControl = '%' (TemplateIgnoredNonBrace | TemplateControl);
235 QuotedStringLiteralWithEsc = ('\\' StringLiteralChars) | (StringLiteralChars - ("$" | '%' | '"' | "\\"));
217 TemplateStringLiteral = ( 236 TemplateStringLiteral = (
218 ('$' ^'{' %{ fhold; }) | 237 (TemplateNotInterp) |
219 ('%' ^'{' %{ fhold; }) | 238 (TemplateNotControl) |
220 ('\\' StringLiteralChars) | 239 (QuotedStringLiteralWithEsc)+
221 (StringLiteralChars - ("$" | '%' | '"')) 240 );
222 )+;
223 HeredocStringLiteral = ( 241 HeredocStringLiteral = (
224 ('$' ^'{' %{ fhold; }) | 242 (TemplateNotInterp) |
225 ('%' ^'{' %{ fhold; }) | 243 (TemplateNotControl) |
226 (StringLiteralChars - ("$" | '%')) 244 (StringLiteralChars - ("$" | '%'))*
227 )*; 245 );
228 BareStringLiteral = ( 246 BareStringLiteral = (
229 ('$' ^'{') | 247 (TemplateNotInterp) |
230 ('%' ^'{') | 248 (TemplateNotControl) |
231 (StringLiteralChars - ("$" | '%')) 249 (StringLiteralChars - ("$" | '%'))*
232 )* Newline?; 250 ) Newline?;
233 251
234 stringTemplate := |* 252 stringTemplate := |*
235 TemplateInterp => beginTemplateInterp; 253 TemplateInterp => beginTemplateInterp;
236 TemplateControl => beginTemplateControl; 254 TemplateControl => beginTemplateControl;
237 EndStringTmpl => endStringTemplate; 255 EndStringTmpl => endStringTemplate;
238 TemplateStringLiteral => { token(TokenQuotedLit); }; 256 TemplateStringLiteral => { token(TokenQuotedLit); };
257 NewlineCharsSeq => { token(TokenQuotedNewline); };
239 AnyUTF8 => { token(TokenInvalid); }; 258 AnyUTF8 => { token(TokenInvalid); };
240 BrokenUTF8 => { token(TokenBadUTF8); }; 259 BrokenUTF8 => { token(TokenBadUTF8); };
241 *|; 260 *|;
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
index 49b9a3e..091c1c2 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
@@ -4,18 +4,18 @@ This is the specification of the syntax and semantics of the native syntax
4for HCL. HCL is a system for defining configuration languages for applications. 4for HCL. HCL is a system for defining configuration languages for applications.
5The HCL information model is designed to support multiple concrete syntaxes 5The HCL information model is designed to support multiple concrete syntaxes
6for configuration, but this native syntax is considered the primary format 6for configuration, but this native syntax is considered the primary format
7and is optimized for human authoring and maintenence, as opposed to machine 7and is optimized for human authoring and maintenance, as opposed to machine
8generation of configuration. 8generation of configuration.
9 9
10The language consists of three integrated sub-languages: 10The language consists of three integrated sub-languages:
11 11
12* The _structural_ language defines the overall heirarchical configuration 12- The _structural_ language defines the overall hierarchical configuration
13 structure, and is a serialization of HCL bodies, blocks and attributes. 13 structure, and is a serialization of HCL bodies, blocks and attributes.
14 14
15* The _expression_ language is used to express attribute values, either as 15- The _expression_ language is used to express attribute values, either as
16 literals or as derivations of other values. 16 literals or as derivations of other values.
17 17
18* The _template_ language is used to compose values together into strings, 18- The _template_ language is used to compose values together into strings,
19 as one of several types of expression in the expression language. 19 as one of several types of expression in the expression language.
20 20
21In normal use these three sub-languages are used together within configuration 21In normal use these three sub-languages are used together within configuration
@@ -30,19 +30,19 @@ Within this specification a semi-formal notation is used to illustrate the
30details of syntax. This notation is intended for human consumption rather 30details of syntax. This notation is intended for human consumption rather
31than machine consumption, with the following conventions: 31than machine consumption, with the following conventions:
32 32
33* A naked name starting with an uppercase letter is a global production, 33- A naked name starting with an uppercase letter is a global production,
34 common to all of the syntax specifications in this document. 34 common to all of the syntax specifications in this document.
35* A naked name starting with a lowercase letter is a local production, 35- A naked name starting with a lowercase letter is a local production,
36 meaningful only within the specification where it is defined. 36 meaningful only within the specification where it is defined.
37* Double and single quotes (`"` and `'`) are used to mark literal character 37- Double and single quotes (`"` and `'`) are used to mark literal character
38 sequences, which may be either punctuation markers or keywords. 38 sequences, which may be either punctuation markers or keywords.
39* The default operator for combining items, which has no punctuation, 39- The default operator for combining items, which has no punctuation,
40 is concatenation. 40 is concatenation.
41* The symbol `|` indicates that any one of its left and right operands may 41- The symbol `|` indicates that any one of its left and right operands may
42 be present. 42 be present.
43* The `*` symbol indicates zero or more repetitions of the item to its left. 43- The `*` symbol indicates zero or more repetitions of the item to its left.
44* The `?` symbol indicates zero or one of the item to its left. 44- The `?` symbol indicates zero or one of the item to its left.
45* Parentheses (`(` and `)`) are used to group items together to apply 45- Parentheses (`(` and `)`) are used to group items together to apply
46 the `|`, `*` and `?` operators to them collectively. 46 the `|`, `*` and `?` operators to them collectively.
47 47
48The grammar notation does not fully describe the language. The prose may 48The grammar notation does not fully describe the language. The prose may
@@ -77,11 +77,11 @@ are not valid within HCL native syntax.
77 77
78Comments serve as program documentation and come in two forms: 78Comments serve as program documentation and come in two forms:
79 79
80* _Line comments_ start with either the `//` or `#` sequences and end with 80- _Line comments_ start with either the `//` or `#` sequences and end with
81 the next newline sequence. A line comments is considered equivalent to a 81 the next newline sequence. A line comments is considered equivalent to a
82 newline sequence. 82 newline sequence.
83 83
84* _Inline comments_ start with the `/*` sequence and end with the `*/` 84- _Inline comments_ start with the `/*` sequence and end with the `*/`
85 sequence, and may have any characters within except the ending sequence. 85 sequence, and may have any characters within except the ending sequence.
86 An inline comments is considered equivalent to a whitespace sequence. 86 An inline comments is considered equivalent to a whitespace sequence.
87 87
@@ -91,7 +91,7 @@ template literals except inside an interpolation sequence or template directive.
91### Identifiers 91### Identifiers
92 92
93Identifiers name entities such as blocks, attributes and expression variables. 93Identifiers name entities such as blocks, attributes and expression variables.
94Identifiers are interpreted as per [UAX #31][UAX31] Section 2. Specifically, 94Identifiers are interpreted as per [UAX #31][uax31] Section 2. Specifically,
95their syntax is defined in terms of the `ID_Start` and `ID_Continue` 95their syntax is defined in terms of the `ID_Start` and `ID_Continue`
96character properties as follows: 96character properties as follows:
97 97
@@ -109,7 +109,7 @@ that is not part of the unicode `ID_Continue` definition. This is to allow
109attribute names and block type names to contain dashes, although underscores 109attribute names and block type names to contain dashes, although underscores
110as word separators are considered the idiomatic usage. 110as word separators are considered the idiomatic usage.
111 111
112[UAX31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax" 112[uax31]: http://unicode.org/reports/tr31/ "Unicode Identifier and Pattern Syntax"
113 113
114### Keywords 114### Keywords
115 115
@@ -150,18 +150,19 @@ expmark = ('e' | 'E') ("+" | "-")?;
150The structural language consists of syntax representing the following 150The structural language consists of syntax representing the following
151constructs: 151constructs:
152 152
153* _Attributes_, which assign a value to a specified name. 153- _Attributes_, which assign a value to a specified name.
154* _Blocks_, which create a child body annotated by a type and optional labels. 154- _Blocks_, which create a child body annotated by a type and optional labels.
155* _Body Content_, which consists of a collection of attributes and blocks. 155- _Body Content_, which consists of a collection of attributes and blocks.
156 156
157These constructs correspond to the similarly-named concepts in the 157These constructs correspond to the similarly-named concepts in the
158language-agnostic HCL information model. 158language-agnostic HCL information model.
159 159
160```ebnf 160```ebnf
161ConfigFile = Body; 161ConfigFile = Body;
162Body = (Attribute | Block)*; 162Body = (Attribute | Block | OneLineBlock)*;
163Attribute = Identifier "=" Expression Newline; 163Attribute = Identifier "=" Expression Newline;
164Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline; 164Block = Identifier (StringLit|Identifier)* "{" Newline Body "}" Newline;
165OneLineBlock = Identifier (StringLit|Identifier)* "{" (Identifier "=" Expression)? "}" Newline;
165``` 166```
166 167
167### Configuration Files 168### Configuration Files
@@ -186,7 +187,7 @@ for later evaluation by the calling application.
186### Blocks 187### Blocks
187 188
188A _block_ creates a child body that is annotated with a block _type_ and 189A _block_ creates a child body that is annotated with a block _type_ and
189zero or more block _labels_. Blocks create a structural heirachy which can be 190zero or more block _labels_. Blocks create a structural hierachy which can be
190interpreted by the calling application. 191interpreted by the calling application.
191 192
192Block labels can either be quoted literal strings or naked identifiers. 193Block labels can either be quoted literal strings or naked identifiers.
@@ -252,9 +253,9 @@ LiteralValue = (
252); 253);
253``` 254```
254 255
255* Numeric literals represent values of type _number_. 256- Numeric literals represent values of type _number_.
256* The `true` and `false` keywords represent values of type _bool_. 257- The `true` and `false` keywords represent values of type _bool_.
257* The `null` keyword represents a null value of the dynamic pseudo-type. 258- The `null` keyword represents a null value of the dynamic pseudo-type.
258 259
259String literals are not directly available in the expression sub-language, but 260String literals are not directly available in the expression sub-language, but
260are available via the template sub-language, which can in turn be incorporated 261are available via the template sub-language, which can in turn be incorporated
@@ -285,8 +286,8 @@ When specifying an object element, an identifier is interpreted as a literal
285attribute name as opposed to a variable reference. To populate an item key 286attribute name as opposed to a variable reference. To populate an item key
286from a variable, use parentheses to disambiguate: 287from a variable, use parentheses to disambiguate:
287 288
288* `{foo = "baz"}` is interpreted as an attribute literally named `foo`. 289- `{foo = "baz"}` is interpreted as an attribute literally named `foo`.
289* `{(foo) = "baz"}` is interpreted as an attribute whose name is taken 290- `{(foo) = "baz"}` is interpreted as an attribute whose name is taken
290 from the variable named `foo`. 291 from the variable named `foo`.
291 292
292Between the open and closing delimiters of these sequences, newline sequences 293Between the open and closing delimiters of these sequences, newline sequences
@@ -296,14 +297,14 @@ There is a syntax ambiguity between _for expressions_ and collection values
296whose first element is a reference to a variable named `for`. The 297whose first element is a reference to a variable named `for`. The
297_for expression_ interpretation has priority, so to produce a tuple whose 298_for expression_ interpretation has priority, so to produce a tuple whose
298first element is the value of a variable named `for`, or an object with a 299first element is the value of a variable named `for`, or an object with a
299key named `for`, use paretheses to disambiguate: 300key named `for`, use parentheses to disambiguate:
300 301
301* `[for, foo, baz]` is a syntax error. 302- `[for, foo, baz]` is a syntax error.
302* `[(for), foo, baz]` is a tuple whose first element is the value of variable 303- `[(for), foo, baz]` is a tuple whose first element is the value of variable
303 `for`. 304 `for`.
304* `{for: 1, baz: 2}` is a syntax error. 305- `{for: 1, baz: 2}` is a syntax error.
305* `{(for): 1, baz: 2}` is an object with an attribute literally named `for`. 306- `{(for): 1, baz: 2}` is an object with an attribute literally named `for`.
306* `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the 307- `{baz: 2, for: 1}` is equivalent to the previous example, and resolves the
307 ambiguity by reordering. 308 ambiguity by reordering.
308 309
309### Template Expressions 310### Template Expressions
@@ -311,9 +312,9 @@ key named `for`, use paretheses to disambiguate:
311A _template expression_ embeds a program written in the template sub-language 312A _template expression_ embeds a program written in the template sub-language
312as an expression. Template expressions come in two forms: 313as an expression. Template expressions come in two forms:
313 314
314* A _quoted_ template expression is delimited by quote characters (`"`) and 315- A _quoted_ template expression is delimited by quote characters (`"`) and
315 defines a template as a single-line expression with escape characters. 316 defines a template as a single-line expression with escape characters.
316* A _heredoc_ template expression is introduced by a `<<` sequence and 317- A _heredoc_ template expression is introduced by a `<<` sequence and
317 defines a template via a multi-line sequence terminated by a user-chosen 318 defines a template via a multi-line sequence terminated by a user-chosen
318 delimiter. 319 delimiter.
319 320
@@ -321,7 +322,7 @@ In both cases the template interpolation and directive syntax is available for
321use within the delimiters, and any text outside of these special sequences is 322use within the delimiters, and any text outside of these special sequences is
322interpreted as a literal string. 323interpreted as a literal string.
323 324
324In _quoted_ template expressions any literal string sequences within the 325In _quoted_ template expressions any literal string sequences within the
325template behave in a special way: literal newline sequences are not permitted 326template behave in a special way: literal newline sequences are not permitted
326and instead _escape sequences_ can be included, starting with the 327and instead _escape sequences_ can be included, starting with the
327backslash `\`: 328backslash `\`:
@@ -457,14 +458,14 @@ are provided, the first is the key and the second is the value.
457Tuple, object, list, map, and set types are iterable. The type of collection 458Tuple, object, list, map, and set types are iterable. The type of collection
458used defines how the key and value variables are populated: 459used defines how the key and value variables are populated:
459 460
460* For tuple and list types, the _key_ is the zero-based index into the 461- For tuple and list types, the _key_ is the zero-based index into the
461 sequence for each element, and the _value_ is the element value. The 462 sequence for each element, and the _value_ is the element value. The
462 elements are visited in index order. 463 elements are visited in index order.
463* For object and map types, the _key_ is the string attribute name or element 464- For object and map types, the _key_ is the string attribute name or element
464 key, and the _value_ is the attribute or element value. The elements are 465 key, and the _value_ is the attribute or element value. The elements are
465 visited in the order defined by a lexicographic sort of the attribute names 466 visited in the order defined by a lexicographic sort of the attribute names
466 or keys. 467 or keys.
467* For set types, the _key_ and _value_ are both the element value. The elements 468- For set types, the _key_ and _value_ are both the element value. The elements
468 are visited in an undefined but consistent order. 469 are visited in an undefined but consistent order.
469 470
470The expression after the colon and (in the case of object `for`) the expression 471The expression after the colon and (in the case of object `for`) the expression
@@ -482,16 +483,16 @@ object.
482In the case of object `for`, it is an error if two input elements produce 483In the case of object `for`, it is an error if two input elements produce
483the same result from the attribute name expression, since duplicate 484the same result from the attribute name expression, since duplicate
484attributes are not possible. If the ellipsis symbol (`...`) appears 485attributes are not possible. If the ellipsis symbol (`...`) appears
485immediately after the value experssion, this activates the grouping mode in 486immediately after the value expression, this activates the grouping mode in
486which each value in the resulting object is a _tuple_ of all of the values 487which each value in the resulting object is a _tuple_ of all of the values
487that were produced against each distinct key. 488that were produced against each distinct key.
488 489
489* `[for v in ["a", "b"]: v]` returns `["a", "b"]`. 490- `[for v in ["a", "b"]: v]` returns `["a", "b"]`.
490* `[for i, v in ["a", "b"]: i]` returns `[0, 1]`. 491- `[for i, v in ["a", "b"]: i]` returns `[0, 1]`.
491* `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`. 492- `{for i, v in ["a", "b"]: v => i}` returns `{a = 0, b = 1}`.
492* `{for i, v in ["a", "a", "b"]: k => v}` produces an error, because attribute 493- `{for i, v in ["a", "a", "b"]: k => v}` produces an error, because attribute
493 `a` is defined twice. 494 `a` is defined twice.
494* `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`. 495- `{for i, v in ["a", "a", "b"]: v => i...}` returns `{a = [0, 1], b = [2]}`.
495 496
496If the `if` keyword is used after the element expression(s), it applies an 497If the `if` keyword is used after the element expression(s), it applies an
497additional predicate that can be used to conditionally filter elements from 498additional predicate that can be used to conditionally filter elements from
@@ -501,7 +502,7 @@ element expression(s). It must evaluate to a boolean value; if `true`, the
501element will be evaluated as normal, while if `false` the element will be 502element will be evaluated as normal, while if `false` the element will be
502skipped. 503skipped.
503 504
504* `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`. 505- `[for i, v in ["a", "b", "c"]: v if i < 2]` returns `["a", "b"]`.
505 506
506If the collection value, element expression(s) or condition expression return 507If the collection value, element expression(s) or condition expression return
507unknown values that are otherwise type-valid, the result is a value of the 508unknown values that are otherwise type-valid, the result is a value of the
@@ -566,10 +567,10 @@ elements in a tuple, list, or set value.
566 567
567There are two kinds of "splat" operator: 568There are two kinds of "splat" operator:
568 569
569* The _attribute-only_ splat operator supports only attribute lookups into 570- The _attribute-only_ splat operator supports only attribute lookups into
570 the elements from a list, but supports an arbitrary number of them. 571 the elements from a list, but supports an arbitrary number of them.
571 572
572* The _full_ splat operator additionally supports indexing into the elements 573- The _full_ splat operator additionally supports indexing into the elements
573 from a list, and allows any combination of attribute access and index 574 from a list, and allows any combination of attribute access and index
574 operations. 575 operations.
575 576
@@ -582,9 +583,9 @@ fullSplat = "[" "*" "]" (GetAttr | Index)*;
582The splat operators can be thought of as shorthands for common operations that 583The splat operators can be thought of as shorthands for common operations that
583could otherwise be performed using _for expressions_: 584could otherwise be performed using _for expressions_:
584 585
585* `tuple.*.foo.bar[0]` is approximately equivalent to 586- `tuple.*.foo.bar[0]` is approximately equivalent to
586 `[for v in tuple: v.foo.bar][0]`. 587 `[for v in tuple: v.foo.bar][0]`.
587* `tuple[*].foo.bar[0]` is approximately equivalent to 588- `tuple[*].foo.bar[0]` is approximately equivalent to
588 `[for v in tuple: v.foo.bar[0]]` 589 `[for v in tuple: v.foo.bar[0]]`
589 590
590Note the difference in how the trailing index operator is interpreted in 591Note the difference in how the trailing index operator is interpreted in
@@ -596,13 +597,15 @@ _for expressions_ shown above: if a splat operator is applied to a value that
596is _not_ of tuple, list, or set type, the value is coerced automatically into 597is _not_ of tuple, list, or set type, the value is coerced automatically into
597a single-value list of the value type: 598a single-value list of the value type:
598 599
599* `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object` 600- `any_object.*.id` is equivalent to `[any_object.id]`, assuming that `any_object`
600 is a single object. 601 is a single object.
601* `any_number.*` is equivalent to `[any_number]`, assuming that `any_number` 602- `any_number.*` is equivalent to `[any_number]`, assuming that `any_number`
602 is a single number. 603 is a single number.
603 604
604If the left operand of a splat operator is an unknown value of any type, the 605If applied to a null value that is not tuple, list, or set, the result is always
605result is a value of the dynamic pseudo-type. 606an empty tuple, which allows conveniently converting a possibly-null scalar
607value into a tuple of zero or one elements. It is illegal to apply a splat
608operator to a null value of tuple, list, or set type.
606 609
607### Operations 610### Operations
608 611
@@ -683,7 +686,7 @@ Arithmetic operations are considered to be performed in an arbitrary-precision
683number space. 686number space.
684 687
685If either operand of an arithmetic operator is an unknown number or a value 688If either operand of an arithmetic operator is an unknown number or a value
686of the dynamic pseudo-type, the result is an unknown number. 689of the dynamic pseudo-type, the result is an unknown number.
687 690
688### Logic Operators 691### Logic Operators
689 692
@@ -708,7 +711,7 @@ the outcome of a boolean expression.
708Conditional = Expression "?" Expression ":" Expression; 711Conditional = Expression "?" Expression ":" Expression;
709``` 712```
710 713
711The first expression is the _predicate_, which is evaluated and must produce 714The first expression is the _predicate_, which is evaluated and must produce
712a boolean result. If the predicate value is `true`, the result of the second 715a boolean result. If the predicate value is `true`, the result of the second
713expression is the result of the conditional. If the predicate value is 716expression is the result of the conditional. If the predicate value is
714`false`, the result of the third expression is the result of the conditional. 717`false`, the result of the third expression is the result of the conditional.
@@ -769,15 +772,15 @@ sequence is escaped as `%%{`.
769 772
770When the template sub-language is embedded in the expression language via 773When the template sub-language is embedded in the expression language via
771_template expressions_, additional constraints and transforms are applied to 774_template expressions_, additional constraints and transforms are applied to
772template literalsas described in the definition of template expressions. 775template literals as described in the definition of template expressions.
773 776
774The value of a template literal can be modified by _strip markers_ in any 777The value of a template literal can be modified by _strip markers_ in any
775interpolations or directives that are adjacent to it. A strip marker is 778interpolations or directives that are adjacent to it. A strip marker is
776a tilde (`~`) placed immediately after the opening `{` or before the closing 779a tilde (`~`) placed immediately after the opening `{` or before the closing
777`}` of a template sequence: 780`}` of a template sequence:
778 781
779* `hello ${~ "world" }` produces `"helloworld"`. 782- `hello ${~ "world" }` produces `"helloworld"`.
780* `%{ if true ~} hello %{~ endif }` produces `"hello"`. 783- `%{ if true ~} hello %{~ endif }` produces `"hello"`.
781 784
782When a strip marker is present, any spaces adjacent to it in the corresponding 785When a strip marker is present, any spaces adjacent to it in the corresponding
783string literal (if any) are removed before producing the final value. Space 786string literal (if any) are removed before producing the final value. Space
@@ -786,7 +789,7 @@ characters are interpreted as per Unicode's definition.
786Stripping is done at syntax level rather than value level. Values returned 789Stripping is done at syntax level rather than value level. Values returned
787by interpolations or directives are not subject to stripping: 790by interpolations or directives are not subject to stripping:
788 791
789* `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`, 792- `${"hello" ~}${" world"}` produces `"hello world"`, and not `"helloworld"`,
790 because the space is not in a template literal directly adjacent to the 793 because the space is not in a template literal directly adjacent to the
791 strip marker. 794 strip marker.
792 795
@@ -824,9 +827,9 @@ TemplateIf = (
824The evaluation of the `if` directive is equivalent to the conditional 827The evaluation of the `if` directive is equivalent to the conditional
825expression, with the following exceptions: 828expression, with the following exceptions:
826 829
827* The two sub-templates always produce strings, and thus the result value is 830- The two sub-templates always produce strings, and thus the result value is
828 also always a string. 831 also always a string.
829* The `else` clause may be omitted, in which case the conditional's third 832- The `else` clause may be omitted, in which case the conditional's third
830 expression result is implied to be the empty string. 833 expression result is implied to be the empty string.
831 834
832### Template For Directive 835### Template For Directive
@@ -846,9 +849,9 @@ TemplateFor = (
846The evaluation of the `for` directive is equivalent to the _for expression_ 849The evaluation of the `for` directive is equivalent to the _for expression_
847when producing a tuple, with the following exceptions: 850when producing a tuple, with the following exceptions:
848 851
849* The sub-template always produces a string. 852- The sub-template always produces a string.
850* There is no equivalent of the "if" clause on the for expression. 853- There is no equivalent of the "if" clause on the for expression.
851* The elements of the resulting tuple are all converted to strings and 854- The elements of the resulting tuple are all converted to strings and
852 concatenated to produce a flat string result. 855 concatenated to produce a flat string result.
853 856
854### Template Interpolation Unwrapping 857### Template Interpolation Unwrapping
@@ -864,13 +867,13 @@ template or expression syntax. Unwrapping allows arbitrary expressions to be
864used to populate attributes when strings in such languages are interpreted 867used to populate attributes when strings in such languages are interpreted
865as templates. 868as templates.
866 869
867* `${true}` produces the boolean value `true` 870- `${true}` produces the boolean value `true`
868* `${"${true}"}` produces the boolean value `true`, because both the inner 871- `${"${true}"}` produces the boolean value `true`, because both the inner
869 and outer interpolations are subject to unwrapping. 872 and outer interpolations are subject to unwrapping.
870* `hello ${true}` produces the string `"hello true"` 873- `hello ${true}` produces the string `"hello true"`
871* `${""}${true}` produces the string `"true"` because there are two 874- `${""}${true}` produces the string `"true"` because there are two
872 interpolation sequences, even though one produces an empty result. 875 interpolation sequences, even though one produces an empty result.
873* `%{ for v in [true] }${v}%{ endif }` produces the string `true` because 876- `%{ for v in [true] }${v}%{ endif }` produces the string `true` because
874 the presence of the `for` directive circumvents the unwrapping even though 877 the presence of the `for` directive circumvents the unwrapping even though
875 the final result is a single value. 878 the final result is a single value.
876 879
@@ -903,7 +906,7 @@ key/value pairs given are returned as the static pairs, with no further
903interpretation. 906interpretation.
904 907
905The usual requirement that an attribute name be interpretable as a string 908The usual requirement that an attribute name be interpretable as a string
906does not apply to this static analyis, allowing callers to provide map-like 909does not apply to this static analysis, allowing callers to provide map-like
907constructs with different key types by building on the map syntax. 910constructs with different key types by building on the map syntax.
908 911
909### Static Call 912### Static Call
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
index d69f65b..476025d 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure.go
@@ -9,6 +9,10 @@ import (
9 9
10// AsHCLBlock returns the block data expressed as a *hcl.Block. 10// AsHCLBlock returns the block data expressed as a *hcl.Block.
11func (b *Block) AsHCLBlock() *hcl.Block { 11func (b *Block) AsHCLBlock() *hcl.Block {
12 if b == nil {
13 return nil
14 }
15
12 lastHeaderRange := b.TypeRange 16 lastHeaderRange := b.TypeRange
13 if len(b.LabelRanges) > 0 { 17 if len(b.LabelRanges) > 0 {
14 lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1] 18 lastHeaderRange = b.LabelRanges[len(b.LabelRanges)-1]
@@ -43,8 +47,8 @@ type Body struct {
43var assertBodyImplBody hcl.Body = &Body{} 47var assertBodyImplBody hcl.Body = &Body{}
44 48
45func (b *Body) walkChildNodes(w internalWalkFunc) { 49func (b *Body) walkChildNodes(w internalWalkFunc) {
46 b.Attributes = w(b.Attributes).(Attributes) 50 w(b.Attributes)
47 b.Blocks = w(b.Blocks).(Blocks) 51 w(b.Blocks)
48} 52}
49 53
50func (b *Body) Range() hcl.Range { 54func (b *Body) Range() hcl.Range {
@@ -82,8 +86,8 @@ func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostic
82 86
83 diags = append(diags, &hcl.Diagnostic{ 87 diags = append(diags, &hcl.Diagnostic{
84 Severity: hcl.DiagError, 88 Severity: hcl.DiagError,
85 Summary: "Unsupported attribute", 89 Summary: "Unsupported argument",
86 Detail: fmt.Sprintf("An attribute named %q is not expected here.%s", name, suggestion), 90 Detail: fmt.Sprintf("An argument named %q is not expected here.%s", name, suggestion),
87 Subject: &attr.NameRange, 91 Subject: &attr.NameRange,
88 }) 92 })
89 } 93 }
@@ -103,7 +107,7 @@ func (b *Body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostic
103 // Is there an attribute of the same name? 107 // Is there an attribute of the same name?
104 for _, attrS := range schema.Attributes { 108 for _, attrS := range schema.Attributes {
105 if attrS.Name == blockTy { 109 if attrS.Name == blockTy {
106 suggestion = fmt.Sprintf(" Did you mean to define attribute %q?", blockTy) 110 suggestion = fmt.Sprintf(" Did you mean to define argument %q? If so, use the equals sign to assign it a value.", blockTy)
107 break 111 break
108 } 112 }
109 } 113 }
@@ -147,8 +151,8 @@ func (b *Body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Bod
147 if attrS.Required { 151 if attrS.Required {
148 diags = append(diags, &hcl.Diagnostic{ 152 diags = append(diags, &hcl.Diagnostic{
149 Severity: hcl.DiagError, 153 Severity: hcl.DiagError,
150 Summary: "Missing required attribute", 154 Summary: "Missing required argument",
151 Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), 155 Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name),
152 Subject: b.MissingItemRange().Ptr(), 156 Subject: b.MissingItemRange().Ptr(),
153 }) 157 })
154 } 158 }
@@ -251,9 +255,9 @@ func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
251 example := b.Blocks[0] 255 example := b.Blocks[0]
252 diags = append(diags, &hcl.Diagnostic{ 256 diags = append(diags, &hcl.Diagnostic{
253 Severity: hcl.DiagError, 257 Severity: hcl.DiagError,
254 Summary: fmt.Sprintf("Unexpected %s block", example.Type), 258 Summary: fmt.Sprintf("Unexpected %q block", example.Type),
255 Detail: "Blocks are not allowed here.", 259 Detail: "Blocks are not allowed here.",
256 Context: &example.TypeRange, 260 Subject: &example.TypeRange,
257 }) 261 })
258 // we will continue processing anyway, and return the attributes 262 // we will continue processing anyway, and return the attributes
259 // we are able to find so that certain analyses can still be done 263 // we are able to find so that certain analyses can still be done
@@ -275,15 +279,19 @@ func (b *Body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
275} 279}
276 280
277func (b *Body) MissingItemRange() hcl.Range { 281func (b *Body) MissingItemRange() hcl.Range {
278 return b.EndRange 282 return hcl.Range{
283 Filename: b.SrcRange.Filename,
284 Start: b.SrcRange.Start,
285 End: b.SrcRange.Start,
286 }
279} 287}
280 288
281// Attributes is the collection of attribute definitions within a body. 289// Attributes is the collection of attribute definitions within a body.
282type Attributes map[string]*Attribute 290type Attributes map[string]*Attribute
283 291
284func (a Attributes) walkChildNodes(w internalWalkFunc) { 292func (a Attributes) walkChildNodes(w internalWalkFunc) {
285 for k, attr := range a { 293 for _, attr := range a {
286 a[k] = w(attr).(*Attribute) 294 w(attr)
287 } 295 }
288} 296}
289 297
@@ -317,7 +325,7 @@ type Attribute struct {
317} 325}
318 326
319func (a *Attribute) walkChildNodes(w internalWalkFunc) { 327func (a *Attribute) walkChildNodes(w internalWalkFunc) {
320 a.Expr = w(a.Expr).(Expression) 328 w(a.Expr)
321} 329}
322 330
323func (a *Attribute) Range() hcl.Range { 331func (a *Attribute) Range() hcl.Range {
@@ -326,6 +334,9 @@ func (a *Attribute) Range() hcl.Range {
326 334
327// AsHCLAttribute returns the block data expressed as a *hcl.Attribute. 335// AsHCLAttribute returns the block data expressed as a *hcl.Attribute.
328func (a *Attribute) AsHCLAttribute() *hcl.Attribute { 336func (a *Attribute) AsHCLAttribute() *hcl.Attribute {
337 if a == nil {
338 return nil
339 }
329 return &hcl.Attribute{ 340 return &hcl.Attribute{
330 Name: a.Name, 341 Name: a.Name,
331 Expr: a.Expr, 342 Expr: a.Expr,
@@ -339,8 +350,8 @@ func (a *Attribute) AsHCLAttribute() *hcl.Attribute {
339type Blocks []*Block 350type Blocks []*Block
340 351
341func (bs Blocks) walkChildNodes(w internalWalkFunc) { 352func (bs Blocks) walkChildNodes(w internalWalkFunc) {
342 for i, block := range bs { 353 for _, block := range bs {
343 bs[i] = w(block).(*Block) 354 w(block)
344 } 355 }
345} 356}
346 357
@@ -371,9 +382,13 @@ type Block struct {
371} 382}
372 383
373func (b *Block) walkChildNodes(w internalWalkFunc) { 384func (b *Block) walkChildNodes(w internalWalkFunc) {
374 b.Body = w(b.Body).(*Body) 385 w(b.Body)
375} 386}
376 387
377func (b *Block) Range() hcl.Range { 388func (b *Block) Range() hcl.Range {
378 return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange) 389 return hcl.RangeBetween(b.TypeRange, b.CloseBraceRange)
379} 390}
391
392func (b *Block) DefRange() hcl.Range {
393 return hcl.RangeBetween(b.TypeRange, b.OpenBraceRange)
394}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go
new file mode 100644
index 0000000..d8f023b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/structure_at_pos.go
@@ -0,0 +1,118 @@
1package hclsyntax
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// -----------------------------------------------------------------------------
8// The methods in this file are all optional extension methods that serve to
9// implement the methods of the same name on *hcl.File when its root body
10// is provided by this package.
11// -----------------------------------------------------------------------------
12
13// BlocksAtPos implements the method of the same name for an *hcl.File that
14// is backed by a *Body.
15func (b *Body) BlocksAtPos(pos hcl.Pos) []*hcl.Block {
16 list, _ := b.blocksAtPos(pos, true)
17 return list
18}
19
20// InnermostBlockAtPos implements the method of the same name for an *hcl.File
21// that is backed by a *Body.
22func (b *Body) InnermostBlockAtPos(pos hcl.Pos) *hcl.Block {
23 _, innermost := b.blocksAtPos(pos, false)
24 return innermost.AsHCLBlock()
25}
26
27// OutermostBlockAtPos implements the method of the same name for an *hcl.File
28// that is backed by a *Body.
29func (b *Body) OutermostBlockAtPos(pos hcl.Pos) *hcl.Block {
30 return b.outermostBlockAtPos(pos).AsHCLBlock()
31}
32
33// blocksAtPos is the internal engine of both BlocksAtPos and
34// InnermostBlockAtPos, which both need to do the same logic but return a
35// differently-shaped result.
36//
37// list is nil if makeList is false, avoiding an allocation. Innermost is
38// always set, and if the returned list is non-nil it will always match the
39// final element from that list.
40func (b *Body) blocksAtPos(pos hcl.Pos, makeList bool) (list []*hcl.Block, innermost *Block) {
41 current := b
42
43Blocks:
44 for current != nil {
45 for _, block := range current.Blocks {
46 wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
47 if wholeRange.ContainsPos(pos) {
48 innermost = block
49 if makeList {
50 list = append(list, innermost.AsHCLBlock())
51 }
52 current = block.Body
53 continue Blocks
54 }
55 }
56
57 // If we fall out here then none of the current body's nested blocks
58 // contain the position we are looking for, and so we're done.
59 break
60 }
61
62 return
63}
64
65// outermostBlockAtPos is the internal version of OutermostBlockAtPos that
66// returns a hclsyntax.Block rather than an hcl.Block, allowing for further
67// analysis if necessary.
68func (b *Body) outermostBlockAtPos(pos hcl.Pos) *Block {
69 // This is similar to blocksAtPos, but simpler because we know it only
70 // ever needs to search the first level of nested blocks.
71
72 for _, block := range b.Blocks {
73 wholeRange := hcl.RangeBetween(block.TypeRange, block.CloseBraceRange)
74 if wholeRange.ContainsPos(pos) {
75 return block
76 }
77 }
78
79 return nil
80}
81
82// AttributeAtPos implements the method of the same name for an *hcl.File
83// that is backed by a *Body.
84func (b *Body) AttributeAtPos(pos hcl.Pos) *hcl.Attribute {
85 return b.attributeAtPos(pos).AsHCLAttribute()
86}
87
88// attributeAtPos is the internal version of AttributeAtPos that returns a
89// hclsyntax.Attribute rather than an hcl.Attribute, allowing for further
90// analysis if necessary.
91func (b *Body) attributeAtPos(pos hcl.Pos) *Attribute {
92 searchBody := b
93 _, block := b.blocksAtPos(pos, false)
94 if block != nil {
95 searchBody = block.Body
96 }
97
98 for _, attr := range searchBody.Attributes {
99 if attr.SrcRange.ContainsPos(pos) {
100 return attr
101 }
102 }
103
104 return nil
105}
106
107// OutermostExprAtPos implements the method of the same name for an *hcl.File
108// that is backed by a *Body.
109func (b *Body) OutermostExprAtPos(pos hcl.Pos) hcl.Expression {
110 attr := b.attributeAtPos(pos)
111 if attr == nil {
112 return nil
113 }
114 if !attr.Expr.Range().ContainsPos(pos) {
115 return nil
116 }
117 return attr.Expr
118}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
index bcaa15f..3d898fd 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token.go
@@ -1,6 +1,7 @@
1package hclsyntax 1package hclsyntax
2 2
3import ( 3import (
4 "bytes"
4 "fmt" 5 "fmt"
5 6
6 "github.com/apparentlymart/go-textseg/textseg" 7 "github.com/apparentlymart/go-textseg/textseg"
@@ -84,16 +85,18 @@ const (
84 // things that might work in other languages they are familiar with, or 85 // things that might work in other languages they are familiar with, or
85 // simply make incorrect assumptions about the HCL language. 86 // simply make incorrect assumptions about the HCL language.
86 87
87 TokenBitwiseAnd TokenType = '&' 88 TokenBitwiseAnd TokenType = '&'
88 TokenBitwiseOr TokenType = '|' 89 TokenBitwiseOr TokenType = '|'
89 TokenBitwiseNot TokenType = '~' 90 TokenBitwiseNot TokenType = '~'
90 TokenBitwiseXor TokenType = '^' 91 TokenBitwiseXor TokenType = '^'
91 TokenStarStar TokenType = '➚' 92 TokenStarStar TokenType = '➚'
92 TokenBacktick TokenType = '`' 93 TokenApostrophe TokenType = '\''
93 TokenSemicolon TokenType = ';' 94 TokenBacktick TokenType = '`'
94 TokenTabs TokenType = '␉' 95 TokenSemicolon TokenType = ';'
95 TokenInvalid TokenType = '�' 96 TokenTabs TokenType = '␉'
96 TokenBadUTF8 TokenType = '💩' 97 TokenInvalid TokenType = '�'
98 TokenBadUTF8 TokenType = '💩'
99 TokenQuotedNewline TokenType = '␤'
97 100
98 // TokenNil is a placeholder for when a token is required but none is 101 // TokenNil is a placeholder for when a token is required but none is
99 // available, e.g. when reporting errors. The scanner will never produce 102 // available, e.g. when reporting errors. The scanner will never produce
@@ -114,10 +117,11 @@ const (
114) 117)
115 118
116type tokenAccum struct { 119type tokenAccum struct {
117 Filename string 120 Filename string
118 Bytes []byte 121 Bytes []byte
119 Pos hcl.Pos 122 Pos hcl.Pos
120 Tokens []Token 123 Tokens []Token
124 StartByte int
121} 125}
122 126
123func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) { 127func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
@@ -125,11 +129,11 @@ func (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {
125 // the start pos to get our end pos. 129 // the start pos to get our end pos.
126 130
127 start := f.Pos 131 start := f.Pos
128 start.Column += startOfs - f.Pos.Byte // Safe because only ASCII spaces can be in the offset 132 start.Column += startOfs + f.StartByte - f.Pos.Byte // Safe because only ASCII spaces can be in the offset
129 start.Byte = startOfs 133 start.Byte = startOfs + f.StartByte
130 134
131 end := start 135 end := start
132 end.Byte = endOfs 136 end.Byte = endOfs + f.StartByte
133 b := f.Bytes[startOfs:endOfs] 137 b := f.Bytes[startOfs:endOfs]
134 for len(b) > 0 { 138 for len(b) > 0 {
135 advance, seq, _ := textseg.ScanGraphemeClusters(b, true) 139 advance, seq, _ := textseg.ScanGraphemeClusters(b, true)
@@ -160,6 +164,13 @@ type heredocInProgress struct {
160 StartOfLine bool 164 StartOfLine bool
161} 165}
162 166
167func tokenOpensFlushHeredoc(tok Token) bool {
168 if tok.Type != TokenOHeredoc {
169 return false
170 }
171 return bytes.HasPrefix(tok.Bytes, []byte{'<', '<', '-'})
172}
173
163// checkInvalidTokens does a simple pass across the given tokens and generates 174// checkInvalidTokens does a simple pass across the given tokens and generates
164// diagnostics for tokens that should _never_ appear in HCL source. This 175// diagnostics for tokens that should _never_ appear in HCL source. This
165// is intended to avoid the need for the parser to have special support 176// is intended to avoid the need for the parser to have special support
@@ -174,11 +185,15 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
174 toldBitwise := 0 185 toldBitwise := 0
175 toldExponent := 0 186 toldExponent := 0
176 toldBacktick := 0 187 toldBacktick := 0
188 toldApostrophe := 0
177 toldSemicolon := 0 189 toldSemicolon := 0
178 toldTabs := 0 190 toldTabs := 0
179 toldBadUTF8 := 0 191 toldBadUTF8 := 0
180 192
181 for _, tok := range tokens { 193 for _, tok := range tokens {
194 // copy token so it's safe to point to it
195 tok := tok
196
182 switch tok.Type { 197 switch tok.Type {
183 case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot: 198 case TokenBitwiseAnd, TokenBitwiseOr, TokenBitwiseXor, TokenBitwiseNot:
184 if toldBitwise < 4 { 199 if toldBitwise < 4 {
@@ -214,22 +229,36 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
214 case TokenBacktick: 229 case TokenBacktick:
215 // Only report for alternating (even) backticks, so we won't report both start and ends of the same 230 // Only report for alternating (even) backticks, so we won't report both start and ends of the same
216 // backtick-quoted string. 231 // backtick-quoted string.
217 if toldExponent < 4 && (toldExponent%2) == 0 { 232 if (toldBacktick % 2) == 0 {
218 diags = append(diags, &hcl.Diagnostic{ 233 diags = append(diags, &hcl.Diagnostic{
219 Severity: hcl.DiagError, 234 Severity: hcl.DiagError,
220 Summary: "Invalid character", 235 Summary: "Invalid character",
221 Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<<EOT\".", 236 Detail: "The \"`\" character is not valid. To create a multi-line string, use the \"heredoc\" syntax, like \"<<EOT\".",
222 Subject: &tok.Range, 237 Subject: &tok.Range,
223 }) 238 })
224 239 }
240 if toldBacktick <= 2 {
225 toldBacktick++ 241 toldBacktick++
226 } 242 }
243 case TokenApostrophe:
244 if (toldApostrophe % 2) == 0 {
245 newDiag := &hcl.Diagnostic{
246 Severity: hcl.DiagError,
247 Summary: "Invalid character",
248 Detail: "Single quotes are not valid. Use double quotes (\") to enclose strings.",
249 Subject: &tok.Range,
250 }
251 diags = append(diags, newDiag)
252 }
253 if toldApostrophe <= 2 {
254 toldApostrophe++
255 }
227 case TokenSemicolon: 256 case TokenSemicolon:
228 if toldSemicolon < 1 { 257 if toldSemicolon < 1 {
229 diags = append(diags, &hcl.Diagnostic{ 258 diags = append(diags, &hcl.Diagnostic{
230 Severity: hcl.DiagError, 259 Severity: hcl.DiagError,
231 Summary: "Invalid character", 260 Summary: "Invalid character",
232 Detail: "The \";\" character is not valid. Use newlines to separate attributes and blocks, and commas to separate items in collection values.", 261 Detail: "The \";\" character is not valid. Use newlines to separate arguments and blocks, and commas to separate items in collection values.",
233 Subject: &tok.Range, 262 Subject: &tok.Range,
234 }) 263 })
235 264
@@ -257,6 +286,13 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
257 286
258 toldBadUTF8++ 287 toldBadUTF8++
259 } 288 }
289 case TokenQuotedNewline:
290 diags = append(diags, &hcl.Diagnostic{
291 Severity: hcl.DiagError,
292 Summary: "Invalid multi-line string",
293 Detail: "Quoted strings may not be split over multiple lines. To produce a multi-line string, either use the \\n escape to represent a newline character or use the \"heredoc\" multi-line template syntax.",
294 Subject: &tok.Range,
295 })
260 case TokenInvalid: 296 case TokenInvalid:
261 diags = append(diags, &hcl.Diagnostic{ 297 diags = append(diags, &hcl.Diagnostic{
262 Severity: hcl.DiagError, 298 Severity: hcl.DiagError,
@@ -264,9 +300,21 @@ func checkInvalidTokens(tokens Tokens) hcl.Diagnostics {
264 Detail: "This character is not used within the language.", 300 Detail: "This character is not used within the language.",
265 Subject: &tok.Range, 301 Subject: &tok.Range,
266 }) 302 })
267
268 toldTabs++
269 } 303 }
270 } 304 }
271 return diags 305 return diags
272} 306}
307
308var utf8BOM = []byte{0xef, 0xbb, 0xbf}
309
310// stripUTF8BOM checks whether the given buffer begins with a UTF-8 byte order
311// mark (0xEF 0xBB 0xBF) and, if so, returns a truncated slice with the same
312// backing array but with the BOM skipped.
313//
314// If there is no BOM present, the given slice is returned verbatim.
315func stripUTF8BOM(src []byte) []byte {
316 if bytes.HasPrefix(src, utf8BOM) {
317 return src[3:]
318 }
319 return src
320}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go
index 93de7ee..c23c4f0 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/token_type_string.go
@@ -4,7 +4,67 @@ package hclsyntax
4 4
5import "strconv" 5import "strconv"
6 6
7const _TokenType_name = "TokenNilTokenNewlineTokenBangTokenPercentTokenBitwiseAndTokenOParenTokenCParenTokenStarTokenPlusTokenCommaTokenMinusTokenDotTokenSlashTokenColonTokenSemicolonTokenLessThanTokenEqualTokenGreaterThanTokenQuestionTokenCommentTokenOHeredocTokenIdentTokenNumberLitTokenQuotedLitTokenStringLitTokenOBrackTokenCBrackTokenBitwiseXorTokenBacktickTokenCHeredocTokenOBraceTokenBitwiseOrTokenCBraceTokenBitwiseNotTokenOQuoteTokenCQuoteTokenTemplateControlTokenEllipsisTokenFatArrowTokenTemplateSeqEndTokenAndTokenOrTokenTemplateInterpTokenEqualOpTokenNotEqualTokenLessThanEqTokenGreaterThanEqTokenEOFTokenTabsTokenStarStarTokenInvalidTokenBadUTF8" 7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[TokenOBrace-123]
12 _ = x[TokenCBrace-125]
13 _ = x[TokenOBrack-91]
14 _ = x[TokenCBrack-93]
15 _ = x[TokenOParen-40]
16 _ = x[TokenCParen-41]
17 _ = x[TokenOQuote-171]
18 _ = x[TokenCQuote-187]
19 _ = x[TokenOHeredoc-72]
20 _ = x[TokenCHeredoc-104]
21 _ = x[TokenStar-42]
22 _ = x[TokenSlash-47]
23 _ = x[TokenPlus-43]
24 _ = x[TokenMinus-45]
25 _ = x[TokenPercent-37]
26 _ = x[TokenEqual-61]
27 _ = x[TokenEqualOp-8788]
28 _ = x[TokenNotEqual-8800]
29 _ = x[TokenLessThan-60]
30 _ = x[TokenLessThanEq-8804]
31 _ = x[TokenGreaterThan-62]
32 _ = x[TokenGreaterThanEq-8805]
33 _ = x[TokenAnd-8743]
34 _ = x[TokenOr-8744]
35 _ = x[TokenBang-33]
36 _ = x[TokenDot-46]
37 _ = x[TokenComma-44]
38 _ = x[TokenEllipsis-8230]
39 _ = x[TokenFatArrow-8658]
40 _ = x[TokenQuestion-63]
41 _ = x[TokenColon-58]
42 _ = x[TokenTemplateInterp-8747]
43 _ = x[TokenTemplateControl-955]
44 _ = x[TokenTemplateSeqEnd-8718]
45 _ = x[TokenQuotedLit-81]
46 _ = x[TokenStringLit-83]
47 _ = x[TokenNumberLit-78]
48 _ = x[TokenIdent-73]
49 _ = x[TokenComment-67]
50 _ = x[TokenNewline-10]
51 _ = x[TokenEOF-9220]
52 _ = x[TokenBitwiseAnd-38]
53 _ = x[TokenBitwiseOr-124]
54 _ = x[TokenBitwiseNot-126]
55 _ = x[TokenBitwiseXor-94]
56 _ = x[TokenStarStar-10138]
57 _ = x[TokenApostrophe-39]
58 _ = x[TokenBacktick-96]
59 _ = x[TokenSemicolon-59]
60 _ = x[TokenTabs-9225]
61 _ = x[TokenInvalid-65533]
62 _ = x[TokenBadUTF8-128169]
63 _ = x[TokenQuotedNewline-9252]
64 _ = x[TokenNil-0]
65}
66
67const _TokenType_name = "TokenNilTokenNewlineTokenBangTokenPercentTokenBitwiseAndTokenApostropheTokenOParenTokenCParenTokenStarTokenPlusTokenCommaTokenMinusTokenDotTokenSlashTokenColonTokenSemicolonTokenLessThanTokenEqualTokenGreaterThanTokenQuestionTokenCommentTokenOHeredocTokenIdentTokenNumberLitTokenQuotedLitTokenStringLitTokenOBrackTokenCBrackTokenBitwiseXorTokenBacktickTokenCHeredocTokenOBraceTokenBitwiseOrTokenCBraceTokenBitwiseNotTokenOQuoteTokenCQuoteTokenTemplateControlTokenEllipsisTokenFatArrowTokenTemplateSeqEndTokenAndTokenOrTokenTemplateInterpTokenEqualOpTokenNotEqualTokenLessThanEqTokenGreaterThanEqTokenEOFTokenTabsTokenQuotedNewlineTokenStarStarTokenInvalidTokenBadUTF8"
8 68
9var _TokenType_map = map[TokenType]string{ 69var _TokenType_map = map[TokenType]string{
10 0: _TokenType_name[0:8], 70 0: _TokenType_name[0:8],
@@ -12,53 +72,55 @@ var _TokenType_map = map[TokenType]string{
12 33: _TokenType_name[20:29], 72 33: _TokenType_name[20:29],
13 37: _TokenType_name[29:41], 73 37: _TokenType_name[29:41],
14 38: _TokenType_name[41:56], 74 38: _TokenType_name[41:56],
15 40: _TokenType_name[56:67], 75 39: _TokenType_name[56:71],
16 41: _TokenType_name[67:78], 76 40: _TokenType_name[71:82],
17 42: _TokenType_name[78:87], 77 41: _TokenType_name[82:93],
18 43: _TokenType_name[87:96], 78 42: _TokenType_name[93:102],
19 44: _TokenType_name[96:106], 79 43: _TokenType_name[102:111],
20 45: _TokenType_name[106:116], 80 44: _TokenType_name[111:121],
21 46: _TokenType_name[116:124], 81 45: _TokenType_name[121:131],
22 47: _TokenType_name[124:134], 82 46: _TokenType_name[131:139],
23 58: _TokenType_name[134:144], 83 47: _TokenType_name[139:149],
24 59: _TokenType_name[144:158], 84 58: _TokenType_name[149:159],
25 60: _TokenType_name[158:171], 85 59: _TokenType_name[159:173],
26 61: _TokenType_name[171:181], 86 60: _TokenType_name[173:186],
27 62: _TokenType_name[181:197], 87 61: _TokenType_name[186:196],
28 63: _TokenType_name[197:210], 88 62: _TokenType_name[196:212],
29 67: _TokenType_name[210:222], 89 63: _TokenType_name[212:225],
30 72: _TokenType_name[222:235], 90 67: _TokenType_name[225:237],
31 73: _TokenType_name[235:245], 91 72: _TokenType_name[237:250],
32 78: _TokenType_name[245:259], 92 73: _TokenType_name[250:260],
33 81: _TokenType_name[259:273], 93 78: _TokenType_name[260:274],
34 83: _TokenType_name[273:287], 94 81: _TokenType_name[274:288],
35 91: _TokenType_name[287:298], 95 83: _TokenType_name[288:302],
36 93: _TokenType_name[298:309], 96 91: _TokenType_name[302:313],
37 94: _TokenType_name[309:324], 97 93: _TokenType_name[313:324],
38 96: _TokenType_name[324:337], 98 94: _TokenType_name[324:339],
39 104: _TokenType_name[337:350], 99 96: _TokenType_name[339:352],
40 123: _TokenType_name[350:361], 100 104: _TokenType_name[352:365],
41 124: _TokenType_name[361:375], 101 123: _TokenType_name[365:376],
42 125: _TokenType_name[375:386], 102 124: _TokenType_name[376:390],
43 126: _TokenType_name[386:401], 103 125: _TokenType_name[390:401],
44 171: _TokenType_name[401:412], 104 126: _TokenType_name[401:416],
45 187: _TokenType_name[412:423], 105 171: _TokenType_name[416:427],
46 955: _TokenType_name[423:443], 106 187: _TokenType_name[427:438],
47 8230: _TokenType_name[443:456], 107 955: _TokenType_name[438:458],
48 8658: _TokenType_name[456:469], 108 8230: _TokenType_name[458:471],
49 8718: _TokenType_name[469:488], 109 8658: _TokenType_name[471:484],
50 8743: _TokenType_name[488:496], 110 8718: _TokenType_name[484:503],
51 8744: _TokenType_name[496:503], 111 8743: _TokenType_name[503:511],
52 8747: _TokenType_name[503:522], 112 8744: _TokenType_name[511:518],
53 8788: _TokenType_name[522:534], 113 8747: _TokenType_name[518:537],
54 8800: _TokenType_name[534:547], 114 8788: _TokenType_name[537:549],
55 8804: _TokenType_name[547:562], 115 8800: _TokenType_name[549:562],
56 8805: _TokenType_name[562:580], 116 8804: _TokenType_name[562:577],
57 9220: _TokenType_name[580:588], 117 8805: _TokenType_name[577:595],
58 9225: _TokenType_name[588:597], 118 9220: _TokenType_name[595:603],
59 10138: _TokenType_name[597:610], 119 9225: _TokenType_name[603:612],
60 65533: _TokenType_name[610:622], 120 9252: _TokenType_name[612:630],
61 128169: _TokenType_name[622:634], 121 10138: _TokenType_name[630:643],
122 65533: _TokenType_name[643:655],
123 128169: _TokenType_name[655:667],
62} 124}
63 125
64func (i TokenType) String() string { 126func (i TokenType) String() string {
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go
index eeee1a5..91f417f 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/variables.go
@@ -72,15 +72,15 @@ func (w *variablesWalker) Exit(n Node) hcl.Diagnostics {
72// that the child scope struct wraps. 72// that the child scope struct wraps.
73type ChildScope struct { 73type ChildScope struct {
74 LocalNames map[string]struct{} 74 LocalNames map[string]struct{}
75 Expr *Expression // pointer because it can be replaced on walk 75 Expr Expression
76} 76}
77 77
78func (e ChildScope) walkChildNodes(w internalWalkFunc) { 78func (e ChildScope) walkChildNodes(w internalWalkFunc) {
79 *(e.Expr) = w(*(e.Expr)).(Expression) 79 w(e.Expr)
80} 80}
81 81
82// Range returns the range of the expression that the ChildScope is 82// Range returns the range of the expression that the ChildScope is
83// encapsulating. It isn't really very useful to call Range on a ChildScope. 83// encapsulating. It isn't really very useful to call Range on a ChildScope.
84func (e ChildScope) Range() hcl.Range { 84func (e ChildScope) Range() hcl.Range {
85 return (*e.Expr).Range() 85 return e.Expr.Range()
86} 86}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go
index 3405d26..90f81c9 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/walk.go
@@ -15,9 +15,8 @@ type VisitFunc func(node Node) hcl.Diagnostics
15// and returned as a single set. 15// and returned as a single set.
16func VisitAll(node Node, f VisitFunc) hcl.Diagnostics { 16func VisitAll(node Node, f VisitFunc) hcl.Diagnostics {
17 diags := f(node) 17 diags := f(node)
18 node.walkChildNodes(func(node Node) Node { 18 node.walkChildNodes(func(node Node) {
19 diags = append(diags, VisitAll(node, f)...) 19 diags = append(diags, VisitAll(node, f)...)
20 return node
21 }) 20 })
22 return diags 21 return diags
23} 22}
@@ -33,45 +32,10 @@ type Walker interface {
33// Enter and Exit functions. 32// Enter and Exit functions.
34func Walk(node Node, w Walker) hcl.Diagnostics { 33func Walk(node Node, w Walker) hcl.Diagnostics {
35 diags := w.Enter(node) 34 diags := w.Enter(node)
36 node.walkChildNodes(func(node Node) Node { 35 node.walkChildNodes(func(node Node) {
37 diags = append(diags, Walk(node, w)...) 36 diags = append(diags, Walk(node, w)...)
38 return node
39 }) 37 })
38 moreDiags := w.Exit(node)
39 diags = append(diags, moreDiags...)
40 return diags 40 return diags
41} 41}
42
43// Transformer is an interface used with Transform
44type Transformer interface {
45 // Transform accepts a node and returns a replacement node along with
46 // a flag for whether to also visit child nodes. If the flag is false,
47 // none of the child nodes will be visited and the TransformExit method
48 // will not be called for the node.
49 //
50 // It is acceptable and appropriate for Transform to return the same node
51 // it was given, for situations where no transform is needed.
52 Transform(node Node) (Node, bool, hcl.Diagnostics)
53
54 // TransformExit signals the end of transformations of child nodes of the
55 // given node. If Transform returned a new node, the given node is the
56 // node that was returned, rather than the node that was originally
57 // encountered.
58 TransformExit(node Node) hcl.Diagnostics
59}
60
61// Transform allows for in-place transformations of an AST starting with a
62// particular node. The provider Transformer implementation drives the
63// transformation process. The return value is the node that replaced the
64// given top-level node.
65func Transform(node Node, t Transformer) (Node, hcl.Diagnostics) {
66 newNode, descend, diags := t.Transform(node)
67 if !descend {
68 return newNode, diags
69 }
70 node.walkChildNodes(func(node Node) Node {
71 newNode, newDiags := Transform(node, t)
72 diags = append(diags, newDiags...)
73 return newNode
74 })
75 diags = append(diags, t.TransformExit(newNode)...)
76 return newNode, diags
77}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go
index 246fd1c..d368ea8 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/parser.go
@@ -3,9 +3,9 @@ package json
3import ( 3import (
4 "encoding/json" 4 "encoding/json"
5 "fmt" 5 "fmt"
6 "math/big"
7 6
8 "github.com/hashicorp/hcl2/hcl" 7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/zclconf/go-cty/cty"
9) 9)
10 10
11func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) { 11func parseFileContent(buf []byte, filename string) (node, hcl.Diagnostics) {
@@ -55,7 +55,7 @@ func parseValue(p *peeker) (node, hcl.Diagnostics) {
55 return wrapInvalid(nil, hcl.Diagnostics{ 55 return wrapInvalid(nil, hcl.Diagnostics{
56 { 56 {
57 Severity: hcl.DiagError, 57 Severity: hcl.DiagError,
58 Summary: "Missing attribute value", 58 Summary: "Missing JSON value",
59 Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.", 59 Detail: "A JSON value must start with a brace, a bracket, a number, a string, or a keyword.",
60 Subject: &tok.Range, 60 Subject: &tok.Range,
61 }, 61 },
@@ -144,8 +144,8 @@ Token:
144 if !ok { 144 if !ok {
145 return nil, diags.Append(&hcl.Diagnostic{ 145 return nil, diags.Append(&hcl.Diagnostic{
146 Severity: hcl.DiagError, 146 Severity: hcl.DiagError,
147 Summary: "Invalid object attribute name", 147 Summary: "Invalid object property name",
148 Detail: "A JSON object attribute name must be a string", 148 Detail: "A JSON object property name must be a string",
149 Subject: keyNode.StartRange().Ptr(), 149 Subject: keyNode.StartRange().Ptr(),
150 }) 150 })
151 } 151 }
@@ -171,7 +171,7 @@ Token:
171 // Possible confusion with native HCL syntax. 171 // Possible confusion with native HCL syntax.
172 return nil, diags.Append(&hcl.Diagnostic{ 172 return nil, diags.Append(&hcl.Diagnostic{
173 Severity: hcl.DiagError, 173 Severity: hcl.DiagError,
174 Summary: "Missing attribute value colon", 174 Summary: "Missing property value colon",
175 Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.", 175 Detail: "JSON uses a colon as its name/value delimiter, not an equals sign.",
176 Subject: &colon.Range, 176 Subject: &colon.Range,
177 }) 177 })
@@ -179,8 +179,8 @@ Token:
179 179
180 return nil, diags.Append(&hcl.Diagnostic{ 180 return nil, diags.Append(&hcl.Diagnostic{
181 Severity: hcl.DiagError, 181 Severity: hcl.DiagError,
182 Summary: "Missing attribute value colon", 182 Summary: "Missing property value colon",
183 Detail: "A colon must appear between an object attribute's name and its value.", 183 Detail: "A colon must appear between an object property's name and its value.",
184 Subject: &colon.Range, 184 Subject: &colon.Range,
185 }) 185 })
186 } 186 }
@@ -205,7 +205,7 @@ Token:
205 return nil, diags.Append(&hcl.Diagnostic{ 205 return nil, diags.Append(&hcl.Diagnostic{
206 Severity: hcl.DiagError, 206 Severity: hcl.DiagError,
207 Summary: "Trailing comma in object", 207 Summary: "Trailing comma in object",
208 Detail: "JSON does not permit a trailing comma after the final attribute in an object.", 208 Detail: "JSON does not permit a trailing comma after the final property in an object.",
209 Subject: &comma.Range, 209 Subject: &comma.Range,
210 }) 210 })
211 } 211 }
@@ -234,7 +234,7 @@ Token:
234 return nil, diags.Append(&hcl.Diagnostic{ 234 return nil, diags.Append(&hcl.Diagnostic{
235 Severity: hcl.DiagError, 235 Severity: hcl.DiagError,
236 Summary: "Missing attribute seperator comma", 236 Summary: "Missing attribute seperator comma",
237 Detail: "A comma must appear between each attribute declaration in an object.", 237 Detail: "A comma must appear between each property definition in an object.",
238 Subject: p.Peek().Range.Ptr(), 238 Subject: p.Peek().Range.Ptr(),
239 }) 239 })
240 } 240 }
@@ -301,7 +301,7 @@ Token:
301 return nil, diags.Append(&hcl.Diagnostic{ 301 return nil, diags.Append(&hcl.Diagnostic{
302 Severity: hcl.DiagError, 302 Severity: hcl.DiagError,
303 Summary: "Trailing comma in array", 303 Summary: "Trailing comma in array",
304 Detail: "JSON does not permit a trailing comma after the final attribute in an array.", 304 Detail: "JSON does not permit a trailing comma after the final value in an array.",
305 Subject: &comma.Range, 305 Subject: &comma.Range,
306 }) 306 })
307 } 307 }
@@ -370,10 +370,15 @@ func parseNumber(p *peeker) (node, hcl.Diagnostics) {
370 } 370 }
371 } 371 }
372 372
373 f, _, err := big.ParseFloat(string(num), 10, 512, big.ToNearestEven) 373 // We want to guarantee that we parse numbers the same way as cty (and thus
374 // native syntax HCL) would here, so we'll use the cty parser even though
375 // in most other cases we don't actually introduce cty concepts until
376 // decoding time. We'll unwrap the parsed float immediately afterwards, so
377 // the cty value is just a temporary helper.
378 nv, err := cty.ParseNumberVal(string(num))
374 if err != nil { 379 if err != nil {
375 // Should never happen if above passed, since JSON numbers are a subset 380 // Should never happen if above passed, since JSON numbers are a subset
376 // of what big.Float can parse... 381 // of what cty can parse...
377 return nil, hcl.Diagnostics{ 382 return nil, hcl.Diagnostics{
378 { 383 {
379 Severity: hcl.DiagError, 384 Severity: hcl.DiagError,
@@ -385,7 +390,7 @@ func parseNumber(p *peeker) (node, hcl.Diagnostics) {
385 } 390 }
386 391
387 return &numberVal{ 392 return &numberVal{
388 Value: f, 393 Value: nv.AsBigFloat(),
389 SrcRange: tok.Range, 394 SrcRange: tok.Range,
390 }, nil 395 }, nil
391} 396}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
index 0a8378b..da72884 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/scanner.go
@@ -153,7 +153,7 @@ func byteCanStartKeyword(b byte) bool {
153 // in the parser, where we can generate better diagnostics. 153 // in the parser, where we can generate better diagnostics.
154 // So e.g. we want to be able to say: 154 // So e.g. we want to be able to say:
155 // unrecognized keyword "True". Did you mean "true"? 155 // unrecognized keyword "True". Did you mean "true"?
156 case b >= 'a' || b <= 'z' || b >= 'A' || b <= 'Z': 156 case isAlphabetical(b):
157 return true 157 return true
158 default: 158 default:
159 return false 159 return false
@@ -167,7 +167,7 @@ Byte:
167 for i = 0; i < len(buf); i++ { 167 for i = 0; i < len(buf); i++ {
168 b := buf[i] 168 b := buf[i]
169 switch { 169 switch {
170 case (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_': 170 case isAlphabetical(b) || b == '_':
171 p.Pos.Byte++ 171 p.Pos.Byte++
172 p.Pos.Column++ 172 p.Pos.Column++
173 default: 173 default:
@@ -291,3 +291,7 @@ func posRange(start, end pos) hcl.Range {
291func (t token) GoString() string { 291func (t token) GoString() string {
292 return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range) 292 return fmt.Sprintf("json.token{json.%s, []byte(%q), %#v}", t.Type, t.Bytes, t.Range)
293} 293}
294
295func isAlphabetical(b byte) bool {
296 return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z')
297}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
index 9b33c7f..dac5729 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/spec.md
@@ -5,7 +5,7 @@ for defining configuration languages for applications. The HCL information
5model is designed to support multiple concrete syntaxes for configuration, 5model is designed to support multiple concrete syntaxes for configuration,
6and this JSON-based format complements [the native syntax](../hclsyntax/spec.md) 6and this JSON-based format complements [the native syntax](../hclsyntax/spec.md)
7by being easy to machine-generate, whereas the native syntax is oriented 7by being easy to machine-generate, whereas the native syntax is oriented
8towards human authoring and maintenence. 8towards human authoring and maintenance
9 9
10This syntax is defined in terms of JSON as defined in 10This syntax is defined in terms of JSON as defined in
11[RFC7159](https://tools.ietf.org/html/rfc7159). As such it inherits the JSON 11[RFC7159](https://tools.ietf.org/html/rfc7159). As such it inherits the JSON
@@ -18,11 +18,11 @@ _Parsing_ such JSON has some additional constraints not beyond what is normally
18supported by JSON parsers, so a specialized parser may be required that 18supported by JSON parsers, so a specialized parser may be required that
19is able to: 19is able to:
20 20
21* Preserve the relative ordering of properties defined in an object. 21- Preserve the relative ordering of properties defined in an object.
22* Preserve multiple definitions of the same property name. 22- Preserve multiple definitions of the same property name.
23* Preserve numeric values to the precision required by the number type 23- Preserve numeric values to the precision required by the number type
24 in [the HCL syntax-agnostic information model](../spec.md). 24 in [the HCL syntax-agnostic information model](../spec.md).
25* Retain source location information for parsed tokens/constructs in order 25- Retain source location information for parsed tokens/constructs in order
26 to produce good error messages. 26 to produce good error messages.
27 27
28## Structural Elements 28## Structural Elements
@@ -118,6 +118,7 @@ type:
118 ] 118 ]
119} 119}
120``` 120```
121
121```json 122```json
122{ 123{
123 "foo": [] 124 "foo": []
@@ -147,7 +148,7 @@ the following examples:
147 "boz": { 148 "boz": {
148 "baz": { 149 "baz": {
149 "child_attr": "baz" 150 "child_attr": "baz"
150 }, 151 }
151 } 152 }
152 } 153 }
153} 154}
@@ -189,7 +190,7 @@ the following examples:
189 "boz": { 190 "boz": {
190 "child_attr": "baz" 191 "child_attr": "baz"
191 } 192 }
192 }, 193 }
193 }, 194 },
194 { 195 {
195 "bar": { 196 "bar": {
@@ -280,9 +281,9 @@ When interpreted as an expression, a JSON array represents a value of a HCL
280tuple type. 281tuple type.
281 282
282Each element of the JSON array represents an element of the HCL tuple type. 283Each element of the JSON array represents an element of the HCL tuple type.
283The tuple type is constructed by enumerationg the JSON array elements, creating 284The tuple type is constructed by enumerating the JSON array elements, creating
284for each an element whose type is the result of recursively applying the 285for each an element whose type is the result of recursively applying the
285expression mapping rules. Correspondance is preserved between the array element 286expression mapping rules. Correspondence is preserved between the array element
286indices and the tuple element indices. 287indices and the tuple element indices.
287 288
288An instance of the constructed tuple type is then created, whose values are 289An instance of the constructed tuple type is then created, whose values are
@@ -325,7 +326,7 @@ HCL null value of the dynamic pseudo-type.
325 326
326### Strings 327### Strings
327 328
328When intepreted as an expression, a JSON string may be interpreted in one of 329When interpreted as an expression, a JSON string may be interpreted in one of
329two ways depending on the evaluation mode. 330two ways depending on the evaluation mode.
330 331
331If evaluating in literal-only mode (as defined by the syntax-agnostic 332If evaluating in literal-only mode (as defined by the syntax-agnostic
@@ -402,4 +403,3 @@ to that expression.
402 403
403If the original expression is not a string or its contents cannot be parsed 404If the original expression is not a string or its contents cannot be parsed
404as a native syntax expression then static call analysis is not supported. 405as a native syntax expression then static call analysis is not supported.
405
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
index 28dcf52..bdc0e98 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
@@ -64,7 +64,7 @@ func (b *body) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostic
64 diags = append(diags, &hcl.Diagnostic{ 64 diags = append(diags, &hcl.Diagnostic{
65 Severity: hcl.DiagError, 65 Severity: hcl.DiagError,
66 Summary: "Extraneous JSON object property", 66 Summary: "Extraneous JSON object property",
67 Detail: fmt.Sprintf("No attribute or block type is named %q.%s", k, suggestion), 67 Detail: fmt.Sprintf("No argument or block type is named %q.%s", k, suggestion),
68 Subject: &attr.NameRange, 68 Subject: &attr.NameRange,
69 Context: attr.Range().Ptr(), 69 Context: attr.Range().Ptr(),
70 }) 70 })
@@ -114,8 +114,8 @@ func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Bod
114 if existing, exists := content.Attributes[attrName]; exists { 114 if existing, exists := content.Attributes[attrName]; exists {
115 diags = append(diags, &hcl.Diagnostic{ 115 diags = append(diags, &hcl.Diagnostic{
116 Severity: hcl.DiagError, 116 Severity: hcl.DiagError,
117 Summary: "Duplicate attribute definition", 117 Summary: "Duplicate argument",
118 Detail: fmt.Sprintf("The attribute %q was already defined at %s.", attrName, existing.Range), 118 Detail: fmt.Sprintf("The argument %q was already set at %s.", attrName, existing.Range),
119 Subject: &jsonAttr.NameRange, 119 Subject: &jsonAttr.NameRange,
120 Context: jsonAttr.Range().Ptr(), 120 Context: jsonAttr.Range().Ptr(),
121 }) 121 })
@@ -149,8 +149,8 @@ func (b *body) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Bod
149 if _, defined := content.Attributes[attrS.Name]; !defined { 149 if _, defined := content.Attributes[attrS.Name]; !defined {
150 diags = append(diags, &hcl.Diagnostic{ 150 diags = append(diags, &hcl.Diagnostic{
151 Severity: hcl.DiagError, 151 Severity: hcl.DiagError,
152 Summary: "Missing required attribute", 152 Summary: "Missing required argument",
153 Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name), 153 Detail: fmt.Sprintf("The argument %q is required, but no definition was found.", attrS.Name),
154 Subject: b.MissingItemRange().Ptr(), 154 Subject: b.MissingItemRange().Ptr(),
155 }) 155 })
156 } 156 }
@@ -175,7 +175,7 @@ func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
175 diags = append(diags, &hcl.Diagnostic{ 175 diags = append(diags, &hcl.Diagnostic{
176 Severity: hcl.DiagError, 176 Severity: hcl.DiagError,
177 Summary: "Incorrect JSON value type", 177 Summary: "Incorrect JSON value type",
178 Detail: "A JSON object is required here, defining the attributes for this block.", 178 Detail: "A JSON object is required here, setting the arguments for this block.",
179 Subject: b.val.StartRange().Ptr(), 179 Subject: b.val.StartRange().Ptr(),
180 }) 180 })
181 return attrs, diags 181 return attrs, diags
@@ -197,7 +197,7 @@ func (b *body) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
197 diags = append(diags, &hcl.Diagnostic{ 197 diags = append(diags, &hcl.Diagnostic{
198 Severity: hcl.DiagError, 198 Severity: hcl.DiagError,
199 Summary: "Duplicate attribute definition", 199 Summary: "Duplicate attribute definition",
200 Detail: fmt.Sprintf("The attribute %q was already defined at %s.", name, existing.Range), 200 Detail: fmt.Sprintf("The argument %q was already set at %s.", name, existing.Range),
201 Subject: &jsonAttr.NameRange, 201 Subject: &jsonAttr.NameRange,
202 }) 202 })
203 continue 203 continue
@@ -266,6 +266,9 @@ func (b *body) unpackBlock(v node, typeName string, typeRange *hcl.Range, labels
266 copy(labelR, labelRanges) 266 copy(labelR, labelRanges)
267 267
268 switch tv := v.(type) { 268 switch tv := v.(type) {
269 case *nullVal:
270 // There is no block content, e.g the value is null.
271 return
269 case *objectVal: 272 case *objectVal:
270 // Single instance of the block 273 // Single instance of the block
271 *blocks = append(*blocks, &hcl.Block{ 274 *blocks = append(*blocks, &hcl.Block{
@@ -324,6 +327,8 @@ func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.D
324 var attrs []*objectAttr 327 var attrs []*objectAttr
325 328
326 switch tv := v.(type) { 329 switch tv := v.(type) {
330 case *nullVal:
331 // If a value is null, then we don't return any attributes or return an error.
327 332
328 case *objectVal: 333 case *objectVal:
329 attrs = append(attrs, tv.Attrs...) 334 attrs = append(attrs, tv.Attrs...)
@@ -345,7 +350,7 @@ func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.D
345 diags = append(diags, &hcl.Diagnostic{ 350 diags = append(diags, &hcl.Diagnostic{
346 Severity: hcl.DiagError, 351 Severity: hcl.DiagError,
347 Summary: "Incorrect JSON value type", 352 Summary: "Incorrect JSON value type",
348 Detail: "A JSON object is required here, to define attributes and child blocks.", 353 Detail: "A JSON object is required here, to define arguments and child blocks.",
349 Subject: ev.StartRange().Ptr(), 354 Subject: ev.StartRange().Ptr(),
350 }) 355 })
351 } 356 }
@@ -364,7 +369,7 @@ func (b *body) collectDeepAttrs(v node, labelName *string) ([]*objectAttr, hcl.D
364 diags = append(diags, &hcl.Diagnostic{ 369 diags = append(diags, &hcl.Diagnostic{
365 Severity: hcl.DiagError, 370 Severity: hcl.DiagError,
366 Summary: "Incorrect JSON value type", 371 Summary: "Incorrect JSON value type",
367 Detail: "Either a JSON object or JSON array of objects is required here, to define attributes and child blocks.", 372 Detail: "Either a JSON object or JSON array of objects is required here, to define arguments and child blocks.",
368 Subject: v.StartRange().Ptr(), 373 Subject: v.StartRange().Ptr(),
369 }) 374 })
370 } 375 }
@@ -424,7 +429,7 @@ func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
424 known := true 429 known := true
425 for _, jsonAttr := range v.Attrs { 430 for _, jsonAttr := range v.Attrs {
426 // In this one context we allow keys to contain interpolation 431 // In this one context we allow keys to contain interpolation
427 // experessions too, assuming we're evaluating in interpolation 432 // expressions too, assuming we're evaluating in interpolation
428 // mode. This achieves parity with the native syntax where 433 // mode. This achieves parity with the native syntax where
429 // object expressions can have dynamic keys, while block contents 434 // object expressions can have dynamic keys, while block contents
430 // may not. 435 // may not.
@@ -432,7 +437,8 @@ func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
432 Value: jsonAttr.Name, 437 Value: jsonAttr.Name,
433 SrcRange: jsonAttr.NameRange, 438 SrcRange: jsonAttr.NameRange,
434 }}).Value(ctx) 439 }}).Value(ctx)
435 val, valDiags := (&expression{src: jsonAttr.Value}).Value(ctx) 440 valExpr := &expression{src: jsonAttr.Value}
441 val, valDiags := valExpr.Value(ctx)
436 diags = append(diags, nameDiags...) 442 diags = append(diags, nameDiags...)
437 diags = append(diags, valDiags...) 443 diags = append(diags, valDiags...)
438 444
@@ -440,19 +446,23 @@ func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
440 name, err = convert.Convert(name, cty.String) 446 name, err = convert.Convert(name, cty.String)
441 if err != nil { 447 if err != nil {
442 diags = append(diags, &hcl.Diagnostic{ 448 diags = append(diags, &hcl.Diagnostic{
443 Severity: hcl.DiagError, 449 Severity: hcl.DiagError,
444 Summary: "Invalid object key expression", 450 Summary: "Invalid object key expression",
445 Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err), 451 Detail: fmt.Sprintf("Cannot use this expression as an object key: %s.", err),
446 Subject: &jsonAttr.NameRange, 452 Subject: &jsonAttr.NameRange,
453 Expression: valExpr,
454 EvalContext: ctx,
447 }) 455 })
448 continue 456 continue
449 } 457 }
450 if name.IsNull() { 458 if name.IsNull() {
451 diags = append(diags, &hcl.Diagnostic{ 459 diags = append(diags, &hcl.Diagnostic{
452 Severity: hcl.DiagError, 460 Severity: hcl.DiagError,
453 Summary: "Invalid object key expression", 461 Summary: "Invalid object key expression",
454 Detail: "Cannot use null value as an object key.", 462 Detail: "Cannot use null value as an object key.",
455 Subject: &jsonAttr.NameRange, 463 Subject: &jsonAttr.NameRange,
464 Expression: valExpr,
465 EvalContext: ctx,
456 }) 466 })
457 continue 467 continue
458 } 468 }
@@ -471,10 +481,12 @@ func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
471 nameStr := name.AsString() 481 nameStr := name.AsString()
472 if _, defined := attrs[nameStr]; defined { 482 if _, defined := attrs[nameStr]; defined {
473 diags = append(diags, &hcl.Diagnostic{ 483 diags = append(diags, &hcl.Diagnostic{
474 Severity: hcl.DiagError, 484 Severity: hcl.DiagError,
475 Summary: "Duplicate object attribute", 485 Summary: "Duplicate object attribute",
476 Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]), 486 Detail: fmt.Sprintf("An attribute named %q was already defined at %s.", nameStr, attrRanges[nameStr]),
477 Subject: &jsonAttr.NameRange, 487 Subject: &jsonAttr.NameRange,
488 Expression: e,
489 EvalContext: ctx,
478 }) 490 })
479 continue 491 continue
480 } 492 }
@@ -487,6 +499,8 @@ func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
487 return cty.DynamicVal, diags 499 return cty.DynamicVal, diags
488 } 500 }
489 return cty.ObjectVal(attrs), diags 501 return cty.ObjectVal(attrs), diags
502 case *nullVal:
503 return cty.NullVal(cty.DynamicPseudoType), nil
490 default: 504 default:
491 // Default to DynamicVal so that ASTs containing invalid nodes can 505 // Default to DynamicVal so that ASTs containing invalid nodes can
492 // still be partially-evaluated. 506 // still be partially-evaluated.
@@ -526,6 +540,11 @@ func (e *expression) Variables() []hcl.Traversal {
526 } 540 }
527 case *objectVal: 541 case *objectVal:
528 for _, jsonAttr := range v.Attrs { 542 for _, jsonAttr := range v.Attrs {
543 keyExpr := &stringVal{ // we're going to treat key as an expression in this context
544 Value: jsonAttr.Name,
545 SrcRange: jsonAttr.NameRange,
546 }
547 vars = append(vars, (&expression{src: keyExpr}).Variables()...)
529 vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...) 548 vars = append(vars, (&expression{src: jsonAttr.Value}).Variables()...)
530 } 549 }
531 } 550 }
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/merged.go b/vendor/github.com/hashicorp/hcl2/hcl/merged.go
index ca2b728..96e62a5 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/merged.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/merged.go
@@ -109,9 +109,9 @@ func (mb mergedBodies) JustAttributes() (Attributes, Diagnostics) {
109 if existing := attrs[name]; existing != nil { 109 if existing := attrs[name]; existing != nil {
110 diags = diags.Append(&Diagnostic{ 110 diags = diags.Append(&Diagnostic{
111 Severity: DiagError, 111 Severity: DiagError,
112 Summary: "Duplicate attribute", 112 Summary: "Duplicate argument",
113 Detail: fmt.Sprintf( 113 Detail: fmt.Sprintf(
114 "Attribute %q was already assigned at %s", 114 "Argument %q was already set at %s",
115 name, existing.NameRange.String(), 115 name, existing.NameRange.String(),
116 ), 116 ),
117 Subject: &attr.NameRange, 117 Subject: &attr.NameRange,
@@ -171,7 +171,7 @@ func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyCon
171 } 171 }
172 172
173 if thisLeftovers != nil { 173 if thisLeftovers != nil {
174 mergedLeftovers = append(mergedLeftovers) 174 mergedLeftovers = append(mergedLeftovers, thisLeftovers)
175 } 175 }
176 if len(thisDiags) != 0 { 176 if len(thisDiags) != 0 {
177 diags = append(diags, thisDiags...) 177 diags = append(diags, thisDiags...)
@@ -182,9 +182,9 @@ func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyCon
182 if existing := content.Attributes[name]; existing != nil { 182 if existing := content.Attributes[name]; existing != nil {
183 diags = diags.Append(&Diagnostic{ 183 diags = diags.Append(&Diagnostic{
184 Severity: DiagError, 184 Severity: DiagError,
185 Summary: "Duplicate attribute", 185 Summary: "Duplicate argument",
186 Detail: fmt.Sprintf( 186 Detail: fmt.Sprintf(
187 "Attribute %q was already assigned at %s", 187 "Argument %q was already set at %s",
188 name, existing.NameRange.String(), 188 name, existing.NameRange.String(),
189 ), 189 ),
190 Subject: &attr.NameRange, 190 Subject: &attr.NameRange,
@@ -212,9 +212,9 @@ func (mb mergedBodies) mergedContent(schema *BodySchema, partial bool) (*BodyCon
212 // use of required attributes on merged bodies. 212 // use of required attributes on merged bodies.
213 diags = diags.Append(&Diagnostic{ 213 diags = diags.Append(&Diagnostic{
214 Severity: DiagError, 214 Severity: DiagError,
215 Summary: "Missing required attribute", 215 Summary: "Missing required argument",
216 Detail: fmt.Sprintf( 216 Detail: fmt.Sprintf(
217 "The attribute %q is required, but was not assigned.", 217 "The argument %q is required, but was not set.",
218 attrS.Name, 218 attrS.Name,
219 ), 219 ),
220 }) 220 })
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/ops.go b/vendor/github.com/hashicorp/hcl2/hcl/ops.go
index f4e30b0..5d2910c 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/ops.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/ops.go
@@ -2,6 +2,7 @@ package hcl
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "math/big"
5 6
6 "github.com/zclconf/go-cty/cty" 7 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert" 8 "github.com/zclconf/go-cty/cty/convert"
@@ -84,6 +85,27 @@ func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics)
84 } 85 }
85 } 86 }
86 if has.False() { 87 if has.False() {
88 // We have a more specialized error message for the situation of
89 // using a fractional number to index into a sequence, because
90 // that will tend to happen if the user is trying to use division
91 // to calculate an index and not realizing that HCL does float
92 // division rather than integer division.
93 if (ty.IsListType() || ty.IsTupleType()) && key.Type().Equals(cty.Number) {
94 if key.IsKnown() && !key.IsNull() {
95 bf := key.AsBigFloat()
96 if _, acc := bf.Int(nil); acc != big.Exact {
97 return cty.DynamicVal, Diagnostics{
98 {
99 Severity: DiagError,
100 Summary: "Invalid index",
101 Detail: fmt.Sprintf("The given key does not identify an element in this collection value: indexing a sequence requires a whole number, but the given index (%g) has a fractional part.", bf),
102 Subject: srcRange,
103 },
104 }
105 }
106 }
107 }
108
87 return cty.DynamicVal, Diagnostics{ 109 return cty.DynamicVal, Diagnostics{
88 { 110 {
89 Severity: DiagError, 111 Severity: DiagError,
@@ -145,3 +167,122 @@ func Index(collection, key cty.Value, srcRange *Range) (cty.Value, Diagnostics)
145 } 167 }
146 168
147} 169}
170
171// GetAttr is a helper function that performs the same operation as the
172// attribute access in the HCL expression language. That is, the result is the
173// same as it would be for obj.attr in a configuration expression.
174//
175// This is exported so that applications can access attributes in a manner
176// consistent with how the language does it, including handling of null and
177// unknown values, etc.
178//
179// Diagnostics are produced if the given combination of values is not valid.
180// Therefore a pointer to a source range must be provided to use in diagnostics,
181// though nil can be provided if the calling application is going to
182// ignore the subject of the returned diagnostics anyway.
183func GetAttr(obj cty.Value, attrName string, srcRange *Range) (cty.Value, Diagnostics) {
184 if obj.IsNull() {
185 return cty.DynamicVal, Diagnostics{
186 {
187 Severity: DiagError,
188 Summary: "Attempt to get attribute from null value",
189 Detail: "This value is null, so it does not have any attributes.",
190 Subject: srcRange,
191 },
192 }
193 }
194
195 ty := obj.Type()
196 switch {
197 case ty.IsObjectType():
198 if !ty.HasAttribute(attrName) {
199 return cty.DynamicVal, Diagnostics{
200 {
201 Severity: DiagError,
202 Summary: "Unsupported attribute",
203 Detail: fmt.Sprintf("This object does not have an attribute named %q.", attrName),
204 Subject: srcRange,
205 },
206 }
207 }
208
209 if !obj.IsKnown() {
210 return cty.UnknownVal(ty.AttributeType(attrName)), nil
211 }
212
213 return obj.GetAttr(attrName), nil
214 case ty.IsMapType():
215 if !obj.IsKnown() {
216 return cty.UnknownVal(ty.ElementType()), nil
217 }
218
219 idx := cty.StringVal(attrName)
220 if obj.HasIndex(idx).False() {
221 return cty.DynamicVal, Diagnostics{
222 {
223 Severity: DiagError,
224 Summary: "Missing map element",
225 Detail: fmt.Sprintf("This map does not have an element with the key %q.", attrName),
226 Subject: srcRange,
227 },
228 }
229 }
230
231 return obj.Index(idx), nil
232 case ty == cty.DynamicPseudoType:
233 return cty.DynamicVal, nil
234 default:
235 return cty.DynamicVal, Diagnostics{
236 {
237 Severity: DiagError,
238 Summary: "Unsupported attribute",
239 Detail: "This value does not have any attributes.",
240 Subject: srcRange,
241 },
242 }
243 }
244
245}
246
247// ApplyPath is a helper function that applies a cty.Path to a value using the
248// indexing and attribute access operations from HCL.
249//
250// This is similar to calling the path's own Apply method, but ApplyPath uses
251// the more relaxed typing rules that apply to these operations in HCL, rather
252// than cty's relatively-strict rules. ApplyPath is implemented in terms of
253// Index and GetAttr, and so it has the same behavior for individual steps
254// but will stop and return any errors returned by intermediate steps.
255//
256// Diagnostics are produced if the given path cannot be applied to the given
257// value. Therefore a pointer to a source range must be provided to use in
258// diagnostics, though nil can be provided if the calling application is going
259// to ignore the subject of the returned diagnostics anyway.
260func ApplyPath(val cty.Value, path cty.Path, srcRange *Range) (cty.Value, Diagnostics) {
261 var diags Diagnostics
262
263 for _, step := range path {
264 var stepDiags Diagnostics
265 switch ts := step.(type) {
266 case cty.IndexStep:
267 val, stepDiags = Index(val, ts.Key, srcRange)
268 case cty.GetAttrStep:
269 val, stepDiags = GetAttr(val, ts.Name, srcRange)
270 default:
271 // Should never happen because the above are all of the step types.
272 diags = diags.Append(&Diagnostic{
273 Severity: DiagError,
274 Summary: "Invalid path step",
275 Detail: fmt.Sprintf("Go type %T is not a valid path step. This is a bug in this program.", step),
276 Subject: srcRange,
277 })
278 return cty.DynamicVal, diags
279 }
280
281 diags = append(diags, stepDiags...)
282 if stepDiags.HasErrors() {
283 return cty.DynamicVal, diags
284 }
285 }
286
287 return val, diags
288}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos.go b/vendor/github.com/hashicorp/hcl2/hcl/pos.go
index 1a4b329..06db8bf 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/pos.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/pos.go
@@ -31,6 +31,9 @@ type Pos struct {
31 Byte int 31 Byte int
32} 32}
33 33
34// InitialPos is a suitable position to use to mark the start of a file.
35var InitialPos = Pos{Byte: 0, Line: 1, Column: 1}
36
34// Range represents a span of characters between two positions in a source 37// Range represents a span of characters between two positions in a source
35// file. 38// file.
36// 39//
@@ -94,6 +97,16 @@ func RangeOver(a, b Range) Range {
94 } 97 }
95} 98}
96 99
100// ContainsPos returns true if and only if the given position is contained within
101// the receiving range.
102//
103// In the unlikely case that the line/column information disagree with the byte
104// offset information in the given position or receiving range, the byte
105// offsets are given priority.
106func (r Range) ContainsPos(pos Pos) bool {
107 return r.ContainsOffset(pos.Byte)
108}
109
97// ContainsOffset returns true if and only if the given byte offset is within 110// ContainsOffset returns true if and only if the given byte offset is within
98// the receiving Range. 111// the receiving Range.
99func (r Range) ContainsOffset(offset int) bool { 112func (r Range) ContainsOffset(offset int) bool {
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
index 7c8f2df..17c0d7c 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go
@@ -29,8 +29,8 @@ type RangeScanner struct {
29 err error // error from last scan, if any 29 err error // error from last scan, if any
30} 30}
31 31
32// Create a new RangeScanner for the given buffer, producing ranges for the 32// NewRangeScanner creates a new RangeScanner for the given buffer, producing
33// given filename. 33// ranges for the given filename.
34// 34//
35// Since ranges have grapheme-cluster granularity rather than byte granularity, 35// Since ranges have grapheme-cluster granularity rather than byte granularity,
36// the scanner will produce incorrect results if the given SplitFunc creates 36// the scanner will produce incorrect results if the given SplitFunc creates
@@ -39,15 +39,19 @@ type RangeScanner struct {
39// around individual UTF-8 sequences, which will split any multi-sequence 39// around individual UTF-8 sequences, which will split any multi-sequence
40// grapheme clusters. 40// grapheme clusters.
41func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner { 41func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner {
42 return NewRangeScannerFragment(b, filename, InitialPos, cb)
43}
44
45// NewRangeScannerFragment is like NewRangeScanner but the ranges it produces
46// will be offset by the given starting position, which is appropriate for
47// sub-slices of a file, whereas NewRangeScanner assumes it is scanning an
48// entire file.
49func NewRangeScannerFragment(b []byte, filename string, start Pos, cb bufio.SplitFunc) *RangeScanner {
42 return &RangeScanner{ 50 return &RangeScanner{
43 filename: filename, 51 filename: filename,
44 b: b, 52 b: b,
45 cb: cb, 53 cb: cb,
46 pos: Pos{ 54 pos: start,
47 Byte: 0,
48 Line: 1,
49 Column: 1,
50 },
51 } 55 }
52} 56}
53 57
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/spec.md b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
index 58257bf..8bbaff8 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/spec.md
+++ b/vendor/github.com/hashicorp/hcl2/hcl/spec.md
@@ -29,7 +29,7 @@ which are discussed in detail in a later section.
29A _block_ is a nested structure that has a _type name_, zero or more string 29A _block_ is a nested structure that has a _type name_, zero or more string
30_labels_ (e.g. identifiers), and a nested body. 30_labels_ (e.g. identifiers), and a nested body.
31 31
32Together the structural elements create a heirarchical data structure, with 32Together the structural elements create a hierarchical data structure, with
33attributes intended to represent the direct properties of a particular object 33attributes intended to represent the direct properties of a particular object
34in the calling application, and blocks intended to represent child objects 34in the calling application, and blocks intended to represent child objects
35of a particular object. 35of a particular object.
@@ -57,10 +57,10 @@ access to the specific attributes and blocks requested.
57A _body schema_ consists of a list of _attribute schemata_ and 57A _body schema_ consists of a list of _attribute schemata_ and
58_block header schemata_: 58_block header schemata_:
59 59
60* An _attribute schema_ provides the name of an attribute and whether its 60- An _attribute schema_ provides the name of an attribute and whether its
61 presence is required. 61 presence is required.
62 62
63* A _block header schema_ provides a block type name and the semantic names 63- A _block header schema_ provides a block type name and the semantic names
64 assigned to each of the labels of that block type, if any. 64 assigned to each of the labels of that block type, if any.
65 65
66Within a schema, it is an error to request the same attribute name twice or 66Within a schema, it is an error to request the same attribute name twice or
@@ -72,11 +72,11 @@ a block whose type name is identical to the attribute name.
72The result of applying a body schema to a body is _body content_, which 72The result of applying a body schema to a body is _body content_, which
73consists of an _attribute map_ and a _block sequence_: 73consists of an _attribute map_ and a _block sequence_:
74 74
75* The _attribute map_ is a map data structure whose keys are attribute names 75- The _attribute map_ is a map data structure whose keys are attribute names
76 and whose values are _expressions_ that represent the corresponding attribute 76 and whose values are _expressions_ that represent the corresponding attribute
77 values. 77 values.
78 78
79* The _block sequence_ is an ordered sequence of blocks, with each specifying 79- The _block sequence_ is an ordered sequence of blocks, with each specifying
80 a block _type name_, the sequence of _labels_ specified for the block, 80 a block _type name_, the sequence of _labels_ specified for the block,
81 and the body object (not body _content_) representing the block's own body. 81 and the body object (not body _content_) representing the block's own body.
82 82
@@ -132,13 +132,13 @@ the schema has been processed.
132 132
133Specifically: 133Specifically:
134 134
135* Any attribute whose name is specified in the schema is returned in body 135- Any attribute whose name is specified in the schema is returned in body
136 content and elided from the new body. 136 content and elided from the new body.
137 137
138* Any block whose type is specified in the schema is returned in body content 138- Any block whose type is specified in the schema is returned in body content
139 and elided from the new body. 139 and elided from the new body.
140 140
141* Any attribute or block _not_ meeting the above conditions is placed into 141- Any attribute or block _not_ meeting the above conditions is placed into
142 the new body, unmodified. 142 the new body, unmodified.
143 143
144The new body can then be recursively processed using any of the body 144The new body can then be recursively processed using any of the body
@@ -168,20 +168,20 @@ In order to obtain a concrete value, each expression must be _evaluated_.
168Evaluation is performed in terms of an evaluation context, which 168Evaluation is performed in terms of an evaluation context, which
169consists of the following: 169consists of the following:
170 170
171* An _evaluation mode_, which is defined below. 171- An _evaluation mode_, which is defined below.
172* A _variable scope_, which provides a set of named variables for use in 172- A _variable scope_, which provides a set of named variables for use in
173 expressions. 173 expressions.
174* A _function table_, which provides a set of named functions for use in 174- A _function table_, which provides a set of named functions for use in
175 expressions. 175 expressions.
176 176
177The _evaluation mode_ allows for two different interpretations of an 177The _evaluation mode_ allows for two different interpretations of an
178expression: 178expression:
179 179
180* In _literal-only mode_, variables and functions are not available and it 180- In _literal-only mode_, variables and functions are not available and it
181 is assumed that the calling application's intent is to treat the attribute 181 is assumed that the calling application's intent is to treat the attribute
182 value as a literal. 182 value as a literal.
183 183
184* In _full expression mode_, variables and functions are defined and it is 184- In _full expression mode_, variables and functions are defined and it is
185 assumed that the calling application wishes to provide a full expression 185 assumed that the calling application wishes to provide a full expression
186 language for definition of the attribute value. 186 language for definition of the attribute value.
187 187
@@ -235,15 +235,15 @@ for interpretation into any suitable number representation. An implementation
235may in practice implement numbers with limited precision so long as the 235may in practice implement numbers with limited precision so long as the
236following constraints are met: 236following constraints are met:
237 237
238* Integers are represented with at least 256 bits. 238- Integers are represented with at least 256 bits.
239* Non-integer numbers are represented as floating point values with a 239- Non-integer numbers are represented as floating point values with a
240 mantissa of at least 256 bits and a signed binary exponent of at least 240 mantissa of at least 256 bits and a signed binary exponent of at least
241 16 bits. 241 16 bits.
242* An error is produced if an integer value given in source cannot be 242- An error is produced if an integer value given in source cannot be
243 represented precisely. 243 represented precisely.
244* An error is produced if a non-integer value cannot be represented due to 244- An error is produced if a non-integer value cannot be represented due to
245 overflow. 245 overflow.
246* A non-integer number is rounded to the nearest possible value when a 246- A non-integer number is rounded to the nearest possible value when a
247 value is of too high a precision to be represented. 247 value is of too high a precision to be represented.
248 248
249The _number_ type also requires representation of both positive and negative 249The _number_ type also requires representation of both positive and negative
@@ -265,11 +265,11 @@ _Structural types_ are types that are constructed by combining other types.
265Each distinct combination of other types is itself a distinct type. There 265Each distinct combination of other types is itself a distinct type. There
266are two structural type _kinds_: 266are two structural type _kinds_:
267 267
268* _Object types_ are constructed of a set of named attributes, each of which 268- _Object types_ are constructed of a set of named attributes, each of which
269 has a type. Attribute names are always strings. (_Object_ attributes are a 269 has a type. Attribute names are always strings. (_Object_ attributes are a
270 distinct idea from _body_ attributes, though calling applications 270 distinct idea from _body_ attributes, though calling applications
271 may choose to blur the distinction by use of common naming schemes.) 271 may choose to blur the distinction by use of common naming schemes.)
272* _Tuple tupes_ are constructed of a sequence of elements, each of which 272- _Tuple types_ are constructed of a sequence of elements, each of which
273 has a type. 273 has a type.
274 274
275Values of structural types are compared for equality in terms of their 275Values of structural types are compared for equality in terms of their
@@ -284,9 +284,9 @@ have attributes or elements with identical types.
284_Collection types_ are types that combine together an arbitrary number of 284_Collection types_ are types that combine together an arbitrary number of
285values of some other single type. There are three collection type _kinds_: 285values of some other single type. There are three collection type _kinds_:
286 286
287* _List types_ represent ordered sequences of values of their element type. 287- _List types_ represent ordered sequences of values of their element type.
288* _Map types_ represent values of their element type accessed via string keys. 288- _Map types_ represent values of their element type accessed via string keys.
289* _Set types_ represent unordered sets of distinct values of their element type. 289- _Set types_ represent unordered sets of distinct values of their element type.
290 290
291For each of these kinds and each distinct element type there is a distinct 291For each of these kinds and each distinct element type there is a distinct
292collection type. For example, "list of string" is a distinct type from 292collection type. For example, "list of string" is a distinct type from
@@ -301,10 +301,10 @@ the same element type.
301 301
302### Null values 302### Null values
303 303
304Each type has a null value. The null value of a type represents the absense 304Each type has a null value. The null value of a type represents the absence
305of a value, but with type information retained to allow for type checking. 305of a value, but with type information retained to allow for type checking.
306 306
307Null values are used primarily to represent the conditional absense of a 307Null values are used primarily to represent the conditional absence of a
308body attribute. In a syntax with a conditional operator, one of the result 308body attribute. In a syntax with a conditional operator, one of the result
309values of that conditional may be null to indicate that the attribute should be 309values of that conditional may be null to indicate that the attribute should be
310considered not present in that case. 310considered not present in that case.
@@ -376,9 +376,9 @@ a type has a non-commutative _matches_ relationship with a _type specification_.
376A type specification is, in practice, just a different interpretation of a 376A type specification is, in practice, just a different interpretation of a
377type such that: 377type such that:
378 378
379* Any type _matches_ any type that it is identical to. 379- Any type _matches_ any type that it is identical to.
380 380
381* Any type _matches_ the dynamic pseudo-type. 381- Any type _matches_ the dynamic pseudo-type.
382 382
383For example, given a type specification "list of dynamic pseudo-type", the 383For example, given a type specification "list of dynamic pseudo-type", the
384concrete types "list of string" and "list of map" match, but the 384concrete types "list of string" and "list of map" match, but the
@@ -397,51 +397,51 @@ applications to provide functions that are interoperable with all syntaxes.
397 397
398A _function_ is defined from the following elements: 398A _function_ is defined from the following elements:
399 399
400* Zero or more _positional parameters_, each with a name used for documentation, 400- Zero or more _positional parameters_, each with a name used for documentation,
401 a type specification for expected argument values, and a flag for whether 401 a type specification for expected argument values, and a flag for whether
402 each of null values, unknown values, and values of the dynamic pseudo-type 402 each of null values, unknown values, and values of the dynamic pseudo-type
403 are accepted. 403 are accepted.
404 404
405* Zero or one _variadic parameters_, with the same structure as the _positional_ 405- Zero or one _variadic parameters_, with the same structure as the _positional_
406 parameters, which if present collects any additional arguments provided at 406 parameters, which if present collects any additional arguments provided at
407 the function call site. 407 the function call site.
408 408
409* A _result type definition_, which specifies the value type returned for each 409- A _result type definition_, which specifies the value type returned for each
410 valid sequence of argument values. 410 valid sequence of argument values.
411 411
412* A _result value definition_, which specifies the value returned for each 412- A _result value definition_, which specifies the value returned for each
413 valid sequence of argument values. 413 valid sequence of argument values.
414 414
415A _function call_, regardless of source syntax, consists of a sequence of 415A _function call_, regardless of source syntax, consists of a sequence of
416argument values. The argument values are each mapped to a corresponding 416argument values. The argument values are each mapped to a corresponding
417parameter as follows: 417parameter as follows:
418 418
419* For each of the function's positional parameters in sequence, take the next 419- For each of the function's positional parameters in sequence, take the next
420 argument. If there are no more arguments, the call is erroneous. 420 argument. If there are no more arguments, the call is erroneous.
421 421
422* If the function has a variadic parameter, take all remaining arguments that 422- If the function has a variadic parameter, take all remaining arguments that
423 were not yet assigned to a positional parameter and collect them into 423 were not yet assigned to a positional parameter and collect them into
424 a sequence of variadic arguments that each correspond to the variadic 424 a sequence of variadic arguments that each correspond to the variadic
425 parameter. 425 parameter.
426 426
427* If the function has _no_ variadic parameter, it is an error if any arguments 427- If the function has _no_ variadic parameter, it is an error if any arguments
428 remain after taking one argument for each positional parameter. 428 remain after taking one argument for each positional parameter.
429 429
430After mapping each argument to a parameter, semantic checking proceeds 430After mapping each argument to a parameter, semantic checking proceeds
431for each argument: 431for each argument:
432 432
433* If the argument value corresponding to a parameter does not match the 433- If the argument value corresponding to a parameter does not match the
434 parameter's type specification, the call is erroneous. 434 parameter's type specification, the call is erroneous.
435 435
436* If the argument value corresponding to a parameter is null and the parameter 436- If the argument value corresponding to a parameter is null and the parameter
437 is not specified as accepting nulls, the call is erroneous. 437 is not specified as accepting nulls, the call is erroneous.
438 438
439* If the argument value corresponding to a parameter is the dynamic value 439- If the argument value corresponding to a parameter is the dynamic value
440 and the parameter is not specified as accepting values of the dynamic 440 and the parameter is not specified as accepting values of the dynamic
441 pseudo-type, the call is valid but its _result type_ is forced to be the 441 pseudo-type, the call is valid but its _result type_ is forced to be the
442 dynamic pseudo type. 442 dynamic pseudo type.
443 443
444* If neither of the above conditions holds for any argument, the call is 444- If neither of the above conditions holds for any argument, the call is
445 valid and the function's value type definition is used to determine the 445 valid and the function's value type definition is used to determine the
446 call's _result type_. A function _may_ vary its result type depending on 446 call's _result type_. A function _may_ vary its result type depending on
447 the argument _values_ as well as the argument _types_; for example, a 447 the argument _values_ as well as the argument _types_; for example, a
@@ -450,15 +450,15 @@ for each argument:
450 450
451If semantic checking succeeds without error, the call is _executed_: 451If semantic checking succeeds without error, the call is _executed_:
452 452
453* For each argument, if its value is unknown and its corresponding parameter 453- For each argument, if its value is unknown and its corresponding parameter
454 is not specified as accepting unknowns, the _result value_ is forced to be an 454 is not specified as accepting unknowns, the _result value_ is forced to be an
455 unknown value of the result type. 455 unknown value of the result type.
456 456
457* If the previous condition does not apply, the function's result value 457- If the previous condition does not apply, the function's result value
458 definition is used to determine the call's _result value_. 458 definition is used to determine the call's _result value_.
459 459
460The result of a function call expression is either an error, if one of the 460The result of a function call expression is either an error, if one of the
461erroenous conditions above applies, or the _result value_. 461erroneous conditions above applies, or the _result value_.
462 462
463## Type Conversions and Unification 463## Type Conversions and Unification
464 464
@@ -505,7 +505,7 @@ Bidirectional conversions are available between the string and number types,
505and between the string and boolean types. 505and between the string and boolean types.
506 506
507The bool value true corresponds to the string containing the characters "true", 507The bool value true corresponds to the string containing the characters "true",
508while the bool value false corresponds to teh string containing the characters 508while the bool value false corresponds to the string containing the characters
509"false". Conversion from bool to string is safe, while the converse is 509"false". Conversion from bool to string is safe, while the converse is
510unsafe. The strings "1" and "0" are alternative string representations 510unsafe. The strings "1" and "0" are alternative string representations
511of true and false respectively. It is an error to convert a string other than 511of true and false respectively. It is an error to convert a string other than
@@ -631,20 +631,20 @@ diagnostics if they are applied to inappropriate expressions.
631 631
632The following are the required static analysis functions: 632The following are the required static analysis functions:
633 633
634* **Static List**: Require list/tuple construction syntax to be used and 634- **Static List**: Require list/tuple construction syntax to be used and
635 return a list of expressions for each of the elements given. 635 return a list of expressions for each of the elements given.
636 636
637* **Static Map**: Require map/object construction syntax to be used and 637- **Static Map**: Require map/object construction syntax to be used and
638 return a list of key/value pairs -- both expressions -- for each of 638 return a list of key/value pairs -- both expressions -- for each of
639 the elements given. The usual constraint that a map key must be a string 639 the elements given. The usual constraint that a map key must be a string
640 must not apply to this analysis, thus allowing applications to interpret 640 must not apply to this analysis, thus allowing applications to interpret
641 arbitrary keys as they see fit. 641 arbitrary keys as they see fit.
642 642
643* **Static Call**: Require function call syntax to be used and return an 643- **Static Call**: Require function call syntax to be used and return an
644 object describing the called function name and a list of expressions 644 object describing the called function name and a list of expressions
645 representing each of the call arguments. 645 representing each of the call arguments.
646 646
647* **Static Traversal**: Require a reference to a symbol in the variable 647- **Static Traversal**: Require a reference to a symbol in the variable
648 scope and return a description of the path from the root scope to the 648 scope and return a description of the path from the root scope to the
649 accessed attribute or index. 649 accessed attribute or index.
650 650
@@ -670,18 +670,18 @@ with the goals of this specification.
670The language-agnosticism of this specification assumes that certain behaviors 670The language-agnosticism of this specification assumes that certain behaviors
671are implemented separately for each syntax: 671are implemented separately for each syntax:
672 672
673* Matching of a body schema with the physical elements of a body in the 673- Matching of a body schema with the physical elements of a body in the
674 source language, to determine correspondance between physical constructs 674 source language, to determine correspondence between physical constructs
675 and schema elements. 675 and schema elements.
676 676
677* Implementing the _dynamic attributes_ body processing mode by either 677- Implementing the _dynamic attributes_ body processing mode by either
678 interpreting all physical constructs as attributes or producing an error 678 interpreting all physical constructs as attributes or producing an error
679 if non-attribute constructs are present. 679 if non-attribute constructs are present.
680 680
681* Providing an evaluation function for all possible expressions that produces 681- Providing an evaluation function for all possible expressions that produces
682 a value given an evaluation context. 682 a value given an evaluation context.
683 683
684* Providing the static analysis functionality described above in a manner that 684- Providing the static analysis functionality described above in a manner that
685 makes sense within the convention of the syntax. 685 makes sense within the convention of the syntax.
686 686
687The suggested implementation strategy is to use an implementation language's 687The suggested implementation strategy is to use an implementation language's
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go b/vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go
new file mode 100644
index 0000000..8521814
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcl/structure_at_pos.go
@@ -0,0 +1,117 @@
1package hcl
2
3// -----------------------------------------------------------------------------
4// The methods in this file all have the general pattern of making a best-effort
5// to find one or more constructs that contain a given source position.
6//
7// These all operate by delegating to an optional method of the same name and
8// signature on the file's root body, allowing each syntax to potentially
9// provide its own implementations of these. For syntaxes that don't implement
10// them, the result is always nil.
11// -----------------------------------------------------------------------------
12
13// BlocksAtPos attempts to find all of the blocks that contain the given
14// position, ordered so that the outermost block is first and the innermost
15// block is last. This is a best-effort method that may not be able to produce
16// a complete result for all positions or for all HCL syntaxes.
17//
18// If the returned slice is non-empty, the first element is guaranteed to
19// represent the same block as would be the result of OutermostBlockAtPos and
20// the last element the result of InnermostBlockAtPos. However, the
21// implementation may return two different objects describing the same block,
22// so comparison by pointer identity is not possible.
23//
24// The result is nil if no blocks at all contain the given position.
25func (f *File) BlocksAtPos(pos Pos) []*Block {
26 // The root body of the file must implement this interface in order
27 // to support BlocksAtPos.
28 type Interface interface {
29 BlocksAtPos(pos Pos) []*Block
30 }
31
32 impl, ok := f.Body.(Interface)
33 if !ok {
34 return nil
35 }
36 return impl.BlocksAtPos(pos)
37}
38
39// OutermostBlockAtPos attempts to find a top-level block in the receiving file
40// that contains the given position. This is a best-effort method that may not
41// be able to produce a result for all positions or for all HCL syntaxes.
42//
43// The result is nil if no single block could be selected for any reason.
44func (f *File) OutermostBlockAtPos(pos Pos) *Block {
45 // The root body of the file must implement this interface in order
46 // to support OutermostBlockAtPos.
47 type Interface interface {
48 OutermostBlockAtPos(pos Pos) *Block
49 }
50
51 impl, ok := f.Body.(Interface)
52 if !ok {
53 return nil
54 }
55 return impl.OutermostBlockAtPos(pos)
56}
57
58// InnermostBlockAtPos attempts to find the most deeply-nested block in the
59// receiving file that contains the given position. This is a best-effort
60// method that may not be able to produce a result for all positions or for
61// all HCL syntaxes.
62//
63// The result is nil if no single block could be selected for any reason.
64func (f *File) InnermostBlockAtPos(pos Pos) *Block {
65 // The root body of the file must implement this interface in order
66 // to support InnermostBlockAtPos.
67 type Interface interface {
68 InnermostBlockAtPos(pos Pos) *Block
69 }
70
71 impl, ok := f.Body.(Interface)
72 if !ok {
73 return nil
74 }
75 return impl.InnermostBlockAtPos(pos)
76}
77
78// OutermostExprAtPos attempts to find an expression in the receiving file
79// that contains the given position. This is a best-effort method that may not
80// be able to produce a result for all positions or for all HCL syntaxes.
81//
82// Since expressions are often nested inside one another, this method returns
83// the outermost "root" expression that is not contained by any other.
84//
85// The result is nil if no single expression could be selected for any reason.
86func (f *File) OutermostExprAtPos(pos Pos) Expression {
87 // The root body of the file must implement this interface in order
88 // to support OutermostExprAtPos.
89 type Interface interface {
90 OutermostExprAtPos(pos Pos) Expression
91 }
92
93 impl, ok := f.Body.(Interface)
94 if !ok {
95 return nil
96 }
97 return impl.OutermostExprAtPos(pos)
98}
99
100// AttributeAtPos attempts to find an attribute definition in the receiving
101// file that contains the given position. This is a best-effort method that may
102// not be able to produce a result for all positions or for all HCL syntaxes.
103//
104// The result is nil if no single attribute could be selected for any reason.
105func (f *File) AttributeAtPos(pos Pos) *Attribute {
106 // The root body of the file must implement this interface in order
107 // to support AttributeAtPos.
108 type Interface interface {
109 AttributeAtPos(pos Pos) *Attribute
110 }
111
112 impl, ok := f.Body.(Interface)
113 if !ok {
114 return nil
115 }
116 return impl.AttributeAtPos(pos)
117}
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go
index 24f4c91..d710197 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/traversal.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go
@@ -255,66 +255,7 @@ type TraverseAttr struct {
255} 255}
256 256
257func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) { 257func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
258 if val.IsNull() { 258 return GetAttr(val, tn.Name, &tn.SrcRange)
259 return cty.DynamicVal, Diagnostics{
260 {
261 Severity: DiagError,
262 Summary: "Attempt to get attribute from null value",
263 Detail: "This value is null, so it does not have any attributes.",
264 Subject: &tn.SrcRange,
265 },
266 }
267 }
268
269 ty := val.Type()
270 switch {
271 case ty.IsObjectType():
272 if !ty.HasAttribute(tn.Name) {
273 return cty.DynamicVal, Diagnostics{
274 {
275 Severity: DiagError,
276 Summary: "Unsupported attribute",
277 Detail: fmt.Sprintf("This object does not have an attribute named %q.", tn.Name),
278 Subject: &tn.SrcRange,
279 },
280 }
281 }
282
283 if !val.IsKnown() {
284 return cty.UnknownVal(ty.AttributeType(tn.Name)), nil
285 }
286
287 return val.GetAttr(tn.Name), nil
288 case ty.IsMapType():
289 if !val.IsKnown() {
290 return cty.UnknownVal(ty.ElementType()), nil
291 }
292
293 idx := cty.StringVal(tn.Name)
294 if val.HasIndex(idx).False() {
295 return cty.DynamicVal, Diagnostics{
296 {
297 Severity: DiagError,
298 Summary: "Missing map element",
299 Detail: fmt.Sprintf("This map does not have an element with the key %q.", tn.Name),
300 Subject: &tn.SrcRange,
301 },
302 }
303 }
304
305 return val.Index(idx), nil
306 case ty == cty.DynamicPseudoType:
307 return cty.DynamicVal, nil
308 default:
309 return cty.DynamicVal, Diagnostics{
310 {
311 Severity: DiagError,
312 Summary: "Unsupported attribute",
313 Detail: "This value does not have any attributes.",
314 Subject: &tn.SrcRange,
315 },
316 }
317 }
318} 259}
319 260
320func (tn TraverseAttr) SourceRange() Range { 261func (tn TraverseAttr) SourceRange() Range {
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
index 5f52946..d4a565a 100644
--- a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
+++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
@@ -52,11 +52,14 @@ func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
52func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) { 52func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
53 traversal, diags := AbsTraversalForExpr(expr) 53 traversal, diags := AbsTraversalForExpr(expr)
54 if len(traversal) > 0 { 54 if len(traversal) > 0 {
55 ret := make(Traversal, len(traversal))
56 copy(ret, traversal)
55 root := traversal[0].(TraverseRoot) 57 root := traversal[0].(TraverseRoot)
56 traversal[0] = TraverseAttr{ 58 ret[0] = TraverseAttr{
57 Name: root.Name, 59 Name: root.Name,
58 SrcRange: root.SrcRange, 60 SrcRange: root.SrcRange,
59 } 61 }
62 return ret, diags
60 } 63 }
61 return traversal, diags 64 return traversal, diags
62} 65}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/public.go b/vendor/github.com/hashicorp/hcl2/hcldec/public.go
index 5d1f10a..3c80363 100644
--- a/vendor/github.com/hashicorp/hcl2/hcldec/public.go
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/public.go
@@ -65,7 +65,10 @@ func ChildBlockTypes(spec Spec) map[string]Spec {
65 visit = func(s Spec) { 65 visit = func(s Spec) {
66 if bs, ok := s.(blockSpec); ok { 66 if bs, ok := s.(blockSpec); ok {
67 for _, blockS := range bs.blockHeaderSchemata() { 67 for _, blockS := range bs.blockHeaderSchemata() {
68 ret[blockS.Type] = bs.nestedSpec() 68 nested := bs.nestedSpec()
69 if nested != nil { // nil can be returned to dynamically opt out of this interface
70 ret[blockS.Type] = nested
71 }
69 } 72 }
70 } 73 }
71 74
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/spec.go b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go
index 25cafcd..f9da7f6 100644
--- a/vendor/github.com/hashicorp/hcl2/hcldec/spec.go
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/spec.go
@@ -3,6 +3,7 @@ package hcldec
3import ( 3import (
4 "bytes" 4 "bytes"
5 "fmt" 5 "fmt"
6 "sort"
6 7
7 "github.com/hashicorp/hcl2/hcl" 8 "github.com/hashicorp/hcl2/hcl"
8 "github.com/zclconf/go-cty/cty" 9 "github.com/zclconf/go-cty/cty"
@@ -477,6 +478,44 @@ func (s *BlockListSpec) decode(content *hcl.BodyContent, blockLabels []blockLabe
477 if len(elems) == 0 { 478 if len(elems) == 0 {
478 ret = cty.ListValEmpty(s.Nested.impliedType()) 479 ret = cty.ListValEmpty(s.Nested.impliedType())
479 } else { 480 } else {
481 // Since our target is a list, all of the decoded elements must have the
482 // same type or cty.ListVal will panic below. Different types can arise
483 // if there is an attribute spec of type cty.DynamicPseudoType in the
484 // nested spec; all given values must be convertible to a single type
485 // in order for the result to be considered valid.
486 etys := make([]cty.Type, len(elems))
487 for i, v := range elems {
488 etys[i] = v.Type()
489 }
490 ety, convs := convert.UnifyUnsafe(etys)
491 if ety == cty.NilType {
492 // FIXME: This is a pretty terrible error message.
493 diags = append(diags, &hcl.Diagnostic{
494 Severity: hcl.DiagError,
495 Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName),
496 Detail: "Corresponding attributes in all blocks of this type must be the same.",
497 Subject: &sourceRanges[0],
498 })
499 return cty.DynamicVal, diags
500 }
501 for i, v := range elems {
502 if convs[i] != nil {
503 newV, err := convs[i](v)
504 if err != nil {
505 // FIXME: This is a pretty terrible error message.
506 diags = append(diags, &hcl.Diagnostic{
507 Severity: hcl.DiagError,
508 Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName),
509 Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err),
510 Subject: &sourceRanges[i],
511 })
512 // Bail early here so we won't panic below in cty.ListVal
513 return cty.DynamicVal, diags
514 }
515 elems[i] = newV
516 }
517 }
518
480 ret = cty.ListVal(elems) 519 ret = cty.ListVal(elems)
481 } 520 }
482 521
@@ -508,6 +547,127 @@ func (s *BlockListSpec) sourceRange(content *hcl.BodyContent, blockLabels []bloc
508 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) 547 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
509} 548}
510 549
550// A BlockTupleSpec is a Spec that produces a cty tuple of the results of
551// decoding all of the nested blocks of a given type, using a nested spec.
552//
553// This is similar to BlockListSpec, but it permits the nested blocks to have
554// different result types in situations where cty.DynamicPseudoType attributes
555// are present.
556type BlockTupleSpec struct {
557 TypeName string
558 Nested Spec
559 MinItems int
560 MaxItems int
561}
562
563func (s *BlockTupleSpec) visitSameBodyChildren(cb visitFunc) {
564 // leaf node ("Nested" does not use the same body)
565}
566
567// blockSpec implementation
568func (s *BlockTupleSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
569 return []hcl.BlockHeaderSchema{
570 {
571 Type: s.TypeName,
572 LabelNames: findLabelSpecs(s.Nested),
573 },
574 }
575}
576
577// blockSpec implementation
578func (s *BlockTupleSpec) nestedSpec() Spec {
579 return s.Nested
580}
581
582// specNeedingVariables implementation
583func (s *BlockTupleSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
584 var ret []hcl.Traversal
585
586 for _, childBlock := range content.Blocks {
587 if childBlock.Type != s.TypeName {
588 continue
589 }
590
591 ret = append(ret, Variables(childBlock.Body, s.Nested)...)
592 }
593
594 return ret
595}
596
597func (s *BlockTupleSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
598 var diags hcl.Diagnostics
599
600 if s.Nested == nil {
601 panic("BlockListSpec with no Nested Spec")
602 }
603
604 var elems []cty.Value
605 var sourceRanges []hcl.Range
606 for _, childBlock := range content.Blocks {
607 if childBlock.Type != s.TypeName {
608 continue
609 }
610
611 val, _, childDiags := decode(childBlock.Body, labelsForBlock(childBlock), ctx, s.Nested, false)
612 diags = append(diags, childDiags...)
613 elems = append(elems, val)
614 sourceRanges = append(sourceRanges, sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested))
615 }
616
617 if len(elems) < s.MinItems {
618 diags = append(diags, &hcl.Diagnostic{
619 Severity: hcl.DiagError,
620 Summary: fmt.Sprintf("Insufficient %s blocks", s.TypeName),
621 Detail: fmt.Sprintf("At least %d %q blocks are required.", s.MinItems, s.TypeName),
622 Subject: &content.MissingItemRange,
623 })
624 } else if s.MaxItems > 0 && len(elems) > s.MaxItems {
625 diags = append(diags, &hcl.Diagnostic{
626 Severity: hcl.DiagError,
627 Summary: fmt.Sprintf("Too many %s blocks", s.TypeName),
628 Detail: fmt.Sprintf("No more than %d %q blocks are allowed", s.MaxItems, s.TypeName),
629 Subject: &sourceRanges[s.MaxItems],
630 })
631 }
632
633 var ret cty.Value
634
635 if len(elems) == 0 {
636 ret = cty.EmptyTupleVal
637 } else {
638 ret = cty.TupleVal(elems)
639 }
640
641 return ret, diags
642}
643
644func (s *BlockTupleSpec) impliedType() cty.Type {
645 // We can't predict our type, because we don't know how many blocks
646 // there will be until we decode.
647 return cty.DynamicPseudoType
648}
649
650func (s *BlockTupleSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
651 // We return the source range of the _first_ block of the given type,
652 // since they are not guaranteed to form a contiguous range.
653
654 var childBlock *hcl.Block
655 for _, candidate := range content.Blocks {
656 if candidate.Type != s.TypeName {
657 continue
658 }
659
660 childBlock = candidate
661 break
662 }
663
664 if childBlock == nil {
665 return content.MissingItemRange
666 }
667
668 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
669}
670
511// A BlockSetSpec is a Spec that produces a cty set of the results of 671// A BlockSetSpec is a Spec that produces a cty set of the results of
512// decoding all of the nested blocks of a given type, using a nested spec. 672// decoding all of the nested blocks of a given type, using a nested spec.
513type BlockSetSpec struct { 673type BlockSetSpec struct {
@@ -592,6 +752,44 @@ func (s *BlockSetSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel
592 if len(elems) == 0 { 752 if len(elems) == 0 {
593 ret = cty.SetValEmpty(s.Nested.impliedType()) 753 ret = cty.SetValEmpty(s.Nested.impliedType())
594 } else { 754 } else {
755 // Since our target is a set, all of the decoded elements must have the
756 // same type or cty.SetVal will panic below. Different types can arise
757 // if there is an attribute spec of type cty.DynamicPseudoType in the
758 // nested spec; all given values must be convertible to a single type
759 // in order for the result to be considered valid.
760 etys := make([]cty.Type, len(elems))
761 for i, v := range elems {
762 etys[i] = v.Type()
763 }
764 ety, convs := convert.UnifyUnsafe(etys)
765 if ety == cty.NilType {
766 // FIXME: This is a pretty terrible error message.
767 diags = append(diags, &hcl.Diagnostic{
768 Severity: hcl.DiagError,
769 Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName),
770 Detail: "Corresponding attributes in all blocks of this type must be the same.",
771 Subject: &sourceRanges[0],
772 })
773 return cty.DynamicVal, diags
774 }
775 for i, v := range elems {
776 if convs[i] != nil {
777 newV, err := convs[i](v)
778 if err != nil {
779 // FIXME: This is a pretty terrible error message.
780 diags = append(diags, &hcl.Diagnostic{
781 Severity: hcl.DiagError,
782 Summary: fmt.Sprintf("Unconsistent argument types in %s blocks", s.TypeName),
783 Detail: fmt.Sprintf("Block with index %d has inconsistent argument types: %s.", i, err),
784 Subject: &sourceRanges[i],
785 })
786 // Bail early here so we won't panic below in cty.SetVal
787 return cty.DynamicVal, diags
788 }
789 elems[i] = newV
790 }
791 }
792
595 ret = cty.SetVal(elems) 793 ret = cty.SetVal(elems)
596 } 794 }
597 795
@@ -672,7 +870,10 @@ func (s *BlockMapSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel
672 var diags hcl.Diagnostics 870 var diags hcl.Diagnostics
673 871
674 if s.Nested == nil { 872 if s.Nested == nil {
675 panic("BlockSetSpec with no Nested Spec") 873 panic("BlockMapSpec with no Nested Spec")
874 }
875 if ImpliedType(s).HasDynamicTypes() {
876 panic("cty.DynamicPseudoType attributes may not be used inside a BlockMapSpec")
676 } 877 }
677 878
678 elems := map[string]interface{}{} 879 elems := map[string]interface{}{}
@@ -765,6 +966,307 @@ func (s *BlockMapSpec) sourceRange(content *hcl.BodyContent, blockLabels []block
765 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested) 966 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
766} 967}
767 968
969// A BlockObjectSpec is a Spec that produces a cty object of the results of
970// decoding all of the nested blocks of a given type, using a nested spec.
971//
972// One level of object structure is created for each of the given label names.
973// There must be at least one given label name.
974//
975// This is similar to BlockMapSpec, but it permits the nested blocks to have
976// different result types in situations where cty.DynamicPseudoType attributes
977// are present.
978type BlockObjectSpec struct {
979 TypeName string
980 LabelNames []string
981 Nested Spec
982}
983
984func (s *BlockObjectSpec) visitSameBodyChildren(cb visitFunc) {
985 // leaf node ("Nested" does not use the same body)
986}
987
988// blockSpec implementation
989func (s *BlockObjectSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
990 return []hcl.BlockHeaderSchema{
991 {
992 Type: s.TypeName,
993 LabelNames: append(s.LabelNames, findLabelSpecs(s.Nested)...),
994 },
995 }
996}
997
998// blockSpec implementation
999func (s *BlockObjectSpec) nestedSpec() Spec {
1000 return s.Nested
1001}
1002
1003// specNeedingVariables implementation
1004func (s *BlockObjectSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
1005 var ret []hcl.Traversal
1006
1007 for _, childBlock := range content.Blocks {
1008 if childBlock.Type != s.TypeName {
1009 continue
1010 }
1011
1012 ret = append(ret, Variables(childBlock.Body, s.Nested)...)
1013 }
1014
1015 return ret
1016}
1017
1018func (s *BlockObjectSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1019 var diags hcl.Diagnostics
1020
1021 if s.Nested == nil {
1022 panic("BlockObjectSpec with no Nested Spec")
1023 }
1024
1025 elems := map[string]interface{}{}
1026 for _, childBlock := range content.Blocks {
1027 if childBlock.Type != s.TypeName {
1028 continue
1029 }
1030
1031 childLabels := labelsForBlock(childBlock)
1032 val, _, childDiags := decode(childBlock.Body, childLabels[len(s.LabelNames):], ctx, s.Nested, false)
1033 targetMap := elems
1034 for _, key := range childBlock.Labels[:len(s.LabelNames)-1] {
1035 if _, exists := targetMap[key]; !exists {
1036 targetMap[key] = make(map[string]interface{})
1037 }
1038 targetMap = targetMap[key].(map[string]interface{})
1039 }
1040
1041 diags = append(diags, childDiags...)
1042
1043 key := childBlock.Labels[len(s.LabelNames)-1]
1044 if _, exists := targetMap[key]; exists {
1045 labelsBuf := bytes.Buffer{}
1046 for _, label := range childBlock.Labels {
1047 fmt.Fprintf(&labelsBuf, " %q", label)
1048 }
1049 diags = append(diags, &hcl.Diagnostic{
1050 Severity: hcl.DiagError,
1051 Summary: fmt.Sprintf("Duplicate %s block", s.TypeName),
1052 Detail: fmt.Sprintf(
1053 "A block for %s%s was already defined. The %s labels must be unique.",
1054 s.TypeName, labelsBuf.String(), s.TypeName,
1055 ),
1056 Subject: &childBlock.DefRange,
1057 })
1058 continue
1059 }
1060
1061 targetMap[key] = val
1062 }
1063
1064 if len(elems) == 0 {
1065 return cty.EmptyObjectVal, diags
1066 }
1067
1068 var ctyObj func(map[string]interface{}, int) cty.Value
1069 ctyObj = func(raw map[string]interface{}, depth int) cty.Value {
1070 vals := make(map[string]cty.Value, len(raw))
1071 if depth == 1 {
1072 for k, v := range raw {
1073 vals[k] = v.(cty.Value)
1074 }
1075 } else {
1076 for k, v := range raw {
1077 vals[k] = ctyObj(v.(map[string]interface{}), depth-1)
1078 }
1079 }
1080 return cty.ObjectVal(vals)
1081 }
1082
1083 return ctyObj(elems, len(s.LabelNames)), diags
1084}
1085
1086func (s *BlockObjectSpec) impliedType() cty.Type {
1087 // We can't predict our type, since we don't know how many blocks are
1088 // present and what labels they have until we decode.
1089 return cty.DynamicPseudoType
1090}
1091
1092func (s *BlockObjectSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
1093 // We return the source range of the _first_ block of the given type,
1094 // since they are not guaranteed to form a contiguous range.
1095
1096 var childBlock *hcl.Block
1097 for _, candidate := range content.Blocks {
1098 if candidate.Type != s.TypeName {
1099 continue
1100 }
1101
1102 childBlock = candidate
1103 break
1104 }
1105
1106 if childBlock == nil {
1107 return content.MissingItemRange
1108 }
1109
1110 return sourceRange(childBlock.Body, labelsForBlock(childBlock), s.Nested)
1111}
1112
1113// A BlockAttrsSpec is a Spec that interprets a single block as if it were
1114// a map of some element type. That is, each attribute within the block
1115// becomes a key in the resulting map and the attribute's value becomes the
1116// element value, after conversion to the given element type. The resulting
1117// value is a cty.Map of the given element type.
1118//
1119// This spec imposes a validation constraint that there be exactly one block
1120// of the given type name and that this block may contain only attributes. The
1121// block does not accept any labels.
1122//
1123// This is an alternative to an AttrSpec of a map type for situations where
1124// block syntax is desired. Note that block syntax does not permit dynamic
1125// keys, construction of the result via a "for" expression, etc. In most cases
1126// an AttrSpec is preferred if the desired result is a map whose keys are
1127// chosen by the user rather than by schema.
1128type BlockAttrsSpec struct {
1129 TypeName string
1130 ElementType cty.Type
1131 Required bool
1132}
1133
1134func (s *BlockAttrsSpec) visitSameBodyChildren(cb visitFunc) {
1135 // leaf node
1136}
1137
1138// blockSpec implementation
1139func (s *BlockAttrsSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
1140 return []hcl.BlockHeaderSchema{
1141 {
1142 Type: s.TypeName,
1143 LabelNames: nil,
1144 },
1145 }
1146}
1147
1148// blockSpec implementation
1149func (s *BlockAttrsSpec) nestedSpec() Spec {
1150 // This is an odd case: we aren't actually going to apply a nested spec
1151 // in this case, since we're going to interpret the body directly as
1152 // attributes, but we need to return something non-nil so that the
1153 // decoder will recognize this as a block spec. We won't actually be
1154 // using this for anything at decode time.
1155 return noopSpec{}
1156}
1157
1158// specNeedingVariables implementation
1159func (s *BlockAttrsSpec) variablesNeeded(content *hcl.BodyContent) []hcl.Traversal {
1160
1161 block, _ := s.findBlock(content)
1162 if block == nil {
1163 return nil
1164 }
1165
1166 var vars []hcl.Traversal
1167
1168 attrs, diags := block.Body.JustAttributes()
1169 if diags.HasErrors() {
1170 return nil
1171 }
1172
1173 for _, attr := range attrs {
1174 vars = append(vars, attr.Expr.Variables()...)
1175 }
1176
1177 // We'll return the variables references in source order so that any
1178 // error messages that result are also in source order.
1179 sort.Slice(vars, func(i, j int) bool {
1180 return vars[i].SourceRange().Start.Byte < vars[j].SourceRange().Start.Byte
1181 })
1182
1183 return vars
1184}
1185
1186func (s *BlockAttrsSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1187 var diags hcl.Diagnostics
1188
1189 block, other := s.findBlock(content)
1190 if block == nil {
1191 if s.Required {
1192 diags = append(diags, &hcl.Diagnostic{
1193 Severity: hcl.DiagError,
1194 Summary: fmt.Sprintf("Missing %s block", s.TypeName),
1195 Detail: fmt.Sprintf(
1196 "A block of type %q is required here.", s.TypeName,
1197 ),
1198 Subject: &content.MissingItemRange,
1199 })
1200 }
1201 return cty.NullVal(cty.Map(s.ElementType)), diags
1202 }
1203 if other != nil {
1204 diags = append(diags, &hcl.Diagnostic{
1205 Severity: hcl.DiagError,
1206 Summary: fmt.Sprintf("Duplicate %s block", s.TypeName),
1207 Detail: fmt.Sprintf(
1208 "Only one block of type %q is allowed. Previous definition was at %s.",
1209 s.TypeName, block.DefRange.String(),
1210 ),
1211 Subject: &other.DefRange,
1212 })
1213 }
1214
1215 attrs, attrDiags := block.Body.JustAttributes()
1216 diags = append(diags, attrDiags...)
1217
1218 if len(attrs) == 0 {
1219 return cty.MapValEmpty(s.ElementType), diags
1220 }
1221
1222 vals := make(map[string]cty.Value, len(attrs))
1223 for name, attr := range attrs {
1224 attrVal, attrDiags := attr.Expr.Value(ctx)
1225 diags = append(diags, attrDiags...)
1226
1227 attrVal, err := convert.Convert(attrVal, s.ElementType)
1228 if err != nil {
1229 diags = append(diags, &hcl.Diagnostic{
1230 Severity: hcl.DiagError,
1231 Summary: "Invalid attribute value",
1232 Detail: fmt.Sprintf("Invalid value for attribute of %q block: %s.", s.TypeName, err),
1233 Subject: attr.Expr.Range().Ptr(),
1234 })
1235 attrVal = cty.UnknownVal(s.ElementType)
1236 }
1237
1238 vals[name] = attrVal
1239 }
1240
1241 return cty.MapVal(vals), diags
1242}
1243
1244func (s *BlockAttrsSpec) impliedType() cty.Type {
1245 return cty.Map(s.ElementType)
1246}
1247
1248func (s *BlockAttrsSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
1249 block, _ := s.findBlock(content)
1250 if block == nil {
1251 return content.MissingItemRange
1252 }
1253 return block.DefRange
1254}
1255
1256func (s *BlockAttrsSpec) findBlock(content *hcl.BodyContent) (block *hcl.Block, other *hcl.Block) {
1257 for _, candidate := range content.Blocks {
1258 if candidate.Type != s.TypeName {
1259 continue
1260 }
1261 if block != nil {
1262 return block, candidate
1263 }
1264 block = candidate
1265 }
1266
1267 return block, nil
1268}
1269
768// A BlockLabelSpec is a Spec that returns a cty.String representing the 1270// A BlockLabelSpec is a Spec that returns a cty.String representing the
769// label of the block its given body belongs to, if indeed its given body 1271// label of the block its given body belongs to, if indeed its given body
770// belongs to a block. It is a programming error to use this in a non-block 1272// belongs to a block. It is a programming error to use this in a non-block
@@ -848,6 +1350,16 @@ func findLabelSpecs(spec Spec) []string {
848// 1350//
849// The two specifications must have the same implied result type for correct 1351// The two specifications must have the same implied result type for correct
850// operation. If not, the result is undefined. 1352// operation. If not, the result is undefined.
1353//
1354// Any requirements imposed by the "Default" spec apply even if "Primary" does
1355// not return null. For example, if the "Default" spec is for a required
1356// attribute then that attribute is always required, regardless of the result
1357// of the "Primary" spec.
1358//
1359// The "Default" spec must not describe a nested block, since otherwise the
1360// result of ChildBlockTypes would not be decidable without evaluation. If
1361// the default spec _does_ describe a nested block then the result is
1362// undefined.
851type DefaultSpec struct { 1363type DefaultSpec struct {
852 Primary Spec 1364 Primary Spec
853 Default Spec 1365 Default Spec
@@ -872,6 +1384,38 @@ func (s *DefaultSpec) impliedType() cty.Type {
872 return s.Primary.impliedType() 1384 return s.Primary.impliedType()
873} 1385}
874 1386
1387// attrSpec implementation
1388func (s *DefaultSpec) attrSchemata() []hcl.AttributeSchema {
1389 // We must pass through the union of both of our nested specs so that
1390 // we'll have both values available in the result.
1391 var ret []hcl.AttributeSchema
1392 if as, ok := s.Primary.(attrSpec); ok {
1393 ret = append(ret, as.attrSchemata()...)
1394 }
1395 if as, ok := s.Default.(attrSpec); ok {
1396 ret = append(ret, as.attrSchemata()...)
1397 }
1398 return ret
1399}
1400
1401// blockSpec implementation
1402func (s *DefaultSpec) blockHeaderSchemata() []hcl.BlockHeaderSchema {
1403 // Only the primary spec may describe a block, since otherwise
1404 // our nestedSpec method below can't know which to return.
1405 if bs, ok := s.Primary.(blockSpec); ok {
1406 return bs.blockHeaderSchemata()
1407 }
1408 return nil
1409}
1410
1411// blockSpec implementation
1412func (s *DefaultSpec) nestedSpec() Spec {
1413 if bs, ok := s.Primary.(blockSpec); ok {
1414 return bs.nestedSpec()
1415 }
1416 return nil
1417}
1418
875func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range { 1419func (s *DefaultSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
876 // We can't tell from here which of the two specs will ultimately be used 1420 // We can't tell from here which of the two specs will ultimately be used
877 // in our result, so we'll just assume the first. This is usually the right 1421 // in our result, so we'll just assume the first. This is usually the right
@@ -996,3 +1540,28 @@ func (s *TransformFuncSpec) sourceRange(content *hcl.BodyContent, blockLabels []
996 // not super-accurate, because there's nothing better to return. 1540 // not super-accurate, because there's nothing better to return.
997 return s.Wrapped.sourceRange(content, blockLabels) 1541 return s.Wrapped.sourceRange(content, blockLabels)
998} 1542}
1543
1544// noopSpec is a placeholder spec that does nothing, used in situations where
1545// a non-nil placeholder spec is required. It is not exported because there is
1546// no reason to use it directly; it is always an implementation detail only.
1547type noopSpec struct {
1548}
1549
1550func (s noopSpec) decode(content *hcl.BodyContent, blockLabels []blockLabel, ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
1551 return cty.NullVal(cty.DynamicPseudoType), nil
1552}
1553
1554func (s noopSpec) impliedType() cty.Type {
1555 return cty.DynamicPseudoType
1556}
1557
1558func (s noopSpec) visitSameBodyChildren(cb visitFunc) {
1559 // nothing to do
1560}
1561
1562func (s noopSpec) sourceRange(content *hcl.BodyContent, blockLabels []blockLabel) hcl.Range {
1563 // No useful range for a noopSpec, and nobody should be calling this anyway.
1564 return hcl.Range{
1565 Filename: "noopSpec",
1566 }
1567}
diff --git a/vendor/github.com/hashicorp/hcl2/hcldec/variables.go b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go
index 427b0d0..7662516 100644
--- a/vendor/github.com/hashicorp/hcl2/hcldec/variables.go
+++ b/vendor/github.com/hashicorp/hcl2/hcldec/variables.go
@@ -15,20 +15,22 @@ import (
15// be incomplete, but that's assumed to be okay because the eventual call 15// be incomplete, but that's assumed to be okay because the eventual call
16// to Decode will produce error diagnostics anyway. 16// to Decode will produce error diagnostics anyway.
17func Variables(body hcl.Body, spec Spec) []hcl.Traversal { 17func Variables(body hcl.Body, spec Spec) []hcl.Traversal {
18 var vars []hcl.Traversal
18 schema := ImpliedSchema(spec) 19 schema := ImpliedSchema(spec)
19
20 content, _, _ := body.PartialContent(schema) 20 content, _, _ := body.PartialContent(schema)
21 21
22 var vars []hcl.Traversal
23
24 if vs, ok := spec.(specNeedingVariables); ok { 22 if vs, ok := spec.(specNeedingVariables); ok {
25 vars = append(vars, vs.variablesNeeded(content)...) 23 vars = append(vars, vs.variablesNeeded(content)...)
26 } 24 }
27 spec.visitSameBodyChildren(func(s Spec) { 25
26 var visitFn visitFunc
27 visitFn = func(s Spec) {
28 if vs, ok := s.(specNeedingVariables); ok { 28 if vs, ok := s.(specNeedingVariables); ok {
29 vars = append(vars, vs.variablesNeeded(content)...) 29 vars = append(vars, vs.variablesNeeded(content)...)
30 } 30 }
31 }) 31 s.visitSameBodyChildren(visitFn)
32 }
33 spec.visitSameBodyChildren(visitFn)
32 34
33 return vars 35 return vars
34} 36}
diff --git a/vendor/github.com/hashicorp/hcl2/hcled/doc.go b/vendor/github.com/hashicorp/hcl2/hcled/doc.go
new file mode 100644
index 0000000..1a80144
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcled/doc.go
@@ -0,0 +1,4 @@
1// Package hcled provides functionality intended to help an application
2// that embeds HCL to deliver relevant information to a text editor or IDE
3// for navigating around and analyzing configuration files.
4package hcled
diff --git a/vendor/github.com/hashicorp/hcl2/hcled/navigation.go b/vendor/github.com/hashicorp/hcl2/hcled/navigation.go
new file mode 100644
index 0000000..5d10cd8
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hcled/navigation.go
@@ -0,0 +1,34 @@
1package hcled
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7type contextStringer interface {
8 ContextString(offset int) string
9}
10
11// ContextString returns a string describing the context of the given byte
12// offset, if available. An empty string is returned if no such information
13// is available, or otherwise the returned string is in a form that depends
14// on the language used to write the referenced file.
15func ContextString(file *hcl.File, offset int) string {
16 if cser, ok := file.Nav.(contextStringer); ok {
17 return cser.ContextString(offset)
18 }
19 return ""
20}
21
22type contextDefRanger interface {
23 ContextDefRange(offset int) hcl.Range
24}
25
26func ContextDefRange(file *hcl.File, offset int) hcl.Range {
27 if cser, ok := file.Nav.(contextDefRanger); ok {
28 defRange := cser.ContextDefRange(offset)
29 if !defRange.Empty() {
30 return defRange
31 }
32 }
33 return file.Body.MissingItemRange()
34}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go
new file mode 100644
index 0000000..0904165
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast.go
@@ -0,0 +1,121 @@
1package hclwrite
2
3import (
4 "bytes"
5 "io"
6)
7
8type File struct {
9 inTree
10
11 srcBytes []byte
12 body *node
13}
14
15// NewEmptyFile constructs a new file with no content, ready to be mutated
16// by other calls that append to its body.
17func NewEmptyFile() *File {
18 f := &File{
19 inTree: newInTree(),
20 }
21 body := newBody()
22 f.body = f.children.Append(body)
23 return f
24}
25
26// Body returns the root body of the file, which contains the top-level
27// attributes and blocks.
28func (f *File) Body() *Body {
29 return f.body.content.(*Body)
30}
31
32// WriteTo writes the tokens underlying the receiving file to the given writer.
33//
34// The tokens first have a simple formatting pass applied that adjusts only
35// the spaces between them.
36func (f *File) WriteTo(wr io.Writer) (int64, error) {
37 tokens := f.inTree.children.BuildTokens(nil)
38 format(tokens)
39 return tokens.WriteTo(wr)
40}
41
42// Bytes returns a buffer containing the source code resulting from the
43// tokens underlying the receiving file. If any updates have been made via
44// the AST API, these will be reflected in the result.
45func (f *File) Bytes() []byte {
46 buf := &bytes.Buffer{}
47 f.WriteTo(buf)
48 return buf.Bytes()
49}
50
51type comments struct {
52 leafNode
53
54 parent *node
55 tokens Tokens
56}
57
58func newComments(tokens Tokens) *comments {
59 return &comments{
60 tokens: tokens,
61 }
62}
63
64func (c *comments) BuildTokens(to Tokens) Tokens {
65 return c.tokens.BuildTokens(to)
66}
67
68type identifier struct {
69 leafNode
70
71 parent *node
72 token *Token
73}
74
75func newIdentifier(token *Token) *identifier {
76 return &identifier{
77 token: token,
78 }
79}
80
81func (i *identifier) BuildTokens(to Tokens) Tokens {
82 return append(to, i.token)
83}
84
85func (i *identifier) hasName(name string) bool {
86 return name == string(i.token.Bytes)
87}
88
89type number struct {
90 leafNode
91
92 parent *node
93 token *Token
94}
95
96func newNumber(token *Token) *number {
97 return &number{
98 token: token,
99 }
100}
101
102func (n *number) BuildTokens(to Tokens) Tokens {
103 return append(to, n.token)
104}
105
106type quoted struct {
107 leafNode
108
109 parent *node
110 tokens Tokens
111}
112
113func newQuoted(tokens Tokens) *quoted {
114 return &quoted{
115 tokens: tokens,
116 }
117}
118
119func (q *quoted) BuildTokens(to Tokens) Tokens {
120 return q.tokens.BuildTokens(to)
121}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go
new file mode 100644
index 0000000..975fa74
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_attribute.go
@@ -0,0 +1,48 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl/hclsyntax"
5)
6
7type Attribute struct {
8 inTree
9
10 leadComments *node
11 name *node
12 expr *node
13 lineComments *node
14}
15
16func newAttribute() *Attribute {
17 return &Attribute{
18 inTree: newInTree(),
19 }
20}
21
22func (a *Attribute) init(name string, expr *Expression) {
23 expr.assertUnattached()
24
25 nameTok := newIdentToken(name)
26 nameObj := newIdentifier(nameTok)
27 a.leadComments = a.children.Append(newComments(nil))
28 a.name = a.children.Append(nameObj)
29 a.children.AppendUnstructuredTokens(Tokens{
30 {
31 Type: hclsyntax.TokenEqual,
32 Bytes: []byte{'='},
33 },
34 })
35 a.expr = a.children.Append(expr)
36 a.expr.list = a.children
37 a.lineComments = a.children.Append(newComments(nil))
38 a.children.AppendUnstructuredTokens(Tokens{
39 {
40 Type: hclsyntax.TokenNewline,
41 Bytes: []byte{'\n'},
42 },
43 })
44}
45
46func (a *Attribute) Expr() *Expression {
47 return a.expr.content.(*Expression)
48}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go
new file mode 100644
index 0000000..d5fd32b
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_block.go
@@ -0,0 +1,74 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl/hclsyntax"
5 "github.com/zclconf/go-cty/cty"
6)
7
8type Block struct {
9 inTree
10
11 leadComments *node
12 typeName *node
13 labels nodeSet
14 open *node
15 body *node
16 close *node
17}
18
19func newBlock() *Block {
20 return &Block{
21 inTree: newInTree(),
22 labels: newNodeSet(),
23 }
24}
25
26// NewBlock constructs a new, empty block with the given type name and labels.
27func NewBlock(typeName string, labels []string) *Block {
28 block := newBlock()
29 block.init(typeName, labels)
30 return block
31}
32
33func (b *Block) init(typeName string, labels []string) {
34 nameTok := newIdentToken(typeName)
35 nameObj := newIdentifier(nameTok)
36 b.leadComments = b.children.Append(newComments(nil))
37 b.typeName = b.children.Append(nameObj)
38 for _, label := range labels {
39 labelToks := TokensForValue(cty.StringVal(label))
40 labelObj := newQuoted(labelToks)
41 labelNode := b.children.Append(labelObj)
42 b.labels.Add(labelNode)
43 }
44 b.open = b.children.AppendUnstructuredTokens(Tokens{
45 {
46 Type: hclsyntax.TokenOBrace,
47 Bytes: []byte{'{'},
48 },
49 {
50 Type: hclsyntax.TokenNewline,
51 Bytes: []byte{'\n'},
52 },
53 })
54 body := newBody() // initially totally empty; caller can append to it subsequently
55 b.body = b.children.Append(body)
56 b.close = b.children.AppendUnstructuredTokens(Tokens{
57 {
58 Type: hclsyntax.TokenCBrace,
59 Bytes: []byte{'}'},
60 },
61 {
62 Type: hclsyntax.TokenNewline,
63 Bytes: []byte{'\n'},
64 },
65 })
66}
67
68// Body returns the body that represents the content of the receiving block.
69//
70// Appending to or otherwise modifying this body will make changes to the
71// tokens that are generated between the blocks open and close braces.
72func (b *Block) Body() *Body {
73 return b.body.content.(*Body)
74}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go
new file mode 100644
index 0000000..cf69fee
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_body.go
@@ -0,0 +1,153 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcl/hclsyntax"
6 "github.com/zclconf/go-cty/cty"
7)
8
9type Body struct {
10 inTree
11
12 items nodeSet
13}
14
15func newBody() *Body {
16 return &Body{
17 inTree: newInTree(),
18 items: newNodeSet(),
19 }
20}
21
22func (b *Body) appendItem(c nodeContent) *node {
23 nn := b.children.Append(c)
24 b.items.Add(nn)
25 return nn
26}
27
28func (b *Body) appendItemNode(nn *node) *node {
29 nn.assertUnattached()
30 b.children.AppendNode(nn)
31 b.items.Add(nn)
32 return nn
33}
34
35// Clear removes all of the items from the body, making it empty.
36func (b *Body) Clear() {
37 b.children.Clear()
38}
39
40func (b *Body) AppendUnstructuredTokens(ts Tokens) {
41 b.inTree.children.Append(ts)
42}
43
44// Attributes returns a new map of all of the attributes in the body, with
45// the attribute names as the keys.
46func (b *Body) Attributes() map[string]*Attribute {
47 ret := make(map[string]*Attribute)
48 for n := range b.items {
49 if attr, isAttr := n.content.(*Attribute); isAttr {
50 nameObj := attr.name.content.(*identifier)
51 name := string(nameObj.token.Bytes)
52 ret[name] = attr
53 }
54 }
55 return ret
56}
57
58// Blocks returns a new slice of all the blocks in the body.
59func (b *Body) Blocks() []*Block {
60 ret := make([]*Block, 0, len(b.items))
61 for n := range b.items {
62 if block, isBlock := n.content.(*Block); isBlock {
63 ret = append(ret, block)
64 }
65 }
66 return ret
67}
68
69// GetAttribute returns the attribute from the body that has the given name,
70// or returns nil if there is currently no matching attribute.
71func (b *Body) GetAttribute(name string) *Attribute {
72 for n := range b.items {
73 if attr, isAttr := n.content.(*Attribute); isAttr {
74 nameObj := attr.name.content.(*identifier)
75 if nameObj.hasName(name) {
76 // We've found it!
77 return attr
78 }
79 }
80 }
81
82 return nil
83}
84
85// SetAttributeValue either replaces the expression of an existing attribute
86// of the given name or adds a new attribute definition to the end of the block.
87//
88// The value is given as a cty.Value, and must therefore be a literal. To set
89// a variable reference or other traversal, use SetAttributeTraversal.
90//
91// The return value is the attribute that was either modified in-place or
92// created.
93func (b *Body) SetAttributeValue(name string, val cty.Value) *Attribute {
94 attr := b.GetAttribute(name)
95 expr := NewExpressionLiteral(val)
96 if attr != nil {
97 attr.expr = attr.expr.ReplaceWith(expr)
98 } else {
99 attr := newAttribute()
100 attr.init(name, expr)
101 b.appendItem(attr)
102 }
103 return attr
104}
105
106// SetAttributeTraversal either replaces the expression of an existing attribute
107// of the given name or adds a new attribute definition to the end of the body.
108//
109// The new expression is given as a hcl.Traversal, which must be an absolute
110// traversal. To set a literal value, use SetAttributeValue.
111//
112// The return value is the attribute that was either modified in-place or
113// created.
114func (b *Body) SetAttributeTraversal(name string, traversal hcl.Traversal) *Attribute {
115 attr := b.GetAttribute(name)
116 expr := NewExpressionAbsTraversal(traversal)
117 if attr != nil {
118 attr.expr = attr.expr.ReplaceWith(expr)
119 } else {
120 attr := newAttribute()
121 attr.init(name, expr)
122 b.appendItem(attr)
123 }
124 return attr
125}
126
127// AppendBlock appends an existing block (which must not be already attached
128// to a body) to the end of the receiving body.
129func (b *Body) AppendBlock(block *Block) *Block {
130 b.appendItem(block)
131 return block
132}
133
134// AppendNewBlock appends a new nested block to the end of the receiving body
135// with the given type name and labels.
136func (b *Body) AppendNewBlock(typeName string, labels []string) *Block {
137 block := newBlock()
138 block.init(typeName, labels)
139 b.appendItem(block)
140 return block
141}
142
143// AppendNewline appends a newline token to th end of the receiving body,
144// which generally serves as a separator between different sets of body
145// contents.
146func (b *Body) AppendNewline() {
147 b.AppendUnstructuredTokens(Tokens{
148 {
149 Type: hclsyntax.TokenNewline,
150 Bytes: []byte{'\n'},
151 },
152 })
153}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go
new file mode 100644
index 0000000..62d89fb
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/ast_expression.go
@@ -0,0 +1,201 @@
1package hclwrite
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/hcl2/hcl/hclsyntax"
8 "github.com/zclconf/go-cty/cty"
9)
10
11type Expression struct {
12 inTree
13
14 absTraversals nodeSet
15}
16
17func newExpression() *Expression {
18 return &Expression{
19 inTree: newInTree(),
20 absTraversals: newNodeSet(),
21 }
22}
23
24// NewExpressionLiteral constructs an an expression that represents the given
25// literal value.
26//
27// Since an unknown value cannot be represented in source code, this function
28// will panic if the given value is unknown or contains a nested unknown value.
29// Use val.IsWhollyKnown before calling to be sure.
30//
31// HCL native syntax does not directly represent lists, maps, and sets, and
32// instead relies on the automatic conversions to those collection types from
33// either list or tuple constructor syntax. Therefore converting collection
34// values to source code and re-reading them will lose type information, and
35// the reader must provide a suitable type at decode time to recover the
36// original value.
37func NewExpressionLiteral(val cty.Value) *Expression {
38 toks := TokensForValue(val)
39 expr := newExpression()
40 expr.children.AppendUnstructuredTokens(toks)
41 return expr
42}
43
44// NewExpressionAbsTraversal constructs an expression that represents the
45// given traversal, which must be absolute or this function will panic.
46func NewExpressionAbsTraversal(traversal hcl.Traversal) *Expression {
47 if traversal.IsRelative() {
48 panic("can't construct expression from relative traversal")
49 }
50
51 physT := newTraversal()
52 rootName := traversal.RootName()
53 steps := traversal[1:]
54
55 {
56 tn := newTraverseName()
57 tn.name = tn.children.Append(newIdentifier(&Token{
58 Type: hclsyntax.TokenIdent,
59 Bytes: []byte(rootName),
60 }))
61 physT.steps.Add(physT.children.Append(tn))
62 }
63
64 for _, step := range steps {
65 switch ts := step.(type) {
66 case hcl.TraverseAttr:
67 tn := newTraverseName()
68 tn.children.AppendUnstructuredTokens(Tokens{
69 {
70 Type: hclsyntax.TokenDot,
71 Bytes: []byte{'.'},
72 },
73 })
74 tn.name = tn.children.Append(newIdentifier(&Token{
75 Type: hclsyntax.TokenIdent,
76 Bytes: []byte(ts.Name),
77 }))
78 physT.steps.Add(physT.children.Append(tn))
79 case hcl.TraverseIndex:
80 ti := newTraverseIndex()
81 ti.children.AppendUnstructuredTokens(Tokens{
82 {
83 Type: hclsyntax.TokenOBrack,
84 Bytes: []byte{'['},
85 },
86 })
87 indexExpr := NewExpressionLiteral(ts.Key)
88 ti.key = ti.children.Append(indexExpr)
89 ti.children.AppendUnstructuredTokens(Tokens{
90 {
91 Type: hclsyntax.TokenCBrack,
92 Bytes: []byte{']'},
93 },
94 })
95 physT.steps.Add(physT.children.Append(ti))
96 }
97 }
98
99 expr := newExpression()
100 expr.absTraversals.Add(expr.children.Append(physT))
101 return expr
102}
103
104// Variables returns the absolute traversals that exist within the receiving
105// expression.
106func (e *Expression) Variables() []*Traversal {
107 nodes := e.absTraversals.List()
108 ret := make([]*Traversal, len(nodes))
109 for i, node := range nodes {
110 ret[i] = node.content.(*Traversal)
111 }
112 return ret
113}
114
115// RenameVariablePrefix examines each of the absolute traversals in the
116// receiving expression to see if they have the given sequence of names as
117// a prefix prefix. If so, they are updated in place to have the given
118// replacement names instead of that prefix.
119//
120// This can be used to implement symbol renaming. The calling application can
121// visit all relevant expressions in its input and apply the same renaming
122// to implement a global symbol rename.
123//
124// The search and replacement traversals must be the same length, or this
125// method will panic. Only attribute access operations can be matched and
126// replaced. Index steps never match the prefix.
127func (e *Expression) RenameVariablePrefix(search, replacement []string) {
128 if len(search) != len(replacement) {
129 panic(fmt.Sprintf("search and replacement length mismatch (%d and %d)", len(search), len(replacement)))
130 }
131Traversals:
132 for node := range e.absTraversals {
133 traversal := node.content.(*Traversal)
134 if len(traversal.steps) < len(search) {
135 // If it's shorter then it can't have our prefix
136 continue
137 }
138
139 stepNodes := traversal.steps.List()
140 for i, name := range search {
141 step, isName := stepNodes[i].content.(*TraverseName)
142 if !isName {
143 continue Traversals // only name nodes can match
144 }
145 foundNameBytes := step.name.content.(*identifier).token.Bytes
146 if len(foundNameBytes) != len(name) {
147 continue Traversals
148 }
149 if string(foundNameBytes) != name {
150 continue Traversals
151 }
152 }
153
154 // If we get here then the prefix matched, so now we'll swap in
155 // the replacement strings.
156 for i, name := range replacement {
157 step := stepNodes[i].content.(*TraverseName)
158 token := step.name.content.(*identifier).token
159 token.Bytes = []byte(name)
160 }
161 }
162}
163
164// Traversal represents a sequence of variable, attribute, and/or index
165// operations.
166type Traversal struct {
167 inTree
168
169 steps nodeSet
170}
171
172func newTraversal() *Traversal {
173 return &Traversal{
174 inTree: newInTree(),
175 steps: newNodeSet(),
176 }
177}
178
179type TraverseName struct {
180 inTree
181
182 name *node
183}
184
185func newTraverseName() *TraverseName {
186 return &TraverseName{
187 inTree: newInTree(),
188 }
189}
190
191type TraverseIndex struct {
192 inTree
193
194 key *node
195}
196
197func newTraverseIndex() *TraverseIndex {
198 return &TraverseIndex{
199 inTree: newInTree(),
200 }
201}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go b/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go
new file mode 100644
index 0000000..56d5b77
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/doc.go
@@ -0,0 +1,11 @@
1// Package hclwrite deals with the problem of generating HCL configuration
2// and of making specific surgical changes to existing HCL configurations.
3//
4// It operates at a different level of abstraction than the main HCL parser
5// and AST, since details such as the placement of comments and newlines
6// are preserved when unchanged.
7//
8// The hclwrite API follows a similar principle to XML/HTML DOM, allowing nodes
9// to be read out, created and inserted, etc. Nodes represent syntax constructs
10// rather than semantic concepts.
11package hclwrite
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/format.go b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
new file mode 100644
index 0000000..f20ae23
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/format.go
@@ -0,0 +1,492 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl/hclsyntax"
5)
6
// inKeyword matches the "in" keyword as it appears in for expressions,
// allowing the formatter to distinguish it from an ordinary identifier.
var inKeyword = hclsyntax.Keyword([]byte{'i', 'n'})

// placeholder token used when we don't have a token but we don't want
// to pass a real "nil" and complicate things with nil pointer checks
//
// NOTE(review): formatSpaces writes SpacesBefore on this shared token when
// a cell's last token has no successor; that looks like an intentional
// harmless sink, but confirm the value is never read back.
var nilToken = &Token{
	Type:         hclsyntax.TokenNil,
	Bytes:        []byte{},
	SpacesBefore: 0,
}
16
// format rewrites tokens within the given sequence, in-place, to adjust the
// whitespace around their content to achieve canonical formatting.
func format(tokens Tokens) {
	// Formatting is a multi-pass process. More details on the passes below,
	// but this is the overview:
	// - adjust the leading space on each line to create appropriate
	//   indentation
	// - adjust spaces between tokens in a single cell using a set of rules
	// - adjust the leading space in the "assign" and "comment" cells on each
	//   line to vertically align with neighboring lines.
	// All of these steps operate in-place on the given tokens, so a caller
	// may collect a flat sequence of all of the tokens underlying an AST
	// and pass it here and we will then indirectly modify the AST itself.
	// Formatting must change only whitespace. Specifically, that means
	// changing the SpacesBefore attribute on a token while leaving the
	// other token attributes unchanged.

	lines := linesForFormat(tokens) // partition into lines and their cells
	formatIndent(lines)            // pass 1: leading indentation per line
	formatSpaces(lines)            // pass 2: inter-token spacing within cells
	formatCells(lines)             // pass 3: vertical alignment of assign/comment cells
}
39
// formatIndent sets the leading-space count of each line's first token so
// that lines are indented two spaces per open bracketing level.
func formatIndent(lines []formatLine) {
	// Our methodology for indents is to take the input one line at a time
	// and count the bracketing delimiters on each line. If a line has a net
	// increase in open brackets, we increase the indent level by one and
	// remember how many new openers we had. If the line has a net _decrease_,
	// we'll compare it to the most recent number of openers and decrease the
	// dedent level by one each time we pass an indent level remembered
	// earlier.
	// The "indent stack" used here allows for us to recognize degenerate
	// input where brackets are not symmetrical within lines and avoid
	// pushing things too far left or right, creating confusion.

	// We'll start our indent stack at a reasonable capacity to minimize the
	// chance of us needing to grow it; 10 here means 10 levels of indent,
	// which should be more than enough for reasonable HCL uses.
	indents := make([]int, 0, 10)

	inHeredoc := false
	for i := range lines {
		line := &lines[i]
		if len(line.lead) == 0 {
			continue
		}

		if inHeredoc {
			// Heredoc body content is preserved verbatim, so we only watch
			// for the closing marker here.
			for _, token := range line.lead {
				if token.Type == hclsyntax.TokenCHeredoc {
					inHeredoc = false
				}
			}
			continue // don't touch indentation inside heredocs
		}

		if line.lead[0].Type == hclsyntax.TokenNewline {
			// Never place spaces before a newline
			line.lead[0].SpacesBefore = 0
			continue
		}

		// Count the net bracket change across the lead and assign cells,
		// also noting whether this line opens a heredoc.
		netBrackets := 0
		for _, token := range line.lead {
			netBrackets += tokenBracketChange(token)
			if token.Type == hclsyntax.TokenOHeredoc {
				inHeredoc = true
			}
		}
		for _, token := range line.assign {
			netBrackets += tokenBracketChange(token)
		}

		switch {
		case netBrackets > 0:
			// Line opens more brackets than it closes: indent at the
			// current level and remember the new openers on the stack.
			line.lead[0].SpacesBefore = 2 * len(indents)
			indents = append(indents, netBrackets)
		case netBrackets < 0:
			// Line closes more than it opens: pop stack levels until all
			// closers are accounted for, then indent at the new level.
			closed := -netBrackets
			for closed > 0 && len(indents) > 0 {
				switch {

				case closed > indents[len(indents)-1]:
					// Consumes the whole top level and then some.
					closed -= indents[len(indents)-1]
					indents = indents[:len(indents)-1]

				case closed < indents[len(indents)-1]:
					// Partially consumes the top level; shrink it in place.
					indents[len(indents)-1] -= closed
					closed = 0

				default:
					// Exactly consumes the top level.
					indents = indents[:len(indents)-1]
					closed = 0
				}
			}
			line.lead[0].SpacesBefore = 2 * len(indents)
		default:
			// Balanced line: keep the current indent level.
			line.lead[0].SpacesBefore = 2 * len(indents)
		}
	}
}
118
119func formatSpaces(lines []formatLine) {
120 for _, line := range lines {
121 for i, token := range line.lead {
122 var before, after *Token
123 if i > 0 {
124 before = line.lead[i-1]
125 } else {
126 before = nilToken
127 }
128 if i < (len(line.lead) - 1) {
129 after = line.lead[i+1]
130 } else {
131 after = nilToken
132 }
133 if spaceAfterToken(token, before, after) {
134 after.SpacesBefore = 1
135 } else {
136 after.SpacesBefore = 0
137 }
138 }
139 for i, token := range line.assign {
140 if i == 0 {
141 // first token in "assign" always has one space before to
142 // separate the equals sign from what it's assigning.
143 token.SpacesBefore = 1
144 }
145
146 var before, after *Token
147 if i > 0 {
148 before = line.assign[i-1]
149 } else {
150 before = nilToken
151 }
152 if i < (len(line.assign) - 1) {
153 after = line.assign[i+1]
154 } else {
155 after = nilToken
156 }
157 if spaceAfterToken(token, before, after) {
158 after.SpacesBefore = 1
159 } else {
160 after.SpacesBefore = 0
161 }
162 }
163
164 }
165}
166
// formatCells vertically aligns the "assign" and "comment" cells across
// runs of consecutive lines that have those cells present, by adjusting
// the leading spaces of each cell's first token.
func formatCells(lines []formatLine) {

	// chainStart is the index of the first line in the current run of
	// alignable lines, or -1 when no run is open. maxColumns is the widest
	// prefix seen within the run, which determines the alignment column.
	chainStart := -1
	maxColumns := 0

	// We'll deal with the "assign" cell first, since moving that will
	// also impact the "comment" cell.
	closeAssignChain := func(i int) {
		// Pad each equals sign out to one column past the run's widest lead.
		for _, chainLine := range lines[chainStart:i] {
			columns := chainLine.lead.Columns()
			spaces := (maxColumns - columns) + 1
			chainLine.assign[0].SpacesBefore = spaces
		}
		chainStart = -1
		maxColumns = 0
	}
	for i, line := range lines {
		if line.assign == nil {
			// A line without an assignment ends any open run.
			if chainStart != -1 {
				closeAssignChain(i)
			}
		} else {
			if chainStart == -1 {
				chainStart = i
			}
			columns := line.lead.Columns()
			if columns > maxColumns {
				maxColumns = columns
			}
		}
	}
	if chainStart != -1 {
		// A run extending to the end of the input still needs closing.
		closeAssignChain(len(lines))
	}

	// Now we'll deal with the comments
	closeCommentChain := func(i int) {
		// Align comments one column past the run's widest lead+assign prefix.
		for _, chainLine := range lines[chainStart:i] {
			columns := chainLine.lead.Columns() + chainLine.assign.Columns()
			spaces := (maxColumns - columns) + 1
			chainLine.comment[0].SpacesBefore = spaces
		}
		chainStart = -1
		maxColumns = 0
	}
	for i, line := range lines {
		if line.comment == nil {
			if chainStart != -1 {
				closeCommentChain(i)
			}
		} else {
			if chainStart == -1 {
				chainStart = i
			}
			columns := line.lead.Columns() + line.assign.Columns()
			if columns > maxColumns {
				maxColumns = columns
			}
		}
	}
	if chainStart != -1 {
		closeCommentChain(len(lines))
	}

}
232
// spaceAfterToken decides whether a particular subject token should have a
// space after it when surrounded by the given before and after tokens.
// "before" can be TokenNil, if the subject token is at the start of a
// sequence; likewise "after" can be TokenNil at the end of one.
// The case order below is significant: earlier cases take priority.
func spaceAfterToken(subject, before, after *Token) bool {
	switch {

	case after.Type == hclsyntax.TokenNewline || after.Type == hclsyntax.TokenNil:
		// Never add spaces before a newline
		return false

	case subject.Type == hclsyntax.TokenIdent && after.Type == hclsyntax.TokenOParen:
		// Don't split a function name from open paren in a call
		return false

	case subject.Type == hclsyntax.TokenDot || after.Type == hclsyntax.TokenDot:
		// Don't use spaces around attribute access dots
		return false

	case after.Type == hclsyntax.TokenComma || after.Type == hclsyntax.TokenEllipsis:
		// No space right before a comma or ... in an argument list
		return false

	case subject.Type == hclsyntax.TokenComma:
		// Always a space after a comma
		return true

	case subject.Type == hclsyntax.TokenQuotedLit || subject.Type == hclsyntax.TokenStringLit || subject.Type == hclsyntax.TokenOQuote || subject.Type == hclsyntax.TokenOHeredoc || after.Type == hclsyntax.TokenQuotedLit || after.Type == hclsyntax.TokenStringLit || after.Type == hclsyntax.TokenCQuote || after.Type == hclsyntax.TokenCHeredoc:
		// No extra spaces within templates
		return false

	case inKeyword.TokenMatches(subject.asHCLSyntax()) && before.Type == hclsyntax.TokenIdent:
		// This is a special case for inside for expressions where a user
		// might want to use a literal tuple constructor:
		// [for x in [foo]: x]
		// ... in that case, we would normally produce in[foo] thinking that
		// in is a reference, but we'll recognize it as a keyword here instead
		// to make the result less confusing.
		return true

	case after.Type == hclsyntax.TokenOBrack && (subject.Type == hclsyntax.TokenIdent || subject.Type == hclsyntax.TokenNumberLit || tokenBracketChange(subject) < 0):
		// An open bracket right after an identifier, number, or a closing
		// bracket reads as an index operation (e.g. foo[0]), so no space.
		return false

	case subject.Type == hclsyntax.TokenMinus:
		// Since a minus can either be subtraction or negation, and the latter
		// should _not_ have a space after it, we need to use some heuristics
		// to decide which case this is.
		// We guess that we have a negation if the token before doesn't look
		// like it could be the end of an expression.

		switch before.Type {

		case hclsyntax.TokenNil:
			// Minus at the start of input must be a negation
			return false

		case hclsyntax.TokenOParen, hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenEqual, hclsyntax.TokenColon, hclsyntax.TokenComma, hclsyntax.TokenQuestion:
			// Minus immediately after an opening bracket or separator must be a negation.
			return false

		case hclsyntax.TokenPlus, hclsyntax.TokenStar, hclsyntax.TokenSlash, hclsyntax.TokenPercent, hclsyntax.TokenMinus:
			// Minus immediately after another arithmetic operator must be negation.
			return false

		case hclsyntax.TokenEqualOp, hclsyntax.TokenNotEqual, hclsyntax.TokenGreaterThan, hclsyntax.TokenGreaterThanEq, hclsyntax.TokenLessThan, hclsyntax.TokenLessThanEq:
			// Minus immediately after another comparison operator must be negation.
			return false

		case hclsyntax.TokenAnd, hclsyntax.TokenOr, hclsyntax.TokenBang:
			// Minus immediately after logical operator doesn't make sense but probably intended as negation.
			return false

		default:
			return true
		}

	case subject.Type == hclsyntax.TokenOBrace || after.Type == hclsyntax.TokenCBrace:
		// Unlike other bracket types, braces have spaces on both sides of them,
		// both in single-line nested blocks foo { bar = baz } and in object
		// constructor expressions foo = { bar = baz }.
		if subject.Type == hclsyntax.TokenOBrace && after.Type == hclsyntax.TokenCBrace {
			// An open brace followed by a close brace is an exception, however.
			// e.g. foo {} rather than foo { }
			return false
		}
		return true

	// In the unlikely event that an interpolation expression is just
	// a single object constructor, we'll put a space between the ${ and
	// the following { to make this more obvious, and then the same
	// thing for the two braces at the end.
	case (subject.Type == hclsyntax.TokenTemplateInterp || subject.Type == hclsyntax.TokenTemplateControl) && after.Type == hclsyntax.TokenOBrace:
		return true
	case subject.Type == hclsyntax.TokenCBrace && after.Type == hclsyntax.TokenTemplateSeqEnd:
		return true

	// Don't add spaces between interpolated items
	case subject.Type == hclsyntax.TokenTemplateSeqEnd && after.Type == hclsyntax.TokenTemplateInterp:
		return false

	case tokenBracketChange(subject) > 0:
		// No spaces after open brackets
		return false

	case tokenBracketChange(after) < 0:
		// No spaces before close brackets
		return false

	default:
		// Most tokens are space-separated
		return true

	}
}
346
// linesForFormat partitions the given token sequence into lines, splitting
// each line's tokens into "lead", "assign", and "comment" cells as
// described on the formatLine type. The returned slices alias the input,
// so changes to the tokens are visible to the caller.
func linesForFormat(tokens Tokens) []formatLine {
	if len(tokens) == 0 {
		return make([]formatLine, 0)
	}

	// first we'll count our lines, so we can allocate the array for them in
	// a single block. (We want to minimize memory pressure in this codepath,
	// so it can be run somewhat-frequently by editor integrations.)
	lineCount := 1 // if there are zero newlines then there is one line
	for _, tok := range tokens {
		if tokenIsNewline(tok) {
			lineCount++
		}
	}

	// To start, we'll just put everything in the "lead" cell on each line,
	// and then do another pass over the lines afterwards to adjust.
	lines := make([]formatLine, lineCount)
	li := 0
	lineStart := 0
	for i, tok := range tokens {
		if tok.Type == hclsyntax.TokenEOF {
			// The EOF token doesn't belong to any line, and terminates the
			// token sequence.
			lines[li].lead = tokens[lineStart:i]
			break
		}

		if tokenIsNewline(tok) {
			// The newline token itself stays with the line it ends.
			lines[li].lead = tokens[lineStart : i+1]
			lineStart = i + 1
			li++
		}
	}

	// If a set of tokens doesn't end in TokenEOF (e.g. because it's a
	// fragment of tokens from the middle of a file) then we might fall
	// out here with a line still pending.
	if lineStart < len(tokens) {
		lines[li].lead = tokens[lineStart:]
		if lines[li].lead[len(lines[li].lead)-1].Type == hclsyntax.TokenEOF {
			lines[li].lead = lines[li].lead[:len(lines[li].lead)-1]
		}
	}

	// Now we'll pick off any trailing comments and attribute assignments
	// to shuffle off into the "comment" and "assign" cells.
	inHeredoc := false
	for i := range lines {
		line := &lines[i]
		if len(line.lead) == 0 {
			// if the line is empty then there's nothing for us to do
			// (this should happen only for the final line, because all other
			// lines would have a newline token of some kind)
			continue
		}

		if inHeredoc {
			for _, tok := range line.lead {
				if tok.Type == hclsyntax.TokenCHeredoc {
					inHeredoc = false
					break
				}
			}
			// Inside a heredoc everything is "lead", even if there's a
			// template interpolation embedded in there that might otherwise
			// confuse our logic below.
			continue
		}

		for _, tok := range line.lead {
			if tok.Type == hclsyntax.TokenOHeredoc {
				inHeredoc = true
				break
			}
		}

		// A trailing comment on a line that has other tokens becomes the
		// "comment" cell.
		if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment {
			line.comment = line.lead[len(line.lead)-1:]
			line.lead = line.lead[:len(line.lead)-1]
		}

		for i, tok := range line.lead {
			if i > 0 && tok.Type == hclsyntax.TokenEqual {
				// We only move the tokens into "assign" if the RHS seems to
				// be a whole expression, which we determine by counting
				// brackets. If there's a net positive number of brackets
				// then that suggests we're introducing a multi-line expression.
				netBrackets := 0
				for _, token := range line.lead[i:] {
					netBrackets += tokenBracketChange(token)
				}

				if netBrackets == 0 {
					line.assign = line.lead[i:]
					line.lead = line.lead[:i]
				}
				break
			}
		}
	}

	return lines
}
451
452func tokenIsNewline(tok *Token) bool {
453 if tok.Type == hclsyntax.TokenNewline {
454 return true
455 } else if tok.Type == hclsyntax.TokenComment {
456 // Single line tokens (# and //) consume their terminating newline,
457 // so we need to treat them as newline tokens as well.
458 if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
459 return true
460 }
461 }
462 return false
463}
464
// tokenBracketChange returns the change in bracket nesting level caused by
// the given token: +1 for tokens that open a bracketing context, -1 for
// tokens that close one, and 0 for everything else. Template interpolation
// and control sequences count as brackets here too.
func tokenBracketChange(tok *Token) int {
	switch tok.Type {
	case hclsyntax.TokenOBrace, hclsyntax.TokenOBrack, hclsyntax.TokenOParen, hclsyntax.TokenTemplateControl, hclsyntax.TokenTemplateInterp:
		return 1
	case hclsyntax.TokenCBrace, hclsyntax.TokenCBrack, hclsyntax.TokenCParen, hclsyntax.TokenTemplateSeqEnd:
		return -1
	default:
		return 0
	}
}
475
// formatLine represents a single line of source code for formatting purposes,
// splitting its tokens into up to three "cells":
//
//	lead: always present, representing everything up to one of the others
//	assign: if line contains an attribute assignment, represents the tokens
//	    starting at (and including) the equals symbol
//	comment: if line contains any non-comment tokens and ends with a
//	    single-line comment token, represents the comment.
//
// When formatting, the leading spaces of the first tokens in each of these
// cells is adjusted to align vertically their occurrences on consecutive
// rows.
type formatLine struct {
	lead    Tokens
	assign  Tokens
	comment Tokens
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go b/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go
new file mode 100644
index 0000000..d249cfd
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/generate.go
@@ -0,0 +1,250 @@
1package hclwrite
2
3import (
4 "fmt"
5 "unicode"
6 "unicode/utf8"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/hcl2/hcl/hclsyntax"
10 "github.com/zclconf/go-cty/cty"
11)
12
13// TokensForValue returns a sequence of tokens that represents the given
14// constant value.
15//
16// This function only supports types that are used by HCL. In particular, it
17// does not support capsule types and will panic if given one.
18//
19// It is not possible to express an unknown value in source code, so this
20// function will panic if the given value is unknown or contains any unknown
21// values. A caller can call the value's IsWhollyKnown method to verify that
22// no unknown values are present before calling TokensForValue.
23func TokensForValue(val cty.Value) Tokens {
24 toks := appendTokensForValue(val, nil)
25 format(toks) // fiddle with the SpacesBefore field to get canonical spacing
26 return toks
27}
28
29// TokensForTraversal returns a sequence of tokens that represents the given
30// traversal.
31//
32// If the traversal is absolute then the result is a self-contained, valid
33// reference expression. If the traversal is relative then the returned tokens
34// could be appended to some other expression tokens to traverse into the
35// represented expression.
36func TokensForTraversal(traversal hcl.Traversal) Tokens {
37 toks := appendTokensForTraversal(traversal, nil)
38 format(toks) // fiddle with the SpacesBefore field to get canonical spacing
39 return toks
40}
41
// appendTokensForValue appends the tokens representing the given constant
// value to toks, returning the extended sequence. It panics if the value
// is unknown or of a type it cannot render (e.g. a capsule type).
func appendTokensForValue(val cty.Value, toks Tokens) Tokens {
	switch {

	case !val.IsKnown():
		panic("cannot produce tokens for unknown value")

	case val.IsNull():
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenIdent,
			Bytes: []byte(`null`),
		})

	case val.Type() == cty.Bool:
		var src []byte
		if val.True() {
			src = []byte(`true`)
		} else {
			src = []byte(`false`)
		}
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenIdent,
			Bytes: src,
		})

	case val.Type() == cty.Number:
		bf := val.AsBigFloat()
		// 'f' with precision -1 renders in non-exponent form using the
		// minimum digits needed to round-trip the value exactly.
		srcStr := bf.Text('f', -1)
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenNumberLit,
			Bytes: []byte(srcStr),
		})

	case val.Type() == cty.String:
		// TODO: If it's a multi-line string ending in a newline, format
		// it as a HEREDOC instead.
		src := escapeQuotedStringLit(val.AsString())
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenOQuote,
			Bytes: []byte{'"'},
		})
		// An empty string renders as just the two quote tokens.
		if len(src) > 0 {
			toks = append(toks, &Token{
				Type:  hclsyntax.TokenQuotedLit,
				Bytes: src,
			})
		}
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenCQuote,
			Bytes: []byte{'"'},
		})

	case val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType():
		// Sequence types render as a comma-separated tuple constructor.
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenOBrack,
			Bytes: []byte{'['},
		})

		i := 0
		for it := val.ElementIterator(); it.Next(); {
			if i > 0 {
				toks = append(toks, &Token{
					Type:  hclsyntax.TokenComma,
					Bytes: []byte{','},
				})
			}
			_, eVal := it.Element()
			toks = appendTokensForValue(eVal, toks)
			i++
		}

		toks = append(toks, &Token{
			Type:  hclsyntax.TokenCBrack,
			Bytes: []byte{']'},
		})

	case val.Type().IsMapType() || val.Type().IsObjectType():
		// Mapping types render as an object constructor. Keys that are
		// valid identifiers are rendered naked; others as full values.
		toks = append(toks, &Token{
			Type:  hclsyntax.TokenOBrace,
			Bytes: []byte{'{'},
		})

		i := 0
		for it := val.ElementIterator(); it.Next(); {
			if i > 0 {
				toks = append(toks, &Token{
					Type:  hclsyntax.TokenComma,
					Bytes: []byte{','},
				})
			}
			eKey, eVal := it.Element()
			if hclsyntax.ValidIdentifier(eKey.AsString()) {
				toks = append(toks, &Token{
					Type:  hclsyntax.TokenIdent,
					Bytes: []byte(eKey.AsString()),
				})
			} else {
				toks = appendTokensForValue(eKey, toks)
			}
			toks = append(toks, &Token{
				Type:  hclsyntax.TokenEqual,
				Bytes: []byte{'='},
			})
			toks = appendTokensForValue(eVal, toks)
			i++
		}

		toks = append(toks, &Token{
			Type:  hclsyntax.TokenCBrace,
			Bytes: []byte{'}'},
		})

	default:
		panic(fmt.Sprintf("cannot produce tokens for %#v", val))
	}

	return toks
}
159
160func appendTokensForTraversal(traversal hcl.Traversal, toks Tokens) Tokens {
161 for _, step := range traversal {
162 appendTokensForTraversalStep(step, toks)
163 }
164 return toks
165}
166
167func appendTokensForTraversalStep(step hcl.Traverser, toks Tokens) {
168 switch ts := step.(type) {
169 case hcl.TraverseRoot:
170 toks = append(toks, &Token{
171 Type: hclsyntax.TokenIdent,
172 Bytes: []byte(ts.Name),
173 })
174 case hcl.TraverseAttr:
175 toks = append(
176 toks,
177 &Token{
178 Type: hclsyntax.TokenDot,
179 Bytes: []byte{'.'},
180 },
181 &Token{
182 Type: hclsyntax.TokenIdent,
183 Bytes: []byte(ts.Name),
184 },
185 )
186 case hcl.TraverseIndex:
187 toks = append(toks, &Token{
188 Type: hclsyntax.TokenOBrack,
189 Bytes: []byte{'['},
190 })
191 appendTokensForValue(ts.Key, toks)
192 toks = append(toks, &Token{
193 Type: hclsyntax.TokenCBrack,
194 Bytes: []byte{']'},
195 })
196 default:
197 panic(fmt.Sprintf("unsupported traversal step type %T", step))
198 }
199}
200
// escapeQuotedStringLit produces the body of a quoted HCL string literal
// representing s: backslash escapes for control/quote/backslash characters,
// \u/\U escapes for non-printable runes, and doubled "${" / "%{" sequences
// so template introducers are taken literally. Returns nil for an empty
// input string.
func escapeQuotedStringLit(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	ret := make([]byte, 0, len(s))
	for i, r := range s {
		switch r {
		case '\n':
			ret = append(ret, `\n`...)
		case '\r':
			ret = append(ret, `\r`...)
		case '\t':
			ret = append(ret, `\t`...)
		case '"':
			ret = append(ret, `\"`...)
		case '\\':
			ret = append(ret, `\\`...)
		case '$', '%':
			ret = append(ret, string(r)...)
			if i+1 < len(s) && s[i+1] == '{' {
				// Double up our template introducer symbol to escape it.
				ret = append(ret, string(r)...)
			}
		default:
			if unicode.IsPrint(r) {
				ret = append(ret, string(r)...)
				break
			}
			// Non-printable runes become unicode escapes.
			if r < 65536 {
				ret = append(ret, fmt.Sprintf(`\u%04x`, r)...)
			} else {
				ret = append(ret, fmt.Sprintf(`\U%08x`, r)...)
			}
		}
	}
	return ret
}
241
// appendRune appends the UTF-8 encoding of r to b, returning the extended
// slice.
//
// Bug fix: the previous implementation used utf8.RuneLen, which returns -1
// for an invalid rune, causing an out-of-range slice panic. EncodeRune
// instead substitutes utf8.RuneError for invalid runes.
func appendRune(b []byte, r rune) []byte {
	var scratch [utf8.UTFMax]byte
	n := utf8.EncodeRune(scratch[:], r)
	return append(b, scratch[:n]...)
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go b/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
new file mode 100644
index 0000000..a13c0ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/native_node_sorter.go
@@ -0,0 +1,23 @@
1package hclwrite
2
3import (
4 "github.com/hashicorp/hcl2/hcl/hclsyntax"
5)
6
7type nativeNodeSorter struct {
8 Nodes []hclsyntax.Node
9}
10
11func (s nativeNodeSorter) Len() int {
12 return len(s.Nodes)
13}
14
15func (s nativeNodeSorter) Less(i, j int) bool {
16 rangeI := s.Nodes[i].Range()
17 rangeJ := s.Nodes[j].Range()
18 return rangeI.Start.Byte < rangeJ.Start.Byte
19}
20
21func (s nativeNodeSorter) Swap(i, j int) {
22 s.Nodes[i], s.Nodes[j] = s.Nodes[j], s.Nodes[i]
23}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/node.go b/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
new file mode 100644
index 0000000..71fd00f
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/node.go
@@ -0,0 +1,236 @@
1package hclwrite
2
3import (
4 "fmt"
5
6 "github.com/google/go-cmp/cmp"
7)
8
// node represents a node in the AST.
type node struct {
	// content is the syntax object this node wraps.
	content nodeContent

	// list is the list this node currently belongs to, or nil if detached.
	list *nodes
	// before and after are the neighboring nodes within that list.
	before, after *node
}

// newNode constructs a detached node wrapping the given content.
func newNode(c nodeContent) *node {
	return &node{
		content: c,
	}
}
22
// Equal returns true if the two nodes have equal content as determined by
// cmp.Equal; list membership and neighbor links are not compared.
func (n *node) Equal(other *node) bool {
	return cmp.Equal(n.content, other.content)
}

// BuildTokens appends this node's content tokens to the given sequence,
// returning the extended sequence.
func (n *node) BuildTokens(to Tokens) Tokens {
	return n.content.BuildTokens(to)
}
30
31// Detach removes the receiver from the list it currently belongs to. If the
32// node is not currently in a list, this is a no-op.
33func (n *node) Detach() {
34 if n.list == nil {
35 return
36 }
37 if n.before != nil {
38 n.before.after = n.after
39 }
40 if n.after != nil {
41 n.after.before = n.before
42 }
43 if n.list.first == n {
44 n.list.first = n.after
45 }
46 if n.list.last == n {
47 n.list.last = n.before
48 }
49 n.list = nil
50 n.before = nil
51 n.after = nil
52}
53
54// ReplaceWith removes the receiver from the list it currently belongs to and
55// inserts a new node with the given content in its place. If the node is not
56// currently in a list, this function will panic.
57//
58// The return value is the newly-constructed node, containing the given content.
59// After this function returns, the reciever is no longer attached to a list.
60func (n *node) ReplaceWith(c nodeContent) *node {
61 if n.list == nil {
62 panic("can't replace node that is not in a list")
63 }
64
65 before := n.before
66 after := n.after
67 list := n.list
68 n.before, n.after, n.list = nil, nil, nil
69
70 nn := newNode(c)
71 nn.before = before
72 nn.after = after
73 nn.list = list
74 if before != nil {
75 before.after = nn
76 }
77 if after != nil {
78 after.before = nn
79 }
80 return nn
81}
82
// assertUnattached panics if the node currently belongs to a list.
func (n *node) assertUnattached() {
	if n.list != nil {
		panic(fmt.Sprintf("attempt to attach already-attached node %#v", n))
	}
}
88
// nodeContent is the interface type implemented by all AST content types.
type nodeContent interface {
	// walkChildNodes calls w for each of this content's direct child nodes.
	walkChildNodes(w internalWalkFunc)
	// BuildTokens appends this content's tokens to "to" and returns the result.
	BuildTokens(to Tokens) Tokens
}

// nodes is a list of nodes.
type nodes struct {
	// first and last are the endpoints of a doubly-linked list, chained
	// through each node's before/after pointers.
	first, last *node
}
99
// BuildTokens appends the tokens of every node in the list, in order, to
// the given sequence and returns the result.
func (ns *nodes) BuildTokens(to Tokens) Tokens {
	for n := ns.first; n != nil; n = n.after {
		to = n.BuildTokens(to)
	}
	return to
}

// Clear empties the list. The nodes themselves are not modified, so any
// node still referencing this list retains its stale pointers.
func (ns *nodes) Clear() {
	ns.first = nil
	ns.last = nil
}
111
112func (ns *nodes) Append(c nodeContent) *node {
113 n := &node{
114 content: c,
115 }
116 ns.AppendNode(n)
117 n.list = ns
118 return n
119}
120
121func (ns *nodes) AppendNode(n *node) {
122 if ns.last != nil {
123 n.before = ns.last
124 ns.last.after = n
125 }
126 n.list = ns
127 ns.last = n
128 if ns.first == nil {
129 ns.first = n
130 }
131}
132
133func (ns *nodes) AppendUnstructuredTokens(tokens Tokens) *node {
134 if len(tokens) == 0 {
135 return nil
136 }
137 n := newNode(tokens)
138 ns.AppendNode(n)
139 n.list = ns
140 return n
141}
142
// nodeSet is an unordered set of nodes. It is used to describe a set of nodes
// that all belong to the same list that have some role or characteristic
// in common.
type nodeSet map[*node]struct{}

// newNodeSet returns a new, empty nodeSet ready for use.
func newNodeSet() nodeSet {
	return make(nodeSet)
}
151
152func (ns nodeSet) Has(n *node) bool {
153 if ns == nil {
154 return false
155 }
156 _, exists := ns[n]
157 return exists
158}
159
// Add inserts the given node into the set. The receiver must be non-nil.
func (ns nodeSet) Add(n *node) {
	ns[n] = struct{}{}
}

// Remove deletes the given node from the set, if present.
func (ns nodeSet) Remove(n *node) {
	delete(ns, n)
}
167
// List returns the set's nodes as a slice ordered by their position within
// the node list they all belong to, or nil if the set is empty.
func (ns nodeSet) List() []*node {
	if len(ns) == 0 {
		return nil
	}

	ret := make([]*node, 0, len(ns))

	// Determine which list we are working with. We assume here that all of
	// the nodes belong to the same list, since that is part of the contract
	// for nodeSet.
	var list *nodes
	for n := range ns {
		list = n.list
		break
	}

	// We recover the order by iterating over the whole list. This is not
	// the most efficient way to do it, but our node lists should always be
	// small so not worth making things more complex.
	for n := list.first; n != nil; n = n.after {
		if ns.Has(n) {
			ret = append(ret, n)
		}
	}
	return ret
}
194
// internalWalkFunc is the callback signature used when walking child nodes.
type internalWalkFunc func(*node)

// inTree can be embedded into a content struct that has child nodes to get
// a standard implementation of the NodeContent interface and a record of
// a potential parent node.
type inTree struct {
	// parent is the node wrapping this content, or nil if not attached.
	parent *node
	// children is the list of this content's direct child nodes.
	children *nodes
}
204
// newInTree returns an inTree with an empty (but non-nil) children list.
func newInTree() inTree {
	return inTree{
		children: &nodes{},
	}
}

// assertUnattached panics if this content already has a parent node.
func (it *inTree) assertUnattached() {
	if it.parent != nil {
		panic(fmt.Sprintf("node is already attached to %T", it.parent.content))
	}
}

// walkChildNodes calls w for each direct child node, in list order.
func (it *inTree) walkChildNodes(w internalWalkFunc) {
	for n := it.children.first; n != nil; n = n.after {
		w(n)
	}
}

// BuildTokens appends the tokens of all child nodes, in order, to the
// given sequence and returns the result.
func (it *inTree) BuildTokens(to Tokens) Tokens {
	for n := it.children.first; n != nil; n = n.after {
		to = n.BuildTokens(to)
	}
	return to
}
229
// leafNode can be embedded into a content struct to give it a do-nothing
// implementation of walkChildNodes
type leafNode struct {
}

// walkChildNodes does nothing, since a leaf node has no children.
func (n *leafNode) walkChildNodes(w internalWalkFunc) {
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go b/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go
new file mode 100644
index 0000000..1876818
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/parser.go
@@ -0,0 +1,594 @@
1package hclwrite
2
3import (
4 "fmt"
5 "sort"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9 "github.com/zclconf/go-cty/cty"
10)
11
12// Our "parser" here is actually not doing any parsing of its own. Instead,
13// it leans on the native parser in hclsyntax, and then uses the source ranges
14// from the AST to partition the raw token sequence to match the raw tokens
15// up to AST nodes.
16//
17// This strategy feels somewhat counter-intuitive, since most of the work the
18// parser does is thrown away here, but this strategy is chosen because the
19// normal parsing work done by hclsyntax is considered to be the "main case",
20// while modifying and re-printing source is more of an edge case, used only
21// in ancillary tools, and so it's good to keep all the main parsing logic
22// with the main case but keep all of the extra complexity of token wrangling
23// out of the main parser, which is already rather complex just serving the
24// use-cases it already serves.
25//
26// If the parsing step produces any errors, the returned File is nil because
27// we can't reliably extract tokens from the partial AST produced by an
28// erroneous parse.
// parse runs the native hclsyntax parser over src and then correlates the
// resulting AST with the raw token stream, producing a writable *File whose
// nodes each own their slice of tokens. It returns nil if the parse step
// produced any errors.
func parse(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
	file, diags := hclsyntax.ParseConfig(src, filename, start)
	if diags.HasErrors() {
		return nil, diags
	}

	// To do our work here, we use the "native" tokens (those from hclsyntax)
	// to match against source ranges in the AST, but ultimately produce
	// slices from our sequence of "writer" tokens, which contain only
	// *relative* position information that is more appropriate for
	// transformation/writing use-cases.
	nativeTokens, diags := hclsyntax.LexConfig(src, filename, start)
	if diags.HasErrors() {
		// should never happen, since we would've caught these diags in
		// the first call above.
		return nil, diags
	}
	writerTokens := writerTokens(nativeTokens)

	from := inputTokens{
		nativeTokens: nativeTokens,
		writerTokens: writerTokens,
	}

	before, root, after := parseBody(file.Body.(*hclsyntax.Body), from)
	ret := &File{
		inTree: newInTree(),

		srcBytes: src,
		body:     root,
	}

	// Tokens outside the body's range (e.g. a trailing EOF marker) are
	// attached directly to the file, surrounding the body node.
	nodes := ret.inTree.children
	nodes.Append(before.Tokens())
	nodes.AppendNode(root)
	nodes.Append(after.Tokens())

	return ret, diags
}
68
// inputTokens is a parallel pair of token sequences: the native hclsyntax
// tokens (which carry absolute source positions) and the corresponding
// writer tokens (which carry only relative spacing). The two slices always
// have the same length and are correlated by index.
type inputTokens struct {
	nativeTokens hclsyntax.Tokens
	writerTokens Tokens
}
73
74func (it inputTokens) Partition(rng hcl.Range) (before, within, after inputTokens) {
75 start, end := partitionTokens(it.nativeTokens, rng)
76 before = it.Slice(0, start)
77 within = it.Slice(start, end)
78 after = it.Slice(end, len(it.nativeTokens))
79 return
80}
81
82func (it inputTokens) PartitionType(ty hclsyntax.TokenType) (before, within, after inputTokens) {
83 for i, t := range it.writerTokens {
84 if t.Type == ty {
85 return it.Slice(0, i), it.Slice(i, i+1), it.Slice(i+1, len(it.nativeTokens))
86 }
87 }
88 panic(fmt.Sprintf("didn't find any token of type %s", ty))
89}
90
91func (it inputTokens) PartitionTypeSingle(ty hclsyntax.TokenType) (before inputTokens, found *Token, after inputTokens) {
92 before, within, after := it.PartitionType(ty)
93 if within.Len() != 1 {
94 panic("PartitionType found more than one token")
95 }
96 return before, within.Tokens()[0], after
97}
98
// PartitionIncludingComments is like Partition except the returned "within"
// range includes any lead and line comments associated with the range.
func (it inputTokens) PartitionIncludingComments(rng hcl.Range) (before, within, after inputTokens) {
	start, end := partitionTokens(it.nativeTokens, rng)
	// Grow the partition backwards over any lead comment lines...
	start = partitionLeadCommentTokens(it.nativeTokens[:start])
	// ...and forwards over any trailing line comment and its newline.
	_, afterNewline := partitionLineEndTokens(it.nativeTokens[end:])
	end += afterNewline

	before = it.Slice(0, start)
	within = it.Slice(start, end)
	after = it.Slice(end, len(it.nativeTokens))
	return
}
113
// PartitionBlockItem is similar to PartitionIncludingComments but it returns
// the comments as separate token sequences so that they can be captured into
// AST attributes. It makes assumptions that apply only to block items, so
// should not be used for other constructs.
func (it inputTokens) PartitionBlockItem(rng hcl.Range) (before, leadComments, within, lineComments, newline, after inputTokens) {
	before, within, after = it.Partition(rng)
	before, leadComments = before.PartitionLeadComments()
	lineComments, newline, after = after.PartitionLineEndTokens()
	return
}
124
125func (it inputTokens) PartitionLeadComments() (before, within inputTokens) {
126 start := partitionLeadCommentTokens(it.nativeTokens)
127 before = it.Slice(0, start)
128 within = it.Slice(start, len(it.nativeTokens))
129 return
130}
131
132func (it inputTokens) PartitionLineEndTokens() (comments, newline, after inputTokens) {
133 afterComments, afterNewline := partitionLineEndTokens(it.nativeTokens)
134 comments = it.Slice(0, afterComments)
135 newline = it.Slice(afterComments, afterNewline)
136 after = it.Slice(afterNewline, len(it.nativeTokens))
137 return
138}
139
// Slice returns the sub-sequence of tokens in the half-open range
// [start, end), slicing both underlying token sequences in tandem.
func (it inputTokens) Slice(start, end int) inputTokens {
	// When we slice, we create a new slice with no additional capacity because
	// we expect that these slices will be mutated in order to insert
	// new code into the AST, and we want to ensure that a new underlying
	// array gets allocated in that case, rather than writing into some
	// following slice and corrupting it.
	return inputTokens{
		nativeTokens: it.nativeTokens[start:end:end],
		writerTokens: it.writerTokens[start:end:end],
	}
}
151
// Len returns the number of tokens in the sequence.
func (it inputTokens) Len() int {
	return len(it.nativeTokens)
}
155
// Tokens returns the writer-token view of the sequence.
func (it inputTokens) Tokens() Tokens {
	return it.writerTokens
}
159
160func (it inputTokens) Types() []hclsyntax.TokenType {
161 ret := make([]hclsyntax.TokenType, len(it.nativeTokens))
162 for i, tok := range it.nativeTokens {
163 ret[i] = tok.Type
164 }
165 return ret
166}
167
// parseBody locates the given body within the given input tokens and returns
// the resulting *Body object as well as the tokens that appeared before and
// after it.
func parseBody(nativeBody *hclsyntax.Body, from inputTokens) (inputTokens, *node, inputTokens) {
	before, within, after := from.PartitionIncludingComments(nativeBody.SrcRange)

	// The main AST doesn't retain the original source ordering of the
	// body items, so we need to reconstruct that ordering by inspecting
	// their source ranges.
	nativeItems := make([]hclsyntax.Node, 0, len(nativeBody.Attributes)+len(nativeBody.Blocks))
	for _, nativeAttr := range nativeBody.Attributes {
		nativeItems = append(nativeItems, nativeAttr)
	}
	for _, nativeBlock := range nativeBody.Blocks {
		nativeItems = append(nativeItems, nativeBlock)
	}
	sort.Sort(nativeNodeSorter{nativeItems})

	body := &Body{
		inTree: newInTree(),
		items:  newNodeSet(),
	}

	// Walk the items in source order, attaching any tokens that fall
	// between consecutive items to the body as unstructured tokens.
	remain := within
	for _, nativeItem := range nativeItems {
		beforeItem, item, afterItem := parseBodyItem(nativeItem, remain)

		if beforeItem.Len() > 0 {
			body.AppendUnstructuredTokens(beforeItem.Tokens())
		}
		body.appendItemNode(item)

		remain = afterItem
	}

	// Tokens after the last item (e.g. trailing whitespace) also become
	// unstructured tokens on the body.
	if remain.Len() > 0 {
		body.AppendUnstructuredTokens(remain.Tokens())
	}

	return before, newNode(body), after
}
209
// parseBodyItem parses a single attribute or block item from the given
// tokens, returning the tokens before it, the resulting item node, and the
// tokens after it.
func parseBodyItem(nativeItem hclsyntax.Node, from inputTokens) (inputTokens, *node, inputTokens) {
	before, leadComments, within, lineComments, newline, after := from.PartitionBlockItem(nativeItem.Range())

	var item *node

	switch tItem := nativeItem.(type) {
	case *hclsyntax.Attribute:
		item = parseAttribute(tItem, within, leadComments, lineComments, newline)
	case *hclsyntax.Block:
		item = parseBlock(tItem, within, leadComments, lineComments, newline)
	default:
		// should never happen if caller is behaving
		panic("unsupported native item type")
	}

	return before, item, after
}
227
// parseAttribute constructs an *Attribute node from the given native
// attribute and its tokens, attaching the lead comments, name, equals sign,
// value expression, line comments, and trailing newline in source order.
func parseAttribute(nativeAttr *hclsyntax.Attribute, from, leadComments, lineComments, newline inputTokens) *node {
	attr := &Attribute{
		inTree: newInTree(),
	}
	children := attr.inTree.children

	// Lead comments always come first, even when empty.
	{
		cn := newNode(newComments(leadComments.Tokens()))
		attr.leadComments = cn
		children.AppendNode(cn)
	}

	// The attribute name is always exactly one identifier token.
	before, nameTokens, from := from.Partition(nativeAttr.NameRange)
	{
		children.AppendUnstructuredTokens(before.Tokens())
		if nameTokens.Len() != 1 {
			// Should never happen with valid input
			panic("attribute name is not exactly one token")
		}
		token := nameTokens.Tokens()[0]
		in := newNode(newIdentifier(token))
		attr.name = in
		children.AppendNode(in)
	}

	// The equals sign has no structure of its own.
	before, equalsTokens, from := from.Partition(nativeAttr.EqualsRange)
	children.AppendUnstructuredTokens(before.Tokens())
	children.AppendUnstructuredTokens(equalsTokens.Tokens())

	// The value expression gets its own structured node.
	before, exprTokens, from := from.Partition(nativeAttr.Expr.Range())
	{
		children.AppendUnstructuredTokens(before.Tokens())
		exprNode := parseExpression(nativeAttr.Expr, exprTokens)
		attr.expr = exprNode
		children.AppendNode(exprNode)
	}

	{
		cn := newNode(newComments(lineComments.Tokens()))
		attr.lineComments = cn
		children.AppendNode(cn)
	}

	children.AppendUnstructuredTokens(newline.Tokens())

	// Collect any stragglers, though there shouldn't be any
	children.AppendUnstructuredTokens(from.Tokens())

	return newNode(attr)
}
278
// parseBlock constructs a *Block node from the given native block and its
// tokens, attaching the lead comments, type name, labels, braces, nested
// body, and trailing newline in source order.
func parseBlock(nativeBlock *hclsyntax.Block, from, leadComments, lineComments, newline inputTokens) *node {
	block := &Block{
		inTree: newInTree(),
		labels: newNodeSet(),
	}
	children := block.inTree.children

	// Lead comments always come first, even when empty.
	{
		cn := newNode(newComments(leadComments.Tokens()))
		block.leadComments = cn
		children.AppendNode(cn)
	}

	// The block type name is always exactly one identifier token.
	before, typeTokens, from := from.Partition(nativeBlock.TypeRange)
	{
		children.AppendUnstructuredTokens(before.Tokens())
		if typeTokens.Len() != 1 {
			// Should never happen with valid input
			panic("block type name is not exactly one token")
		}
		token := typeTokens.Tokens()[0]
		in := newNode(newIdentifier(token))
		block.typeName = in
		children.AppendNode(in)
	}

	// Each label becomes its own quoted node, in source order.
	for _, rng := range nativeBlock.LabelRanges {
		var labelTokens inputTokens
		before, labelTokens, from = from.Partition(rng)
		children.AppendUnstructuredTokens(before.Tokens())
		tokens := labelTokens.Tokens()
		ln := newNode(newQuoted(tokens))
		block.labels.Add(ln)
		children.AppendNode(ln)
	}

	before, oBrace, from := from.Partition(nativeBlock.OpenBraceRange)
	children.AppendUnstructuredTokens(before.Tokens())
	children.AppendUnstructuredTokens(oBrace.Tokens())

	// We go a bit out of order here: we go hunting for the closing brace
	// so that we have a delimited body, but then we'll deal with the body
	// before we actually append the closing brace and any straggling tokens
	// that appear after it.
	bodyTokens, cBrace, from := from.Partition(nativeBlock.CloseBraceRange)
	before, body, after := parseBody(nativeBlock.Body, bodyTokens)
	children.AppendUnstructuredTokens(before.Tokens())
	block.body = body
	children.AppendNode(body)
	children.AppendUnstructuredTokens(after.Tokens())

	children.AppendUnstructuredTokens(cBrace.Tokens())

	// stragglers
	children.AppendUnstructuredTokens(from.Tokens())
	if lineComments.Len() > 0 {
		// blocks don't actually have line comments, so we'll just treat
		// them as extra stragglers
		children.AppendUnstructuredTokens(lineComments.Tokens())
	}
	children.AppendUnstructuredTokens(newline.Tokens())

	return newNode(block)
}
343
// parseExpression constructs an expression node from the given native
// expression and its tokens, extracting each variable traversal into its
// own child node so that traversals can later be edited surgically.
func parseExpression(nativeExpr hclsyntax.Expression, from inputTokens) *node {
	expr := newExpression()
	children := expr.inTree.children

	nativeVars := nativeExpr.Variables()

	for _, nativeTraversal := range nativeVars {
		before, traversal, after := parseTraversal(nativeTraversal, from)
		children.AppendUnstructuredTokens(before.Tokens())
		children.AppendNode(traversal)
		expr.absTraversals.Add(traversal)
		from = after
	}
	// Attach any stragglers that don't belong to a traversal to the expression
	// itself. In an expression with no traversals at all, this is just the
	// entirety of "from".
	children.AppendUnstructuredTokens(from.Tokens())

	return newNode(expr)
}
364
// parseTraversal constructs a traversal node from the given absolute
// traversal and its tokens, splitting it into one child node per step.
func parseTraversal(nativeTraversal hcl.Traversal, from inputTokens) (before inputTokens, n *node, after inputTokens) {
	traversal := newTraversal()
	children := traversal.inTree.children
	before, from, after = from.Partition(nativeTraversal.SourceRange())

	// Note that the loop's := shadows "before" and "after": the values
	// returned to the caller are those from the whole-traversal Partition
	// above, not from the per-step partitions.
	stepAfter := from
	for _, nativeStep := range nativeTraversal {
		before, step, after := parseTraversalStep(nativeStep, stepAfter)
		children.AppendUnstructuredTokens(before.Tokens())
		children.AppendNode(step)
		traversal.steps.Add(step)
		stepAfter = after
	}
	// NOTE(review): any tokens remaining in stepAfter once the last step is
	// consumed are not appended anywhere; presumably the steps always cover
	// the whole traversal range — confirm against the scanner's output.

	return before, newNode(traversal), after
}
381
// parseTraversalStep constructs a node for a single traversal step: either
// a name step (root variable or attribute access) or an index step
// (a bracketed string or number key).
func parseTraversalStep(nativeStep hcl.Traverser, from inputTokens) (before inputTokens, n *node, after inputTokens) {
	var children *nodes
	switch tNativeStep := nativeStep.(type) {

	case hcl.TraverseRoot, hcl.TraverseAttr:
		// A name step contains exactly one identifier token, possibly
		// surrounded by punctuation such as the dot of an attribute access.
		step := newTraverseName()
		children = step.inTree.children
		before, from, after = from.Partition(nativeStep.SourceRange())
		inBefore, token, inAfter := from.PartitionTypeSingle(hclsyntax.TokenIdent)
		name := newIdentifier(token)
		children.AppendUnstructuredTokens(inBefore.Tokens())
		step.name = children.Append(name)
		children.AppendUnstructuredTokens(inAfter.Tokens())
		return before, newNode(step), after

	case hcl.TraverseIndex:
		step := newTraverseIndex()
		children = step.inTree.children
		before, from, after = from.Partition(nativeStep.SourceRange())

		// Split around the brackets so we can isolate the key tokens.
		var inBefore, oBrack, keyTokens, cBrack inputTokens
		inBefore, oBrack, from = from.PartitionType(hclsyntax.TokenOBrack)
		children.AppendUnstructuredTokens(inBefore.Tokens())
		children.AppendUnstructuredTokens(oBrack.Tokens())
		keyTokens, cBrack, from = from.PartitionType(hclsyntax.TokenCBrack)

		// NOTE(review): key types other than string and number fall through
		// without setting step.key; presumably the native parser only
		// produces those two key types here — confirm.
		keyVal := tNativeStep.Key
		switch keyVal.Type() {
		case cty.String:
			key := newQuoted(keyTokens.Tokens())
			step.key = children.Append(key)
		case cty.Number:
			valBefore, valToken, valAfter := keyTokens.PartitionTypeSingle(hclsyntax.TokenNumberLit)
			children.AppendUnstructuredTokens(valBefore.Tokens())
			key := newNumber(valToken)
			step.key = children.Append(key)
			children.AppendUnstructuredTokens(valAfter.Tokens())
		}

		children.AppendUnstructuredTokens(cBrack.Tokens())
		children.AppendUnstructuredTokens(from.Tokens())

		return before, newNode(step), after
	default:
		panic(fmt.Sprintf("unsupported traversal step type %T", nativeStep))
	}

}
430
431// writerTokens takes a sequence of tokens as produced by the main hclsyntax
432// package and transforms it into an equivalent sequence of tokens using
433// this package's own token model.
434//
435// The resulting list contains the same number of tokens and uses the same
436// indices as the input, allowing the two sets of tokens to be correlated
437// by index.
438func writerTokens(nativeTokens hclsyntax.Tokens) Tokens {
439 // Ultimately we want a slice of token _pointers_, but since we can
440 // predict how much memory we're going to devote to tokens we'll allocate
441 // it all as a single flat buffer and thus give the GC less work to do.
442 tokBuf := make([]Token, len(nativeTokens))
443 var lastByteOffset int
444 for i, mainToken := range nativeTokens {
445 // Create a copy of the bytes so that we can mutate without
446 // corrupting the original token stream.
447 bytes := make([]byte, len(mainToken.Bytes))
448 copy(bytes, mainToken.Bytes)
449
450 tokBuf[i] = Token{
451 Type: mainToken.Type,
452 Bytes: bytes,
453
454 // We assume here that spaces are always ASCII spaces, since
455 // that's what the scanner also assumes, and thus the number
456 // of bytes skipped is also the number of space characters.
457 SpacesBefore: mainToken.Range.Start.Byte - lastByteOffset,
458 }
459
460 lastByteOffset = mainToken.Range.End.Byte
461 }
462
463 // Now make a slice of pointers into the previous slice.
464 ret := make(Tokens, len(tokBuf))
465 for i := range ret {
466 ret[i] = &tokBuf[i]
467 }
468
469 return ret
470}
471
// partitionTokens takes a sequence of tokens and a hcl.Range and returns
// two indices within the token sequence that correspond with the range
// boundaries, such that the slice operator could be used to produce
// three token sequences for before, within, and after respectively:
//
//     start, end := partitionTokens(toks, rng)
//     before := toks[:start]
//     within := toks[start:end]
//     after := toks[end:]
//
// This works best when the range is aligned with token boundaries (e.g.
// because it was produced in terms of the scanner's result) but if that isn't
// true then it will make a best effort that may produce strange results at
// the boundaries.
//
// Native hclsyntax tokens are used here, because they contain the necessary
// absolute position information. However, since writerTokens produces a
// correlatable sequence of writer tokens, the resulting indices can be
// used also to index into its result, allowing the partitioning of writer
// tokens to be driven by the partitioning of native tokens.
//
// The tokens are assumed to be in source order and non-overlapping, which
// will be true if the token sequence from the scanner is used directly.
func partitionTokens(toks hclsyntax.Tokens, rng hcl.Range) (start, end int) {
	// We use a linear search here because we assume that in most cases our
	// target range is close to the beginning of the sequence, and the
	// sequences are generally small for most reasonable files anyway.
	for i := 0; ; i++ {
		if i >= len(toks) {
			// No tokens for the given range at all!
			return len(toks), len(toks)
		}

		if toks[i].Range.Start.Byte >= rng.Start.Byte {
			start = i
			break
		}
	}

	for i := start; ; i++ {
		if i >= len(toks) {
			// The range "hangs off" the end of the token sequence
			return start, len(toks)
		}

		if toks[i].Range.Start.Byte >= rng.End.Byte {
			end = i // end marker is exclusive
			break
		}
	}

	return start, end
}
525
526// partitionLeadCommentTokens takes a sequence of tokens that is assumed
527// to immediately precede a construct that can have lead comment tokens,
528// and returns the index into that sequence where the lead comments begin.
529//
530// Lead comments are defined as whole lines containing only comment tokens
531// with no blank lines between. If no such lines are found, the returned
532// index will be len(toks).
533func partitionLeadCommentTokens(toks hclsyntax.Tokens) int {
534 // single-line comments (which is what we're interested in here)
535 // consume their trailing newline, so we can just walk backwards
536 // until we stop seeing comment tokens.
537 for i := len(toks) - 1; i >= 0; i-- {
538 if toks[i].Type != hclsyntax.TokenComment {
539 return i + 1
540 }
541 }
542 return 0
543}
544
545// partitionLineEndTokens takes a sequence of tokens that is assumed
546// to immediately follow a construct that can have a line comment, and
547// returns first the index where any line comments end and then second
548// the index immediately after the trailing newline.
549//
550// Line comments are defined as comments that appear immediately after
551// a construct on the same line where its significant tokens ended.
552//
553// Since single-line comment tokens (# and //) include the newline that
554// terminates them, in the presence of these the two returned indices
555// will be the same since the comment itself serves as the line end.
556func partitionLineEndTokens(toks hclsyntax.Tokens) (afterComment, afterNewline int) {
557 for i := 0; i < len(toks); i++ {
558 tok := toks[i]
559 if tok.Type != hclsyntax.TokenComment {
560 switch tok.Type {
561 case hclsyntax.TokenNewline:
562 return i, i + 1
563 case hclsyntax.TokenEOF:
564 // Although this is valid, we mustn't include the EOF
565 // itself as our "newline" or else strange things will
566 // happen when we try to append new items.
567 return i, i
568 default:
569 // If we have well-formed input here then nothing else should be
570 // possible. This path should never happen, because we only try
571 // to extract tokens from the sequence if the parser succeeded,
572 // and it should catch this problem itself.
573 panic("malformed line trailers: expected only comments and newlines")
574 }
575 }
576
577 if len(tok.Bytes) > 0 && tok.Bytes[len(tok.Bytes)-1] == '\n' {
578 // Newline at the end of a single-line comment serves both as
579 // the end of comments *and* the end of the line.
580 return i + 1, i + 1
581 }
582 }
583 return len(toks), len(toks)
584}
585
// lexConfig uses the hclsyntax scanner to get a token stream and then
// rewrites it into this package's token model.
//
// Any errors produced during scanning are ignored, so the results of this
// function should be used with care.
func lexConfig(src []byte) Tokens {
	mainTokens, _ := hclsyntax.LexConfig(src, "", hcl.Pos{Byte: 0, Line: 1, Column: 1})
	return writerTokens(mainTokens)
}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/public.go b/vendor/github.com/hashicorp/hcl2/hclwrite/public.go
new file mode 100644
index 0000000..4d5ce2a
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/public.go
@@ -0,0 +1,44 @@
1package hclwrite
2
3import (
4 "bytes"
5
6 "github.com/hashicorp/hcl2/hcl"
7)
8
// NewFile creates a new file object that is empty and ready to have constructs
// added to it.
func NewFile() *File {
	body := &Body{
		inTree: newInTree(),
		items:  newNodeSet(),
	}
	file := &File{
		inTree: newInTree(),
	}
	file.body = file.inTree.children.Append(body)
	return file
}
22
// ParseConfig interprets the given source bytes into a *hclwrite.File. The
// resulting AST can be used to perform surgical edits on the source code
// before turning it back into bytes again.
func ParseConfig(src []byte, filename string, start hcl.Pos) (*File, hcl.Diagnostics) {
	return parse(src, filename, start)
}
29
30// Format takes source code and performs simple whitespace changes to transform
31// it to a canonical layout style.
32//
33// Format skips constructing an AST and works directly with tokens, so it
34// is less expensive than formatting via the AST for situations where no other
35// changes will be made. It also ignores syntax errors and can thus be applied
36// to partial source code, although the result in that case may not be
37// desirable.
38func Format(src []byte) []byte {
39 tokens := lexConfig(src)
40 format(tokens)
41 buf := &bytes.Buffer{}
42 tokens.WriteTo(buf)
43 return buf.Bytes()
44}
diff --git a/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go b/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go
new file mode 100644
index 0000000..d87f818
--- /dev/null
+++ b/vendor/github.com/hashicorp/hcl2/hclwrite/tokens.go
@@ -0,0 +1,122 @@
1package hclwrite
2
3import (
4 "bytes"
5 "io"
6
7 "github.com/apparentlymart/go-textseg/textseg"
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/hcl2/hcl/hclsyntax"
10)
11
// Token is a single sequence of bytes annotated with a type. It is similar
// in purpose to hclsyntax.Token, but discards the source position information
// since that is not useful in code generation.
type Token struct {
	Type  hclsyntax.TokenType
	Bytes []byte

	// We record the number of spaces before each token so that we can
	// reproduce the exact layout of the original file when we're making
	// surgical changes in-place. When _new_ code is created it will always
	// be in the canonical style, but we preserve layout of existing code.
	SpacesBefore int
}
25
// asHCLSyntax returns the receiver expressed as an incomplete hclsyntax.Token.
// A complete token is not possible since we don't have source location
// information here, and so this method is unexported so we can be sure it will
// only be used for internal purposes where we know the range isn't important.
//
// This is primarily intended to allow us to re-use certain functionality from
// hclsyntax rather than re-implementing it against our own token type here.
func (t *Token) asHCLSyntax() hclsyntax.Token {
	return hclsyntax.Token{
		Type:  t.Type,
		Bytes: t.Bytes,
		Range: hcl.Range{
			// The "<invalid>" filename flags this range as synthetic.
			Filename: "<invalid>",
		},
	}
}
42
// Tokens is a flat list of tokens.
type Tokens []*Token
45
46func (ts Tokens) Bytes() []byte {
47 buf := &bytes.Buffer{}
48 ts.WriteTo(buf)
49 return buf.Bytes()
50}
51
// testValue returns the serialized tokens as a string, for use in tests.
func (ts Tokens) testValue() string {
	return string(ts.Bytes())
}
55
56// Columns returns the number of columns (grapheme clusters) the token sequence
57// occupies. The result is not meaningful if there are newline or single-line
58// comment tokens in the sequence.
59func (ts Tokens) Columns() int {
60 ret := 0
61 for _, token := range ts {
62 ret += token.SpacesBefore // spaces are always worth one column each
63 ct, _ := textseg.TokenCount(token.Bytes, textseg.ScanGraphemeClusters)
64 ret += ct
65 }
66 return ret
67}
68
// WriteTo takes an io.Writer and writes the bytes for each token to it,
// along with the spacing that separates each token. In other words, this
// allows serializing the tokens to a file or other such byte stream.
func (ts Tokens) WriteTo(wr io.Writer) (int64, error) {
	// We know we're going to be writing a lot of small chunks of repeated
	// space characters, so we'll prepare a buffer of these that we can
	// easily pass to wr.Write without any further allocation.
	spaces := make([]byte, 40)
	for i := range spaces {
		spaces[i] = ' '
	}

	var n int64
	var err error
	for _, token := range ts {
		// An error from the previous iteration's token-bytes write stops
		// the loop here, after its byte count has been accumulated into n.
		if err != nil {
			return n, err
		}

		// Emit the leading spaces in chunks of at most len(spaces) bytes.
		for spacesBefore := token.SpacesBefore; spacesBefore > 0; spacesBefore -= len(spaces) {
			thisChunk := spacesBefore
			if thisChunk > len(spaces) {
				thisChunk = len(spaces)
			}
			var thisN int
			thisN, err = wr.Write(spaces[:thisChunk])
			n += int64(thisN)
			if err != nil {
				return n, err
			}
		}

		var thisN int
		thisN, err = wr.Write(token.Bytes)
		n += int64(thisN)
	}

	return n, err
}
108
// walkChildNodes is a no-op for Tokens, satisfying the tree-walking
// interface for unstructured token sequences.
func (ts Tokens) walkChildNodes(w internalWalkFunc) {
	// Unstructured tokens have no child nodes
}
112
// BuildTokens appends the receiver's tokens to the given sequence and
// returns the result.
func (ts Tokens) BuildTokens(to Tokens) Tokens {
	return append(to, ts...)
}
116
// newIdentToken returns a new identifier token with the given name as its
// bytes and no leading spaces.
func newIdentToken(name string) *Token {
	return &Token{
		Type:  hclsyntax.TokenIdent,
		Bytes: []byte(name),
	}
}
diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go
index f2024d0..184e029 100644
--- a/vendor/github.com/hashicorp/hil/convert.go
+++ b/vendor/github.com/hashicorp/hil/convert.go
@@ -47,8 +47,23 @@ func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error {
47} 47}
48 48
49func InterfaceToVariable(input interface{}) (ast.Variable, error) { 49func InterfaceToVariable(input interface{}) (ast.Variable, error) {
50 if inputVariable, ok := input.(ast.Variable); ok { 50 if iv, ok := input.(ast.Variable); ok {
51 return inputVariable, nil 51 return iv, nil
52 }
53
54 // This is just to maintain backward compatibility
55 // after https://github.com/mitchellh/mapstructure/pull/98
56 if v, ok := input.([]ast.Variable); ok {
57 return ast.Variable{
58 Type: ast.TypeList,
59 Value: v,
60 }, nil
61 }
62 if v, ok := input.(map[string]ast.Variable); ok {
63 return ast.Variable{
64 Type: ast.TypeMap,
65 Value: v,
66 }, nil
52 } 67 }
53 68
54 var stringVal string 69 var stringVal string
diff --git a/vendor/github.com/hashicorp/hil/go.mod b/vendor/github.com/hashicorp/hil/go.mod
new file mode 100644
index 0000000..45719a6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/go.mod
@@ -0,0 +1,6 @@
1module github.com/hashicorp/hil
2
3require (
4 github.com/mitchellh/mapstructure v1.1.2
5 github.com/mitchellh/reflectwalk v1.0.0
6)
diff --git a/vendor/github.com/hashicorp/hil/go.sum b/vendor/github.com/hashicorp/hil/go.sum
new file mode 100644
index 0000000..83639b6
--- /dev/null
+++ b/vendor/github.com/hashicorp/hil/go.sum
@@ -0,0 +1,4 @@
1github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
2github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
3github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
4github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
diff --git a/vendor/github.com/hashicorp/logutils/go.mod b/vendor/github.com/hashicorp/logutils/go.mod
new file mode 100644
index 0000000..ba38a45
--- /dev/null
+++ b/vendor/github.com/hashicorp/logutils/go.mod
@@ -0,0 +1 @@
module github.com/hashicorp/logutils
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE
new file mode 100644
index 0000000..82b4de9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/LICENSE
@@ -0,0 +1,353 @@
1Mozilla Public License, version 2.0
2
31. Definitions
4
51.1. “Contributor”
6
7 means each individual or legal entity that creates, contributes to the
8 creation of, or owns Covered Software.
9
101.2. “Contributor Version”
11
12 means the combination of the Contributions of others (if any) used by a
13 Contributor and that particular Contributor’s Contribution.
14
151.3. “Contribution”
16
17 means Covered Software of a particular Contributor.
18
191.4. “Covered Software”
20
21 means Source Code Form to which the initial Contributor has attached the
22 notice in Exhibit A, the Executable Form of such Source Code Form, and
23 Modifications of such Source Code Form, in each case including portions
24 thereof.
25
261.5. “Incompatible With Secondary Licenses”
27 means
28
29 a. that the initial Contributor has attached the notice described in
30 Exhibit B to the Covered Software; or
31
32 b. that the Covered Software was made available under the terms of version
33 1.1 or earlier of the License, but not also under the terms of a
34 Secondary License.
35
361.6. “Executable Form”
37
38 means any form of the work other than Source Code Form.
39
401.7. “Larger Work”
41
42 means a work that combines Covered Software with other material, in a separate
43 file or files, that is not Covered Software.
44
451.8. “License”
46
47 means this document.
48
491.9. “Licensable”
50
51 means having the right to grant, to the maximum extent possible, whether at the
52 time of the initial grant or subsequently, any and all of the rights conveyed by
53 this License.
54
551.10. “Modifications”
56
57 means any of the following:
58
59 a. any file in Source Code Form that results from an addition to, deletion
60 from, or modification of the contents of Covered Software; or
61
62 b. any new file in Source Code Form that contains any Covered Software.
63
641.11. “Patent Claims” of a Contributor
65
66 means any patent claim(s), including without limitation, method, process,
67 and apparatus claims, in any patent Licensable by such Contributor that
68 would be infringed, but for the grant of the License, by the making,
69 using, selling, offering for sale, having made, import, or transfer of
70 either its Contributions or its Contributor Version.
71
721.12. “Secondary License”
73
74 means either the GNU General Public License, Version 2.0, the GNU Lesser
75 General Public License, Version 2.1, the GNU Affero General Public
76 License, Version 3.0, or any later versions of those licenses.
77
781.13. “Source Code Form”
79
80 means the form of the work preferred for making modifications.
81
821.14. “You” (or “Your”)
83
84 means an individual or a legal entity exercising rights under this
85 License. For legal entities, “You” includes any entity that controls, is
86 controlled by, or is under common control with You. For purposes of this
87 definition, “control” means (a) the power, direct or indirect, to cause
88 the direction or management of such entity, whether by contract or
89 otherwise, or (b) ownership of more than fifty percent (50%) of the
90 outstanding shares or beneficial ownership of such entity.
91
92
932. License Grants and Conditions
94
952.1. Grants
96
97 Each Contributor hereby grants You a world-wide, royalty-free,
98 non-exclusive license:
99
100 a. under intellectual property rights (other than patent or trademark)
101 Licensable by such Contributor to use, reproduce, make available,
102 modify, display, perform, distribute, and otherwise exploit its
103 Contributions, either on an unmodified basis, with Modifications, or as
104 part of a Larger Work; and
105
106 b. under Patent Claims of such Contributor to make, use, sell, offer for
107 sale, have made, import, and otherwise transfer either its Contributions
108 or its Contributor Version.
109
1102.2. Effective Date
111
112 The licenses granted in Section 2.1 with respect to any Contribution become
113 effective for each Contribution on the date the Contributor first distributes
114 such Contribution.
115
1162.3. Limitations on Grant Scope
117
118 The licenses granted in this Section 2 are the only rights granted under this
119 License. No additional rights or licenses will be implied from the distribution
120 or licensing of Covered Software under this License. Notwithstanding Section
121 2.1(b) above, no patent license is granted by a Contributor:
122
123 a. for any code that a Contributor has removed from Covered Software; or
124
125 b. for infringements caused by: (i) Your and any other third party’s
126 modifications of Covered Software, or (ii) the combination of its
127 Contributions with other software (except as part of its Contributor
128 Version); or
129
130 c. under Patent Claims infringed by Covered Software in the absence of its
131 Contributions.
132
133 This License does not grant any rights in the trademarks, service marks, or
134 logos of any Contributor (except as may be necessary to comply with the
135 notice requirements in Section 3.4).
136
1372.4. Subsequent Licenses
138
139 No Contributor makes additional grants as a result of Your choice to
140 distribute the Covered Software under a subsequent version of this License
141 (see Section 10.2) or under the terms of a Secondary License (if permitted
142 under the terms of Section 3.3).
143
1442.5. Representation
145
146 Each Contributor represents that the Contributor believes its Contributions
147 are its original creation(s) or it has sufficient rights to grant the
148 rights to its Contributions conveyed by this License.
149
1502.6. Fair Use
151
152 This License is not intended to limit any rights You have under applicable
153 copyright doctrines of fair use, fair dealing, or other equivalents.
154
1552.7. Conditions
156
157 Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
158 Section 2.1.
159
160
1613. Responsibilities
162
1633.1. Distribution of Source Form
164
165 All distribution of Covered Software in Source Code Form, including any
166 Modifications that You create or to which You contribute, must be under the
167 terms of this License. You must inform recipients that the Source Code Form
168 of the Covered Software is governed by the terms of this License, and how
169 they can obtain a copy of this License. You may not attempt to alter or
170 restrict the recipients’ rights in the Source Code Form.
171
1723.2. Distribution of Executable Form
173
174 If You distribute Covered Software in Executable Form then:
175
176 a. such Covered Software must also be made available in Source Code Form,
177 as described in Section 3.1, and You must inform recipients of the
178 Executable Form how they can obtain a copy of such Source Code Form by
179 reasonable means in a timely manner, at a charge no more than the cost
180 of distribution to the recipient; and
181
182 b. You may distribute such Executable Form under the terms of this License,
183 or sublicense it under different terms, provided that the license for
184 the Executable Form does not attempt to limit or alter the recipients’
185 rights in the Source Code Form under this License.
186
1873.3. Distribution of a Larger Work
188
189 You may create and distribute a Larger Work under terms of Your choice,
190 provided that You also comply with the requirements of this License for the
191 Covered Software. If the Larger Work is a combination of Covered Software
192 with a work governed by one or more Secondary Licenses, and the Covered
193 Software is not Incompatible With Secondary Licenses, this License permits
194 You to additionally distribute such Covered Software under the terms of
195 such Secondary License(s), so that the recipient of the Larger Work may, at
196 their option, further distribute the Covered Software under the terms of
197 either this License or such Secondary License(s).
198
1993.4. Notices
200
201 You may not remove or alter the substance of any license notices (including
202 copyright notices, patent notices, disclaimers of warranty, or limitations
203 of liability) contained within the Source Code Form of the Covered
204 Software, except that You may alter any license notices to the extent
205 required to remedy known factual inaccuracies.
206
2073.5. Application of Additional Terms
208
209 You may choose to offer, and to charge a fee for, warranty, support,
210 indemnity or liability obligations to one or more recipients of Covered
211 Software. However, You may do so only on Your own behalf, and not on behalf
212 of any Contributor. You must make it absolutely clear that any such
213 warranty, support, indemnity, or liability obligation is offered by You
214 alone, and You hereby agree to indemnify every Contributor for any
215 liability incurred by such Contributor as a result of warranty, support,
216 indemnity or liability terms You offer. You may include additional
217 disclaimers of warranty and limitations of liability specific to any
218 jurisdiction.
219
2204. Inability to Comply Due to Statute or Regulation
221
222 If it is impossible for You to comply with any of the terms of this License
223 with respect to some or all of the Covered Software due to statute, judicial
224 order, or regulation then You must: (a) comply with the terms of this License
225 to the maximum extent possible; and (b) describe the limitations and the code
226 they affect. Such description must be placed in a text file included with all
227 distributions of the Covered Software under this License. Except to the
228 extent prohibited by statute or regulation, such description must be
229 sufficiently detailed for a recipient of ordinary skill to be able to
230 understand it.
231
2325. Termination
233
2345.1. The rights granted under this License will terminate automatically if You
235 fail to comply with any of its terms. However, if You become compliant,
236 then the rights granted under this License from a particular Contributor
237 are reinstated (a) provisionally, unless and until such Contributor
238 explicitly and finally terminates Your grants, and (b) on an ongoing basis,
239 if such Contributor fails to notify You of the non-compliance by some
240 reasonable means prior to 60 days after You have come back into compliance.
241 Moreover, Your grants from a particular Contributor are reinstated on an
242 ongoing basis if such Contributor notifies You of the non-compliance by
243 some reasonable means, this is the first time You have received notice of
244 non-compliance with this License from such Contributor, and You become
245 compliant prior to 30 days after Your receipt of the notice.
246
2475.2. If You initiate litigation against any entity by asserting a patent
248 infringement claim (excluding declaratory judgment actions, counter-claims,
249 and cross-claims) alleging that a Contributor Version directly or
250 indirectly infringes any patent, then the rights granted to You by any and
251 all Contributors for the Covered Software under Section 2.1 of this License
252 shall terminate.
253
2545.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
255 license agreements (excluding distributors and resellers) which have been
256 validly granted by You or Your distributors under this License prior to
257 termination shall survive termination.
258
2596. Disclaimer of Warranty
260
261 Covered Software is provided under this License on an “as is” basis, without
262 warranty of any kind, either expressed, implied, or statutory, including,
263 without limitation, warranties that the Covered Software is free of defects,
264 merchantable, fit for a particular purpose or non-infringing. The entire
265 risk as to the quality and performance of the Covered Software is with You.
266 Should any Covered Software prove defective in any respect, You (not any
267 Contributor) assume the cost of any necessary servicing, repair, or
268 correction. This disclaimer of warranty constitutes an essential part of this
269 License. No use of any Covered Software is authorized under this License
270 except under this disclaimer.
271
2727. Limitation of Liability
273
274 Under no circumstances and under no legal theory, whether tort (including
275 negligence), contract, or otherwise, shall any Contributor, or anyone who
276 distributes Covered Software as permitted above, be liable to You for any
277 direct, indirect, special, incidental, or consequential damages of any
278 character including, without limitation, damages for lost profits, loss of
279 goodwill, work stoppage, computer failure or malfunction, or any and all
280 other commercial damages or losses, even if such party shall have been
281 informed of the possibility of such damages. This limitation of liability
282 shall not apply to liability for death or personal injury resulting from such
283 party’s negligence to the extent applicable law prohibits such limitation.
284 Some jurisdictions do not allow the exclusion or limitation of incidental or
285 consequential damages, so this exclusion and limitation may not apply to You.
286
2878. Litigation
288
289 Any litigation relating to this License may be brought only in the courts of
290 a jurisdiction where the defendant maintains its principal place of business
291 and such litigation shall be governed by laws of that jurisdiction, without
292 reference to its conflict-of-law provisions. Nothing in this Section shall
293 prevent a party’s ability to bring cross-claims or counter-claims.
294
2959. Miscellaneous
296
297 This License represents the complete agreement concerning the subject matter
298 hereof. If any provision of this License is held to be unenforceable, such
299 provision shall be reformed only to the extent necessary to make it
300 enforceable. Any law or regulation which provides that the language of a
301 contract shall be construed against the drafter shall not be used to construe
302 this License against a Contributor.
303
304
30510. Versions of the License
306
30710.1. New Versions
308
309 Mozilla Foundation is the license steward. Except as provided in Section
310 10.3, no one other than the license steward has the right to modify or
311 publish new versions of this License. Each version will be given a
312 distinguishing version number.
313
31410.2. Effect of New Versions
315
316 You may distribute the Covered Software under the terms of the version of
317 the License under which You originally received the Covered Software, or
318 under the terms of any subsequent version published by the license
319 steward.
320
32110.3. Modified Versions
322
323 If you create software not governed by this License, and you want to
324 create a new license for such software, you may create and use a modified
325 version of this License if you rename the license and remove any
326 references to the name of the license steward (except to note that such
327 modified license differs from this License).
328
32910.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
330 If You choose to distribute Source Code Form that is Incompatible With
331 Secondary Licenses under the terms of this version of the License, the
332 notice described in Exhibit B of this License must be attached.
333
334Exhibit A - Source Code Form License Notice
335
336 This Source Code Form is subject to the
337 terms of the Mozilla Public License, v.
338 2.0. If a copy of the MPL was not
339 distributed with this file, You can
340 obtain one at
341 http://mozilla.org/MPL/2.0/.
342
343If it is not possible or desirable to put the notice in a particular file, then
344You may include the notice in a location (such as a LICENSE file in a relevant
345directory) where a recipient would be likely to look for such a notice.
346
347You may add additional accurate notices of copyright ownership.
348
349Exhibit B - “Incompatible With Secondary Licenses” Notice
350
351 This Source Code Form is “Incompatible
352 With Secondary Licenses”, as defined by
353 the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go
new file mode 100644
index 0000000..8d04ad4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/diagnostic.go
@@ -0,0 +1,138 @@
1package tfconfig
2
3import (
4 "fmt"
5
6 legacyhclparser "github.com/hashicorp/hcl/hcl/parser"
7 "github.com/hashicorp/hcl2/hcl"
8)
9
10// Diagnostic describes a problem (error or warning) encountered during
11// configuration loading.
12type Diagnostic struct {
13 Severity DiagSeverity `json:"severity"`
14 Summary string `json:"summary"`
15 Detail string `json:"detail,omitempty"`
16
17 // Pos is not populated for all diagnostics, but when populated should
18 // indicate a particular line that the described problem relates to.
19 Pos *SourcePos `json:"pos,omitempty"`
20}
21
22// Diagnostics represents a sequence of diagnostics. This is the type that
23// should be returned from a function that might generate diagnostics.
24type Diagnostics []Diagnostic
25
26// HasErrors returns true if there is at least one Diagnostic of severity
 27// DiagError in the receiver.
28//
29// If a function returns a Diagnostics without errors then the result can
30// be assumed to be complete within the "best effort" constraints of this
31// library. If errors are present then the caller may wish to employ more
32// caution in relying on the result.
33func (diags Diagnostics) HasErrors() bool {
34 for _, diag := range diags {
35 if diag.Severity == DiagError {
36 return true
37 }
38 }
39 return false
40}
41
42func (diags Diagnostics) Error() string {
43 switch len(diags) {
44 case 0:
45 return "no problems"
46 case 1:
47 return fmt.Sprintf("%s: %s", diags[0].Summary, diags[0].Detail)
48 default:
49 return fmt.Sprintf("%s: %s (and %d other messages)", diags[0].Summary, diags[0].Detail, len(diags)-1)
50 }
51}
52
53// Err returns an error representing the receiver if the receiver HasErrors, or
54// nil otherwise.
55//
56// The returned error can be type-asserted back to a Diagnostics if needed.
57func (diags Diagnostics) Err() error {
58 if diags.HasErrors() {
59 return diags
60 }
61 return nil
62}
63
64// DiagSeverity describes the severity of a Diagnostic.
65type DiagSeverity rune
66
67// DiagError indicates a problem that prevented proper processing of the
 68// configuration. In the presence of DiagError diagnostics the result is
69// likely to be incomplete.
70const DiagError DiagSeverity = 'E'
71
72// DiagWarning indicates a problem that the user may wish to consider but
73// that did not prevent proper processing of the configuration.
74const DiagWarning DiagSeverity = 'W'
75
76// MarshalJSON is an implementation of encoding/json.Marshaler
77func (s DiagSeverity) MarshalJSON() ([]byte, error) {
78 switch s {
79 case DiagError:
80 return []byte(`"error"`), nil
81 case DiagWarning:
82 return []byte(`"warning"`), nil
83 default:
84 return []byte(`"invalid"`), nil
85 }
86}
87
88func diagnosticsHCL(diags hcl.Diagnostics) Diagnostics {
89 if len(diags) == 0 {
90 return nil
91 }
92 ret := make(Diagnostics, len(diags))
93 for i, diag := range diags {
94 ret[i] = Diagnostic{
95 Summary: diag.Summary,
96 Detail: diag.Detail,
97 }
98 switch diag.Severity {
99 case hcl.DiagError:
100 ret[i].Severity = DiagError
101 case hcl.DiagWarning:
102 ret[i].Severity = DiagWarning
103 }
104 if diag.Subject != nil {
105 pos := sourcePosHCL(*diag.Subject)
106 ret[i].Pos = &pos
107 }
108 }
109 return ret
110}
111
112func diagnosticsError(err error) Diagnostics {
113 if err == nil {
114 return nil
115 }
116
117 if posErr, ok := err.(*legacyhclparser.PosError); ok {
118 pos := sourcePosLegacyHCL(posErr.Pos, "")
119 return Diagnostics{
120 Diagnostic{
121 Severity: DiagError,
122 Summary: posErr.Err.Error(),
123 Pos: &pos,
124 },
125 }
126 }
127
128 return Diagnostics{
129 Diagnostic{
130 Severity: DiagError,
131 Summary: err.Error(),
132 },
133 }
134}
135
136func diagnosticsErrorf(format string, args ...interface{}) Diagnostics {
137 return diagnosticsError(fmt.Errorf(format, args...))
138}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
new file mode 100644
index 0000000..1604a6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/doc.go
@@ -0,0 +1,21 @@
1// Package tfconfig is a helper library that does careful, shallow parsing of
2// Terraform modules to provide access to high-level metadata while
3// remaining broadly compatible with configurations targeting various
4// different Terraform versions.
5//
 6// This package focuses on describing top-level objects only, and in particular
7// does not attempt any sort of processing that would require access to plugins.
8// Currently it allows callers to extract high-level information about
9// variables, outputs, resource blocks, provider dependencies, and Terraform
10// Core dependencies.
11//
12// This package only works at the level of single modules. A full configuration
13// is a tree of potentially several modules, some of which may be references
14// to remote packages. There are some basic helpers for traversing calls to
15// modules at relative local paths, however.
16//
17// This package employs a "best effort" parsing strategy, producing as complete
18// a result as possible even though the input may not be entirely valid. The
19// intended use-case is high-level analysis and indexing of externally-facing
20// module characteristics, as opposed to validating or even applying the module.
21package tfconfig
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
new file mode 100644
index 0000000..2d13fe1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load.go
@@ -0,0 +1,130 @@
1package tfconfig
2
3import (
4 "fmt"
5 "io/ioutil"
6 "path/filepath"
7 "strings"
8
9 "github.com/hashicorp/hcl2/hcl"
10)
11
12// LoadModule reads the directory at the given path and attempts to interpret
13// it as a Terraform module.
14func LoadModule(dir string) (*Module, Diagnostics) {
15
16 // For broad compatibility here we actually have two separate loader
17 // codepaths. The main one uses the new HCL parser and API and is intended
18 // for configurations from Terraform 0.12 onwards (though will work for
19 // many older configurations too), but we'll also fall back on one that
20 // uses the _old_ HCL implementation so we can deal with some edge-cases
21 // that are not valid in new HCL.
22
23 module, diags := loadModule(dir)
24 if diags.HasErrors() {
25 // Try using the legacy HCL parser and see if we fare better.
26 legacyModule, legacyDiags := loadModuleLegacyHCL(dir)
27 if !legacyDiags.HasErrors() {
28 legacyModule.init(legacyDiags)
29 return legacyModule, legacyDiags
30 }
31 }
32
33 module.init(diags)
34 return module, diags
35}
36
37// IsModuleDir checks if the given path contains terraform configuration files.
38// This allows the caller to decide how to handle directories that do not have tf files.
39func IsModuleDir(dir string) bool {
40 primaryPaths, _ := dirFiles(dir)
41 if len(primaryPaths) == 0 {
42 return false
43 }
44 return true
45}
46
47func (m *Module) init(diags Diagnostics) {
48 // Fill in any additional provider requirements that are implied by
49 // resource configurations, to avoid the caller from needing to apply
50 // this logic itself. Implied requirements don't have version constraints,
51 // but we'll make sure the requirement value is still non-nil in this
52 // case so callers can easily recognize it.
53 for _, r := range m.ManagedResources {
54 if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
55 m.RequiredProviders[r.Provider.Name] = []string{}
56 }
57 }
58 for _, r := range m.DataResources {
59 if _, exists := m.RequiredProviders[r.Provider.Name]; !exists {
60 m.RequiredProviders[r.Provider.Name] = []string{}
61 }
62 }
63
64 // We redundantly also reference the diagnostics from inside the module
 65// object, primarily so that we can easily include it in JSON-serialized
66 // versions of the module object.
67 m.Diagnostics = diags
68}
69
70func dirFiles(dir string) (primary []string, diags hcl.Diagnostics) {
71 infos, err := ioutil.ReadDir(dir)
72 if err != nil {
73 diags = append(diags, &hcl.Diagnostic{
74 Severity: hcl.DiagError,
75 Summary: "Failed to read module directory",
76 Detail: fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir),
77 })
78 return
79 }
80
81 var override []string
82 for _, info := range infos {
83 if info.IsDir() {
84 // We only care about files
85 continue
86 }
87
88 name := info.Name()
89 ext := fileExt(name)
90 if ext == "" || isIgnoredFile(name) {
91 continue
92 }
93
94 baseName := name[:len(name)-len(ext)] // strip extension
95 isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override")
96
97 fullPath := filepath.Join(dir, name)
98 if isOverride {
99 override = append(override, fullPath)
100 } else {
101 primary = append(primary, fullPath)
102 }
103 }
104
105 // We are assuming that any _override files will be logically named,
106 // and processing the files in alphabetical order. Primaries first, then overrides.
107 primary = append(primary, override...)
108
109 return
110}
111
112// fileExt returns the Terraform configuration extension of the given
113// path, or a blank string if it is not a recognized extension.
114func fileExt(path string) string {
115 if strings.HasSuffix(path, ".tf") {
116 return ".tf"
117 } else if strings.HasSuffix(path, ".tf.json") {
118 return ".tf.json"
119 } else {
120 return ""
121 }
122}
123
124// isIgnoredFile returns true if the given filename (which must not have a
125// directory path ahead of it) should be ignored as e.g. an editor swap file.
126func isIgnoredFile(name string) bool {
127 return strings.HasPrefix(name, ".") || // Unix-like hidden files
128 strings.HasSuffix(name, "~") || // vim
129 strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
130}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go
new file mode 100644
index 0000000..72b5d4a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_hcl.go
@@ -0,0 +1,322 @@
1package tfconfig
2
3import (
4 "encoding/json"
5 "fmt"
6 "strings"
7
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9
10 "github.com/hashicorp/hcl2/gohcl"
11 "github.com/hashicorp/hcl2/hcl"
12 "github.com/hashicorp/hcl2/hclparse"
13 ctyjson "github.com/zclconf/go-cty/cty/json"
14)
15
16func loadModule(dir string) (*Module, Diagnostics) {
17 mod := newModule(dir)
18 primaryPaths, diags := dirFiles(dir)
19
20 parser := hclparse.NewParser()
21
22 for _, filename := range primaryPaths {
23 var file *hcl.File
24 var fileDiags hcl.Diagnostics
25 if strings.HasSuffix(filename, ".json") {
26 file, fileDiags = parser.ParseJSONFile(filename)
27 } else {
28 file, fileDiags = parser.ParseHCLFile(filename)
29 }
30 diags = append(diags, fileDiags...)
31 if file == nil {
32 continue
33 }
34
35 content, _, contentDiags := file.Body.PartialContent(rootSchema)
36 diags = append(diags, contentDiags...)
37
38 for _, block := range content.Blocks {
39 switch block.Type {
40
41 case "terraform":
42 content, _, contentDiags := block.Body.PartialContent(terraformBlockSchema)
43 diags = append(diags, contentDiags...)
44
45 if attr, defined := content.Attributes["required_version"]; defined {
46 var version string
47 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version)
48 diags = append(diags, valDiags...)
49 if !valDiags.HasErrors() {
50 mod.RequiredCore = append(mod.RequiredCore, version)
51 }
52 }
53
54 for _, block := range content.Blocks {
55 // Our schema only allows required_providers here, so we
56 // assume that we'll only get that block type.
57 attrs, attrDiags := block.Body.JustAttributes()
58 diags = append(diags, attrDiags...)
59
60 for name, attr := range attrs {
61 var version string
62 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version)
63 diags = append(diags, valDiags...)
64 if !valDiags.HasErrors() {
65 mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version)
66 }
67 }
68 }
69
70 case "variable":
71 content, _, contentDiags := block.Body.PartialContent(variableSchema)
72 diags = append(diags, contentDiags...)
73
74 name := block.Labels[0]
75 v := &Variable{
76 Name: name,
77 Pos: sourcePosHCL(block.DefRange),
78 }
79
80 mod.Variables[name] = v
81
82 if attr, defined := content.Attributes["type"]; defined {
83 // We handle this particular attribute in a somewhat-tricky way:
84 // since Terraform may evolve its type expression syntax in
85 // future versions, we don't want to be overly-strict in how
86 // we handle it here, and so we'll instead just take the raw
87 // source provided by the user, using the source location
88 // information in the expression object.
89 //
90 // However, older versions of Terraform expected the type
91 // to be a string containing a keyword, so we'll need to
92 // handle that as a special case first for backward compatibility.
93
94 var typeExpr string
95
96 var typeExprAsStr string
97 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &typeExprAsStr)
98 if !valDiags.HasErrors() {
99 typeExpr = typeExprAsStr
100 } else {
101
102 rng := attr.Expr.Range()
103 sourceFilename := rng.Filename
104 source, exists := parser.Sources()[sourceFilename]
105 if exists {
106 typeExpr = string(rng.SliceBytes(source))
107 } else {
108 // This should never happen, so we'll just warn about it and leave the type unspecified.
109 diags = append(diags, &hcl.Diagnostic{
110 Severity: hcl.DiagError,
111 Summary: "Source code not available",
112 Detail: fmt.Sprintf("Source code is not available for the file %q, which declares the variable %q.", sourceFilename, name),
113 Subject: &block.DefRange,
114 })
115 typeExpr = ""
116 }
117
118 }
119
120 v.Type = typeExpr
121 }
122
123 if attr, defined := content.Attributes["description"]; defined {
124 var description string
125 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description)
126 diags = append(diags, valDiags...)
127 v.Description = description
128 }
129
130 if attr, defined := content.Attributes["default"]; defined {
131 // To avoid the caller needing to deal with cty here, we'll
132 // use its JSON encoding to convert into an
133 // approximately-equivalent plain Go interface{} value
134 // to return.
135 val, valDiags := attr.Expr.Value(nil)
136 diags = append(diags, valDiags...)
137 if val.IsWhollyKnown() { // should only be false if there are errors in the input
138 valJSON, err := ctyjson.Marshal(val, val.Type())
139 if err != nil {
140 // Should never happen, since all possible known
141 // values have a JSON mapping.
142 panic(fmt.Errorf("failed to serialize default value as JSON: %s", err))
143 }
144 var def interface{}
145 err = json.Unmarshal(valJSON, &def)
146 if err != nil {
147 // Again should never happen, because valJSON is
148 // guaranteed valid by ctyjson.Marshal.
149 panic(fmt.Errorf("failed to re-parse default value from JSON: %s", err))
150 }
151 v.Default = def
152 }
153 }
154
155 case "output":
156
157 content, _, contentDiags := block.Body.PartialContent(outputSchema)
158 diags = append(diags, contentDiags...)
159
160 name := block.Labels[0]
161 o := &Output{
162 Name: name,
163 Pos: sourcePosHCL(block.DefRange),
164 }
165
166 mod.Outputs[name] = o
167
168 if attr, defined := content.Attributes["description"]; defined {
169 var description string
170 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &description)
171 diags = append(diags, valDiags...)
172 o.Description = description
173 }
174
175 case "provider":
176
177 content, _, contentDiags := block.Body.PartialContent(providerConfigSchema)
178 diags = append(diags, contentDiags...)
179
180 name := block.Labels[0]
181
182 if attr, defined := content.Attributes["version"]; defined {
183 var version string
184 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version)
185 diags = append(diags, valDiags...)
186 if !valDiags.HasErrors() {
187 mod.RequiredProviders[name] = append(mod.RequiredProviders[name], version)
188 }
189 }
190
191 // Even if there wasn't an explicit version required, we still
192 // need an entry in our map to signal the unversioned dependency.
193 if _, exists := mod.RequiredProviders[name]; !exists {
194 mod.RequiredProviders[name] = []string{}
195 }
196
197 case "resource", "data":
198
199 content, _, contentDiags := block.Body.PartialContent(resourceSchema)
200 diags = append(diags, contentDiags...)
201
202 typeName := block.Labels[0]
203 name := block.Labels[1]
204
205 r := &Resource{
206 Type: typeName,
207 Name: name,
208 Pos: sourcePosHCL(block.DefRange),
209 }
210
211 var resourcesMap map[string]*Resource
212
213 switch block.Type {
214 case "resource":
215 r.Mode = ManagedResourceMode
216 resourcesMap = mod.ManagedResources
217 case "data":
218 r.Mode = DataResourceMode
219 resourcesMap = mod.DataResources
220 }
221
222 key := r.MapKey()
223
224 resourcesMap[key] = r
225
226 if attr, defined := content.Attributes["provider"]; defined {
227 // New style here is to provide this as a naked traversal
228 // expression, but we also support quoted references for
229 // older configurations that predated this convention.
230 traversal, travDiags := hcl.AbsTraversalForExpr(attr.Expr)
231 if travDiags.HasErrors() {
232 traversal = nil // in case we got any partial results
233
234 // Fall back on trying to parse as a string
235 var travStr string
236 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &travStr)
237 if !valDiags.HasErrors() {
238 var strDiags hcl.Diagnostics
239 traversal, strDiags = hclsyntax.ParseTraversalAbs([]byte(travStr), "", hcl.Pos{})
240 if strDiags.HasErrors() {
241 traversal = nil
242 }
243 }
244 }
245
246 // If we get out here with a nil traversal then we didn't
247 // succeed in processing the input.
248 if len(traversal) > 0 {
249 providerName := traversal.RootName()
250 alias := ""
251 if len(traversal) > 1 {
252 if getAttr, ok := traversal[1].(hcl.TraverseAttr); ok {
253 alias = getAttr.Name
254 }
255 }
256 r.Provider = ProviderRef{
257 Name: providerName,
258 Alias: alias,
259 }
260 } else {
261 diags = append(diags, &hcl.Diagnostic{
262 Severity: hcl.DiagError,
263 Summary: "Invalid provider reference",
264 Detail: "Provider argument requires a provider name followed by an optional alias, like \"aws.foo\".",
265 Subject: attr.Expr.Range().Ptr(),
266 })
267 }
268 } else {
269 // If provider _isn't_ set then we'll infer it from the
270 // resource type.
271 r.Provider = ProviderRef{
272 Name: resourceTypeDefaultProviderName(r.Type),
273 }
274 }
275
276 case "module":
277
278 content, _, contentDiags := block.Body.PartialContent(moduleCallSchema)
279 diags = append(diags, contentDiags...)
280
281 name := block.Labels[0]
282 mc := &ModuleCall{
283 Name: block.Labels[0],
284 Pos: sourcePosHCL(block.DefRange),
285 }
286
287 // check if this is overriding an existing module
288 var origSource string
289 if origMod, exists := mod.ModuleCalls[name]; exists {
290 origSource = origMod.Source
291 }
292
293 mod.ModuleCalls[name] = mc
294
295 if attr, defined := content.Attributes["source"]; defined {
296 var source string
297 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &source)
298 diags = append(diags, valDiags...)
299 mc.Source = source
300 }
301
302 if mc.Source == "" {
303 mc.Source = origSource
304 }
305
306 if attr, defined := content.Attributes["version"]; defined {
307 var version string
308 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &version)
309 diags = append(diags, valDiags...)
310 mc.Version = version
311 }
312
313 default:
314 // Should never happen because our cases above should be
315 // exhaustive for our schema.
316 panic(fmt.Errorf("unhandled block type %q", block.Type))
317 }
318 }
319 }
320
321 return mod, diagnosticsHCL(diags)
322}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go
new file mode 100644
index 0000000..86ffdf1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/load_legacy.go
@@ -0,0 +1,325 @@
1package tfconfig
2
3import (
4 "io/ioutil"
5 "strings"
6
7 legacyhcl "github.com/hashicorp/hcl"
8 legacyast "github.com/hashicorp/hcl/hcl/ast"
9)
10
// loadModuleLegacyHCL loads the Terraform module rooted at dir using the
// legacy HCL1 parser (github.com/hashicorp/hcl). It is the fallback used
// when the primary HCL2-based loader cannot process the configuration.
func loadModuleLegacyHCL(dir string) (*Module, Diagnostics) {
	// This implementation is intentionally more quick-and-dirty than the
	// main loader. In particular, it doesn't bother to keep careful track
	// of multiple error messages because we always fall back on returning
	// the main parser's error message if our fallback parsing produces
	// an error, and thus the errors here are not seen by the end-caller.
	mod := newModule(dir)

	primaryPaths, diags := dirFiles(dir)
	if diags.HasErrors() {
		return mod, diagnosticsHCL(diags)
	}

	for _, filename := range primaryPaths {
		src, err := ioutil.ReadFile(filename)
		if err != nil {
			return mod, diagnosticsErrorf("Error reading %s: %s", filename, err)
		}

		hclRoot, err := legacyhcl.Parse(string(src))
		if err != nil {
			return mod, diagnosticsErrorf("Error parsing %s: %s", filename, err)
		}

		list, ok := hclRoot.Node.(*legacyast.ObjectList)
		if !ok {
			return mod, diagnosticsErrorf("Error parsing %s: no root object", filename)
		}

		// terraform { ... } blocks: extract required_version constraints.
		for _, item := range list.Filter("terraform").Items {
			if len(item.Keys) > 0 {
				// Normalize labeled items into an unlabeled wrapper object so
				// that DecodeObject below sees a consistent shape.
				item = &legacyast.ObjectItem{
					Val: &legacyast.ObjectType{
						List: &legacyast.ObjectList{
							Items: []*legacyast.ObjectItem{item},
						},
					},
				}
			}

			type TerraformBlock struct {
				RequiredVersion string `hcl:"required_version"`
			}
			var block TerraformBlock
			err = legacyhcl.DecodeObject(&block, item.Val)
			if err != nil {
				return nil, diagnosticsErrorf("terraform block: %s", err)
			}

			if block.RequiredVersion != "" {
				mod.RequiredCore = append(mod.RequiredCore, block.RequiredVersion)
			}
		}

		// variable "name" { ... } blocks.
		if vars := list.Filter("variable"); len(vars.Items) > 0 {
			vars = vars.Children()
			type VariableBlock struct {
				Type        string `hcl:"type"`
				Default     interface{}
				Description string
				Fields      []string `hcl:",decodedFields"`
			}

			for _, item := range vars.Items {
				unwrapLegacyHCLObjectKeysFromJSON(item, 1)

				if len(item.Keys) != 1 {
					return nil, diagnosticsErrorf("variable block at %s has no label", item.Pos())
				}

				name := item.Keys[0].Token.Value().(string)

				var block VariableBlock
				err := legacyhcl.DecodeObject(&block, item.Val)
				if err != nil {
					return nil, diagnosticsErrorf("invalid variable block at %s: %s", item.Pos(), err)
				}

				// Clean up legacy HCL decoding ambiguity by unwrapping list of maps
				if ms, ok := block.Default.([]map[string]interface{}); ok {
					def := make(map[string]interface{})
					for _, m := range ms {
						for k, v := range m {
							def[k] = v
						}
					}
					block.Default = def
				}

				v := &Variable{
					Name:        name,
					Type:        block.Type,
					Description: block.Description,
					Default:     block.Default,
					Pos:         sourcePosLegacyHCL(item.Pos(), filename),
				}
				if _, exists := mod.Variables[name]; exists {
					return nil, diagnosticsErrorf("duplicate variable block for %q", name)
				}
				mod.Variables[name] = v

			}
		}

		// output "name" { ... } blocks.
		if outputs := list.Filter("output"); len(outputs.Items) > 0 {
			outputs = outputs.Children()
			type OutputBlock struct {
				Description string
			}

			for _, item := range outputs.Items {
				unwrapLegacyHCLObjectKeysFromJSON(item, 1)

				if len(item.Keys) != 1 {
					return nil, diagnosticsErrorf("output block at %s has no label", item.Pos())
				}

				name := item.Keys[0].Token.Value().(string)

				var block OutputBlock
				err := legacyhcl.DecodeObject(&block, item.Val)
				if err != nil {
					return nil, diagnosticsErrorf("invalid output block at %s: %s", item.Pos(), err)
				}

				o := &Output{
					Name:        name,
					Description: block.Description,
					Pos:         sourcePosLegacyHCL(item.Pos(), filename),
				}
				if _, exists := mod.Outputs[name]; exists {
					return nil, diagnosticsErrorf("duplicate output block for %q", name)
				}
				mod.Outputs[name] = o
			}
		}

		// resource "type" "name" and data "type" "name" blocks share the
		// same structure; only the target map and mode differ.
		for _, blockType := range []string{"resource", "data"} {
			if resources := list.Filter(blockType); len(resources.Items) > 0 {
				resources = resources.Children()
				type ResourceBlock struct {
					Provider string
				}

				for _, item := range resources.Items {
					unwrapLegacyHCLObjectKeysFromJSON(item, 2)

					if len(item.Keys) != 2 {
						return nil, diagnosticsErrorf("resource block at %s has wrong label count", item.Pos())
					}

					typeName := item.Keys[0].Token.Value().(string)
					name := item.Keys[1].Token.Value().(string)
					var mode ResourceMode
					var rMap map[string]*Resource
					switch blockType {
					case "resource":
						mode = ManagedResourceMode
						rMap = mod.ManagedResources
					case "data":
						mode = DataResourceMode
						rMap = mod.DataResources
					}

					var block ResourceBlock
					err := legacyhcl.DecodeObject(&block, item.Val)
					if err != nil {
						return nil, diagnosticsErrorf("invalid resource block at %s: %s", item.Pos(), err)
					}

					// A "provider" argument may be "name" or "name.alias".
					var providerName, providerAlias string
					if dotPos := strings.IndexByte(block.Provider, '.'); dotPos != -1 {
						providerName = block.Provider[:dotPos]
						providerAlias = block.Provider[dotPos+1:]
					} else {
						providerName = block.Provider
					}
					// With no explicit provider, infer it from the resource
					// type's prefix (e.g. "aws_instance" -> "aws").
					if providerName == "" {
						providerName = resourceTypeDefaultProviderName(typeName)
					}

					r := &Resource{
						Mode: mode,
						Type: typeName,
						Name: name,
						Provider: ProviderRef{
							Name:  providerName,
							Alias: providerAlias,
						},
						Pos: sourcePosLegacyHCL(item.Pos(), filename),
					}
					key := r.MapKey()
					if _, exists := rMap[key]; exists {
						return nil, diagnosticsErrorf("duplicate resource block for %q", key)
					}
					rMap[key] = r
				}
			}

		}

		// module "name" { ... } blocks.
		if moduleCalls := list.Filter("module"); len(moduleCalls.Items) > 0 {
			moduleCalls = moduleCalls.Children()
			type ModuleBlock struct {
				Source  string
				Version string
			}

			for _, item := range moduleCalls.Items {
				unwrapLegacyHCLObjectKeysFromJSON(item, 1)

				if len(item.Keys) != 1 {
					return nil, diagnosticsErrorf("module block at %s has no label", item.Pos())
				}

				name := item.Keys[0].Token.Value().(string)

				var block ModuleBlock
				err := legacyhcl.DecodeObject(&block, item.Val)
				if err != nil {
					return nil, diagnosticsErrorf("module block at %s: %s", item.Pos(), err)
				}

				mc := &ModuleCall{
					Name:    name,
					Source:  block.Source,
					Version: block.Version,
					Pos:     sourcePosLegacyHCL(item.Pos(), filename),
				}
				// it's possible this module call is from an override file
				if origMod, exists := mod.ModuleCalls[name]; exists {
					if mc.Source == "" {
						mc.Source = origMod.Source
					}
				}
				mod.ModuleCalls[name] = mc
			}
		}

		// provider "name" { ... } blocks: collect version constraints.
		if providerConfigs := list.Filter("provider"); len(providerConfigs.Items) > 0 {
			providerConfigs = providerConfigs.Children()
			type ProviderBlock struct {
				Version string
			}

			for _, item := range providerConfigs.Items {
				unwrapLegacyHCLObjectKeysFromJSON(item, 1)

				if len(item.Keys) != 1 {
					return nil, diagnosticsErrorf("provider block at %s has no label", item.Pos())
				}

				name := item.Keys[0].Token.Value().(string)

				var block ProviderBlock
				err := legacyhcl.DecodeObject(&block, item.Val)
				if err != nil {
					return nil, diagnosticsErrorf("invalid provider block at %s: %s", item.Pos(), err)
				}

				if block.Version != "" {
					mod.RequiredProviders[name] = append(mod.RequiredProviders[name], block.Version)
				}

				// Even if there wasn't an explicit version required, we still
				// need an entry in our map to signal the unversioned dependency.
				if _, exists := mod.RequiredProviders[name]; !exists {
					mod.RequiredProviders[name] = []string{}
				}

			}
		}
	}

	return mod, nil
}
287
288// unwrapLegacyHCLObjectKeysFromJSON cleans up an edge case that can occur when
289// parsing JSON as input: if we're parsing JSON then directly nested
290// items will show up as additional "keys".
291//
292// For objects that expect a fixed number of keys, this breaks the
293// decoding process. This function unwraps the object into what it would've
294// looked like if it came directly from HCL by specifying the number of keys
295// you expect.
296//
297// Example:
298//
299// { "foo": { "baz": {} } }
300//
301// Will show up with Keys being: []string{"foo", "baz"}
302// when we really just want the first two. This function will fix this.
303func unwrapLegacyHCLObjectKeysFromJSON(item *legacyast.ObjectItem, depth int) {
304 if len(item.Keys) > depth && item.Keys[0].Token.JSON {
305 for len(item.Keys) > depth {
306 // Pop off the last key
307 n := len(item.Keys)
308 key := item.Keys[n-1]
309 item.Keys[n-1] = nil
310 item.Keys = item.Keys[:n-1]
311
312 // Wrap our value in a list
313 item.Val = &legacyast.ObjectType{
314 List: &legacyast.ObjectList{
315 Items: []*legacyast.ObjectItem{
316 &legacyast.ObjectItem{
317 Keys: []*legacyast.ObjectKey{key},
318 Val: item.Val,
319 },
320 },
321 },
322 }
323 }
324 }
325}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go
new file mode 100644
index 0000000..65ddb23
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module.go
@@ -0,0 +1,35 @@
1package tfconfig
2
// Module is the top-level type representing a parsed and processed Terraform
// module.
type Module struct {
	// Path is the local filesystem directory where the module was loaded from.
	Path string `json:"path"`

	// Variables and Outputs describe the module's interface, keyed by name.
	Variables map[string]*Variable `json:"variables"`
	Outputs   map[string]*Output   `json:"outputs"`

	// RequiredCore lists version constraints on Terraform itself;
	// RequiredProviders maps provider names to their version constraints
	// (an empty slice signals an unversioned dependency).
	RequiredCore      []string            `json:"required_core,omitempty"`
	RequiredProviders map[string][]string `json:"required_providers"`

	// Resource and child-module declarations, keyed by Resource.MapKey /
	// module call name respectively.
	ManagedResources map[string]*Resource   `json:"managed_resources"`
	DataResources    map[string]*Resource   `json:"data_resources"`
	ModuleCalls      map[string]*ModuleCall `json:"module_calls"`

	// Diagnostics records any errors and warnings that were detected during
	// loading, primarily for inclusion in serialized forms of the module
	// since this slice is also returned as a second argument from LoadModule.
	Diagnostics Diagnostics `json:"diagnostics,omitempty"`
}
24
25func newModule(path string) *Module {
26 return &Module{
27 Path: path,
28 Variables: make(map[string]*Variable),
29 Outputs: make(map[string]*Output),
30 RequiredProviders: make(map[string][]string),
31 ManagedResources: make(map[string]*Resource),
32 DataResources: make(map[string]*Resource),
33 ModuleCalls: make(map[string]*ModuleCall),
34 }
35}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go
new file mode 100644
index 0000000..5e1e05a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/module_call.go
@@ -0,0 +1,11 @@
1package tfconfig
2
// ModuleCall represents a "module" block within a module. That is, a
// declaration of a child module from inside its parent.
type ModuleCall struct {
	Name    string `json:"name"`              // the label on the "module" block
	Source  string `json:"source"`            // the "source" argument, as written
	Version string `json:"version,omitempty"` // version constraint, if any

	// Pos is the source location of the block's definition.
	Pos SourcePos `json:"pos"`
}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go
new file mode 100644
index 0000000..890b25e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/output.go
@@ -0,0 +1,9 @@
1package tfconfig
2
// Output represents a single output from a Terraform module.
type Output struct {
	Name        string `json:"name"`                  // the label on the "output" block
	Description string `json:"description,omitempty"` // the "description" argument, if any

	// Pos is the source location of the block's definition.
	Pos SourcePos `json:"pos"`
}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go
new file mode 100644
index 0000000..d924837
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/provider_ref.go
@@ -0,0 +1,9 @@
1package tfconfig
2
// ProviderRef is a reference to a provider configuration within a module.
// It represents the contents of a "provider" argument in a resource, or
// a value in the "providers" map for a module call.
type ProviderRef struct {
	Name  string `json:"name"`
	Alias string `json:"alias,omitempty"` // Empty if the default provider configuration is referenced
}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go
new file mode 100644
index 0000000..401c8fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/resource.go
@@ -0,0 +1,64 @@
1package tfconfig
2
3import (
4 "fmt"
5 "strconv"
6 "strings"
7)
8
// Resource represents a single "resource" or "data" block within a module.
type Resource struct {
	Mode ResourceMode `json:"mode"` // managed ("resource") vs. data ("data")
	Type string       `json:"type"` // e.g. "aws_instance"
	Name string       `json:"name"` // the second block label

	// Provider identifies the provider configuration this resource uses,
	// either explicit via a "provider" argument or inferred from Type.
	Provider ProviderRef `json:"provider"`

	// Pos is the source location of the block's definition.
	Pos SourcePos `json:"pos"`
}
19
20// MapKey returns a string that can be used to uniquely identify the receiver
21// in a map[string]*Resource.
22func (r *Resource) MapKey() string {
23 switch r.Mode {
24 case ManagedResourceMode:
25 return fmt.Sprintf("%s.%s", r.Type, r.Name)
26 case DataResourceMode:
27 return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
28 default:
29 // should never happen
30 return fmt.Sprintf("[invalid_mode!].%s.%s", r.Type, r.Name)
31 }
32}
33
// ResourceMode represents the "mode" of a resource, which is used to
// distinguish between managed resources ("resource" blocks in config) and
// data resources ("data" blocks in config).
type ResourceMode rune

const InvalidResourceMode ResourceMode = 0
const ManagedResourceMode ResourceMode = 'M'
const DataResourceMode ResourceMode = 'D'

// String returns "managed" or "data" for the two valid modes, and an
// empty string for any other value.
func (rm ResourceMode) String() string {
	if rm == ManagedResourceMode {
		return "managed"
	}
	if rm == DataResourceMode {
		return "data"
	}
	return ""
}

// MarshalJSON implements encoding/json.Marshaler.
func (rm ResourceMode) MarshalJSON() ([]byte, error) {
	quoted := strconv.Quote(rm.String())
	return []byte(quoted), nil
}
58
// resourceTypeDefaultProviderName returns the provider name implied by the
// given resource type: everything before the first underscore, or the whole
// type name if it contains no underscore (e.g. "aws_instance" -> "aws",
// "template" -> "template").
func resourceTypeDefaultProviderName(typeName string) string {
	// SplitN returns the whole string as the sole element when there is
	// no underscore, so the first element is always the provider name.
	return strings.SplitN(typeName, "_", 2)[0]
}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go
new file mode 100644
index 0000000..3af742f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/schema.go
@@ -0,0 +1,106 @@
1package tfconfig
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
// rootSchema describes the top-level block types the loader extracts from a
// configuration file; PartialContent is used so any other constructs are
// ignored rather than rejected.
var rootSchema = &hcl.BodySchema{
	Blocks: []hcl.BlockHeaderSchema{
		{
			Type:       "terraform",
			LabelNames: nil,
		},
		{
			Type:       "variable",
			LabelNames: []string{"name"},
		},
		{
			Type:       "output",
			LabelNames: []string{"name"},
		},
		{
			Type:       "provider",
			LabelNames: []string{"name"},
		},
		{
			Type:       "resource",
			LabelNames: []string{"type", "name"},
		},
		{
			Type:       "data",
			LabelNames: []string{"type", "name"},
		},
		{
			Type:       "module",
			LabelNames: []string{"name"},
		},
	},
}

// terraformBlockSchema covers the parts of a "terraform" block we care
// about: the core version constraint and required_providers sub-blocks.
var terraformBlockSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "required_version",
		},
	},
	Blocks: []hcl.BlockHeaderSchema{
		{
			Type: "required_providers",
		},
	},
}

// providerConfigSchema covers the attributes of a "provider" block used to
// record provider version requirements.
var providerConfigSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "version",
		},
		{
			Name: "alias",
		},
	},
}

// variableSchema covers the attributes of a "variable" block that feed the
// Variable struct.
var variableSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "type",
		},
		{
			Name: "description",
		},
		{
			Name: "default",
		},
	},
}

// outputSchema covers the attributes of an "output" block that feed the
// Output struct.
var outputSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "description",
		},
	},
}

// moduleCallSchema covers the attributes of a "module" block that feed the
// ModuleCall struct.
var moduleCallSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "source",
		},
		{
			Name: "version",
		},
		{
			Name: "providers",
		},
	},
}

// resourceSchema covers the single attribute of "resource"/"data" blocks we
// inspect: the explicit provider reference.
var resourceSchema = &hcl.BodySchema{
	Attributes: []hcl.AttributeSchema{
		{
			Name: "provider",
		},
	},
}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go
new file mode 100644
index 0000000..883914e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/source_pos.go
@@ -0,0 +1,50 @@
1package tfconfig
2
3import (
4 legacyhcltoken "github.com/hashicorp/hcl/hcl/token"
5 "github.com/hashicorp/hcl2/hcl"
6)
7
// SourcePos is a pointer to a particular location in a source file.
//
// This type is embedded into other structs to allow callers to locate the
// definition of each described module element. The SourcePos of an element
// is usually the first line of its definition, although the definition can
// be a little "fuzzy" with JSON-based config files.
type SourcePos struct {
	Filename string `json:"filename"` // path of the file, as given to the loader
	Line     int    `json:"line"`     // 1-based line number; column is intentionally omitted
}
18
19func sourcePos(filename string, line int) SourcePos {
20 return SourcePos{
21 Filename: filename,
22 Line: line,
23 }
24}
25
26func sourcePosHCL(rng hcl.Range) SourcePos {
27 // We intentionally throw away the column information here because
28 // current and legacy HCL both disagree on the definition of a column
29 // and so a line-only reference is the best granularity we can do
30 // such that the result is consistent between both parsers.
31 return SourcePos{
32 Filename: rng.Filename,
33 Line: rng.Start.Line,
34 }
35}
36
37func sourcePosLegacyHCL(pos legacyhcltoken.Pos, filename string) SourcePos {
38 useFilename := pos.Filename
39 // We'll try to use the filename given in legacy HCL position, but
40 // in practice there's no way to actually get this populated via
41 // the HCL API so it's usually empty except in some specialized
42 // situations, such as positions in error objects.
43 if useFilename == "" {
44 useFilename = filename
45 }
46 return SourcePos{
47 Filename: useFilename,
48 Line: pos.Line,
49 }
50}
diff --git a/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go
new file mode 100644
index 0000000..0f73fc9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform-config-inspect/tfconfig/variable.go
@@ -0,0 +1,16 @@
1package tfconfig
2
// Variable represents a single variable from a Terraform module.
type Variable struct {
	Name        string `json:"name"`                  // the label on the "variable" block
	Type        string `json:"type,omitempty"`        // the "type" argument, as written
	Description string `json:"description,omitempty"` // the "description" argument, if any

	// Default is an approximate representation of the default value in
	// the native Go type system. The conversion from the value given in
	// configuration may be slightly lossy. Only values that can be
	// serialized by json.Marshal will be included here.
	Default interface{} `json:"default,omitempty"`

	// Pos is the source location of the block's definition.
	Pos SourcePos `json:"pos"`
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/count_attr.go b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go
new file mode 100644
index 0000000..90a5faf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/count_attr.go
@@ -0,0 +1,12 @@
1package addrs
2
// CountAttr is the address of an attribute of the "count" object in
// the interpolation scope, like "count.index".
type CountAttr struct {
	referenceable
	Name string // the attribute name, e.g. "index"
}

// String returns the HCL traversal syntax for the address, e.g. "count.index".
func (ca CountAttr) String() string {
	return "count." + ca.Name
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/doc.go b/vendor/github.com/hashicorp/terraform/addrs/doc.go
new file mode 100644
index 0000000..4609331
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/doc.go
@@ -0,0 +1,17 @@
1// Package addrs contains types that represent "addresses", which are
2// references to specific objects within a Terraform configuration or
3// state.
4//
5// All addresses have string representations based on HCL traversal syntax
6// which should be used in the user-interface, and also in-memory
7// representations that can be used internally.
8//
9// For object types that exist within Terraform modules a pair of types is
10// used. The "local" part of the address is represented by a type, and then
11// an absolute path to that object in the context of its module is represented
12// by a type of the same name with an "Abs" prefix added, for "absolute".
13//
14// All types within this package should be treated as immutable, even if this
15// is not enforced by the Go compiler. It is always an implementation error
16// to modify an address object in-place after it is initially constructed.
17package addrs
diff --git a/vendor/github.com/hashicorp/terraform/addrs/input_variable.go b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go
new file mode 100644
index 0000000..d2c046c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/input_variable.go
@@ -0,0 +1,41 @@
1package addrs
2
3import (
4 "fmt"
5)
6
// InputVariable is the address of an input variable.
type InputVariable struct {
	referenceable
	Name string // the variable name, without the "var." prefix
}

// String returns the HCL traversal syntax for the address, e.g. "var.foo".
func (v InputVariable) String() string {
	return "var." + v.Name
}
16
// AbsInputVariableInstance is the address of an input variable within a
// particular module instance.
type AbsInputVariableInstance struct {
	Module   ModuleInstance // the instance path; empty for the root module
	Variable InputVariable  // the variable's local address within that module
}
23
// InputVariable returns the absolute address of the input variable of the
// given name inside the receiving module instance.
func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance {
	return AbsInputVariableInstance{
		Module: m,
		Variable: InputVariable{
			Name: name,
		},
	}
}
34
35func (v AbsInputVariableInstance) String() string {
36 if len(v.Module) == 0 {
37 return v.String()
38 }
39
40 return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String())
41}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/instance_key.go b/vendor/github.com/hashicorp/terraform/addrs/instance_key.go
new file mode 100644
index 0000000..cef8b27
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/instance_key.go
@@ -0,0 +1,123 @@
1package addrs
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/gocty"
8)
9
// InstanceKey represents the key of an instance within an object that
// contains multiple instances due to using "count" or "for_each" arguments
// in configuration.
//
// IntKey and StringKey are the two implementations of this type. No other
// implementations are allowed. The single instance of an object that _isn't_
// using "count" or "for_each" is represented by NoKey, which is a nil
// InstanceKey.
type InstanceKey interface {
	instanceKeySigil() // unexported marker restricting implementations to this package
	String() string
}

// ParseInstanceKey returns the instance key corresponding to the given value,
// which must be known and non-null.
//
// If an unknown or null value is provided then this function will panic. This
// function is intended to deal with the values that would naturally be found
// in a hcl.TraverseIndex, which (when parsed from source, at least) can never
// contain unknown or null values.
func ParseInstanceKey(key cty.Value) (InstanceKey, error) {
	switch key.Type() {
	case cty.String:
		return StringKey(key.AsString()), nil
	case cty.Number:
		var idx int
		err := gocty.FromCtyValue(key, &idx)
		return IntKey(idx), err
	default:
		return NoKey, fmt.Errorf("either a string or an integer is required")
	}
}

// NoKey represents the absence of an InstanceKey, for the single instance
// of a configuration object that does not use "count" or "for_each" at all.
var NoKey InstanceKey
46
// IntKey is the InstanceKey representation representing integer indices, as
// used when the "count" argument is specified or if for_each is used with
// a sequence type.
type IntKey int

func (k IntKey) instanceKeySigil() {
}

// String returns the index syntax for the key, e.g. "[0]".
func (k IntKey) String() string {
	return fmt.Sprintf("[%d]", int(k))
}

// StringKey is the InstanceKey representation representing string indices, as
// used when the "for_each" argument is specified with a map or object type.
type StringKey string

func (k StringKey) instanceKeySigil() {
}

// String returns the index syntax for the key, e.g. `["foo"]`.
func (k StringKey) String() string {
	// FIXME: This isn't _quite_ right because Go's quoted string syntax is
	// slightly different than HCL's, but we'll accept it for now.
	return fmt.Sprintf("[%q]", string(k))
}
71
72// InstanceKeyLess returns true if the first given instance key i should sort
73// before the second key j, and false otherwise.
74func InstanceKeyLess(i, j InstanceKey) bool {
75 iTy := instanceKeyType(i)
76 jTy := instanceKeyType(j)
77
78 switch {
79 case i == j:
80 return false
81 case i == NoKey:
82 return true
83 case j == NoKey:
84 return false
85 case iTy != jTy:
86 // The ordering here is arbitrary except that we want NoKeyType
87 // to sort before the others, so we'll just use the enum values
88 // of InstanceKeyType here (where NoKey is zero, sorting before
89 // any other).
90 return uint32(iTy) < uint32(jTy)
91 case iTy == IntKeyType:
92 return int(i.(IntKey)) < int(j.(IntKey))
93 case iTy == StringKeyType:
94 return string(i.(StringKey)) < string(j.(StringKey))
95 default:
96 // Shouldn't be possible to get down here in practice, since the
97 // above is exhaustive.
98 return false
99 }
100}
101
102func instanceKeyType(k InstanceKey) InstanceKeyType {
103 if _, ok := k.(StringKey); ok {
104 return StringKeyType
105 }
106 if _, ok := k.(IntKey); ok {
107 return IntKeyType
108 }
109 return NoKeyType
110}
111
// InstanceKeyType represents the different types of instance key that are
// supported. Usually it is sufficient to simply type-assert an InstanceKey
// value to either IntKey or StringKey, but this type and its values can be
// used to represent the types themselves, rather than specific values
// of those types.
type InstanceKeyType rune

const (
	NoKeyType     InstanceKeyType = 0   // no key at all (NoKey); sorts first
	IntKeyType    InstanceKeyType = 'I' // an IntKey
	StringKeyType InstanceKeyType = 'S' // a StringKey
)
diff --git a/vendor/github.com/hashicorp/terraform/addrs/local_value.go b/vendor/github.com/hashicorp/terraform/addrs/local_value.go
new file mode 100644
index 0000000..61a07b9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/local_value.go
@@ -0,0 +1,48 @@
1package addrs
2
3import (
4 "fmt"
5)
6
// LocalValue is the address of a local value.
type LocalValue struct {
	referenceable
	Name string // the local value name, without the "local." prefix
}

// String returns the HCL traversal syntax for the address, e.g. "local.foo".
func (v LocalValue) String() string {
	return "local." + v.Name
}
16
// Absolute converts the receiver into an absolute address within the given
// module instance.
func (v LocalValue) Absolute(m ModuleInstance) AbsLocalValue {
	return AbsLocalValue{
		Module:     m,
		LocalValue: v,
	}
}

// AbsLocalValue is the absolute address of a local value within a module instance.
type AbsLocalValue struct {
	Module     ModuleInstance // the instance path; empty for the root module
	LocalValue LocalValue     // the local value's address within that module
}

// LocalValue returns the absolute address of a local value of the given
// name within the receiving module instance.
func (m ModuleInstance) LocalValue(name string) AbsLocalValue {
	return AbsLocalValue{
		Module: m,
		LocalValue: LocalValue{
			Name: name,
		},
	}
}

// String returns "local.foo" for a root-module local, or the module instance
// path followed by the local value address for a child module.
func (v AbsLocalValue) String() string {
	if len(v.Module) == 0 {
		return v.LocalValue.String()
	}
	return fmt.Sprintf("%s.%s", v.Module.String(), v.LocalValue.String())
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module.go b/vendor/github.com/hashicorp/terraform/addrs/module.go
new file mode 100644
index 0000000..6420c63
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module.go
@@ -0,0 +1,75 @@
1package addrs
2
3import (
4 "strings"
5)
6
// Module is an address for a module call within configuration. This is
// the static counterpart of ModuleInstance, representing a traversal through
// the static module call tree in configuration and does not take into account
// the potentially-multiple instances of a module that might be created by
// "count" and "for_each" arguments within those calls.
//
// This type should be used only in very specialized cases when working with
// the static module call tree. Type ModuleInstance is appropriate in more cases.
//
// Although Module is a slice, it should be treated as immutable after creation.
type Module []string

// RootModule is the module address representing the root of the static module
// call tree, which is also the zero value of Module.
//
// Note that this is not the root of the dynamic module tree, which is instead
// represented by RootModuleInstance.
var RootModule Module

// IsRoot returns true if the receiver is the address of the root module,
// or false otherwise.
//
// The root module is represented by the empty (nil) slice, so this is just
// a length check.
func (m Module) IsRoot() bool {
	return len(m) == 0
}
31
32func (m Module) String() string {
33 if len(m) == 0 {
34 return ""
35 }
36 return strings.Join([]string(m), ".")
37}
38
39// Child returns the address of a child call in the receiver, identified by the
40// given name.
41func (m Module) Child(name string) Module {
42 ret := make(Module, 0, len(m)+1)
43 ret = append(ret, m...)
44 return append(ret, name)
45}
46
47// Parent returns the address of the parent module of the receiver, or the
48// receiver itself if there is no parent (if it's the root module address).
49func (m Module) Parent() Module {
50 if len(m) == 0 {
51 return m
52 }
53 return m[:len(m)-1]
54}
55
56// Call returns the module call address that corresponds to the given module
57// instance, along with the address of the module that contains it.
58//
59// There is no call for the root module, so this method will panic if called
60// on the root module address.
61//
62// In practice, this just turns the last element of the receiver into a
63// ModuleCall and then returns a slice of the receiever that excludes that
64// last part. This is just a convenience for situations where a call address
65// is required, such as when dealing with *Reference and Referencable values.
66func (m Module) Call() (Module, ModuleCall) {
67 if len(m) == 0 {
68 panic("cannot produce ModuleCall for root module")
69 }
70
71 caller, callName := m[:len(m)-1], m[len(m)-1]
72 return caller, ModuleCall{
73 Name: callName,
74 }
75}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_call.go b/vendor/github.com/hashicorp/terraform/addrs/module_call.go
new file mode 100644
index 0000000..09596cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module_call.go
@@ -0,0 +1,81 @@
1package addrs
2
3import (
4 "fmt"
5)
6
7// ModuleCall is the address of a call from the current module to a child
8// module.
9//
10// There is no "Abs" version of ModuleCall because an absolute module path
11// is represented by ModuleInstance.
12type ModuleCall struct {
13 referenceable
14 Name string
15}
16
17func (c ModuleCall) String() string {
18 return "module." + c.Name
19}
20
21// Instance returns the address of an instance of the receiver identified by
22// the given key.
23func (c ModuleCall) Instance(key InstanceKey) ModuleCallInstance {
24 return ModuleCallInstance{
25 Call: c,
26 Key: key,
27 }
28}
29
30// ModuleCallInstance is the address of one instance of a module created from
31// a module call, which might create multiple instances using "count" or
32// "for_each" arguments.
33type ModuleCallInstance struct {
34 referenceable
35 Call ModuleCall
36 Key InstanceKey
37}
38
39func (c ModuleCallInstance) String() string {
40 if c.Key == NoKey {
41 return c.Call.String()
42 }
43 return fmt.Sprintf("module.%s%s", c.Call.Name, c.Key)
44}
45
46// ModuleInstance returns the address of the module instance that corresponds
47// to the receiving call instance when resolved in the given calling module.
48// In other words, it returns the child module instance that the receving
49// call instance creates.
50func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance {
51 return caller.Child(c.Call.Name, c.Key)
52}
53
54// Output returns the address of an output of the receiver identified by its
55// name.
56func (c ModuleCallInstance) Output(name string) ModuleCallOutput {
57 return ModuleCallOutput{
58 Call: c,
59 Name: name,
60 }
61}
62
63// ModuleCallOutput is the address of a particular named output produced by
64// an instance of a module call.
65type ModuleCallOutput struct {
66 referenceable
67 Call ModuleCallInstance
68 Name string
69}
70
71func (co ModuleCallOutput) String() string {
72 return fmt.Sprintf("%s.%s", co.Call.String(), co.Name)
73}
74
75// AbsOutputValue returns the absolute output value address that corresponds
76// to the receving module call output address, once resolved in the given
77// calling module.
78func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue {
79 moduleAddr := co.Call.ModuleInstance(caller)
80 return moduleAddr.OutputValue(co.Name)
81}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/module_instance.go b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go
new file mode 100644
index 0000000..67e73e5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/module_instance.go
@@ -0,0 +1,415 @@
1package addrs
2
3import (
4 "bytes"
5 "fmt"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9 "github.com/zclconf/go-cty/cty"
10 "github.com/zclconf/go-cty/cty/gocty"
11
12 "github.com/hashicorp/terraform/tfdiags"
13)
14
// ModuleInstance is an address for a particular module instance within the
// dynamic module tree. This is an extension of the static traversals
// represented by type Module that deals with the possibility of a single
// module call producing multiple instances via the "count" and "for_each"
// arguments.
//
// Although ModuleInstance is a slice, it should be treated as immutable after
// creation.
type ModuleInstance []ModuleInstanceStep

// Compile-time assertion that ModuleInstance implements Targetable.
var (
	_ Targetable = ModuleInstance(nil)
)
28
29func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) {
30 mi, remain, diags := parseModuleInstancePrefix(traversal)
31 if len(remain) != 0 {
32 if len(remain) == len(traversal) {
33 diags = diags.Append(&hcl.Diagnostic{
34 Severity: hcl.DiagError,
35 Summary: "Invalid module instance address",
36 Detail: "A module instance address must begin with \"module.\".",
37 Subject: remain.SourceRange().Ptr(),
38 })
39 } else {
40 diags = diags.Append(&hcl.Diagnostic{
41 Severity: hcl.DiagError,
42 Summary: "Invalid module instance address",
43 Detail: "The module instance address is followed by additional invalid content.",
44 Subject: remain.SourceRange().Ptr(),
45 })
46 }
47 }
48 return mi, diags
49}
50
// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance
// that takes a string and parses it with the HCL native syntax traversal parser
// before interpreting it.
//
// This should be used only in specialized situations since it will cause the
// created references to not have any meaningful source location information.
// If a reference string is coming from a source that should be identified in
// error messages then the caller should instead parse it directly using a
// suitable function from the HCL API and pass the traversal itself to
// ParseModuleInstance.
//
// Error diagnostics are returned if either the parsing fails or the analysis
// of the traversal fails. There is no way for the caller to distinguish the
// two kinds of diagnostics programmatically. If error diagnostics are returned
// then the returned address is invalid.
func ParseModuleInstanceStr(str string) (ModuleInstance, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Parse with a synthetic empty filename and a 1,1 start position, since
	// the raw string carries no real source location.
	traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
	diags = diags.Append(parseDiags)
	if parseDiags.HasErrors() {
		return nil, diags
	}

	addr, addrDiags := ParseModuleInstance(traversal)
	diags = diags.Append(addrDiags)
	return addr, diags
}
79
80func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) {
81 remain := traversal
82 var mi ModuleInstance
83 var diags tfdiags.Diagnostics
84
85 for len(remain) > 0 {
86 var next string
87 switch tt := remain[0].(type) {
88 case hcl.TraverseRoot:
89 next = tt.Name
90 case hcl.TraverseAttr:
91 next = tt.Name
92 default:
93 diags = diags.Append(&hcl.Diagnostic{
94 Severity: hcl.DiagError,
95 Summary: "Invalid address operator",
96 Detail: "Module address prefix must be followed by dot and then a name.",
97 Subject: remain[0].SourceRange().Ptr(),
98 })
99 break
100 }
101
102 if next != "module" {
103 break
104 }
105
106 kwRange := remain[0].SourceRange()
107 remain = remain[1:]
108 // If we have the prefix "module" then we should be followed by an
109 // module call name, as an attribute, and then optionally an index step
110 // giving the instance key.
111 if len(remain) == 0 {
112 diags = diags.Append(&hcl.Diagnostic{
113 Severity: hcl.DiagError,
114 Summary: "Invalid address operator",
115 Detail: "Prefix \"module.\" must be followed by a module name.",
116 Subject: &kwRange,
117 })
118 break
119 }
120
121 var moduleName string
122 switch tt := remain[0].(type) {
123 case hcl.TraverseAttr:
124 moduleName = tt.Name
125 default:
126 diags = diags.Append(&hcl.Diagnostic{
127 Severity: hcl.DiagError,
128 Summary: "Invalid address operator",
129 Detail: "Prefix \"module.\" must be followed by a module name.",
130 Subject: remain[0].SourceRange().Ptr(),
131 })
132 break
133 }
134 remain = remain[1:]
135 step := ModuleInstanceStep{
136 Name: moduleName,
137 }
138
139 if len(remain) > 0 {
140 if idx, ok := remain[0].(hcl.TraverseIndex); ok {
141 remain = remain[1:]
142
143 switch idx.Key.Type() {
144 case cty.String:
145 step.InstanceKey = StringKey(idx.Key.AsString())
146 case cty.Number:
147 var idxInt int
148 err := gocty.FromCtyValue(idx.Key, &idxInt)
149 if err == nil {
150 step.InstanceKey = IntKey(idxInt)
151 } else {
152 diags = diags.Append(&hcl.Diagnostic{
153 Severity: hcl.DiagError,
154 Summary: "Invalid address operator",
155 Detail: fmt.Sprintf("Invalid module index: %s.", err),
156 Subject: idx.SourceRange().Ptr(),
157 })
158 }
159 default:
160 // Should never happen, because no other types are allowed in traversal indices.
161 diags = diags.Append(&hcl.Diagnostic{
162 Severity: hcl.DiagError,
163 Summary: "Invalid address operator",
164 Detail: "Invalid module key: must be either a string or an integer.",
165 Subject: idx.SourceRange().Ptr(),
166 })
167 }
168 }
169 }
170
171 mi = append(mi, step)
172 }
173
174 var retRemain hcl.Traversal
175 if len(remain) > 0 {
176 retRemain = make(hcl.Traversal, len(remain))
177 copy(retRemain, remain)
178 // The first element here might be either a TraverseRoot or a
179 // TraverseAttr, depending on whether we had a module address on the
180 // front. To make life easier for callers, we'll normalize to always
181 // start with a TraverseRoot.
182 if tt, ok := retRemain[0].(hcl.TraverseAttr); ok {
183 retRemain[0] = hcl.TraverseRoot{
184 Name: tt.Name,
185 SrcRange: tt.SrcRange,
186 }
187 }
188 }
189
190 return mi, retRemain, diags
191}
192
193// UnkeyedInstanceShim is a shim method for converting a Module address to the
194// equivalent ModuleInstance address that assumes that no modules have
195// keyed instances.
196//
197// This is a temporary allowance for the fact that Terraform does not presently
198// support "count" and "for_each" on modules, and thus graph building code that
199// derives graph nodes from configuration must just assume unkeyed modules
200// in order to construct the graph. At a later time when "count" and "for_each"
201// support is added for modules, all callers of this method will need to be
202// reworked to allow for keyed module instances.
203func (m Module) UnkeyedInstanceShim() ModuleInstance {
204 path := make(ModuleInstance, len(m))
205 for i, name := range m {
206 path[i] = ModuleInstanceStep{Name: name}
207 }
208 return path
209}
210
// ModuleInstanceStep is a single traversal step through the dynamic module
// tree. It is used only as part of ModuleInstance.
type ModuleInstanceStep struct {
	// Name is the name of the module call this step traverses through.
	Name string
	// InstanceKey is the "count"/"for_each" key for this instance, or
	// NoKey (the zero value) for an unkeyed module call.
	InstanceKey InstanceKey
}

// RootModuleInstance is the module instance address representing the root
// module, which is also the zero value of ModuleInstance.
var RootModuleInstance ModuleInstance

// IsRoot returns true if the receiver is the address of the root module instance,
// or false otherwise.
func (m ModuleInstance) IsRoot() bool {
	return len(m) == 0
}
227
228// Child returns the address of a child module instance of the receiver,
229// identified by the given name and key.
230func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance {
231 ret := make(ModuleInstance, 0, len(m)+1)
232 ret = append(ret, m...)
233 return append(ret, ModuleInstanceStep{
234 Name: name,
235 InstanceKey: key,
236 })
237}
238
239// Parent returns the address of the parent module instance of the receiver, or
240// the receiver itself if there is no parent (if it's the root module address).
241func (m ModuleInstance) Parent() ModuleInstance {
242 if len(m) == 0 {
243 return m
244 }
245 return m[:len(m)-1]
246}
247
248// String returns a string representation of the receiver, in the format used
249// within e.g. user-provided resource addresses.
250//
251// The address of the root module has the empty string as its representation.
252func (m ModuleInstance) String() string {
253 var buf bytes.Buffer
254 sep := ""
255 for _, step := range m {
256 buf.WriteString(sep)
257 buf.WriteString("module.")
258 buf.WriteString(step.Name)
259 if step.InstanceKey != NoKey {
260 buf.WriteString(step.InstanceKey.String())
261 }
262 sep = "."
263 }
264 return buf.String()
265}
266
// Equal returns true if the receiver and the given other value
// contains the exact same parts.
//
// Equality is defined by comparing the canonical string renderings, so two
// addresses are equal exactly when their String results match.
func (m ModuleInstance) Equal(o ModuleInstance) bool {
	return m.String() == o.String()
}
272
273// Less returns true if the receiver should sort before the given other value
274// in a sorted list of addresses.
275func (m ModuleInstance) Less(o ModuleInstance) bool {
276 if len(m) != len(o) {
277 // Shorter path sorts first.
278 return len(m) < len(o)
279 }
280
281 for i := range m {
282 mS, oS := m[i], o[i]
283 switch {
284 case mS.Name != oS.Name:
285 return mS.Name < oS.Name
286 case mS.InstanceKey != oS.InstanceKey:
287 return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey)
288 }
289 }
290
291 return false
292}
293
294// Ancestors returns a slice containing the receiver and all of its ancestor
295// module instances, all the way up to (and including) the root module.
296// The result is ordered by depth, with the root module always first.
297//
298// Since the result always includes the root module, a caller may choose to
299// ignore it by slicing the result with [1:].
300func (m ModuleInstance) Ancestors() []ModuleInstance {
301 ret := make([]ModuleInstance, 0, len(m)+1)
302 for i := 0; i <= len(m); i++ {
303 ret = append(ret, m[:i])
304 }
305 return ret
306}
307
// IsAncestor returns true if the receiver is an ancestor of the given
// other value.
func (m ModuleInstance) IsAncestor(o ModuleInstance) bool {
	// Longer or equal sized paths means the receiver cannot
	// be an ancestor of the given module instance.
	if len(m) >= len(o) {
		return false
	}

	for i, ms := range m {
		if ms.Name != o[i].Name {
			return false
		}
		// NOTE(review): a NoKey step on the receiver matches any instance
		// key at the same position in the candidate, i.e. NoKey behaves as
		// a wildcard here — confirm this is the intended matching behavior.
		if ms.InstanceKey != NoKey && ms.InstanceKey != o[i].InstanceKey {
			return false
		}
	}

	return true
}
328
329// Call returns the module call address that corresponds to the given module
330// instance, along with the address of the module instance that contains it.
331//
332// There is no call for the root module, so this method will panic if called
333// on the root module address.
334//
335// A single module call can produce potentially many module instances, so the
336// result discards any instance key that might be present on the last step
337// of the instance. To retain this, use CallInstance instead.
338//
339// In practice, this just turns the last element of the receiver into a
340// ModuleCall and then returns a slice of the receiever that excludes that
341// last part. This is just a convenience for situations where a call address
342// is required, such as when dealing with *Reference and Referencable values.
343func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) {
344 if len(m) == 0 {
345 panic("cannot produce ModuleCall for root module")
346 }
347
348 inst, lastStep := m[:len(m)-1], m[len(m)-1]
349 return inst, ModuleCall{
350 Name: lastStep.Name,
351 }
352}
353
354// CallInstance returns the module call instance address that corresponds to
355// the given module instance, along with the address of the module instance
356// that contains it.
357//
358// There is no call for the root module, so this method will panic if called
359// on the root module address.
360//
361// In practice, this just turns the last element of the receiver into a
362// ModuleCallInstance and then returns a slice of the receiever that excludes
363// that last part. This is just a convenience for situations where a call\
364// address is required, such as when dealing with *Reference and Referencable
365// values.
366func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) {
367 if len(m) == 0 {
368 panic("cannot produce ModuleCallInstance for root module")
369 }
370
371 inst, lastStep := m[:len(m)-1], m[len(m)-1]
372 return inst, ModuleCallInstance{
373 Call: ModuleCall{
374 Name: lastStep.Name,
375 },
376 Key: lastStep.InstanceKey,
377 }
378}
379
380// TargetContains implements Targetable by returning true if the given other
381// address either matches the receiver, is a sub-module-instance of the
382// receiver, or is a targetable absolute address within a module that
383// is contained within the reciever.
384func (m ModuleInstance) TargetContains(other Targetable) bool {
385 switch to := other.(type) {
386
387 case ModuleInstance:
388 if len(to) < len(m) {
389 // Can't be contained if the path is shorter
390 return false
391 }
392 // Other is contained if its steps match for the length of our own path.
393 for i, ourStep := range m {
394 otherStep := to[i]
395 if ourStep != otherStep {
396 return false
397 }
398 }
399 // If we fall out here then the prefixed matched, so it's contained.
400 return true
401
402 case AbsResource:
403 return m.TargetContains(to.Module)
404
405 case AbsResourceInstance:
406 return m.TargetContains(to.Module)
407
408 default:
409 return false
410 }
411}
412
// targetableSigil is the marker method that makes ModuleInstance satisfy
// the Targetable interface.
func (m ModuleInstance) targetableSigil() {
	// ModuleInstance is targetable
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/output_value.go b/vendor/github.com/hashicorp/terraform/addrs/output_value.go
new file mode 100644
index 0000000..bcd923a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/output_value.go
@@ -0,0 +1,75 @@
1package addrs
2
3import (
4 "fmt"
5)
6
7// OutputValue is the address of an output value, in the context of the module
8// that is defining it.
9//
10// This is related to but separate from ModuleCallOutput, which represents
11// a module output from the perspective of its parent module. Since output
12// values cannot be represented from the module where they are defined,
13// OutputValue is not Referenceable, while ModuleCallOutput is.
14type OutputValue struct {
15 Name string
16}
17
18func (v OutputValue) String() string {
19 return "output." + v.Name
20}
21
22// Absolute converts the receiver into an absolute address within the given
23// module instance.
24func (v OutputValue) Absolute(m ModuleInstance) AbsOutputValue {
25 return AbsOutputValue{
26 Module: m,
27 OutputValue: v,
28 }
29}
30
31// AbsOutputValue is the absolute address of an output value within a module instance.
32//
33// This represents an output globally within the namespace of a particular
34// configuration. It is related to but separate from ModuleCallOutput, which
35// represents a module output from the perspective of its parent module.
36type AbsOutputValue struct {
37 Module ModuleInstance
38 OutputValue OutputValue
39}
40
41// OutputValue returns the absolute address of an output value of the given
42// name within the receiving module instance.
43func (m ModuleInstance) OutputValue(name string) AbsOutputValue {
44 return AbsOutputValue{
45 Module: m,
46 OutputValue: OutputValue{
47 Name: name,
48 },
49 }
50}
51
52func (v AbsOutputValue) String() string {
53 if v.Module.IsRoot() {
54 return v.OutputValue.String()
55 }
56 return fmt.Sprintf("%s.%s", v.Module.String(), v.OutputValue.String())
57}
58
59// ModuleCallOutput converts an AbsModuleOutput into a ModuleCallOutput,
60// returning also the module instance that the ModuleCallOutput is relative
61// to.
62//
63// The root module does not have a call, and so this method cannot be used
64// with outputs in the root module, and will panic in that case.
65func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) {
66 if v.Module.IsRoot() {
67 panic("ReferenceFromCall used with root module output")
68 }
69
70 caller, call := v.Module.CallInstance()
71 return caller, ModuleCallOutput{
72 Call: call,
73 Name: v.OutputValue.Name,
74 }
75}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
new file mode 100644
index 0000000..84fe8a0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
@@ -0,0 +1,338 @@
1package addrs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/hcl2/hcl/hclsyntax"
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
// Reference describes a reference to an address with source location
// information.
type Reference struct {
	// Subject is the referenceable object the reference resolves to.
	Subject Referenceable
	// SourceRange is where the reference appeared in source.
	SourceRange tfdiags.SourceRange
	// Remaining is any traversal left over after the address itself, such
	// as attribute accesses into the referenced object; nil when fully
	// consumed.
	Remaining hcl.Traversal
}
18
19// ParseRef attempts to extract a referencable address from the prefix of the
20// given traversal, which must be an absolute traversal or this function
21// will panic.
22//
23// If no error diagnostics are returned, the returned reference includes the
24// address that was extracted, the source range it was extracted from, and any
25// remaining relative traversal that was not consumed as part of the
26// reference.
27//
28// If error diagnostics are returned then the Reference value is invalid and
29// must not be used.
30func ParseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
31 ref, diags := parseRef(traversal)
32
33 // Normalize a little to make life easier for callers.
34 if ref != nil {
35 if len(ref.Remaining) == 0 {
36 ref.Remaining = nil
37 }
38 }
39
40 return ref, diags
41}
42
43// ParseRefStr is a helper wrapper around ParseRef that takes a string
44// and parses it with the HCL native syntax traversal parser before
45// interpreting it.
46//
47// This should be used only in specialized situations since it will cause the
48// created references to not have any meaningful source location information.
49// If a reference string is coming from a source that should be identified in
50// error messages then the caller should instead parse it directly using a
51// suitable function from the HCL API and pass the traversal itself to
52// ParseRef.
53//
54// Error diagnostics are returned if either the parsing fails or the analysis
55// of the traversal fails. There is no way for the caller to distinguish the
56// two kinds of diagnostics programmatically. If error diagnostics are returned
57// the returned reference may be nil or incomplete.
58func ParseRefStr(str string) (*Reference, tfdiags.Diagnostics) {
59 var diags tfdiags.Diagnostics
60
61 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
62 diags = diags.Append(parseDiags)
63 if parseDiags.HasErrors() {
64 return nil, diags
65 }
66
67 ref, targetDiags := ParseRef(traversal)
68 diags = diags.Append(targetDiags)
69 return ref, diags
70}
71
// parseRef dispatches on the root name of the traversal to interpret it as
// one of the known reference kinds (count, data, local, module, path, self,
// terraform, var) or, by default, a managed resource reference.
//
// Note: several cases deliberately shadow the outer diags with the := form;
// those shadowed values are returned directly from the case, so nothing is
// lost.
func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	root := traversal.RootName()
	rootRange := traversal[0].SourceRange()

	switch root {

	case "count":
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     CountAttr{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	case "data":
		// Data references need "data.<type>.<name>", i.e. at least three steps.
		if len(traversal) < 3 {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid reference",
				Detail:   `The "data" object must be followed by two attribute names: the data source type and the resource name.`,
				Subject:  traversal.SourceRange().Ptr(),
			})
			return nil, diags
		}
		remain := traversal[1:] // trim off "data" so we can use our shared resource reference parser
		return parseResourceRef(DataResourceMode, rootRange, remain)

	case "local":
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     LocalValue{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	case "module":
		callName, callRange, remain, diags := parseSingleAttrRef(traversal)
		if diags.HasErrors() {
			return nil, diags
		}

		// A traversal starting with "module" can either be a reference to
		// an entire module instance or to a single output from a module
		// instance, depending on what we find after this introducer.

		callInstance := ModuleCallInstance{
			Call: ModuleCall{
				Name: callName,
			},
			Key: NoKey,
		}

		if len(remain) == 0 {
			// Reference to an entire module instance. Might alternatively
			// be a reference to a collection of instances of a particular
			// module, but the caller will need to deal with that ambiguity
			// since we don't have enough context here.
			return &Reference{
				Subject:     callInstance,
				SourceRange: tfdiags.SourceRangeFromHCL(callRange),
				Remaining:   remain,
			}, diags
		}

		// An optional index step gives the instance key, e.g. module.foo[0].
		if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok {
			var err error
			callInstance.Key, err = ParseInstanceKey(idxTrav.Key)
			if err != nil {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid index key",
					Detail:   fmt.Sprintf("Invalid index for module instance: %s.", err),
					Subject:  &idxTrav.SrcRange,
				})
				return nil, diags
			}
			remain = remain[1:]

			if len(remain) == 0 {
				// Also a reference to an entire module instance, but we have a key
				// now.
				return &Reference{
					Subject:     callInstance,
					SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, idxTrav.SrcRange)),
					Remaining:   remain,
				}, diags
			}
		}

		// A following attribute access selects a single output of the module.
		if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok {
			remain = remain[1:]
			return &Reference{
				Subject: ModuleCallOutput{
					Name: attrTrav.Name,
					Call: callInstance,
				},
				SourceRange: tfdiags.SourceRangeFromHCL(hcl.RangeBetween(callRange, attrTrav.SrcRange)),
				Remaining:   remain,
			}, diags
		}

		// Anything else after the module address (e.g. another index) is invalid.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid reference",
			Detail:   "Module instance objects do not support this operation.",
			Subject:  remain[0].SourceRange().Ptr(),
		})
		return nil, diags

	case "path":
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     PathAttr{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	case "self":
		return &Reference{
			Subject:     Self,
			SourceRange: tfdiags.SourceRangeFromHCL(rootRange),
			Remaining:   traversal[1:],
		}, diags

	case "terraform":
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     TerraformAttr{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	case "var":
		name, rng, remain, diags := parseSingleAttrRef(traversal)
		return &Reference{
			Subject:     InputVariable{Name: name},
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
			Remaining:   remain,
		}, diags

	default:
		// Any other root name is treated as a managed resource type.
		return parseResourceRef(ManagedResourceMode, rootRange, traversal)
	}
}
218
// parseResourceRef interprets a traversal of the form
// "<type>.<name>[<key>]..." as a resource instance reference of the given
// mode. For data resources the caller has already trimmed the leading
// "data" step, so traversal[0] may be a TraverseAttr rather than a
// TraverseRoot. startRange is the range of the original reference's first
// step, used as the start of the reported source range.
func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if len(traversal) < 2 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid reference",
			Detail:   `A reference to a resource type must be followed by at least one attribute access, specifying the resource name.`,
			Subject:  hcl.RangeBetween(traversal[0].SourceRange(), traversal[len(traversal)-1].SourceRange()).Ptr(),
		})
		return nil, diags
	}

	var typeName, name string
	switch tt := traversal[0].(type) { // Could be either root or attr, depending on our resource mode
	case hcl.TraverseRoot:
		typeName = tt.Name
	case hcl.TraverseAttr:
		typeName = tt.Name
	default:
		// Neither root nor attribute: e.g. an index step directly after
		// "data". This path is presumably only reachable for data
		// references, given the error message — TODO confirm.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid reference",
			Detail:   `The "data" object does not support this operation.`,
			Subject:  traversal[0].SourceRange().Ptr(),
		})
		return nil, diags
	}

	attrTrav, ok := traversal[1].(hcl.TraverseAttr)
	if !ok {
		// Pick terminology appropriate to the resource mode for the message.
		var what string
		switch mode {
		case DataResourceMode:
			what = "data source"
		default:
			what = "resource type"
		}
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid reference",
			Detail:   fmt.Sprintf(`A reference to a %s must be followed by at least one attribute access, specifying the resource name.`, what),
			Subject:  traversal[1].SourceRange().Ptr(),
		})
		return nil, diags
	}
	name = attrTrav.Name
	rng := hcl.RangeBetween(startRange, attrTrav.SrcRange)
	remain := traversal[2:]

	resourceAddr := Resource{
		Mode: mode,
		Type: typeName,
		Name: name,
	}
	resourceInstAddr := ResourceInstance{
		Resource: resourceAddr,
		Key:      NoKey,
	}

	if len(remain) == 0 {
		// This might actually be a reference to the collection of all instances
		// of the resource, but we don't have enough context here to decide
		// so we'll let the caller resolve that ambiguity.
		return &Reference{
			Subject:     resourceInstAddr,
			SourceRange: tfdiags.SourceRangeFromHCL(rng),
		}, diags
	}

	// An optional index step selects a particular instance, e.g. aws_x.y[0].
	if idxTrav, ok := remain[0].(hcl.TraverseIndex); ok {
		var err error
		resourceInstAddr.Key, err = ParseInstanceKey(idxTrav.Key)
		if err != nil {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid index key",
				Detail:   fmt.Sprintf("Invalid index for resource instance: %s.", err),
				Subject:  &idxTrav.SrcRange,
			})
			return nil, diags
		}
		remain = remain[1:]
		rng = hcl.RangeBetween(rng, idxTrav.SrcRange)
	}

	return &Reference{
		Subject:     resourceInstAddr,
		SourceRange: tfdiags.SourceRangeFromHCL(rng),
		Remaining:   remain,
	}, diags
}
312
313func parseSingleAttrRef(traversal hcl.Traversal) (string, hcl.Range, hcl.Traversal, tfdiags.Diagnostics) {
314 var diags tfdiags.Diagnostics
315
316 root := traversal.RootName()
317 rootRange := traversal[0].SourceRange()
318
319 if len(traversal) < 2 {
320 diags = diags.Append(&hcl.Diagnostic{
321 Severity: hcl.DiagError,
322 Summary: "Invalid reference",
323 Detail: fmt.Sprintf("The %q object cannot be accessed directly. Instead, access one of its attributes.", root),
324 Subject: &rootRange,
325 })
326 return "", hcl.Range{}, nil, diags
327 }
328 if attrTrav, ok := traversal[1].(hcl.TraverseAttr); ok {
329 return attrTrav.Name, hcl.RangeBetween(rootRange, attrTrav.SrcRange), traversal[2:], diags
330 }
331 diags = diags.Append(&hcl.Diagnostic{
332 Severity: hcl.DiagError,
333 Summary: "Invalid reference",
334 Detail: fmt.Sprintf("The %q object does not support this operation.", root),
335 Subject: traversal[1].SourceRange().Ptr(),
336 })
337 return "", hcl.Range{}, nil, diags
338}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/parse_target.go b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go
new file mode 100644
index 0000000..057443a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/parse_target.go
@@ -0,0 +1,318 @@
1package addrs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl/hclsyntax"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/terraform/tfdiags"
10)
11
// Target describes a targeted address with source location information.
type Target struct {
	// Subject is the address that was targeted.
	Subject Targetable
	// SourceRange is where in the input the address was extracted from.
	SourceRange tfdiags.SourceRange
}
17
// ParseTarget attempts to interpret the given traversal as a targetable
// address. The given traversal must be absolute, or this function will
// panic.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the Target value is invalid and
// must not be used.
func ParseTarget(traversal hcl.Traversal) (*Target, tfdiags.Diagnostics) {
	// A target may begin with a module instance path such as
	// "module.foo[1]."; strip that off before analyzing the rest.
	path, remain, diags := parseModuleInstancePrefix(traversal)
	if diags.HasErrors() {
		return nil, diags
	}

	rng := tfdiags.SourceRangeFromHCL(traversal.SourceRange())

	if len(remain) == 0 {
		// The whole traversal was a module instance path, so the target
		// is the module instance itself.
		return &Target{
			Subject:     path,
			SourceRange: rng,
		}, diags
	}

	// Default to a managed resource unless the next step is the "data"
	// keyword, which selects a data resource instead.
	mode := ManagedResourceMode
	if remain.RootName() == "data" {
		mode = DataResourceMode
		remain = remain[1:]
	}

	// After the optional "data" prefix we need at least a type and a name.
	if len(remain) < 2 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "Resource specification must include a resource type and name.",
			Subject:  remain.SourceRange().Ptr(),
		})
		return nil, diags
	}

	var typeName, name string
	switch tt := remain[0].(type) {
	case hcl.TraverseRoot:
		// Type name at the start of the remaining traversal (no module
		// prefix and no "data" keyword consumed).
		typeName = tt.Name
	case hcl.TraverseAttr:
		// Type name following a module prefix or the "data" keyword.
		typeName = tt.Name
	default:
		// Anything else (e.g. an index) cannot be a type name.
		switch mode {
		case ManagedResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "A resource type name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		case DataResourceMode:
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "A data source name is required.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
		default:
			panic("unknown mode")
		}
		return nil, diags
	}

	// The resource name must be an attribute access on the type name.
	switch tt := remain[1].(type) {
	case hcl.TraverseAttr:
		name = tt.Name
	default:
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource name is required.",
			Subject:  remain[1].SourceRange().Ptr(),
		})
		return nil, diags
	}

	var subject Targetable
	remain = remain[2:]
	switch len(remain) {
	case 0:
		// No index: the target is the whole resource.
		subject = path.Resource(mode, typeName, name)
	case 1:
		// Exactly one more step, which must be an index selecting a
		// specific resource instance.
		if tt, ok := remain[0].(hcl.TraverseIndex); ok {
			key, err := ParseInstanceKey(tt.Key)
			if err != nil {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid address",
					Detail:   fmt.Sprintf("Invalid resource instance key: %s.", err),
					Subject:  remain[0].SourceRange().Ptr(),
				})
				return nil, diags
			}

			subject = path.ResourceInstance(mode, typeName, name, key)
		} else {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid address",
				Detail:   "Resource instance key must be given in square brackets.",
				Subject:  remain[0].SourceRange().Ptr(),
			})
			return nil, diags
		}
	default:
		// More than one trailing step can never be valid.
		// NOTE(review): the subject highlights remain[1] (the first step
		// beyond a possible index) rather than the full extra span — confirm
		// this is the intended highlight.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "Unexpected extra operators after address.",
			Subject:  remain[1].SourceRange().Ptr(),
		})
		return nil, diags
	}

	return &Target{
		Subject:     subject,
		SourceRange: rng,
	}, diags
}
142
143// ParseTargetStr is a helper wrapper around ParseTarget that takes a string
144// and parses it with the HCL native syntax traversal parser before
145// interpreting it.
146//
147// This should be used only in specialized situations since it will cause the
148// created references to not have any meaningful source location information.
149// If a target string is coming from a source that should be identified in
150// error messages then the caller should instead parse it directly using a
151// suitable function from the HCL API and pass the traversal itself to
152// ParseTarget.
153//
154// Error diagnostics are returned if either the parsing fails or the analysis
155// of the traversal fails. There is no way for the caller to distinguish the
156// two kinds of diagnostics programmatically. If error diagnostics are returned
157// the returned target may be nil or incomplete.
158func ParseTargetStr(str string) (*Target, tfdiags.Diagnostics) {
159 var diags tfdiags.Diagnostics
160
161 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
162 diags = diags.Append(parseDiags)
163 if parseDiags.HasErrors() {
164 return nil, diags
165 }
166
167 target, targetDiags := ParseTarget(traversal)
168 diags = diags.Append(targetDiags)
169 return target, diags
170}
171
// ParseAbsResource attempts to interpret the given traversal as an absolute
// resource address, using the same syntax as expected by ParseTarget.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the AbsResource value is invalid and
// must not be used.
func ParseAbsResource(traversal hcl.Traversal) (AbsResource, tfdiags.Diagnostics) {
	// Delegate the heavy lifting to ParseTarget and then check that the
	// result is specifically a whole resource, not an instance or module.
	addr, diags := ParseTarget(traversal)
	if diags.HasErrors() {
		return AbsResource{}, diags
	}

	switch tt := addr.Subject.(type) {

	case AbsResource:
		return tt, diags

	case AbsResourceInstance: // Catch likely user error with specialized message
		// Assume that the last element of the traversal must be the index,
		// since that's required for a valid resource instance address.
		indexStep := traversal[len(traversal)-1]
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource address is required. This instance key identifies a specific resource instance, which is not expected here.",
			Subject:  indexStep.SourceRange().Ptr(),
		})
		return AbsResource{}, diags

	case ModuleInstance: // Catch likely user error with specialized message
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource address is required here. The module path must be followed by a resource specification.",
			Subject:  traversal.SourceRange().Ptr(),
		})
		return AbsResource{}, diags

	default: // Generic message for other address types
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource address is required here.",
			Subject:  traversal.SourceRange().Ptr(),
		})
		return AbsResource{}, diags

	}
}
223
224// ParseAbsResourceStr is a helper wrapper around ParseAbsResource that takes a
225// string and parses it with the HCL native syntax traversal parser before
226// interpreting it.
227//
228// Error diagnostics are returned if either the parsing fails or the analysis
229// of the traversal fails. There is no way for the caller to distinguish the
230// two kinds of diagnostics programmatically. If error diagnostics are returned
231// the returned address may be incomplete.
232//
233// Since this function has no context about the source of the given string,
234// any returned diagnostics will not have meaningful source location
235// information.
236func ParseAbsResourceStr(str string) (AbsResource, tfdiags.Diagnostics) {
237 var diags tfdiags.Diagnostics
238
239 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
240 diags = diags.Append(parseDiags)
241 if parseDiags.HasErrors() {
242 return AbsResource{}, diags
243 }
244
245 addr, addrDiags := ParseAbsResource(traversal)
246 diags = diags.Append(addrDiags)
247 return addr, diags
248}
249
// ParseAbsResourceInstance attempts to interpret the given traversal as an
// absolute resource instance address, using the same syntax as expected by
// ParseTarget.
//
// If no error diagnostics are returned, the returned target includes the
// address that was extracted and the source range it was extracted from.
//
// If error diagnostics are returned then the AbsResource value is invalid and
// must not be used.
func ParseAbsResourceInstance(traversal hcl.Traversal) (AbsResourceInstance, tfdiags.Diagnostics) {
	// Delegate to ParseTarget, then coerce the result to an instance
	// address where possible.
	addr, diags := ParseTarget(traversal)
	if diags.HasErrors() {
		return AbsResourceInstance{}, diags
	}

	switch tt := addr.Subject.(type) {

	case AbsResource:
		// A whole-resource address is accepted and treated as its no-key
		// instance.
		return tt.Instance(NoKey), diags

	case AbsResourceInstance:
		return tt, diags

	case ModuleInstance: // Catch likely user error with specialized message
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource instance address is required here. The module path must be followed by a resource instance specification.",
			Subject:  traversal.SourceRange().Ptr(),
		})
		return AbsResourceInstance{}, diags

	default: // Generic message for other address types
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid address",
			Detail:   "A resource address is required here.",
			Subject:  traversal.SourceRange().Ptr(),
		})
		return AbsResourceInstance{}, diags

	}
}
293
294// ParseAbsResourceInstanceStr is a helper wrapper around
295// ParseAbsResourceInstance that takes a string and parses it with the HCL
296// native syntax traversal parser before interpreting it.
297//
298// Error diagnostics are returned if either the parsing fails or the analysis
299// of the traversal fails. There is no way for the caller to distinguish the
300// two kinds of diagnostics programmatically. If error diagnostics are returned
301// the returned address may be incomplete.
302//
303// Since this function has no context about the source of the given string,
304// any returned diagnostics will not have meaningful source location
305// information.
306func ParseAbsResourceInstanceStr(str string) (AbsResourceInstance, tfdiags.Diagnostics) {
307 var diags tfdiags.Diagnostics
308
309 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
310 diags = diags.Append(parseDiags)
311 if parseDiags.HasErrors() {
312 return AbsResourceInstance{}, diags
313 }
314
315 addr, addrDiags := ParseAbsResourceInstance(traversal)
316 diags = diags.Append(addrDiags)
317 return addr, diags
318}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/path_attr.go b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go
new file mode 100644
index 0000000..cfc13f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/path_attr.go
@@ -0,0 +1,12 @@
1package addrs
2
// PathAttr is the address of an attribute of the "path" object in
// the interpolation scope, like "path.module".
type PathAttr struct {
	referenceable
	// Name is the attribute name, e.g. "module" in "path.module".
	Name string
}
9
10func (pa PathAttr) String() string {
11 return "path." + pa.Name
12}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/provider_config.go b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go
new file mode 100644
index 0000000..340dd19
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/provider_config.go
@@ -0,0 +1,297 @@
1package addrs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/tfdiags"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/hcl2/hcl/hclsyntax"
10)
11
// ProviderConfig is the address of a provider configuration.
type ProviderConfig struct {
	// Type is the provider type name, e.g. "aws".
	Type string

	// If not empty, Alias identifies which non-default (aliased) provider
	// configuration this address refers to.
	Alias string
}
20
21// NewDefaultProviderConfig returns the address of the default (un-aliased)
22// configuration for the provider with the given type name.
23func NewDefaultProviderConfig(typeName string) ProviderConfig {
24 return ProviderConfig{
25 Type: typeName,
26 }
27}
28
// ParseProviderConfigCompact parses the given absolute traversal as a relative
// provider address in compact form. The following are examples of traversals
// that can be successfully parsed as compact relative provider configuration
// addresses:
//
//     aws
//     aws.foo
//
// This function will panic if given a relative traversal.
//
// If the returned diagnostics contains errors then the result value is invalid
// and must not be used.
func ParseProviderConfigCompact(traversal hcl.Traversal) (ProviderConfig, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	ret := ProviderConfig{
		Type: traversal.RootName(),
	}

	if len(traversal) < 2 {
		// Just a type name, then.
		return ret, diags
	}

	aliasStep := traversal[1]
	switch ts := aliasStep.(type) {
	case hcl.TraverseAttr:
		ret.Alias = ts.Name
		return ret, diags
	default:
		// No early return here: we deliberately fall through so that any
		// extra trailing operators are also reported before returning the
		// (invalid, per the doc contract) result.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid provider configuration address",
			Detail:   "The provider type name must either stand alone or be followed by an alias name separated with a dot.",
			Subject:  aliasStep.SourceRange().Ptr(),
		})
	}

	if len(traversal) > 2 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid provider configuration address",
			Detail:   "Extraneous extra operators after provider configuration address.",
			Subject:  traversal[2:].SourceRange().Ptr(),
		})
	}

	return ret, diags
}
77
78// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact
79// that takes a string and parses it with the HCL native syntax traversal parser
80// before interpreting it.
81//
82// This should be used only in specialized situations since it will cause the
83// created references to not have any meaningful source location information.
84// If a reference string is coming from a source that should be identified in
85// error messages then the caller should instead parse it directly using a
86// suitable function from the HCL API and pass the traversal itself to
87// ParseProviderConfigCompact.
88//
89// Error diagnostics are returned if either the parsing fails or the analysis
90// of the traversal fails. There is no way for the caller to distinguish the
91// two kinds of diagnostics programmatically. If error diagnostics are returned
92// then the returned address is invalid.
93func ParseProviderConfigCompactStr(str string) (ProviderConfig, tfdiags.Diagnostics) {
94 var diags tfdiags.Diagnostics
95
96 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
97 diags = diags.Append(parseDiags)
98 if parseDiags.HasErrors() {
99 return ProviderConfig{}, diags
100 }
101
102 addr, addrDiags := ParseProviderConfigCompact(traversal)
103 diags = diags.Append(addrDiags)
104 return addr, diags
105}
106
107// Absolute returns an AbsProviderConfig from the receiver and the given module
108// instance address.
109func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig {
110 return AbsProviderConfig{
111 Module: module,
112 ProviderConfig: pc,
113 }
114}
115
116func (pc ProviderConfig) String() string {
117 if pc.Type == "" {
118 // Should never happen; always indicates a bug
119 return "provider.<invalid>"
120 }
121
122 if pc.Alias != "" {
123 return fmt.Sprintf("provider.%s.%s", pc.Type, pc.Alias)
124 }
125
126 return "provider." + pc.Type
127}
128
129// StringCompact is an alternative to String that returns the form that can
130// be parsed by ParseProviderConfigCompact, without the "provider." prefix.
131func (pc ProviderConfig) StringCompact() string {
132 if pc.Alias != "" {
133 return fmt.Sprintf("%s.%s", pc.Type, pc.Alias)
134 }
135 return pc.Type
136}
137
// AbsProviderConfig is the absolute address of a provider configuration
// within a particular module instance.
type AbsProviderConfig struct {
	// Module is the module instance the configuration belongs to; an empty
	// path means the root module.
	Module ModuleInstance
	// ProviderConfig is the module-relative configuration address.
	ProviderConfig ProviderConfig
}
144
// ParseAbsProviderConfig parses the given traversal as an absolute provider
// address. The following are examples of traversals that can be successfully
// parsed as absolute provider configuration addresses:
//
//     provider.aws
//     provider.aws.foo
//     module.bar.provider.aws
//     module.bar.module.baz.provider.aws.foo
//     module.foo[1].provider.aws.foo
//
// This type of address is used, for example, to record the relationships
// between resources and provider configurations in the state structure.
// This type of address is not generally used in the UI, except in error
// messages that refer to provider configurations.
func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) {
	// Strip any leading module instance path, e.g. "module.bar.".
	modInst, remain, diags := parseModuleInstancePrefix(traversal)
	ret := AbsProviderConfig{
		Module: modInst,
	}
	// After the module prefix we require "provider" followed by at least
	// a type name.
	if len(remain) < 2 || remain.RootName() != "provider" {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid provider configuration address",
			Detail:   "Provider address must begin with \"provider.\", followed by a provider type name.",
			Subject:  remain.SourceRange().Ptr(),
		})
		return ret, diags
	}
	// At most three steps: "provider", type name, optional alias.
	if len(remain) > 3 {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid provider configuration address",
			Detail:   "Extraneous operators after provider configuration alias.",
			Subject:  hcl.Traversal(remain[3:]).SourceRange().Ptr(),
		})
		return ret, diags
	}

	if tt, ok := remain[1].(hcl.TraverseAttr); ok {
		ret.ProviderConfig.Type = tt.Name
	} else {
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid provider configuration address",
			Detail:   "The prefix \"provider.\" must be followed by a provider type name.",
			Subject:  remain[1].SourceRange().Ptr(),
		})
		return ret, diags
	}

	// A third step, if present, is the configuration alias.
	if len(remain) == 3 {
		if tt, ok := remain[2].(hcl.TraverseAttr); ok {
			ret.ProviderConfig.Alias = tt.Name
		} else {
			diags = diags.Append(&hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid provider configuration address",
				Detail:   "Provider type name must be followed by a configuration alias name.",
				Subject:  remain[2].SourceRange().Ptr(),
			})
			return ret, diags
		}
	}

	return ret, diags
}
211
212// ParseAbsProviderConfigStr is a helper wrapper around ParseAbsProviderConfig
213// that takes a string and parses it with the HCL native syntax traversal parser
214// before interpreting it.
215//
216// This should be used only in specialized situations since it will cause the
217// created references to not have any meaningful source location information.
218// If a reference string is coming from a source that should be identified in
219// error messages then the caller should instead parse it directly using a
220// suitable function from the HCL API and pass the traversal itself to
221// ParseAbsProviderConfig.
222//
223// Error diagnostics are returned if either the parsing fails or the analysis
224// of the traversal fails. There is no way for the caller to distinguish the
225// two kinds of diagnostics programmatically. If error diagnostics are returned
226// the returned address is invalid.
227func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) {
228 var diags tfdiags.Diagnostics
229
230 traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
231 diags = diags.Append(parseDiags)
232 if parseDiags.HasErrors() {
233 return AbsProviderConfig{}, diags
234 }
235
236 addr, addrDiags := ParseAbsProviderConfig(traversal)
237 diags = diags.Append(addrDiags)
238 return addr, diags
239}
240
241// ProviderConfigDefault returns the address of the default provider config
242// of the given type inside the recieving module instance.
243func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig {
244 return AbsProviderConfig{
245 Module: m,
246 ProviderConfig: ProviderConfig{
247 Type: name,
248 },
249 }
250}
251
252// ProviderConfigAliased returns the address of an aliased provider config
253// of with given type and alias inside the recieving module instance.
254func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig {
255 return AbsProviderConfig{
256 Module: m,
257 ProviderConfig: ProviderConfig{
258 Type: name,
259 Alias: alias,
260 },
261 }
262}
263
264// Inherited returns an address that the receiving configuration address might
265// inherit from in a parent module. The second bool return value indicates if
266// such inheritance is possible, and thus whether the returned address is valid.
267//
268// Inheritance is possible only for default (un-aliased) providers in modules
269// other than the root module. Even if a valid address is returned, inheritence
270// may not be performed for other reasons, such as if the calling module
271// provided explicit provider configurations within the call for this module.
272// The ProviderTransformer graph transform in the main terraform module has
273// the authoritative logic for provider inheritance, and this method is here
274// mainly just for its benefit.
275func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) {
276 // Can't inherit if we're already in the root.
277 if len(pc.Module) == 0 {
278 return AbsProviderConfig{}, false
279 }
280
281 // Can't inherit if we have an alias.
282 if pc.ProviderConfig.Alias != "" {
283 return AbsProviderConfig{}, false
284 }
285
286 // Otherwise, we might inherit from a configuration with the same
287 // provider name in the parent module instance.
288 parentMod := pc.Module.Parent()
289 return pc.ProviderConfig.Absolute(parentMod), true
290}
291
292func (pc AbsProviderConfig) String() string {
293 if len(pc.Module) == 0 {
294 return pc.ProviderConfig.String()
295 }
296 return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String())
297}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/referenceable.go b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
new file mode 100644
index 0000000..211083a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/referenceable.go
@@ -0,0 +1,20 @@
1package addrs
2
// Referenceable is an interface implemented by all address types that can
// appear as references in configuration language expressions.
type Referenceable interface {
	// All implementations of this interface must be covered by the type switch
	// in lang.Scope.buildEvalContext.
	referenceableSigil()

	// String produces a string representation of the address that could be
	// parsed as a HCL traversal and passed to ParseRef to produce an identical
	// result.
	String() string
}
15
// referenceable is an embeddable helper that provides the unexported sigil
// method, marking the embedding type as a Referenceable.
type referenceable struct{}

func (r referenceable) referenceableSigil() {}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource.go b/vendor/github.com/hashicorp/terraform/addrs/resource.go
new file mode 100644
index 0000000..2866770
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/resource.go
@@ -0,0 +1,270 @@
1package addrs
2
3import (
4 "fmt"
5 "strings"
6)
7
// Resource is an address for a resource block within configuration, which
// contains potentially-multiple resource instances if that configuration
// block uses "count" or "for_each".
type Resource struct {
	referenceable
	// Mode distinguishes managed ("resource") from data ("data") blocks.
	Mode ResourceMode
	// Type is the resource type name, e.g. "aws_instance".
	Type string
	// Name is the user-chosen name label for the block.
	Name string
}
17
18func (r Resource) String() string {
19 switch r.Mode {
20 case ManagedResourceMode:
21 return fmt.Sprintf("%s.%s", r.Type, r.Name)
22 case DataResourceMode:
23 return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
24 default:
25 // Should never happen, but we'll return a string here rather than
26 // crashing just in case it does.
27 return fmt.Sprintf("<invalid>.%s.%s", r.Type, r.Name)
28 }
29}
30
31func (r Resource) Equal(o Resource) bool {
32 return r.String() == o.String()
33}
34
35// Instance produces the address for a specific instance of the receiver
36// that is idenfied by the given key.
37func (r Resource) Instance(key InstanceKey) ResourceInstance {
38 return ResourceInstance{
39 Resource: r,
40 Key: key,
41 }
42}
43
44// Absolute returns an AbsResource from the receiver and the given module
45// instance address.
46func (r Resource) Absolute(module ModuleInstance) AbsResource {
47 return AbsResource{
48 Module: module,
49 Resource: r,
50 }
51}
52
53// DefaultProviderConfig returns the address of the provider configuration
54// that should be used for the resource identified by the reciever if it
55// does not have a provider configuration address explicitly set in
56// configuration.
57//
58// This method is not able to verify that such a configuration exists, nor
59// represent the behavior of automatically inheriting certain provider
60// configurations from parent modules. It just does a static analysis of the
61// receiving address and returns an address to start from, relative to the
62// same module that contains the resource.
63func (r Resource) DefaultProviderConfig() ProviderConfig {
64 typeName := r.Type
65 if under := strings.Index(typeName, "_"); under != -1 {
66 typeName = typeName[:under]
67 }
68 return ProviderConfig{
69 Type: typeName,
70 }
71}
72
// ResourceInstance is an address for a specific instance of a resource.
// When a resource is defined in configuration with "count" or "for_each" it
// produces zero or more instances, which can be addressed using this type.
type ResourceInstance struct {
	referenceable
	// Resource is the address of the resource block this instance belongs to.
	Resource Resource
	// Key selects the specific instance; NoKey for single-instance resources.
	Key InstanceKey
}
81
82func (r ResourceInstance) ContainingResource() Resource {
83 return r.Resource
84}
85
86func (r ResourceInstance) String() string {
87 if r.Key == NoKey {
88 return r.Resource.String()
89 }
90 return r.Resource.String() + r.Key.String()
91}
92
93func (r ResourceInstance) Equal(o ResourceInstance) bool {
94 return r.String() == o.String()
95}
96
97// Absolute returns an AbsResourceInstance from the receiver and the given module
98// instance address.
99func (r ResourceInstance) Absolute(module ModuleInstance) AbsResourceInstance {
100 return AbsResourceInstance{
101 Module: module,
102 Resource: r,
103 }
104}
105
// AbsResource is an absolute address for a resource under a given module path.
type AbsResource struct {
	targetable
	// Module is the module instance containing the resource; empty for root.
	Module ModuleInstance
	// Resource is the module-relative resource address.
	Resource Resource
}
112
113// Resource returns the address of a particular resource within the receiver.
114func (m ModuleInstance) Resource(mode ResourceMode, typeName string, name string) AbsResource {
115 return AbsResource{
116 Module: m,
117 Resource: Resource{
118 Mode: mode,
119 Type: typeName,
120 Name: name,
121 },
122 }
123}
124
125// Instance produces the address for a specific instance of the receiver
126// that is idenfied by the given key.
127func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance {
128 return AbsResourceInstance{
129 Module: r.Module,
130 Resource: r.Resource.Instance(key),
131 }
132}
133
134// TargetContains implements Targetable by returning true if the given other
135// address is either equal to the receiver or is an instance of the
136// receiver.
137func (r AbsResource) TargetContains(other Targetable) bool {
138 switch to := other.(type) {
139
140 case AbsResource:
141 // We'll use our stringification as a cheat-ish way to test for equality.
142 return to.String() == r.String()
143
144 case AbsResourceInstance:
145 return r.TargetContains(to.ContainingResource())
146
147 default:
148 return false
149
150 }
151}
152
153func (r AbsResource) String() string {
154 if len(r.Module) == 0 {
155 return r.Resource.String()
156 }
157 return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
158}
159
160func (r AbsResource) Equal(o AbsResource) bool {
161 return r.String() == o.String()
162}
163
// AbsResourceInstance is an absolute address for a resource instance under a
// given module path.
type AbsResourceInstance struct {
	targetable
	// Module is the module instance containing the resource; empty for root.
	Module ModuleInstance
	// Resource is the module-relative resource instance address.
	Resource ResourceInstance
}
171
172// ResourceInstance returns the address of a particular resource instance within the receiver.
173func (m ModuleInstance) ResourceInstance(mode ResourceMode, typeName string, name string, key InstanceKey) AbsResourceInstance {
174 return AbsResourceInstance{
175 Module: m,
176 Resource: ResourceInstance{
177 Resource: Resource{
178 Mode: mode,
179 Type: typeName,
180 Name: name,
181 },
182 Key: key,
183 },
184 }
185}
186
187// ContainingResource returns the address of the resource that contains the
188// receving resource instance. In other words, it discards the key portion
189// of the address to produce an AbsResource value.
190func (r AbsResourceInstance) ContainingResource() AbsResource {
191 return AbsResource{
192 Module: r.Module,
193 Resource: r.Resource.ContainingResource(),
194 }
195}
196
197// TargetContains implements Targetable by returning true if the given other
198// address is equal to the receiver.
199func (r AbsResourceInstance) TargetContains(other Targetable) bool {
200 switch to := other.(type) {
201
202 case AbsResourceInstance:
203 // We'll use our stringification as a cheat-ish way to test for equality.
204 return to.String() == r.String()
205
206 default:
207 return false
208
209 }
210}
211
212func (r AbsResourceInstance) String() string {
213 if len(r.Module) == 0 {
214 return r.Resource.String()
215 }
216 return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
217}
218
219func (r AbsResourceInstance) Equal(o AbsResourceInstance) bool {
220 return r.String() == o.String()
221}
222
// Less returns true if the receiver should sort before the given other value
// in a sorted list of addresses.
//
// The sort order is: shallower module paths first, then module path string,
// then data resources before managed, then type, then name, then instance key.
func (r AbsResourceInstance) Less(o AbsResourceInstance) bool {
	switch {

	// Shorter (shallower) module paths sort first.
	case len(r.Module) != len(o.Module):
		return len(r.Module) < len(o.Module)

	case r.Module.String() != o.Module.String():
		return r.Module.Less(o.Module)

	// Data resources sort before managed resources.
	case r.Resource.Resource.Mode != o.Resource.Resource.Mode:
		return r.Resource.Resource.Mode == DataResourceMode

	case r.Resource.Resource.Type != o.Resource.Resource.Type:
		return r.Resource.Resource.Type < o.Resource.Resource.Type

	case r.Resource.Resource.Name != o.Resource.Resource.Name:
		return r.Resource.Resource.Name < o.Resource.Resource.Name

	case r.Resource.Key != o.Resource.Key:
		return InstanceKeyLess(r.Resource.Key, o.Resource.Key)

	default:
		// Addresses are equal.
		return false

	}
}
251
// ResourceMode defines which lifecycle applies to a given resource. Each
// resource lifecycle has a slightly different address format.
type ResourceMode rune

//go:generate stringer -type ResourceMode

const (
	// InvalidResourceMode is the zero value of ResourceMode and is not
	// a valid resource mode.
	InvalidResourceMode ResourceMode = 0

	// ManagedResourceMode indicates a managed resource, as defined by
	// "resource" blocks in configuration.
	ManagedResourceMode ResourceMode = 'M'

	// DataResourceMode indicates a data resource, as defined by
	// "data" blocks in configuration.
	DataResourceMode ResourceMode = 'D'
)
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go
new file mode 100644
index 0000000..9bdbdc4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/resource_phase.go
@@ -0,0 +1,105 @@
1package addrs
2
3import "fmt"
4
// ResourceInstancePhase is a special kind of reference used only internally
// during graph building to represent resource instances that are in a
// non-primary state.
//
// Graph nodes can declare themselves referenceable via an instance phase
// or can declare that they reference an instance phase in order to accommodate
// secondary graph nodes dealing with, for example, destroy actions.
//
// This special reference type cannot be accessed directly by end-users, and
// should never be shown in the UI.
type ResourceInstancePhase struct {
	referenceable // embedded sigil marking this type as a Referenceable address
	ResourceInstance ResourceInstance
	Phase            ResourceInstancePhaseType
}

// Compile-time assertion that ResourceInstancePhase implements Referenceable.
var _ Referenceable = ResourceInstancePhase{}
22
// Phase returns a special "phase address" for the receiving instance. See the
// documentation of ResourceInstancePhase for the limited situations where this
// is intended to be used.
func (r ResourceInstance) Phase(rpt ResourceInstancePhaseType) ResourceInstancePhase {
	return ResourceInstancePhase{
		ResourceInstance: r,
		Phase:            rpt,
	}
}
32
33// ContainingResource returns an address for the same phase of the resource
34// that this instance belongs to.
35func (rp ResourceInstancePhase) ContainingResource() ResourcePhase {
36 return rp.ResourceInstance.Resource.Phase(rp.Phase)
37}
38
39func (rp ResourceInstancePhase) String() string {
40 // We use a different separator here than usual to ensure that we'll
41 // never conflict with any non-phased resource instance string. This
42 // is intentionally something that would fail parsing with ParseRef,
43 // because this special address type should never be exposed in the UI.
44 return fmt.Sprintf("%s#%s", rp.ResourceInstance, rp.Phase)
45}
46
// ResourceInstancePhaseType is an enumeration used with ResourceInstancePhase.
type ResourceInstancePhaseType string

const (
	// ResourceInstancePhaseDestroy represents the "destroy" phase of a
	// resource instance.
	ResourceInstancePhaseDestroy ResourceInstancePhaseType = "destroy"

	// ResourceInstancePhaseDestroyCBD is similar to ResourceInstancePhaseDestroy
	// but is used for resources that have "create_before_destroy" set, thus
	// requiring a different dependency ordering.
	ResourceInstancePhaseDestroyCBD ResourceInstancePhaseType = "destroy-cbd"
)

// String returns the phase name, e.g. "destroy".
func (rpt ResourceInstancePhaseType) String() string {
	return string(rpt)
}
64
// ResourcePhase is a special kind of reference used only internally
// during graph building to represent resources that are in a
// non-primary state.
//
// Graph nodes can declare themselves referenceable via a resource phase
// or can declare that they reference a resource phase in order to accommodate
// secondary graph nodes dealing with, for example, destroy actions.
//
// Since resources (as opposed to instances) aren't actually phased, this
// address type is used only as an approximation during initial construction
// of the resource-oriented plan graph, under the assumption that resource
// instances with ResourceInstancePhase addresses will be created in dynamic
// subgraphs during the graph walk.
//
// This special reference type cannot be accessed directly by end-users, and
// should never be shown in the UI.
type ResourcePhase struct {
	referenceable // embedded sigil marking this type as a Referenceable address
	Resource Resource
	Phase    ResourceInstancePhaseType
}

// Compile-time assertion that ResourcePhase implements Referenceable.
var _ Referenceable = ResourcePhase{}
88
// Phase returns a special "phase address" for the receiving resource. See the
// documentation of ResourceInstancePhase for the limited situations where this
// is intended to be used.
func (r Resource) Phase(rpt ResourceInstancePhaseType) ResourcePhase {
	return ResourcePhase{
		Resource: r,
		Phase:    rpt,
	}
}
98
99func (rp ResourcePhase) String() string {
100 // We use a different separator here than usual to ensure that we'll
101 // never conflict with any non-phased resource instance string. This
102 // is intentionally something that would fail parsing with ParseRef,
103 // because this special address type should never be exposed in the UI.
104 return fmt.Sprintf("%s#%s", rp.Resource, rp.Phase)
105}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go
new file mode 100644
index 0000000..0b5c33f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/resourcemode_string.go
@@ -0,0 +1,33 @@
1// Code generated by "stringer -type ResourceMode"; DO NOT EDIT.
2
3package addrs
4
5import "strconv"
6
// NOTE(review): this file is generated by stringer ("DO NOT EDIT"); any
// change should be made by re-running the generator, not by hand.
//
// The function below exists only so the compiler will flag (via an
// "invalid array index" error) any change to the ResourceMode constant
// values, prompting regeneration.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[InvalidResourceMode-0]
	_ = x[ManagedResourceMode-77]
	_ = x[DataResourceMode-68]
}

const (
	_ResourceMode_name_0 = "InvalidResourceMode"
	_ResourceMode_name_1 = "DataResourceMode"
	_ResourceMode_name_2 = "ManagedResourceMode"
)

// String returns the constant name for the mode, or a "ResourceMode(n)"
// placeholder for values outside the known set.
func (i ResourceMode) String() string {
	switch {
	case i == 0:
		return _ResourceMode_name_0
	case i == 68: // 'D', i.e. DataResourceMode
		return _ResourceMode_name_1
	case i == 77: // 'M', i.e. ManagedResourceMode
		return _ResourceMode_name_2
	default:
		return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/self.go b/vendor/github.com/hashicorp/terraform/addrs/self.go
new file mode 100644
index 0000000..7f24eaf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/self.go
@@ -0,0 +1,14 @@
1package addrs
2
// Self is the address of the special object "self" that behaves as an alias
// for a containing object currently in scope.
const Self selfT = 0

// selfT is the singleton type of Self; Self (value 0) is its only
// meaningful value.
type selfT int

// referenceableSigil marks selfT as a Referenceable address type.
func (s selfT) referenceableSigil() {
}

// String returns the fixed source representation "self".
func (s selfT) String() string {
	return "self"
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/targetable.go b/vendor/github.com/hashicorp/terraform/addrs/targetable.go
new file mode 100644
index 0000000..16819a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/targetable.go
@@ -0,0 +1,26 @@
1package addrs
2
// Targetable is an interface implemented by all address types that can be
// used as "targets" for selecting sub-graphs of a graph.
type Targetable interface {
	// targetableSigil is an unexported marker method, restricting
	// implementations of this interface to this package.
	targetableSigil()

	// TargetContains returns true if the receiver is considered to contain
	// the given other address. Containment, for the purpose of targeting,
	// means that if a container address is targeted then all of the
	// addresses within it are also implicitly targeted.
	//
	// A targetable address always contains at least itself.
	TargetContains(other Targetable) bool

	// String produces a string representation of the address that could be
	// parsed as a HCL traversal and passed to ParseTarget to produce an
	// identical result.
	String() string
}
21
// targetable is an empty mix-in struct that address types embed in order to
// mark themselves as Targetable without each re-implementing the sigil.
type targetable struct{}

// targetableSigil satisfies the unexported marker method of Targetable.
func (r targetable) targetableSigil() {
}
diff --git a/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go
new file mode 100644
index 0000000..a880182
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/addrs/terraform_attr.go
@@ -0,0 +1,12 @@
1package addrs
2
3// TerraformAttr is the address of an attribute of the "terraform" object in
4// the interpolation scope, like "terraform.workspace".
5type TerraformAttr struct {
6 referenceable
7 Name string
8}
9
10func (ta TerraformAttr) String() string {
11 return "terraform." + ta.Name
12}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go
new file mode 100644
index 0000000..3dd9238
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/diagnostic.go
@@ -0,0 +1,295 @@
1package format
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "sort"
8 "strings"
9
10 "github.com/hashicorp/hcl2/hcl"
11 "github.com/hashicorp/hcl2/hcled"
12 "github.com/hashicorp/hcl2/hclparse"
13 "github.com/hashicorp/terraform/tfdiags"
14 "github.com/mitchellh/colorstring"
15 wordwrap "github.com/mitchellh/go-wordwrap"
16 "github.com/zclconf/go-cty/cty"
17)
18
// Diagnostic formats a single diagnostic message.
//
// The width argument specifies at what column the diagnostic messages will
// be wrapped. If set to zero, messages will not be wrapped by this function
// at all. Although the long-form text parts of the message are wrapped,
// not all aspects of the message are guaranteed to fit within the specified
// terminal width.
func Diagnostic(diag tfdiags.Diagnostic, sources map[string][]byte, color *colorstring.Colorize, width int) string {
	if diag == nil {
		// No good reason to pass a nil diagnostic in here...
		return ""
	}

	var buf bytes.Buffer

	// Severity-specific header ("Error:" / "Warning:").
	switch diag.Severity() {
	case tfdiags.Error:
		buf.WriteString(color.Color("\n[bold][red]Error: [reset]"))
	case tfdiags.Warning:
		buf.WriteString(color.Color("\n[bold][yellow]Warning: [reset]"))
	default:
		// Clear out any coloring that might be applied by Terraform's UI helper,
		// so our result is not context-sensitive.
		buf.WriteString(color.Color("\n[reset]"))
	}

	desc := diag.Description()
	sourceRefs := diag.Source()

	// We don't wrap the summary, since we expect it to be terse, and since
	// this is where we put the text of a native Go error it may not always
	// be pure text that lends itself well to word-wrapping.
	fmt.Fprintf(&buf, color.Color("[bold]%s[reset]\n\n"), desc.Summary)

	if sourceRefs.Subject != nil {
		// We'll borrow HCL's range implementation here, because it has some
		// handy features to help us produce a nice source code snippet.
		highlightRange := sourceRefs.Subject.ToHCL()
		snippetRange := highlightRange
		if sourceRefs.Context != nil {
			snippetRange = sourceRefs.Context.ToHCL()
		}

		// Make sure the snippet includes the highlight. This should be true
		// for any reasonable diagnostic, but we'll make sure.
		snippetRange = hcl.RangeOver(snippetRange, highlightRange)
		// Widen empty ranges by one byte/column so at least one character
		// participates in the rendering below.
		if snippetRange.Empty() {
			snippetRange.End.Byte++
			snippetRange.End.Column++
		}
		if highlightRange.Empty() {
			highlightRange.End.Byte++
			highlightRange.End.Column++
		}

		var src []byte
		if sources != nil {
			src = sources[snippetRange.Filename]
		}
		if src == nil {
			// This should generally not happen, as long as sources are always
			// loaded through the main loader. We may load things in other
			// ways in weird cases, so we'll tolerate it at the expense of
			// a not-so-helpful error message.
			fmt.Fprintf(&buf, " on %s line %d:\n (source code not available)\n", highlightRange.Filename, highlightRange.Start.Line)
		} else {
			file, offset := parseRange(src, highlightRange)

			headerRange := highlightRange

			contextStr := hcled.ContextString(file, offset-1)
			if contextStr != "" {
				contextStr = ", in " + contextStr
			}

			fmt.Fprintf(&buf, " on %s line %d%s:\n", headerRange.Filename, headerRange.Start.Line, contextStr)

			// Config snippet rendering
			sc := hcl.NewRangeScanner(src, highlightRange.Filename, bufio.ScanLines)
			for sc.Scan() {
				lineRange := sc.Range()
				if !lineRange.Overlaps(snippetRange) {
					continue
				}
				// Split each line around the highlight so the highlighted
				// portion can be underlined.
				beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange)
				before := beforeRange.SliceBytes(src)
				highlighted := highlightedRange.SliceBytes(src)
				after := afterRange.SliceBytes(src)
				fmt.Fprintf(
					&buf, color.Color("%4d: %s[underline]%s[reset]%s\n"),
					lineRange.Start.Line,
					before, highlighted, after,
				)
			}

		}

		if fromExpr := diag.FromExpr(); fromExpr != nil {
			// We may also be able to generate information about the dynamic
			// values of relevant variables at the point of evaluation, then.
			// This is particularly useful for expressions that get evaluated
			// multiple times with different values, such as blocks using
			// "count" and "for_each", or within "for" expressions.
			expr := fromExpr.Expression
			ctx := fromExpr.EvalContext
			vars := expr.Variables()
			stmts := make([]string, 0, len(vars))
			seen := make(map[string]struct{}, len(vars))
		Traversals:
			for _, traversal := range vars {
				for len(traversal) > 1 {
					val, diags := traversal.TraverseAbs(ctx)
					if diags.HasErrors() {
						// Skip anything that generates errors, since we probably
						// already have the same error in our diagnostics set
						// already.
						traversal = traversal[:len(traversal)-1]
						continue
					}

					traversalStr := traversalStr(traversal)
					if _, exists := seen[traversalStr]; exists {
						continue Traversals // don't show duplicates when the same variable is referenced multiple times
					}
					switch {
					case !val.IsKnown():
						// Can't say anything about this yet, then.
						continue Traversals
					case val.IsNull():
						stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is null"), traversalStr))
					default:
						stmts = append(stmts, fmt.Sprintf(color.Color("[bold]%s[reset] is %s"), traversalStr, compactValueStr(val)))
					}
					seen[traversalStr] = struct{}{}
					// Note: the loop terminates on the next iteration because
					// the "seen" check above now matches this traversal and
					// jumps to the next one via "continue Traversals".
				}
			}

			sort.Strings(stmts) // FIXME: Should maybe use a traversal-aware sort that can sort numeric indexes properly?

			if len(stmts) > 0 {
				fmt.Fprint(&buf, color.Color(" [dark_gray]|----------------[reset]\n"))
			}
			for _, stmt := range stmts {
				fmt.Fprintf(&buf, color.Color(" [dark_gray]|[reset] %s\n"), stmt)
			}
		}

		buf.WriteByte('\n')
	}

	if desc.Detail != "" {
		detail := desc.Detail
		if width != 0 {
			detail = wordwrap.WrapString(detail, uint(width))
		}
		fmt.Fprintf(&buf, "%s\n", detail)
	}

	return buf.String()
}
179
180func parseRange(src []byte, rng hcl.Range) (*hcl.File, int) {
181 filename := rng.Filename
182 offset := rng.Start.Byte
183
184 // We need to re-parse here to get a *hcl.File we can interrogate. This
185 // is not awesome since we presumably already parsed the file earlier too,
186 // but this re-parsing is architecturally simpler than retaining all of
187 // the hcl.File objects and we only do this in the case of an error anyway
188 // so the overhead here is not a big problem.
189 parser := hclparse.NewParser()
190 var file *hcl.File
191 var diags hcl.Diagnostics
192 if strings.HasSuffix(filename, ".json") {
193 file, diags = parser.ParseJSON(src, filename)
194 } else {
195 file, diags = parser.ParseHCL(src, filename)
196 }
197 if diags.HasErrors() {
198 return file, offset
199 }
200
201 return file, offset
202}
203
204// traversalStr produces a representation of an HCL traversal that is compact,
205// resembles HCL native syntax, and is suitable for display in the UI.
206func traversalStr(traversal hcl.Traversal) string {
207 // This is a specialized subset of traversal rendering tailored to
208 // producing helpful contextual messages in diagnostics. It is not
209 // comprehensive nor intended to be used for other purposes.
210
211 var buf bytes.Buffer
212 for _, step := range traversal {
213 switch tStep := step.(type) {
214 case hcl.TraverseRoot:
215 buf.WriteString(tStep.Name)
216 case hcl.TraverseAttr:
217 buf.WriteByte('.')
218 buf.WriteString(tStep.Name)
219 case hcl.TraverseIndex:
220 buf.WriteByte('[')
221 if keyTy := tStep.Key.Type(); keyTy.IsPrimitiveType() {
222 buf.WriteString(compactValueStr(tStep.Key))
223 } else {
224 // We'll just use a placeholder for more complex values,
225 // since otherwise our result could grow ridiculously long.
226 buf.WriteString("...")
227 }
228 buf.WriteByte(']')
229 }
230 }
231 return buf.String()
232}
233
// compactValueStr produces a compact, single-line summary of a given value
// that is suitable for display in the UI.
//
// For primitives it returns a full representation, while for more complex
// types it instead summarizes the type, size, etc to produce something
// that is hopefully still somewhat useful but not as verbose as a rendering
// of the entire data structure.
func compactValueStr(val cty.Value) string {
	// This is a specialized subset of value rendering tailored to producing
	// helpful but concise messages in diagnostics. It is not comprehensive
	// nor intended to be used for other purposes.

	ty := val.Type()
	switch {
	case val.IsNull():
		return "null"
	case !val.IsKnown():
		// Should never happen here because we should filter before we get
		// in here, but we'll do something reasonable rather than panic.
		return "(not yet known)"
	case ty == cty.Bool:
		if val.True() {
			return "true"
		}
		return "false"
	case ty == cty.Number:
		// 'g' format with 10 significant digits keeps the rendering short
		// while still distinguishing most values.
		bf := val.AsBigFloat()
		return bf.Text('g', 10)
	case ty == cty.String:
		// Go string syntax is not exactly the same as HCL native string syntax,
		// but we'll accept the minor edge-cases where this is different here
		// for now, just to get something reasonable here.
		return fmt.Sprintf("%q", val.AsString())
	case ty.IsCollectionType() || ty.IsTupleType():
		// Collections/tuples are summarized by element count only.
		l := val.LengthInt()
		switch l {
		case 0:
			return "empty " + ty.FriendlyName()
		case 1:
			return ty.FriendlyName() + " with 1 element"
		default:
			return fmt.Sprintf("%s with %d elements", ty.FriendlyName(), l)
		}
	case ty.IsObjectType():
		atys := ty.AttributeTypes()
		l := len(atys)
		switch l {
		case 0:
			return "object with no attributes"
		case 1:
			// Single map iteration just extracts the sole attribute name.
			var name string
			for k := range atys {
				name = k
			}
			return fmt.Sprintf("object with 1 attribute %q", name)
		default:
			return fmt.Sprintf("object with %d attributes", l)
		}
	default:
		return ty.FriendlyName()
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/diff.go b/vendor/github.com/hashicorp/terraform/command/format/diff.go
new file mode 100644
index 0000000..c726f0e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/diff.go
@@ -0,0 +1,1192 @@
1package format
2
3import (
4 "bufio"
5 "bytes"
6 "fmt"
7 "sort"
8 "strings"
9
10 "github.com/mitchellh/colorstring"
11 "github.com/zclconf/go-cty/cty"
12 ctyjson "github.com/zclconf/go-cty/cty/json"
13
14 "github.com/hashicorp/terraform/addrs"
15 "github.com/hashicorp/terraform/configs/configschema"
16 "github.com/hashicorp/terraform/plans"
17 "github.com/hashicorp/terraform/plans/objchange"
18 "github.com/hashicorp/terraform/states"
19)
20
// ResourceChange returns a string representation of a change to a particular
// resource, for inclusion in user-facing plan output.
//
// The resource schema must be provided along with the change so that the
// formatted change can reflect the configuration structure for the associated
// resource.
//
// If "color" is non-nil, it will be used to color the result. Otherwise,
// no color codes will be included.
func ResourceChange(
	change *plans.ResourceInstanceChangeSrc,
	tainted bool,
	schema *configschema.Block,
	color *colorstring.Colorize,
) string {
	addr := change.Addr
	var buf bytes.Buffer

	if color == nil {
		// Fall back on a disabled colorizer so the Color calls below remain
		// safe and simply strip the color codes.
		color = &colorstring.Colorize{
			Colors:  colorstring.DefaultColors,
			Disable: true,
			Reset:   false,
		}
	}

	dispAddr := addr.String()
	if change.DeposedKey != states.NotDeposed {
		dispAddr = fmt.Sprintf("%s (deposed object %s)", dispAddr, change.DeposedKey)
	}

	// Header comment line describing what will happen to this resource.
	switch change.Action {
	case plans.Create:
		buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be created", dispAddr)))
	case plans.Read:
		buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be read during apply\n # (config refers to values not yet known)", dispAddr)))
	case plans.Update:
		buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be updated in-place", dispAddr)))
	case plans.CreateThenDelete, plans.DeleteThenCreate:
		if tainted {
			buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] is tainted, so must be [bold][red]replaced", dispAddr)))
		} else {
			buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] must be [bold][red]replaced", dispAddr)))
		}
	case plans.Delete:
		buf.WriteString(color.Color(fmt.Sprintf("[bold] # %s[reset] will be [bold][red]destroyed", dispAddr)))
	default:
		// should never happen, since the above is exhaustive
		buf.WriteString(fmt.Sprintf("%s has an action the plan renderer doesn't support (this is a bug)", dispAddr))
	}
	buf.WriteString(color.Color("[reset]\n"))

	// Action symbol prefixing the block itself (+, -, ~, <=, -/+, +/-).
	switch change.Action {
	case plans.Create:
		buf.WriteString(color.Color("[green] +[reset] "))
	case plans.Read:
		buf.WriteString(color.Color("[cyan] <=[reset] "))
	case plans.Update:
		buf.WriteString(color.Color("[yellow] ~[reset] "))
	case plans.DeleteThenCreate:
		buf.WriteString(color.Color("[red]-[reset]/[green]+[reset] "))
	case plans.CreateThenDelete:
		buf.WriteString(color.Color("[green]+[reset]/[red]-[reset] "))
	case plans.Delete:
		buf.WriteString(color.Color("[red] -[reset] "))
	default:
		buf.WriteString(color.Color("??? "))
	}

	// Block header, mirroring configuration syntax.
	switch addr.Resource.Resource.Mode {
	case addrs.ManagedResourceMode:
		buf.WriteString(fmt.Sprintf(
			"resource %q %q",
			addr.Resource.Resource.Type,
			addr.Resource.Resource.Name,
		))
	case addrs.DataResourceMode:
		// NOTE(review): this format string carries a trailing space that the
		// managed-resource case above does not — confirm whether that is an
		// intentional alignment choice before normalizing.
		buf.WriteString(fmt.Sprintf(
			"data %q %q ",
			addr.Resource.Resource.Type,
			addr.Resource.Resource.Name,
		))
	default:
		// should never happen, since the above is exhaustive
		buf.WriteString(addr.String())
	}

	buf.WriteString(" {")

	p := blockBodyDiffPrinter{
		buf:             &buf,
		color:           color,
		action:          change.Action,
		requiredReplace: change.RequiredReplace,
	}

	// Most commonly-used resources have nested blocks that result in us
	// going at least three traversals deep while we recurse here, so we'll
	// start with that much capacity and then grow as needed for deeper
	// structures.
	path := make(cty.Path, 0, 3)

	changeV, err := change.Decode(schema.ImpliedType())
	if err != nil {
		// Should never happen in here, since we've already been through
		// loads of layers of encode/decode of the planned changes before now.
		panic(fmt.Sprintf("failed to decode plan for %s while rendering diff: %s", addr, err))
	}

	// We currently have an opt-out that permits the legacy SDK to return values
	// that defy our usual conventions around handling of nesting blocks. To
	// avoid the rendering code from needing to handle all of these, we'll
	// normalize first.
	// (Ideally we'd do this as part of the SDK opt-out implementation in core,
	// but we've added it here for now to reduce risk of unexpected impacts
	// on other code in core.)
	changeV.Change.Before = objchange.NormalizeObjectFromLegacySDK(changeV.Change.Before, schema)
	changeV.Change.After = objchange.NormalizeObjectFromLegacySDK(changeV.Change.After, schema)

	bodyWritten := p.writeBlockBodyDiff(schema, changeV.Before, changeV.After, 6, path)
	if bodyWritten {
		buf.WriteString("\n")
		buf.WriteString(strings.Repeat(" ", 4))
	}
	buf.WriteString("}\n")

	return buf.String()
}
149
// blockBodyDiffPrinter carries the shared state for rendering the body of a
// resource diff: the output buffer, the colorizer, the overall action for
// the change, and the set of attribute paths whose changes force replacement.
type blockBodyDiffPrinter struct {
	buf             *bytes.Buffer
	color           *colorstring.Colorize
	action          plans.Action
	requiredReplace cty.PathSet
}

// forcesNewResourceCaption is appended after a changed value whose path is
// in requiredReplace, marking it as the reason for resource replacement.
const forcesNewResourceCaption = " [red]# forces replacement[reset]"
158
// writeBlockBodyDiff writes attribute or block differences
// and returns true if any differences were found and written.
func (p *blockBodyDiffPrinter) writeBlockBodyDiff(schema *configschema.Block, old, new cty.Value, indent int, path cty.Path) bool {
	// Reserve room for one extra path step so the appends in the helpers
	// below don't need to reallocate for every attribute.
	path = ctyEnsurePathCapacity(path, 1)

	bodyWritten := false
	blankBeforeBlocks := false

	// First pass: attributes, sorted by name and aligned on the longest
	// name that actually changed.
	{
		attrNames := make([]string, 0, len(schema.Attributes))
		attrNameLen := 0
		for name := range schema.Attributes {
			oldVal := ctyGetAttrMaybeNull(old, name)
			newVal := ctyGetAttrMaybeNull(new, name)
			if oldVal.IsNull() && newVal.IsNull() {
				// Skip attributes where both old and new values are null
				// (we do this early here so that we'll do our value alignment
				// based on the longest attribute name that has a change, rather
				// than the longest attribute name in the full set.)
				continue
			}

			attrNames = append(attrNames, name)
			if len(name) > attrNameLen {
				attrNameLen = len(name)
			}
		}
		sort.Strings(attrNames)
		if len(attrNames) > 0 {
			blankBeforeBlocks = true
		}

		for _, name := range attrNames {
			attrS := schema.Attributes[name]
			oldVal := ctyGetAttrMaybeNull(old, name)
			newVal := ctyGetAttrMaybeNull(new, name)

			bodyWritten = true
			p.writeAttrDiff(name, attrS, oldVal, newVal, attrNameLen, indent, path)
		}
	}

	// Second pass: nested block types, sorted by type name.
	{
		blockTypeNames := make([]string, 0, len(schema.BlockTypes))
		for name := range schema.BlockTypes {
			blockTypeNames = append(blockTypeNames, name)
		}
		sort.Strings(blockTypeNames)

		for _, name := range blockTypeNames {
			blockS := schema.BlockTypes[name]
			oldVal := ctyGetAttrMaybeNull(old, name)
			newVal := ctyGetAttrMaybeNull(new, name)

			bodyWritten = true
			p.writeNestedBlockDiffs(name, blockS, oldVal, newVal, blankBeforeBlocks, indent, path)

			// Always include a blank for any subsequent block types.
			blankBeforeBlocks = true
		}
	}

	return bodyWritten
}
222
// writeAttrDiff renders the diff line for a single attribute: the action
// symbol, the name padded to nameLen, and either the new value alone or an
// old-to-new value diff. Sensitive attributes are masked.
func (p *blockBodyDiffPrinter) writeAttrDiff(name string, attrS *configschema.Attribute, old, new cty.Value, nameLen, indent int, path cty.Path) {
	path = append(path, cty.GetAttrStep{Name: name})
	p.buf.WriteString("\n")
	p.buf.WriteString(strings.Repeat(" ", indent))
	showJustNew := false
	var action plans.Action
	// Classify the attribute change: created, deleted, unchanged, or updated.
	switch {
	case old.IsNull():
		action = plans.Create
		showJustNew = true
	case new.IsNull():
		action = plans.Delete
	case ctyEqualWithUnknown(old, new):
		action = plans.NoOp
		showJustNew = true
	default:
		action = plans.Update
	}

	p.writeActionSymbol(action)

	p.buf.WriteString(p.color.Color("[bold]"))
	p.buf.WriteString(name)
	p.buf.WriteString(p.color.Color("[reset]"))
	// Pad so the "=" signs line up across attributes.
	p.buf.WriteString(strings.Repeat(" ", nameLen-len(name)))
	p.buf.WriteString(" = ")

	if attrS.Sensitive {
		// Never render sensitive values; mask them regardless of action.
		p.buf.WriteString("(sensitive value)")
	} else {
		switch {
		case showJustNew:
			p.writeValue(new, action, indent+2)
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
		default:
			// We show new even if it is null to emphasize the fact
			// that it is being unset, since otherwise it is easy to
			// misunderstand that the value is still set to the old value.
			p.writeValueDiff(old, new, indent+2, path)
		}
	}
}
267
// writeNestedBlockDiffs renders the diffs for all instances of one nested
// block type, dispatching on the schema's nesting mode (single/group, list,
// set, or map) to decide how old and new instances are matched up.
func (p *blockBodyDiffPrinter) writeNestedBlockDiffs(name string, blockS *configschema.NestedBlock, old, new cty.Value, blankBefore bool, indent int, path cty.Path) {
	path = append(path, cty.GetAttrStep{Name: name})
	if old.IsNull() && new.IsNull() {
		// Nothing to do if both old and new is null
		return
	}

	// Where old/new are collections representing a nesting mode other than
	// NestingSingle, we assume the collection value can never be unknown
	// since we always produce the container for the nested objects, even if
	// the objects within are computed.

	switch blockS.Nesting {
	case configschema.NestingSingle, configschema.NestingGroup:
		// At most one instance of the block; classify the change directly.
		var action plans.Action
		eqV := new.Equals(old)
		switch {
		case old.IsNull():
			action = plans.Create
		case new.IsNull():
			action = plans.Delete
		case !new.IsWhollyKnown() || !old.IsWhollyKnown():
			// "old" should actually always be known due to our contract
			// that old values must never be unknown, but we'll allow it
			// anyway to be robust.
			action = plans.Update
		case !eqV.IsKnown() || !eqV.True():
			action = plans.Update
		}

		if blankBefore {
			p.buf.WriteRune('\n')
		}
		p.writeNestedBlockDiff(name, nil, &blockS.Block, action, old, new, indent, path)
	case configschema.NestingList:
		// For the sake of handling nested blocks, we'll treat a null list
		// the same as an empty list since the config language doesn't
		// distinguish these anyway.
		old = ctyNullBlockListAsEmpty(old)
		new = ctyNullBlockListAsEmpty(new)

		oldItems := ctyCollectionValues(old)
		newItems := ctyCollectionValues(new)

		// Here we intentionally preserve the index-based correspondence
		// between old and new, rather than trying to detect insertions
		// and removals in the list, because this more accurately reflects
		// how Terraform Core and providers will understand the change,
		// particularly when the nested block contains computed attributes
		// that will themselves maintain correspondence by index.

		// commonLen is number of elements that exist in both lists, which
		// will be presented as updates (~). Any additional items in one
		// of the lists will be presented as either creates (+) or deletes (-)
		// depending on which list they belong to.
		var commonLen int
		switch {
		case len(oldItems) < len(newItems):
			commonLen = len(oldItems)
		default:
			commonLen = len(newItems)
		}

		if blankBefore && (len(oldItems) > 0 || len(newItems) > 0) {
			p.buf.WriteRune('\n')
		}

		// Indexes present in both lists render as (possibly no-op) updates.
		for i := 0; i < commonLen; i++ {
			path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))})
			oldItem := oldItems[i]
			newItem := newItems[i]
			action := plans.Update
			if oldItem.RawEquals(newItem) {
				action = plans.NoOp
			}
			p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldItem, newItem, indent, path)
		}
		// Extra trailing old items are deletions.
		for i := commonLen; i < len(oldItems); i++ {
			path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))})
			oldItem := oldItems[i]
			newItem := cty.NullVal(oldItem.Type())
			p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Delete, oldItem, newItem, indent, path)
		}
		// Extra trailing new items are creations.
		for i := commonLen; i < len(newItems); i++ {
			path := append(path, cty.IndexStep{Key: cty.NumberIntVal(int64(i))})
			newItem := newItems[i]
			oldItem := cty.NullVal(newItem.Type())
			p.writeNestedBlockDiff(name, nil, &blockS.Block, plans.Create, oldItem, newItem, indent, path)
		}
	case configschema.NestingSet:
		// For the sake of handling nested blocks, we'll treat a null set
		// the same as an empty set since the config language doesn't
		// distinguish these anyway.
		old = ctyNullBlockSetAsEmpty(old)
		new = ctyNullBlockSetAsEmpty(new)

		oldItems := ctyCollectionValues(old)
		newItems := ctyCollectionValues(new)

		if (len(oldItems) + len(newItems)) == 0 {
			// Nothing to do if both sets are empty
			return
		}

		// Union of both sets; membership in old/new decides the action
		// for each element below.
		allItems := make([]cty.Value, 0, len(oldItems)+len(newItems))
		allItems = append(allItems, oldItems...)
		allItems = append(allItems, newItems...)
		all := cty.SetVal(allItems)

		if blankBefore {
			p.buf.WriteRune('\n')
		}

		for it := all.ElementIterator(); it.Next(); {
			_, val := it.Element()
			var action plans.Action
			var oldValue, newValue cty.Value
			switch {
			case !val.IsKnown():
				// Unknown elements cannot be matched against either set,
				// so render them as an update to an unknown value.
				action = plans.Update
				newValue = val
			case !old.HasElement(val).True():
				action = plans.Create
				oldValue = cty.NullVal(val.Type())
				newValue = val
			case !new.HasElement(val).True():
				action = plans.Delete
				oldValue = val
				newValue = cty.NullVal(val.Type())
			default:
				action = plans.NoOp
				oldValue = val
				newValue = val
			}
			path := append(path, cty.IndexStep{Key: val})
			p.writeNestedBlockDiff(name, nil, &blockS.Block, action, oldValue, newValue, indent, path)
		}

	case configschema.NestingMap:
		// For the sake of handling nested blocks, we'll treat a null map
		// the same as an empty map since the config language doesn't
		// distinguish these anyway.
		old = ctyNullBlockMapAsEmpty(old)
		new = ctyNullBlockMapAsEmpty(new)

		oldItems := old.AsValueMap()
		newItems := new.AsValueMap()
		if (len(oldItems) + len(newItems)) == 0 {
			// Nothing to do if both maps are empty
			return
		}

		// Collect the union of keys and iterate them in sorted order for
		// deterministic output.
		allKeys := make(map[string]bool)
		for k := range oldItems {
			allKeys[k] = true
		}
		for k := range newItems {
			allKeys[k] = true
		}
		allKeysOrder := make([]string, 0, len(allKeys))
		for k := range allKeys {
			allKeysOrder = append(allKeysOrder, k)
		}
		sort.Strings(allKeysOrder)

		if blankBefore {
			p.buf.WriteRune('\n')
		}

		for _, k := range allKeysOrder {
			var action plans.Action
			oldValue := oldItems[k]
			newValue := newItems[k]
			// cty.NilVal (absent key) marks one-sided changes.
			switch {
			case oldValue == cty.NilVal:
				oldValue = cty.NullVal(newValue.Type())
				action = plans.Create
			case newValue == cty.NilVal:
				newValue = cty.NullVal(oldValue.Type())
				action = plans.Delete
			case !newValue.RawEquals(oldValue):
				action = plans.Update
			default:
				action = plans.NoOp
			}

			path := append(path, cty.IndexStep{Key: cty.StringVal(k)})
			p.writeNestedBlockDiff(name, &k, &blockS.Block, action, oldValue, newValue, indent, path)
		}
	}
}
459
// writeNestedBlockDiff renders the diff for a single nested block: the
// header line ("name {" or `name "label" {`), the recursively-rendered
// body, and the closing brace. label is nil for list/set-backed blocks
// and points at the map key for map-backed blocks.
func (p *blockBodyDiffPrinter) writeNestedBlockDiff(name string, label *string, blockS *configschema.Block, action plans.Action, old, new cty.Value, indent int, path cty.Path) {
	p.buf.WriteString("\n")
	p.buf.WriteString(strings.Repeat(" ", indent))
	p.writeActionSymbol(action)

	if label != nil {
		fmt.Fprintf(p.buf, "%s %q {", name, *label)
	} else {
		fmt.Fprintf(p.buf, "%s {", name)
	}

	// Check both the block's own path and its parent's, since replacement
	// may be marked at either level.
	// NOTE(review): path[:len(path)-1] assumes path is non-empty; all
	// visible callers append an index step first — confirm before reuse.
	if action != plans.NoOp && (p.pathForcesNewResource(path) || p.pathForcesNewResource(path[:len(path)-1])) {
		p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
	}

	// Only emit the closing-brace indentation on its own line if the body
	// actually produced output; an empty block closes on the same line.
	bodyWritten := p.writeBlockBodyDiff(blockS, old, new, indent+4, path)
	if bodyWritten {
		p.buf.WriteString("\n")
		p.buf.WriteString(strings.Repeat(" ", indent+2))
	}
	p.buf.WriteString("}")
}
482
// writeValue renders a single (non-diff) value at the given indent level,
// prefixing each nested element with the symbol for the given action.
// Unknown values render as "(known after apply)" and nulls as "null".
// Collections and objects are rendered recursively, one element per line.
func (p *blockBodyDiffPrinter) writeValue(val cty.Value, action plans.Action, indent int) {
	if !val.IsKnown() {
		p.buf.WriteString("(known after apply)")
		return
	}
	if val.IsNull() {
		p.buf.WriteString(p.color.Color("[dark_gray]null[reset]"))
		return
	}

	ty := val.Type()

	switch {
	case ty.IsPrimitiveType():
		switch ty {
		case cty.String:
			{
				// Special behavior for JSON strings containing array or object
				src := []byte(val.AsString())
				ty, err := ctyjson.ImpliedType(src)
				// check for the special case of "null", which decodes to nil,
				// and just allow it to be printed out directly
				if err == nil && !ty.IsPrimitiveType() && val.AsString() != "null" {
					jv, err := ctyjson.Unmarshal(src, ty)
					if err == nil {
						// Render as jsonencode(...) so the user sees the
						// effective structure rather than the raw string.
						p.buf.WriteString("jsonencode(")
						if jv.LengthInt() == 0 {
							p.writeValue(jv, action, 0)
						} else {
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent+4))
							p.writeValue(jv, action, indent+4)
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent))
						}
						p.buf.WriteByte(')')
						break // don't *also* do the normal behavior below
					}
				}
			}
			fmt.Fprintf(p.buf, "%q", val.AsString())
		case cty.Bool:
			if val.True() {
				p.buf.WriteString("true")
			} else {
				p.buf.WriteString("false")
			}
		case cty.Number:
			bf := val.AsBigFloat()
			p.buf.WriteString(bf.Text('f', -1))
		default:
			// should never happen, since the above is exhaustive
			fmt.Fprintf(p.buf, "%#v", val)
		}
	case ty.IsListType() || ty.IsSetType() || ty.IsTupleType():
		p.buf.WriteString("[")

		it := val.ElementIterator()
		for it.Next() {
			_, val := it.Element()

			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent+2))
			p.writeActionSymbol(action)
			p.writeValue(val, action, indent+4)
			p.buf.WriteString(",")
		}

		if val.LengthInt() > 0 {
			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent))
		}
		p.buf.WriteString("]")
	case ty.IsMapType():
		p.buf.WriteString("{")

		// First pass: find the widest key so values can be aligned.
		keyLen := 0
		for it := val.ElementIterator(); it.Next(); {
			key, _ := it.Element()
			if keyStr := key.AsString(); len(keyStr) > keyLen {
				keyLen = len(keyStr)
			}
		}

		for it := val.ElementIterator(); it.Next(); {
			key, val := it.Element()

			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent+2))
			p.writeActionSymbol(action)
			p.writeValue(key, action, indent+4)
			// Pad to align the "=" signs across all entries.
			p.buf.WriteString(strings.Repeat(" ", keyLen-len(key.AsString())))
			p.buf.WriteString(" = ")
			p.writeValue(val, action, indent+4)
		}

		if val.LengthInt() > 0 {
			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent))
		}
		p.buf.WriteString("}")
	case ty.IsObjectType():
		p.buf.WriteString("{")

		// Collect attribute names and the widest one for alignment,
		// then sort for deterministic output.
		atys := ty.AttributeTypes()
		attrNames := make([]string, 0, len(atys))
		nameLen := 0
		for attrName := range atys {
			attrNames = append(attrNames, attrName)
			if len(attrName) > nameLen {
				nameLen = len(attrName)
			}
		}
		sort.Strings(attrNames)

		for _, attrName := range attrNames {
			val := val.GetAttr(attrName)

			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent+2))
			p.writeActionSymbol(action)
			p.buf.WriteString(attrName)
			p.buf.WriteString(strings.Repeat(" ", nameLen-len(attrName)))
			p.buf.WriteString(" = ")
			p.writeValue(val, action, indent+4)
		}

		if len(attrNames) > 0 {
			p.buf.WriteString("\n")
			p.buf.WriteString(strings.Repeat(" ", indent))
		}
		p.buf.WriteString("}")
	}
}
617
// writeValueDiff renders the transition from old to new for a single
// attribute value. For known, non-null values of matching type it uses
// specialized per-type renderings (JSON strings, heredoc multi-line
// strings, element-wise set/list/map/object diffs); otherwise it falls
// back to the generic `old -> new` form at the bottom.
func (p *blockBodyDiffPrinter) writeValueDiff(old, new cty.Value, indent int, path cty.Path) {
	ty := old.Type()
	typesEqual := ctyTypesEqual(ty, new.Type())

	// We have some specialized diff implementations for certain complex
	// values where it's useful to see a visualization of the diff of
	// the nested elements rather than just showing the entire old and
	// new values verbatim.
	// However, these specialized implementations can apply only if both
	// values are known and non-null.
	if old.IsKnown() && new.IsKnown() && !old.IsNull() && !new.IsNull() && typesEqual {
		switch {
		case ty == cty.String:
			// We have special behavior for both multi-line strings in general
			// and for strings that can parse as JSON. For the JSON handling
			// to apply, both old and new must be valid JSON.
			// For single-line strings that don't parse as JSON we just fall
			// out of this switch block and do the default old -> new rendering.
			oldS := old.AsString()
			newS := new.AsString()

			{
				// Special behavior for JSON strings containing object or
				// list values.
				oldBytes := []byte(oldS)
				newBytes := []byte(newS)
				oldType, oldErr := ctyjson.ImpliedType(oldBytes)
				newType, newErr := ctyjson.ImpliedType(newBytes)
				if oldErr == nil && newErr == nil && !(oldType.IsPrimitiveType() && newType.IsPrimitiveType()) {
					oldJV, oldErr := ctyjson.Unmarshal(oldBytes, oldType)
					newJV, newErr := ctyjson.Unmarshal(newBytes, newType)
					if oldErr == nil && newErr == nil {
						if !oldJV.RawEquals(newJV) { // two JSON values may differ only in insignificant whitespace
							p.buf.WriteString("jsonencode(")
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent+2))
							p.writeActionSymbol(plans.Update)
							p.writeValueDiff(oldJV, newJV, indent+4, path)
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent))
							p.buf.WriteByte(')')
						} else {
							// if they differ only in insignificant whitespace
							// then we'll note that but still expand out the
							// effective value.
							if p.pathForcesNewResource(path) {
								p.buf.WriteString(p.color.Color("jsonencode( [red]# whitespace changes force replacement[reset]"))
							} else {
								p.buf.WriteString(p.color.Color("jsonencode( [dim]# whitespace changes[reset]"))
							}
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent+4))
							p.writeValue(oldJV, plans.NoOp, indent+4)
							p.buf.WriteByte('\n')
							p.buf.WriteString(strings.Repeat(" ", indent))
							p.buf.WriteByte(')')
						}
						return
					}
				}
			}

			// Single-line on both sides: fall out to the generic
			// old -> new rendering at the bottom of the function.
			if strings.Index(oldS, "\n") < 0 && strings.Index(newS, "\n") < 0 {
				break
			}

			// Multi-line strings render as a heredoc with a per-line diff.
			p.buf.WriteString("<<~EOT")
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			p.buf.WriteString("\n")

			var oldLines, newLines []cty.Value
			{
				r := strings.NewReader(oldS)
				sc := bufio.NewScanner(r)
				for sc.Scan() {
					oldLines = append(oldLines, cty.StringVal(sc.Text()))
				}
			}
			{
				r := strings.NewReader(newS)
				sc := bufio.NewScanner(r)
				for sc.Scan() {
					newLines = append(newLines, cty.StringVal(sc.Text()))
				}
			}

			diffLines := ctySequenceDiff(oldLines, newLines)
			for _, diffLine := range diffLines {
				p.buf.WriteString(strings.Repeat(" ", indent+2))
				p.writeActionSymbol(diffLine.Action)

				switch diffLine.Action {
				case plans.NoOp, plans.Delete:
					p.buf.WriteString(diffLine.Before.AsString())
				case plans.Create:
					p.buf.WriteString(diffLine.After.AsString())
				default:
					// Should never happen since the above covers all
					// actions that ctySequenceDiff can return for strings
					p.buf.WriteString(diffLine.After.AsString())

				}
				p.buf.WriteString("\n")
			}

			p.buf.WriteString(strings.Repeat(" ", indent)) // closing marker aligns with the opening <<~EOT
			p.buf.WriteString("EOT")

			return

		case ty.IsSetType():
			p.buf.WriteString("[")
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			p.buf.WriteString("\n")

			// Partition elements into added/removed by set membership;
			// "all" is the union used to drive the rendering order.
			var addedVals, removedVals, allVals []cty.Value
			for it := old.ElementIterator(); it.Next(); {
				_, val := it.Element()
				allVals = append(allVals, val)
				if new.HasElement(val).False() {
					removedVals = append(removedVals, val)
				}
			}
			for it := new.ElementIterator(); it.Next(); {
				_, val := it.Element()
				allVals = append(allVals, val)
				if val.IsKnown() && old.HasElement(val).False() {
					addedVals = append(addedVals, val)
				}
			}

			// cty.SetVal panics on an empty slice, hence the guards.
			var all, added, removed cty.Value
			if len(allVals) > 0 {
				all = cty.SetVal(allVals)
			} else {
				all = cty.SetValEmpty(ty.ElementType())
			}
			if len(addedVals) > 0 {
				added = cty.SetVal(addedVals)
			} else {
				added = cty.SetValEmpty(ty.ElementType())
			}
			if len(removedVals) > 0 {
				removed = cty.SetVal(removedVals)
			} else {
				removed = cty.SetValEmpty(ty.ElementType())
			}

			for it := all.ElementIterator(); it.Next(); {
				_, val := it.Element()

				p.buf.WriteString(strings.Repeat(" ", indent+2))

				var action plans.Action
				switch {
				case !val.IsKnown():
					action = plans.Update
				case added.HasElement(val).True():
					action = plans.Create
				case removed.HasElement(val).True():
					action = plans.Delete
				default:
					action = plans.NoOp
				}

				p.writeActionSymbol(action)
				p.writeValue(val, action, indent+4)
				p.buf.WriteString(",\n")
			}

			p.buf.WriteString(strings.Repeat(" ", indent))
			p.buf.WriteString("]")
			return
		case ty.IsListType() || ty.IsTupleType():
			p.buf.WriteString("[")
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			p.buf.WriteString("\n")

			elemDiffs := ctySequenceDiff(old.AsValueSlice(), new.AsValueSlice())
			for _, elemDiff := range elemDiffs {
				p.buf.WriteString(strings.Repeat(" ", indent+2))
				p.writeActionSymbol(elemDiff.Action)
				switch elemDiff.Action {
				case plans.NoOp, plans.Delete:
					p.writeValue(elemDiff.Before, elemDiff.Action, indent+4)
				case plans.Update:
					p.writeValueDiff(elemDiff.Before, elemDiff.After, indent+4, path)
				case plans.Create:
					p.writeValue(elemDiff.After, elemDiff.Action, indent+4)
				default:
					// Should never happen since the above covers all
					// actions that ctySequenceDiff can return.
					p.writeValue(elemDiff.After, elemDiff.Action, indent+4)
				}

				p.buf.WriteString(",\n")
			}

			p.buf.WriteString(strings.Repeat(" ", indent))
			p.buf.WriteString("]")
			return

		case ty.IsMapType():
			p.buf.WriteString("{")
			if p.pathForcesNewResource(path) {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			p.buf.WriteString("\n")

			// Gather keys from both sides (duplicates removed after the
			// sort below) and track the widest key for alignment.
			var allKeys []string
			keyLen := 0
			for it := old.ElementIterator(); it.Next(); {
				k, _ := it.Element()
				keyStr := k.AsString()
				allKeys = append(allKeys, keyStr)
				if len(keyStr) > keyLen {
					keyLen = len(keyStr)
				}
			}
			for it := new.ElementIterator(); it.Next(); {
				k, _ := it.Element()
				keyStr := k.AsString()
				allKeys = append(allKeys, keyStr)
				if len(keyStr) > keyLen {
					keyLen = len(keyStr)
				}
			}

			sort.Strings(allKeys)

			lastK := ""
			for i, k := range allKeys {
				if i > 0 && lastK == k {
					continue // skip duplicates (list is sorted)
				}
				lastK = k

				p.buf.WriteString(strings.Repeat(" ", indent+2))
				kV := cty.StringVal(k)
				var action plans.Action
				if old.HasIndex(kV).False() {
					action = plans.Create
				} else if new.HasIndex(kV).False() {
					action = plans.Delete
				} else if eqV := old.Index(kV).Equals(new.Index(kV)); eqV.IsKnown() && eqV.True() {
					action = plans.NoOp
				} else {
					action = plans.Update
				}

				path := append(path, cty.IndexStep{Key: kV})

				p.writeActionSymbol(action)
				p.writeValue(kV, action, indent+4)
				p.buf.WriteString(strings.Repeat(" ", keyLen-len(k)))
				p.buf.WriteString(" = ")
				switch action {
				case plans.Create, plans.NoOp:
					v := new.Index(kV)
					p.writeValue(v, action, indent+4)
				case plans.Delete:
					oldV := old.Index(kV)
					newV := cty.NullVal(oldV.Type())
					p.writeValueDiff(oldV, newV, indent+4, path)
				default:
					oldV := old.Index(kV)
					newV := new.Index(kV)
					p.writeValueDiff(oldV, newV, indent+4, path)
				}

				p.buf.WriteByte('\n')
			}

			p.buf.WriteString(strings.Repeat(" ", indent))
			p.buf.WriteString("}")
			return
		case ty.IsObjectType():
			p.buf.WriteString("{")
			p.buf.WriteString("\n")

			// Unlike the other cases, the replacement caption for objects
			// is emitted after the closing brace, so remember it here.
			forcesNewResource := p.pathForcesNewResource(path)

			var allKeys []string
			keyLen := 0
			for it := old.ElementIterator(); it.Next(); {
				k, _ := it.Element()
				keyStr := k.AsString()
				allKeys = append(allKeys, keyStr)
				if len(keyStr) > keyLen {
					keyLen = len(keyStr)
				}
			}
			for it := new.ElementIterator(); it.Next(); {
				k, _ := it.Element()
				keyStr := k.AsString()
				allKeys = append(allKeys, keyStr)
				if len(keyStr) > keyLen {
					keyLen = len(keyStr)
				}
			}

			sort.Strings(allKeys)

			lastK := ""
			for i, k := range allKeys {
				if i > 0 && lastK == k {
					continue // skip duplicates (list is sorted)
				}
				lastK = k

				p.buf.WriteString(strings.Repeat(" ", indent+2))
				kV := k
				var action plans.Action
				if !old.Type().HasAttribute(kV) {
					action = plans.Create
				} else if !new.Type().HasAttribute(kV) {
					action = plans.Delete
				} else if eqV := old.GetAttr(kV).Equals(new.GetAttr(kV)); eqV.IsKnown() && eqV.True() {
					action = plans.NoOp
				} else {
					action = plans.Update
				}

				path := append(path, cty.GetAttrStep{Name: kV})

				p.writeActionSymbol(action)
				p.buf.WriteString(k)
				p.buf.WriteString(strings.Repeat(" ", keyLen-len(k)))
				p.buf.WriteString(" = ")

				switch action {
				case plans.Create, plans.NoOp:
					v := new.GetAttr(kV)
					p.writeValue(v, action, indent+4)
				case plans.Delete:
					oldV := old.GetAttr(kV)
					newV := cty.NullVal(oldV.Type())
					p.writeValueDiff(oldV, newV, indent+4, path)
				default:
					oldV := old.GetAttr(kV)
					newV := new.GetAttr(kV)
					p.writeValueDiff(oldV, newV, indent+4, path)
				}

				p.buf.WriteString("\n")
			}

			p.buf.WriteString(strings.Repeat(" ", indent))
			p.buf.WriteString("}")

			if forcesNewResource {
				p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
			}
			return
		}
	}

	// In all other cases, we just show the new and old values as-is
	p.writeValue(old, plans.Delete, indent)
	if new.IsNull() {
		p.buf.WriteString(p.color.Color(" [dark_gray]->[reset] "))
	} else {
		p.buf.WriteString(p.color.Color(" [yellow]->[reset] "))
	}

	p.writeValue(new, plans.Create, indent)
	if p.pathForcesNewResource(path) {
		p.buf.WriteString(p.color.Color(forcesNewResourceCaption))
	}
}
994
995// writeActionSymbol writes a symbol to represent the given action, followed
996// by a space.
997//
998// It only supports the actions that can be represented with a single character:
999// Create, Delete, Update and NoAction.
1000func (p *blockBodyDiffPrinter) writeActionSymbol(action plans.Action) {
1001 switch action {
1002 case plans.Create:
1003 p.buf.WriteString(p.color.Color("[green]+[reset] "))
1004 case plans.Delete:
1005 p.buf.WriteString(p.color.Color("[red]-[reset] "))
1006 case plans.Update:
1007 p.buf.WriteString(p.color.Color("[yellow]~[reset] "))
1008 case plans.NoOp:
1009 p.buf.WriteString(" ")
1010 default:
1011 // Should never happen
1012 p.buf.WriteString(p.color.Color("? "))
1013 }
1014}
1015
1016func (p *blockBodyDiffPrinter) pathForcesNewResource(path cty.Path) bool {
1017 if !p.action.IsReplace() {
1018 // "requiredReplace" only applies when the instance is being replaced
1019 return false
1020 }
1021 return p.requiredReplace.Has(path)
1022}
1023
1024func ctyEmptyString(value cty.Value) bool {
1025 if !value.IsNull() && value.IsKnown() {
1026 valueType := value.Type()
1027 if valueType == cty.String && value.AsString() == "" {
1028 return true
1029 }
1030 }
1031 return false
1032}
1033
1034func ctyGetAttrMaybeNull(val cty.Value, name string) cty.Value {
1035 attrType := val.Type().AttributeType(name)
1036
1037 if val.IsNull() {
1038 return cty.NullVal(attrType)
1039 }
1040
1041 // We treat "" as null here
1042 // as existing SDK doesn't support null yet.
1043 // This allows us to avoid spurious diffs
1044 // until we introduce null to the SDK.
1045 attrValue := val.GetAttr(name)
1046 if ctyEmptyString(attrValue) {
1047 return cty.NullVal(attrType)
1048 }
1049
1050 return attrValue
1051}
1052
1053func ctyCollectionValues(val cty.Value) []cty.Value {
1054 if !val.IsKnown() || val.IsNull() {
1055 return nil
1056 }
1057
1058 ret := make([]cty.Value, 0, val.LengthInt())
1059 for it := val.ElementIterator(); it.Next(); {
1060 _, value := it.Element()
1061 ret = append(ret, value)
1062 }
1063 return ret
1064}
1065
// ctySequenceDiff returns differences between given sequences of cty.Value(s)
// in the form of Create, Delete, or Update actions (for objects).
//
// It walks both sequences against their longest common subsequence: values
// present only in old become Delete, values only in new become Create,
// shared values become NoOp, and (for objects) a paired old/new mismatch
// becomes a single Update.
func ctySequenceDiff(old, new []cty.Value) []*plans.Change {
	var ret []*plans.Change
	lcs := objchange.LongestCommonSubsequence(old, new)
	// oldI, newI, lcsI walk the three sequences in lockstep; each outer
	// iteration drains non-common elements and then consumes one common one.
	var oldI, newI, lcsI int
	for oldI < len(old) || newI < len(new) || lcsI < len(lcs) {
		for oldI < len(old) && (lcsI >= len(lcs) || !old[oldI].RawEquals(lcs[lcsI])) {
			// When both sides have an object at this position, pair them
			// as an Update rather than a Delete followed by a Create.
			isObjectDiff := old[oldI].Type().IsObjectType() && (newI >= len(new) || new[newI].Type().IsObjectType())
			if isObjectDiff && newI < len(new) {
				ret = append(ret, &plans.Change{
					Action: plans.Update,
					Before: old[oldI],
					After:  new[newI],
				})
				oldI++
				newI++ // we also consume the next "new" in this case
				continue
			}

			ret = append(ret, &plans.Change{
				Action: plans.Delete,
				Before: old[oldI],
				After:  cty.NullVal(old[oldI].Type()),
			})
			oldI++
		}
		for newI < len(new) && (lcsI >= len(lcs) || !new[newI].RawEquals(lcs[lcsI])) {
			ret = append(ret, &plans.Change{
				Action: plans.Create,
				Before: cty.NullVal(new[newI].Type()),
				After:  new[newI],
			})
			newI++
		}
		if lcsI < len(lcs) {
			ret = append(ret, &plans.Change{
				Action: plans.NoOp,
				Before: lcs[lcsI],
				After:  lcs[lcsI],
			})

			// All of our indexes advance together now, since the line
			// is common to all three sequences.
			lcsI++
			oldI++
			newI++
		}
	}
	return ret
}
1117
1118func ctyEqualWithUnknown(old, new cty.Value) bool {
1119 if !old.IsWhollyKnown() || !new.IsWhollyKnown() {
1120 return false
1121 }
1122 return old.Equals(new).True()
1123}
1124
1125// ctyTypesEqual checks equality of two types more loosely
1126// by avoiding checks of object/tuple elements
1127// as we render differences on element-by-element basis anyway
1128func ctyTypesEqual(oldT, newT cty.Type) bool {
1129 if oldT.IsObjectType() && newT.IsObjectType() {
1130 return true
1131 }
1132 if oldT.IsTupleType() && newT.IsTupleType() {
1133 return true
1134 }
1135 return oldT.Equals(newT)
1136}
1137
1138func ctyEnsurePathCapacity(path cty.Path, minExtra int) cty.Path {
1139 if cap(path)-len(path) >= minExtra {
1140 return path
1141 }
1142 newCap := cap(path) * 2
1143 if newCap < (len(path) + minExtra) {
1144 newCap = len(path) + minExtra
1145 }
1146 newPath := make(cty.Path, len(path), newCap)
1147 copy(newPath, path)
1148 return newPath
1149}
1150
1151// ctyNullBlockListAsEmpty either returns the given value verbatim if it is non-nil
1152// or returns an empty value of a suitable type to serve as a placeholder for it.
1153//
1154// In particular, this function handles the special situation where a "list" is
1155// actually represented as a tuple type where nested blocks contain
1156// dynamically-typed values.
1157func ctyNullBlockListAsEmpty(in cty.Value) cty.Value {
1158 if !in.IsNull() {
1159 return in
1160 }
1161 if ty := in.Type(); ty.IsListType() {
1162 return cty.ListValEmpty(ty.ElementType())
1163 }
1164 return cty.EmptyTupleVal // must need a tuple, then
1165}
1166
1167// ctyNullBlockMapAsEmpty either returns the given value verbatim if it is non-nil
1168// or returns an empty value of a suitable type to serve as a placeholder for it.
1169//
1170// In particular, this function handles the special situation where a "map" is
1171// actually represented as an object type where nested blocks contain
1172// dynamically-typed values.
1173func ctyNullBlockMapAsEmpty(in cty.Value) cty.Value {
1174 if !in.IsNull() {
1175 return in
1176 }
1177 if ty := in.Type(); ty.IsMapType() {
1178 return cty.MapValEmpty(ty.ElementType())
1179 }
1180 return cty.EmptyObjectVal // must need an object, then
1181}
1182
1183// ctyNullBlockSetAsEmpty either returns the given value verbatim if it is non-nil
1184// or returns an empty value of a suitable type to serve as a placeholder for it.
1185func ctyNullBlockSetAsEmpty(in cty.Value) cty.Value {
1186 if !in.IsNull() {
1187 return in
1188 }
1189 // Dynamically-typed attributes are not supported inside blocks backed by
1190 // sets, so our result here is always a set.
1191 return cty.SetValEmpty(in.Type().ElementType())
1192}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/format.go b/vendor/github.com/hashicorp/terraform/command/format/format.go
new file mode 100644
index 0000000..aa8d7de
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/format.go
@@ -0,0 +1,8 @@
1// Package format contains helpers for formatting various Terraform
// structures for human-readable output.
3//
4// This package is used by the official Terraform CLI in formatting any
5// output and is exported to encourage non-official frontends to mimic the
6// output formatting as much as possible so that text formats of Terraform
7// structures have a consistent look and feel.
8package format
diff --git a/vendor/github.com/hashicorp/terraform/command/format/object_id.go b/vendor/github.com/hashicorp/terraform/command/format/object_id.go
new file mode 100644
index 0000000..85ebbfe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/object_id.go
@@ -0,0 +1,123 @@
1package format
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// ObjectValueID takes a value that is assumed to be an object representation
8// of some resource instance object and attempts to heuristically find an
9// attribute of it that is likely to be a unique identifier in the remote
10// system that it belongs to which will be useful to the user.
11//
12// If such an attribute is found, its name and string value intended for
13// display are returned. Both returned strings are empty if no such attribute
14// exists, in which case the caller should assume that the resource instance
15// address within the Terraform configuration is the best available identifier.
16//
17// This is only a best-effort sort of thing, relying on naming conventions in
18// our resource type schemas. The result is not guaranteed to be unique, but
19// should generally be suitable for display to an end-user anyway.
20//
21// This function will panic if the given value is not of an object type.
22func ObjectValueID(obj cty.Value) (k, v string) {
23 if obj.IsNull() || !obj.IsKnown() {
24 return "", ""
25 }
26
27 atys := obj.Type().AttributeTypes()
28
29 switch {
30
31 case atys["id"] == cty.String:
32 v := obj.GetAttr("id")
33 if v.IsKnown() && !v.IsNull() {
34 return "id", v.AsString()
35 }
36
37 case atys["name"] == cty.String:
38 // "name" isn't always globally unique, but if there isn't also an
39 // "id" then it _often_ is, in practice.
40 v := obj.GetAttr("name")
41 if v.IsKnown() && !v.IsNull() {
42 return "name", v.AsString()
43 }
44 }
45
46 return "", ""
47}
48
49// ObjectValueName takes a value that is assumed to be an object representation
50// of some resource instance object and attempts to heuristically find an
51// attribute of it that is likely to be a human-friendly name in the remote
52// system that it belongs to which will be useful to the user.
53//
54// If such an attribute is found, its name and string value intended for
55// display are returned. Both returned strings are empty if no such attribute
56// exists, in which case the caller should assume that the resource instance
57// address within the Terraform configuration is the best available identifier.
58//
59// This is only a best-effort sort of thing, relying on naming conventions in
60// our resource type schemas. The result is not guaranteed to be unique, but
61// should generally be suitable for display to an end-user anyway.
62//
63// Callers that use both ObjectValueName and ObjectValueID at the same time
64// should be prepared to get the same attribute key and value from both in
65// some cases, since there is overlap betweek the id-extraction and
66// name-extraction heuristics.
67//
68// This function will panic if the given value is not of an object type.
69func ObjectValueName(obj cty.Value) (k, v string) {
70 if obj.IsNull() || !obj.IsKnown() {
71 return "", ""
72 }
73
74 atys := obj.Type().AttributeTypes()
75
76 switch {
77
78 case atys["name"] == cty.String:
79 v := obj.GetAttr("name")
80 if v.IsKnown() && !v.IsNull() {
81 return "name", v.AsString()
82 }
83
84 case atys["tags"].IsMapType() && atys["tags"].ElementType() == cty.String:
85 tags := obj.GetAttr("tags")
86 if tags.IsNull() || !tags.IsWhollyKnown() {
87 break
88 }
89
90 switch {
91 case tags.HasIndex(cty.StringVal("name")).RawEquals(cty.True):
92 v := tags.Index(cty.StringVal("name"))
93 if v.IsKnown() && !v.IsNull() {
94 return "tags.name", v.AsString()
95 }
96 case tags.HasIndex(cty.StringVal("Name")).RawEquals(cty.True):
97 // AWS-style naming convention
98 v := tags.Index(cty.StringVal("Name"))
99 if v.IsKnown() && !v.IsNull() {
100 return "tags.Name", v.AsString()
101 }
102 }
103 }
104
105 return "", ""
106}
107
108// ObjectValueIDOrName is a convenience wrapper around both ObjectValueID
109// and ObjectValueName (in that preference order) to try to extract some sort
110// of human-friendly descriptive string value for an object as additional
111// context about an object when it is being displayed in a compact way (where
112// not all of the attributes are visible.)
113//
114// Just as with the two functions it wraps, it is a best-effort and may return
115// two empty strings if no suitable attribute can be found for a given object.
116func ObjectValueIDOrName(obj cty.Value) (k, v string) {
117 k, v = ObjectValueID(obj)
118 if k != "" {
119 return
120 }
121 k, v = ObjectValueName(obj)
122 return
123}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/plan.go b/vendor/github.com/hashicorp/terraform/command/format/plan.go
new file mode 100644
index 0000000..098653f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/plan.go
@@ -0,0 +1,302 @@
1package format
2
3import (
4 "bytes"
5 "fmt"
6 "log"
7 "sort"
8 "strings"
9
10 "github.com/mitchellh/colorstring"
11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/plans"
14 "github.com/hashicorp/terraform/states"
15 "github.com/hashicorp/terraform/terraform"
16)
17
// Plan is a representation of a plan optimized for display to
// an end-user, as opposed to terraform.Plan which is for internal use.
//
// Plan excludes implementation details that may otherwise appear
// in the main plan, such as destroy actions on data sources (which are
// there only to clean up the state).
type Plan struct {
	// Resources holds one display-oriented diff per affected resource
	// instance, sorted by address (see NewPlan).
	Resources []*InstanceDiff
}
27
// InstanceDiff is a representation of an instance diff optimized
// for display, in conjunction with Plan.
type InstanceDiff struct {
	// Addr is the legacy-style address of the affected resource instance.
	Addr *terraform.ResourceAddress
	// Action is the planned action for this instance.
	Action plans.Action

	// Attributes describes changes to the attributes of the instance.
	//
	// For destroy diffs this is always nil.
	Attributes []*AttributeDiff

	// Tainted and Deposed mark special instance states shown as
	// annotations next to the address.
	Tainted bool
	Deposed bool
}
42
// AttributeDiff is a representation of an attribute diff optimized
// for display, in conjunction with InstanceDiff.
type AttributeDiff struct {
	// Path is a dot-delimited traversal through possibly many levels of list and map structure,
	// intended for display purposes only.
	Path string

	// Action is the planned action for this attribute.
	Action plans.Action

	// OldValue and NewValue are the before/after values, already
	// rendered as strings.
	OldValue string
	NewValue string

	// NewComputed is true when the new value is not yet known.
	NewComputed bool
	// Sensitive values are masked as "<sensitive>" when displayed.
	Sensitive bool
	// ForcesNew is true when a change to this attribute requires
	// replacing the whole resource.
	ForcesNew bool
}
59
// PlanStats gives summary counts for a Plan.
type PlanStats struct {
	// ToAdd, ToChange, and ToDestroy count instances to be created,
	// updated, and destroyed; a replacement counts in both ToAdd and
	// ToDestroy (see Plan.Stats).
	ToAdd, ToChange, ToDestroy int
}
64
// NewPlan produces a display-oriented Plan from a set of planned changes.
// A nil changes argument yields an empty Plan.
func NewPlan(changes *plans.Changes) *Plan {
	log.Printf("[TRACE] NewPlan for %#v", changes)
	ret := &Plan{}
	if changes == nil {
		// Nothing to do!
		return ret
	}

	for _, rc := range changes.Resources {
		addr := rc.Addr
		log.Printf("[TRACE] NewPlan found %s (%s)", addr, rc.Action)
		dataSource := addr.Resource.Resource.Mode == addrs.DataResourceMode

		// We create "delete" actions for data resources so we can clean
		// up their entries in state, but this is an implementation detail
		// that users shouldn't see.
		if dataSource && rc.Action == plans.Delete {
			continue
		}

		// For now we'll shim this to work with our old types.
		// TODO: Update for the new plan types, ideally also switching over to
		// a structural diff renderer instead of a flat renderer.
		did := &InstanceDiff{
			Addr:   terraform.NewLegacyResourceInstanceAddress(addr),
			Action: rc.Action,
		}

		if rc.DeposedKey != states.NotDeposed {
			did.Deposed = true
		}

		// Since this is just a temporary stub implementation on the way
		// to us replacing this with the structural diff renderer, we currently
		// don't include any attributes here.
		// FIXME: Implement the structural diff renderer to replace this
		// codepath altogether.

		ret.Resources = append(ret.Resources, did)
	}

	// Sort the instance diffs by their addresses for display.
	sort.Slice(ret.Resources, func(i, j int) bool {
		iAddr := ret.Resources[i].Addr
		jAddr := ret.Resources[j].Addr
		return iAddr.Less(jAddr)
	})

	return ret
}
116
117// Format produces and returns a text representation of the receiving plan
118// intended for display in a terminal.
119//
120// If color is not nil, it is used to colorize the output.
121func (p *Plan) Format(color *colorstring.Colorize) string {
122 if p.Empty() {
123 return "This plan does nothing."
124 }
125
126 if color == nil {
127 color = &colorstring.Colorize{
128 Colors: colorstring.DefaultColors,
129 Reset: false,
130 }
131 }
132
133 // Find the longest path length of all the paths that are changing,
134 // so we can align them all.
135 keyLen := 0
136 for _, r := range p.Resources {
137 for _, attr := range r.Attributes {
138 key := attr.Path
139
140 if len(key) > keyLen {
141 keyLen = len(key)
142 }
143 }
144 }
145
146 buf := new(bytes.Buffer)
147 for _, r := range p.Resources {
148 formatPlanInstanceDiff(buf, r, keyLen, color)
149 }
150
151 return strings.TrimSpace(buf.String())
152}
153
154// Stats returns statistics about the plan
155func (p *Plan) Stats() PlanStats {
156 var ret PlanStats
157 for _, r := range p.Resources {
158 switch r.Action {
159 case plans.Create:
160 ret.ToAdd++
161 case plans.Update:
162 ret.ToChange++
163 case plans.DeleteThenCreate, plans.CreateThenDelete:
164 ret.ToAdd++
165 ret.ToDestroy++
166 case plans.Delete:
167 ret.ToDestroy++
168 }
169 }
170 return ret
171}
172
173// ActionCounts returns the number of diffs for each action type
174func (p *Plan) ActionCounts() map[plans.Action]int {
175 ret := map[plans.Action]int{}
176 for _, r := range p.Resources {
177 ret[r.Action]++
178 }
179 return ret
180}
181
// Empty returns true if there are no resource diffs in the receiving plan.
func (p *Plan) Empty() bool {
	return len(p.Resources) == 0
}
186
// DiffActionSymbol returns a string that, once passed through a
// colorstring.Colorize, will produce a result that can be written
// to a terminal to produce a symbol made of three printable
// characters, possibly interspersed with VT100 color codes.
func DiffActionSymbol(action plans.Action) string {
	switch action {
	case plans.DeleteThenCreate:
		return "[red]-[reset]/[green]+[reset]"
	case plans.CreateThenDelete:
		return "[green]+[reset]/[red]-[reset]"
	case plans.Create:
		return " [green]+[reset]"
	case plans.Delete:
		return " [red]-[reset]"
	case plans.Read:
		return " [cyan]<=[reset]"
	case plans.Update:
		return " [yellow]~[reset]"
	default:
		// Unrecognized actions get a neutral placeholder symbol.
		return " ?"
	}
}
209
// formatPlanInstanceDiff writes the text representation of the given instance diff
// to the given buffer, using the given colorizer. keyLen is the width to pad
// attribute paths to so their values line up in a column.
func formatPlanInstanceDiff(buf *bytes.Buffer, r *InstanceDiff, keyLen int, colorizer *colorstring.Colorize) {
	addrStr := r.Addr.String()

	// Determine the color for the text (green for adding, yellow
	// for change, red for delete), and symbol, and output the
	// resource header.
	color := "yellow"
	symbol := DiffActionSymbol(r.Action)
	oldValues := true
	switch r.Action {
	case plans.DeleteThenCreate, plans.CreateThenDelete:
		color = "yellow"
	case plans.Create:
		color = "green"
		oldValues = false
	case plans.Delete:
		color = "red"
	case plans.Read:
		color = "cyan"
		oldValues = false
	}

	// Accumulate status annotations shown after the address.
	var extraStr string
	if r.Tainted {
		extraStr = extraStr + " (tainted)"
	}
	if r.Deposed {
		extraStr = extraStr + " (deposed)"
	}
	if r.Action.IsReplace() {
		extraStr = extraStr + colorizer.Color(" [red][bold](new resource required)")
	}

	buf.WriteString(
		colorizer.Color(fmt.Sprintf(
			"[%s]%s [%s]%s%s\n",
			color, symbol, color, addrStr, extraStr,
		)),
	)

	for _, attr := range r.Attributes {

		// Render the new value, masking sensitive values and marking
		// ones that are not yet known.
		v := attr.NewValue
		var dispV string
		switch {
		case v == "" && attr.NewComputed:
			dispV = "<computed>"
		case attr.Sensitive:
			dispV = "<sensitive>"
		default:
			dispV = fmt.Sprintf("%q", v)
		}

		updateMsg := ""
		switch {
		case attr.ForcesNew && r.Action.IsReplace():
			updateMsg = colorizer.Color(" [red](forces new resource)")
		case attr.Sensitive && oldValues:
			updateMsg = colorizer.Color(" [yellow](attribute changed)")
		}

		if oldValues {
			// Show an "old => new" transition for updates and replaces.
			u := attr.OldValue
			var dispU string
			switch {
			case attr.Sensitive:
				dispU = "<sensitive>"
			default:
				dispU = fmt.Sprintf("%q", u)
			}
			buf.WriteString(fmt.Sprintf(
				" %s:%s %s => %s%s\n",
				attr.Path,
				strings.Repeat(" ", keyLen-len(attr.Path)),
				dispU, dispV,
				updateMsg,
			))
		} else {
			buf.WriteString(fmt.Sprintf(
				" %s:%s %s%s\n",
				attr.Path,
				strings.Repeat(" ", keyLen-len(attr.Path)),
				dispV,
				updateMsg,
			))
		}
	}

	// Write the reset color so we don't bleed color into later text
	buf.WriteString(colorizer.Color("[reset]\n"))
}
diff --git a/vendor/github.com/hashicorp/terraform/command/format/state.go b/vendor/github.com/hashicorp/terraform/command/format/state.go
new file mode 100644
index 0000000..f411ef9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/command/format/state.go
@@ -0,0 +1,286 @@
1package format
2
3import (
4 "bytes"
5 "fmt"
6 "sort"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty"
10
11 "github.com/hashicorp/terraform/addrs"
12 "github.com/hashicorp/terraform/configs/configschema"
13 "github.com/hashicorp/terraform/plans"
14 "github.com/hashicorp/terraform/states"
15 "github.com/hashicorp/terraform/terraform"
16 "github.com/mitchellh/colorstring"
17)
18
// StateOpts are the options for formatting a state.
type StateOpts struct {
	// State is the state to format. This is required.
	State *states.State

	// Schemas are used to decode attributes. This is required;
	// State panics when it is nil.
	Schemas *terraform.Schemas

	// Color is the colorizer. This is required; State panics when
	// it is nil.
	Color *colorstring.Colorize
}
30
// State renders the given state, including root module outputs, as a
// colorized string for terminal display. It panics if opts.Color or
// opts.Schemas is nil, since callers are expected to supply both.
func State(opts *StateOpts) string {
	if opts.Color == nil {
		panic("colorize not given")
	}

	if opts.Schemas == nil {
		panic("schemas not given")
	}

	s := opts.State
	if len(s.Modules) == 0 {
		return "The state file is empty. No resources are represented."
	}

	// Start with a reset so earlier terminal colors don't leak in.
	buf := bytes.NewBufferString("[reset]")
	p := blockBodyDiffPrinter{
		buf:    buf,
		color:  opts.Color,
		action: plans.NoOp,
	}

	// Format all the modules
	for _, m := range s.Modules {
		formatStateModule(p, m, opts.Schemas)
	}

	// Write the outputs for the root module
	m := s.RootModule()

	if m.OutputValues != nil {
		if len(m.OutputValues) > 0 {
			p.buf.WriteString("Outputs:\n\n")
		}

		// Sort the outputs
		ks := make([]string, 0, len(m.OutputValues))
		for k := range m.OutputValues {
			ks = append(ks, k)
		}
		sort.Strings(ks)

		// Output each output k/v pair
		for _, k := range ks {
			v := m.OutputValues[k]
			p.buf.WriteString(fmt.Sprintf("%s = ", k))
			p.writeValue(v.Value, plans.NoOp, 0)
			p.buf.WriteString("\n\n")
		}
	}

	return opts.Color.Color(strings.TrimSpace(p.buf.String()))

}
85
// formatStateModule renders every resource instance in module m, in
// alphabetical order, as resource/data blocks into p's buffer, looking up
// attribute schemas from the given schemas collection.
func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraform.Schemas) {
	// First get the names of all the resources so we can show them
	// in alphabetical order.
	names := make([]string, 0, len(m.Resources))
	for name := range m.Resources {
		names = append(names, name)
	}
	sort.Strings(names)

	// Go through each resource and begin building up the output.
	for _, key := range names {
		for k, v := range m.Resources[key].Instances {
			addr := m.Resources[key].Addr

			taintStr := ""
			// NOTE(review): 'T' presumably matches states.ObjectTainted;
			// comparing against the named constant would be safer — confirm.
			if v.Current.Status == 'T' {
				taintStr = "(tainted)"
			}
			p.buf.WriteString(fmt.Sprintf("# %s: %s\n", addr.Absolute(m.Addr).Instance(k), taintStr))

			var schema *configschema.Block
			provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact()
			if _, exists := schemas.Providers[provider]; !exists {
				// This should never happen in normal use because we should've
				// loaded all of the schemas and checked things prior to this
				// point. We can't return errors here, but since this is UI code
				// we will try to do _something_ reasonable.
				p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider))
				continue
			}

			switch addr.Mode {
			case addrs.ManagedResourceMode:
				schema, _ = schemas.ResourceTypeConfig(
					provider,
					addr.Mode,
					addr.Type,
				)
				if schema == nil {
					p.buf.WriteString(fmt.Sprintf(
						"# missing schema for provider %q resource type %s\n\n", provider, addr.Type))
					continue
				}

				p.buf.WriteString(fmt.Sprintf(
					"resource %q %q {",
					addr.Type,
					addr.Name,
				))
			case addrs.DataResourceMode:
				schema, _ = schemas.ResourceTypeConfig(
					provider,
					addr.Mode,
					addr.Type,
				)
				if schema == nil {
					p.buf.WriteString(fmt.Sprintf(
						"# missing schema for provider %q data source %s\n\n", provider, addr.Type))
					continue
				}

				p.buf.WriteString(fmt.Sprintf(
					"data %q %q {",
					addr.Type,
					addr.Name,
				))
			default:
				// should never happen, since the above is exhaustive
				p.buf.WriteString(addr.String())
			}

			val, err := v.Current.Decode(schema.ImpliedType())
			if err != nil {
				// UI code can't return errors; note that this break only
				// abandons the remaining instances of this one resource.
				fmt.Println(err.Error())
				break
			}

			path := make(cty.Path, 0, 3)
			bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path)
			if bodyWritten {
				p.buf.WriteString("\n")
			}

			p.buf.WriteString("}\n\n")
		}
	}
	p.buf.WriteString("[reset]\n")
}
174
// formatNestedList renders outputList as a bracketed, comma-separated,
// one-item-per-line list at the given indentation, without a leading newline.
func formatNestedList(indent string, outputList []interface{}) string {
	var buf bytes.Buffer
	buf.WriteString(fmt.Sprintf("%s[", indent))

	for i, value := range outputList {
		buf.WriteString(fmt.Sprintf("\n%s%s%s", indent, " ", value))
		// Every element except the last is followed by a comma.
		if i != len(outputList)-1 {
			buf.WriteString(",")
		}
	}

	buf.WriteString(fmt.Sprintf("\n%s]", indent))
	return strings.TrimPrefix(buf.String(), "\n")
}
191
// formatListOutput renders outputList, optionally wrapped as
// "name = [ ... ]" when outputName is non-empty. String elements are
// written directly; nested lists and maps are rendered via
// formatNestedList and formatNestedMap.
func formatListOutput(indent, outputName string, outputList []interface{}) string {
	keyIndent := ""

	outputBuf := new(bytes.Buffer)

	if outputName != "" {
		outputBuf.WriteString(fmt.Sprintf("%s%s = [", indent, outputName))
		keyIndent = " "
	}

	lastIdx := len(outputList) - 1

	for i, value := range outputList {
		switch typedValue := value.(type) {
		case string:
			outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value))
		case []interface{}:
			outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent,
				formatNestedList(indent+keyIndent, typedValue)))
		case map[string]interface{}:
			outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent,
				formatNestedMap(indent+keyIndent, typedValue)))
		}

		// Elements of other types produce no output above, but still
		// receive a separator comma here.
		if lastIdx != i {
			outputBuf.WriteString(",")
		}
	}

	if outputName != "" {
		if len(outputList) > 0 {
			outputBuf.WriteString(fmt.Sprintf("\n%s]", indent))
		} else {
			// An empty list closes on the same line.
			outputBuf.WriteString("]")
		}
	}

	return strings.TrimPrefix(outputBuf.String(), "\n")
}
231
// formatNestedMap renders outputMap as a braced, comma-separated block of
// "key = value" lines at the given indentation, with keys sorted so the
// output is deterministic.
func formatNestedMap(indent string, outputMap map[string]interface{}) string {
	ks := make([]string, 0, len(outputMap))
	for k := range outputMap { // idiomatic form; the value is unused
		ks = append(ks, k)
	}
	sort.Strings(ks)

	outputBuf := new(bytes.Buffer)
	outputBuf.WriteString(fmt.Sprintf("%s{", indent))

	lastIdx := len(outputMap) - 1
	for i, k := range ks {
		v := outputMap[k]
		outputBuf.WriteString(fmt.Sprintf("\n%s%s = %v", indent+" ", k, v))

		// Every entry except the last is followed by a comma.
		if lastIdx != i {
			outputBuf.WriteString(",")
		}
	}

	outputBuf.WriteString(fmt.Sprintf("\n%s}", indent))

	return strings.TrimPrefix(outputBuf.String(), "\n")
}
256
// formatMapOutput renders outputMap as "name = { ... }" when outputName is
// non-empty, or as bare "key = value" lines otherwise, with keys sorted so
// the output is deterministic.
func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string {
	ks := make([]string, 0, len(outputMap))
	for k := range outputMap { // idiomatic form; the value is unused
		ks = append(ks, k)
	}
	sort.Strings(ks)

	keyIndent := ""

	outputBuf := new(bytes.Buffer)
	if outputName != "" {
		outputBuf.WriteString(fmt.Sprintf("%s%s = {", indent, outputName))
		keyIndent = " "
	}

	for _, k := range ks {
		v := outputMap[k]
		outputBuf.WriteString(fmt.Sprintf("\n%s%s%s = %v", indent, keyIndent, k, v))
	}

	if outputName != "" {
		if len(outputMap) > 0 {
			outputBuf.WriteString(fmt.Sprintf("\n%s}", indent))
		} else {
			// An empty map closes on the same line.
			outputBuf.WriteString("}")
		}
	}

	return strings.TrimPrefix(outputBuf.String(), "\n")
}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go
deleted file mode 100644
index 2b1b0ca..0000000
--- a/vendor/github.com/hashicorp/terraform/config/configschema/decoder_spec.go
+++ /dev/null
@@ -1,97 +0,0 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5 "github.com/zclconf/go-cty/cty"
6)
7
// mapLabelNames is the single label used for NestingMap block types.
var mapLabelNames = []string{"key"}

// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body
// using the facilities in the hcldec package.
//
// The returned specification is guaranteed to return a value of the same type
// returned by method ImpliedType, but it may contain null or unknown values if
// any of the block attributes are defined as optional and/or computed
// respectively.
func (b *Block) DecoderSpec() hcldec.Spec {
	ret := hcldec.ObjectSpec{}
	if b == nil {
		// A nil block decodes to an empty object.
		return ret
	}

	for name, attrS := range b.Attributes {
		switch {
		case attrS.Computed && attrS.Optional:
			// In this special case we use an unknown value as a default
			// to get the intended behavior that the result is computed
			// unless it has been explicitly set in config.
			ret[name] = &hcldec.DefaultSpec{
				Primary: &hcldec.AttrSpec{
					Name: name,
					Type: attrS.Type,
				},
				Default: &hcldec.LiteralSpec{
					Value: cty.UnknownVal(attrS.Type),
				},
			}
		case attrS.Computed:
			// Computed-only attributes are always unknown at decode time.
			ret[name] = &hcldec.LiteralSpec{
				Value: cty.UnknownVal(attrS.Type),
			}
		default:
			ret[name] = &hcldec.AttrSpec{
				Name:     name,
				Type:     attrS.Type,
				Required: attrS.Required,
			}
		}
	}

	for name, blockS := range b.BlockTypes {
		if _, exists := ret[name]; exists {
			// This indicates an invalid schema, since it's not valid to
			// define both an attribute and a block type of the same name.
			// However, we don't raise this here since it's checked by
			// InternalValidate.
			continue
		}

		childSpec := blockS.Block.DecoderSpec()

		switch blockS.Nesting {
		case NestingSingle:
			ret[name] = &hcldec.BlockSpec{
				TypeName: name,
				Nested:   childSpec,
				Required: blockS.MinItems == 1 && blockS.MaxItems >= 1,
			}
		case NestingList:
			ret[name] = &hcldec.BlockListSpec{
				TypeName: name,
				Nested:   childSpec,
				MinItems: blockS.MinItems,
				MaxItems: blockS.MaxItems,
			}
		case NestingSet:
			ret[name] = &hcldec.BlockSetSpec{
				TypeName: name,
				Nested:   childSpec,
				MinItems: blockS.MinItems,
				MaxItems: blockS.MaxItems,
			}
		case NestingMap:
			ret[name] = &hcldec.BlockMapSpec{
				TypeName:   name,
				Nested:     childSpec,
				LabelNames: mapLabelNames,
			}
		default:
			// Invalid nesting type is just ignored. It's checked by
			// InternalValidate.
			continue
		}
	}

	return ret
}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go
deleted file mode 100644
index 67324eb..0000000
--- a/vendor/github.com/hashicorp/terraform/config/configschema/implied_type.go
+++ /dev/null
@@ -1,21 +0,0 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5 "github.com/zclconf/go-cty/cty"
6)
7
// ImpliedType returns the cty.Type that would result from decoding a
// configuration block using the receiving block schema.
//
// ImpliedType always returns a result, even if the given schema is
// inconsistent. Code that creates configschema.Block objects should be
// tested using the InternalValidate method to detect any inconsistencies
// that would cause this method to fall back on defaults and assumptions.
func (b *Block) ImpliedType() cty.Type {
	if b == nil {
		// Consistent with DecoderSpec, a nil block implies an empty object.
		return cty.EmptyObject
	}

	return hcldec.ImpliedType(b.DecoderSpec())
}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go
deleted file mode 100644
index 6cb9313..0000000
--- a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go
+++ /dev/null
@@ -1,16 +0,0 @@
1// Code generated by "stringer -type=NestingMode"; DO NOT EDIT.
2
3package configschema
4
5import "strconv"
6
const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap"

var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62}

// String returns the generated name for a NestingMode value, or a
// "NestingMode(n)" fallback for out-of-range values.
func (i NestingMode) String() string {
	if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
		return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go
new file mode 100644
index 0000000..bb4228d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/flatmap.go
@@ -0,0 +1,424 @@
1package hcl2shim
2
3import (
4 "fmt"
5 "strconv"
6 "strings"
7
8 "github.com/zclconf/go-cty/cty/convert"
9
10 "github.com/zclconf/go-cty/cty"
11)
12
13// FlatmapValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic
14// types library that HCL2 uses) to a map compatible with what would be
15// produced by the "flatmap" package.
16//
17// The type of the given value informs the structure of the resulting map.
18// The value must be of an object type or this function will panic.
19//
20// Flatmap values can only represent maps when they are of primitive types,
21// so the given value must not have any maps of complex types or the result
22// is undefined.
23func FlatmapValueFromHCL2(v cty.Value) map[string]string {
24 if v.IsNull() {
25 return nil
26 }
27
28 if !v.Type().IsObjectType() {
29 panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", v.Type()))
30 }
31
32 m := make(map[string]string)
33 flatmapValueFromHCL2Map(m, "", v)
34 return m
35}
36
// flatmapValueFromHCL2Value dispatches on val's type to encode it into m at
// key: primitives are stored directly, object/map values get a "key." prefix,
// and sequence values (tuple/list/set) get indexed "key." entries.
func flatmapValueFromHCL2Value(m map[string]string, key string, val cty.Value) {
	ty := val.Type()
	switch {
	case ty.IsPrimitiveType() || ty == cty.DynamicPseudoType:
		flatmapValueFromHCL2Primitive(m, key, val)
	case ty.IsObjectType() || ty.IsMapType():
		flatmapValueFromHCL2Map(m, key+".", val)
	case ty.IsTupleType() || ty.IsListType() || ty.IsSetType():
		flatmapValueFromHCL2Seq(m, key+".", val)
	default:
		// Any other kind of type has no flatmap representation.
		panic(fmt.Sprintf("cannot encode %s to flatmap", ty.FriendlyName()))
	}
}
50
51func flatmapValueFromHCL2Primitive(m map[string]string, key string, val cty.Value) {
52 if !val.IsKnown() {
53 m[key] = UnknownVariableValue
54 return
55 }
56 if val.IsNull() {
57 // Omit entirely
58 return
59 }
60
61 var err error
62 val, err = convert.Convert(val, cty.String)
63 if err != nil {
64 // Should not be possible, since all primitive types can convert to string.
65 panic(fmt.Sprintf("invalid primitive encoding to flatmap: %s", err))
66 }
67 m[key] = val.AsString()
68}
69
70func flatmapValueFromHCL2Map(m map[string]string, prefix string, val cty.Value) {
71 if val.IsNull() {
72 // Omit entirely
73 return
74 }
75 if !val.IsKnown() {
76 switch {
77 case val.Type().IsObjectType():
78 // Whole objects can't be unknown in flatmap, so instead we'll
79 // just write all of the attribute values out as unknown.
80 for name, aty := range val.Type().AttributeTypes() {
81 flatmapValueFromHCL2Value(m, prefix+name, cty.UnknownVal(aty))
82 }
83 default:
84 m[prefix+"%"] = UnknownVariableValue
85 }
86 return
87 }
88
89 len := 0
90 for it := val.ElementIterator(); it.Next(); {
91 ak, av := it.Element()
92 name := ak.AsString()
93 flatmapValueFromHCL2Value(m, prefix+name, av)
94 len++
95 }
96 if !val.Type().IsObjectType() { // objects don't have an explicit count included, since their attribute count is fixed
97 m[prefix+"%"] = strconv.Itoa(len)
98 }
99}
100
101func flatmapValueFromHCL2Seq(m map[string]string, prefix string, val cty.Value) {
102 if val.IsNull() {
103 // Omit entirely
104 return
105 }
106 if !val.IsKnown() {
107 m[prefix+"#"] = UnknownVariableValue
108 return
109 }
110
111 // For sets this won't actually generate exactly what helper/schema would've
112 // generated, because we don't have access to the set key function it
113 // would've used. However, in practice it doesn't actually matter what the
114 // keys are as long as they are unique, so we'll just generate sequential
115 // indexes for them as if it were a list.
116 //
117 // An important implication of this, however, is that the set ordering will
118 // not be consistent across mutations and so different keys may be assigned
119 // to the same value when round-tripping. Since this shim is intended to
120 // be short-lived and not used for round-tripping, we accept this.
121 i := 0
122 for it := val.ElementIterator(); it.Next(); {
123 _, av := it.Element()
124 key := prefix + strconv.Itoa(i)
125 flatmapValueFromHCL2Value(m, key, av)
126 i++
127 }
128 m[prefix+"#"] = strconv.Itoa(i)
129}
130
131// HCL2ValueFromFlatmap converts a map compatible with what would be produced
132// by the "flatmap" package to a HCL2 (really, the cty dynamic types library
133// that HCL2 uses) object type.
134//
135// The intended result type must be provided in order to guide how the
136// map contents are decoded. This must be an object type or this function
137// will panic.
138//
139// Flatmap values can only represent maps when they are of primitive types,
140// so the given type must not have any maps of complex types or the result
141// is undefined.
142//
143// The result may contain null values if the given map does not contain keys
144// for all of the different key paths implied by the given type.
145func HCL2ValueFromFlatmap(m map[string]string, ty cty.Type) (cty.Value, error) {
146 if m == nil {
147 return cty.NullVal(ty), nil
148 }
149 if !ty.IsObjectType() {
150 panic(fmt.Sprintf("HCL2ValueFromFlatmap called on %#v", ty))
151 }
152
153 return hcl2ValueFromFlatmapObject(m, "", ty.AttributeTypes())
154}
155
// hcl2ValueFromFlatmapValue decodes the flatmap entry (or entries) for key
// into the given cty type, dispatching on the type's kind. Nested structures
// recurse with "key." as their prefix.
func hcl2ValueFromFlatmapValue(m map[string]string, key string, ty cty.Type) (cty.Value, error) {
	var val cty.Value
	var err error
	switch {
	case ty.IsPrimitiveType():
		val, err = hcl2ValueFromFlatmapPrimitive(m, key, ty)
	case ty.IsObjectType():
		val, err = hcl2ValueFromFlatmapObject(m, key+".", ty.AttributeTypes())
	case ty.IsTupleType():
		val, err = hcl2ValueFromFlatmapTuple(m, key+".", ty.TupleElementTypes())
	case ty.IsMapType():
		val, err = hcl2ValueFromFlatmapMap(m, key+".", ty)
	case ty.IsListType():
		val, err = hcl2ValueFromFlatmapList(m, key+".", ty)
	case ty.IsSetType():
		val, err = hcl2ValueFromFlatmapSet(m, key+".", ty)
	default:
		err = fmt.Errorf("cannot decode %s from flatmap", ty.FriendlyName())
	}

	if err != nil {
		// Normalize every failure to cty.DynamicVal.
		return cty.DynamicVal, err
	}
	return val, nil
}
181
182func hcl2ValueFromFlatmapPrimitive(m map[string]string, key string, ty cty.Type) (cty.Value, error) {
183 rawVal, exists := m[key]
184 if !exists {
185 return cty.NullVal(ty), nil
186 }
187 if rawVal == UnknownVariableValue {
188 return cty.UnknownVal(ty), nil
189 }
190
191 var err error
192 val := cty.StringVal(rawVal)
193 val, err = convert.Convert(val, ty)
194 if err != nil {
195 // This should never happen for _valid_ input, but flatmap data might
196 // be tampered with by the user and become invalid.
197 return cty.DynamicVal, fmt.Errorf("invalid value for %q in state: %s", key, err)
198 }
199
200 return val, nil
201}
202
203func hcl2ValueFromFlatmapObject(m map[string]string, prefix string, atys map[string]cty.Type) (cty.Value, error) {
204 vals := make(map[string]cty.Value)
205 for name, aty := range atys {
206 val, err := hcl2ValueFromFlatmapValue(m, prefix+name, aty)
207 if err != nil {
208 return cty.DynamicVal, err
209 }
210 vals[name] = val
211 }
212 return cty.ObjectVal(vals), nil
213}
214
// hcl2ValueFromFlatmapTuple decodes the indexed entries under prefix into a
// tuple of the given element types, validating the "#" count entry against
// the expected element count.
func hcl2ValueFromFlatmapTuple(m map[string]string, prefix string, etys []cty.Type) (cty.Value, error) {
	var vals []cty.Value

	// if the container is unknown, there is no count string
	listName := strings.TrimRight(prefix, ".")
	if m[listName] == UnknownVariableValue {
		return cty.UnknownVal(cty.Tuple(etys)), nil
	}

	// A missing count entry distinguishes null from an empty tuple.
	countStr, exists := m[prefix+"#"]
	if !exists {
		return cty.NullVal(cty.Tuple(etys)), nil
	}
	if countStr == UnknownVariableValue {
		return cty.UnknownVal(cty.Tuple(etys)), nil
	}

	count, err := strconv.Atoi(countStr)
	if err != nil {
		return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err)
	}
	if count != len(etys) {
		return cty.DynamicVal, fmt.Errorf("wrong number of values for %q in state: got %d, but need %d", prefix, count, len(etys))
	}

	vals = make([]cty.Value, len(etys))
	for i, ety := range etys {
		key := prefix + strconv.Itoa(i)
		val, err := hcl2ValueFromFlatmapValue(m, key, ety)
		if err != nil {
			return cty.DynamicVal, err
		}
		vals[i] = val
	}
	return cty.TupleVal(vals), nil
}
251
// hcl2ValueFromFlatmapMap decodes all entries under prefix into a map of the
// given map type. The "%" entry is used only to distinguish null, empty, and
// unknown maps — its numeric value is otherwise ignored.
func hcl2ValueFromFlatmapMap(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
	vals := make(map[string]cty.Value)
	ety := ty.ElementType()

	// if the container is unknown, there is no count string
	listName := strings.TrimRight(prefix, ".")
	if m[listName] == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	// We actually don't really care about the "count" of a map for our
	// purposes here, but we do need to check if it _exists_ in order to
	// recognize the difference between null (not set at all) and empty.
	if strCount, exists := m[prefix+"%"]; !exists {
		return cty.NullVal(ty), nil
	} else if strCount == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	for fullKey := range m {
		if !strings.HasPrefix(fullKey, prefix) {
			continue
		}

		// The flatmap format doesn't allow us to distinguish between keys
		// that contain periods and nested objects, so by convention a
		// map is only ever of primitive type in flatmap, and we just assume
		// that the remainder of the raw key (dots and all) is the key we
		// want in the result value.
		key := fullKey[len(prefix):]
		if key == "%" {
			// Ignore the "count" key
			continue
		}

		val, err := hcl2ValueFromFlatmapValue(m, fullKey, ety)
		if err != nil {
			return cty.DynamicVal, err
		}
		vals[key] = val
	}

	if len(vals) == 0 {
		return cty.MapValEmpty(ety), nil
	}
	return cty.MapVal(vals), nil
}
299
// hcl2ValueFromFlatmapList decodes the indexed entries under prefix into a
// list of the given list type, using the "#" entry as the element count.
func hcl2ValueFromFlatmapList(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
	var vals []cty.Value

	// if the container is unknown, there is no count string
	listName := strings.TrimRight(prefix, ".")
	if m[listName] == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	// A missing count entry distinguishes null from an empty list.
	countStr, exists := m[prefix+"#"]
	if !exists {
		return cty.NullVal(ty), nil
	}
	if countStr == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	count, err := strconv.Atoi(countStr)
	if err != nil {
		return cty.DynamicVal, fmt.Errorf("invalid count value for %q in state: %s", prefix, err)
	}

	ety := ty.ElementType()
	if count == 0 {
		return cty.ListValEmpty(ety), nil
	}

	vals = make([]cty.Value, count)
	for i := 0; i < count; i++ {
		key := prefix + strconv.Itoa(i)
		val, err := hcl2ValueFromFlatmapValue(m, key, ety)
		if err != nil {
			return cty.DynamicVal, err
		}
		vals[i] = val
	}

	return cty.ListVal(vals), nil
}
339
// hcl2ValueFromFlatmapSet decodes the entries under prefix into a set of the
// given set type. Set element keys in flatmap are opaque (hash-derived), so
// elements are discovered by scanning all keys with the prefix rather than by
// sequential index.
func hcl2ValueFromFlatmapSet(m map[string]string, prefix string, ty cty.Type) (cty.Value, error) {
	var vals []cty.Value
	ety := ty.ElementType()

	// if the container is unknown, there is no count string
	listName := strings.TrimRight(prefix, ".")
	if m[listName] == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	// A missing count entry distinguishes null from an empty set.
	strCount, exists := m[prefix+"#"]
	if !exists {
		return cty.NullVal(ty), nil
	} else if strCount == UnknownVariableValue {
		return cty.UnknownVal(ty), nil
	}

	// Keep track of keys we've seen, se we don't add the same set value
	// multiple times. The cty.Set will normally de-duplicate values, but we may
	// have unknown values that would not show as equivalent.
	seen := map[string]bool{}

	for fullKey := range m {
		if !strings.HasPrefix(fullKey, prefix) {
			continue
		}
		subKey := fullKey[len(prefix):]
		if subKey == "#" {
			// Ignore the "count" key
			continue
		}
		key := fullKey
		if dot := strings.IndexByte(subKey, '.'); dot != -1 {
			// dot indexes into subKey, so dot+len(prefix) is the position of
			// the first '.' within fullKey; key is the element key alone,
			// without any nested sub-path.
			key = fullKey[:dot+len(prefix)]
		}

		if seen[key] {
			continue
		}

		seen[key] = true

		// The flatmap format doesn't allow us to distinguish between keys
		// that contain periods and nested objects, so by convention a
		// map is only ever of primitive type in flatmap, and we just assume
		// that the remainder of the raw key (dots and all) is the key we
		// want in the result value.

		val, err := hcl2ValueFromFlatmapValue(m, key, ety)
		if err != nil {
			return cty.DynamicVal, err
		}
		vals = append(vals, val)
	}

	if len(vals) == 0 && strCount == "1" {
		// An empty set wouldn't be represented in the flatmap, so this must be
		// a single empty object since the count is actually 1.
		// Add an appropriately typed null value to the set.
		var val cty.Value
		switch {
		case ety.IsMapType():
			val = cty.MapValEmpty(ety)
		case ety.IsListType():
			val = cty.ListValEmpty(ety)
		case ety.IsSetType():
			val = cty.SetValEmpty(ety)
		case ety.IsObjectType():
			// TODO: cty.ObjectValEmpty
			objectMap := map[string]cty.Value{}
			for attr, ty := range ety.AttributeTypes() {
				objectMap[attr] = cty.NullVal(ty)
			}
			val = cty.ObjectVal(objectMap)
		default:
			val = cty.NullVal(ety)
		}
		vals = append(vals, val)

	} else if len(vals) == 0 {
		return cty.SetValEmpty(ety), nil
	}

	return cty.SetVal(vals), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go
new file mode 100644
index 0000000..3403c02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/paths.go
@@ -0,0 +1,276 @@
1package hcl2shim
2
3import (
4 "fmt"
5 "reflect"
6 "strconv"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty"
10)
11
12// RequiresReplace takes a list of flatmapped paths from a
13// InstanceDiff.Attributes along with the corresponding cty.Type, and returns
14// the list of the cty.Paths that are flagged as causing the resource
15// replacement (RequiresNew).
16// This will filter out redundant paths, paths that refer to flatmapped indexes
17// (e.g. "#", "%"), and will return any changes within a set as the path to the
18// set itself.
19func RequiresReplace(attrs []string, ty cty.Type) ([]cty.Path, error) {
20 var paths []cty.Path
21
22 for _, attr := range attrs {
23 p, err := requiresReplacePath(attr, ty)
24 if err != nil {
25 return nil, err
26 }
27
28 paths = append(paths, p)
29 }
30
31 // now trim off any trailing paths that aren't GetAttrSteps, since only an
32 // attribute itself can require replacement
33 paths = trimPaths(paths)
34
35 // There may be redundant paths due to set elements or index attributes
36 // Do some ugly n^2 filtering, but these are always fairly small sets.
37 for i := 0; i < len(paths)-1; i++ {
38 for j := i + 1; j < len(paths); j++ {
39 if reflect.DeepEqual(paths[i], paths[j]) {
40 // swap the tail and slice it off
41 paths[j], paths[len(paths)-1] = paths[len(paths)-1], paths[j]
42 paths = paths[:len(paths)-1]
43 j--
44 }
45 }
46 }
47
48 return paths, nil
49}
50
51// trimPaths removes any trailing steps that aren't of type GetAttrSet, since
52// only an attribute itself can require replacement
53func trimPaths(paths []cty.Path) []cty.Path {
54 var trimmed []cty.Path
55 for _, path := range paths {
56 path = trimPath(path)
57 if len(path) > 0 {
58 trimmed = append(trimmed, path)
59 }
60 }
61 return trimmed
62}
63
64func trimPath(path cty.Path) cty.Path {
65 for len(path) > 0 {
66 _, isGetAttr := path[len(path)-1].(cty.GetAttrStep)
67 if isGetAttr {
68 break
69 }
70 path = path[:len(path)-1]
71 }
72 return path
73}
74
75// requiresReplacePath takes a key from a flatmap along with the cty.Type
76// describing the structure, and returns the cty.Path that would be used to
77// reference the nested value in the data structure.
78// This is used specifically to record the RequiresReplace attributes from a
79// ResourceInstanceDiff.
80func requiresReplacePath(k string, ty cty.Type) (cty.Path, error) {
81 if k == "" {
82 return nil, nil
83 }
84 if !ty.IsObjectType() {
85 panic(fmt.Sprintf("requires replace path on non-object type: %#v", ty))
86 }
87
88 path, err := pathFromFlatmapKeyObject(k, ty.AttributeTypes())
89 if err != nil {
90 return path, fmt.Errorf("[%s] %s", k, err)
91 }
92 return path, nil
93}
94
// pathSplit separates the first flatmap step from the remainder of the key,
// returning the remainder as "" when the key has no further dots.
func pathSplit(p string) (string, string) {
	head, rest, _ := strings.Cut(p, ".")
	return head, rest
}
104
105func pathFromFlatmapKeyObject(key string, atys map[string]cty.Type) (cty.Path, error) {
106 k, rest := pathSplit(key)
107
108 path := cty.Path{cty.GetAttrStep{Name: k}}
109
110 ty, ok := atys[k]
111 if !ok {
112 return path, fmt.Errorf("attribute %q not found", k)
113 }
114
115 if rest == "" {
116 return path, nil
117 }
118
119 p, err := pathFromFlatmapKeyValue(rest, ty)
120 if err != nil {
121 return path, err
122 }
123
124 return append(path, p...), nil
125}
126
127func pathFromFlatmapKeyValue(key string, ty cty.Type) (cty.Path, error) {
128 var path cty.Path
129 var err error
130
131 switch {
132 case ty.IsPrimitiveType():
133 err = fmt.Errorf("invalid step %q with type %#v", key, ty)
134 case ty.IsObjectType():
135 path, err = pathFromFlatmapKeyObject(key, ty.AttributeTypes())
136 case ty.IsTupleType():
137 path, err = pathFromFlatmapKeyTuple(key, ty.TupleElementTypes())
138 case ty.IsMapType():
139 path, err = pathFromFlatmapKeyMap(key, ty)
140 case ty.IsListType():
141 path, err = pathFromFlatmapKeyList(key, ty)
142 case ty.IsSetType():
143 path, err = pathFromFlatmapKeySet(key, ty)
144 default:
145 err = fmt.Errorf("unrecognized type: %s", ty.FriendlyName())
146 }
147
148 if err != nil {
149 return path, err
150 }
151
152 return path, nil
153}
154
155func pathFromFlatmapKeyTuple(key string, etys []cty.Type) (cty.Path, error) {
156 var path cty.Path
157 var err error
158
159 k, rest := pathSplit(key)
160
161 // we don't need to convert the index keys to paths
162 if k == "#" {
163 return path, nil
164 }
165
166 idx, err := strconv.Atoi(k)
167 if err != nil {
168 return path, err
169 }
170
171 path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}}
172
173 if idx >= len(etys) {
174 return path, fmt.Errorf("index %s out of range in %#v", key, etys)
175 }
176
177 if rest == "" {
178 return path, nil
179 }
180
181 ty := etys[idx]
182
183 p, err := pathFromFlatmapKeyValue(rest, ty.ElementType())
184 if err != nil {
185 return path, err
186 }
187
188 return append(path, p...), nil
189}
190
191func pathFromFlatmapKeyMap(key string, ty cty.Type) (cty.Path, error) {
192 var path cty.Path
193 var err error
194
195 k, rest := key, ""
196 if !ty.ElementType().IsPrimitiveType() {
197 k, rest = pathSplit(key)
198 }
199
200 // we don't need to convert the index keys to paths
201 if k == "%" {
202 return path, nil
203 }
204
205 path = cty.Path{cty.IndexStep{Key: cty.StringVal(k)}}
206
207 if rest == "" {
208 return path, nil
209 }
210
211 p, err := pathFromFlatmapKeyValue(rest, ty.ElementType())
212 if err != nil {
213 return path, err
214 }
215
216 return append(path, p...), nil
217}
218
219func pathFromFlatmapKeyList(key string, ty cty.Type) (cty.Path, error) {
220 var path cty.Path
221 var err error
222
223 k, rest := pathSplit(key)
224
225 // we don't need to convert the index keys to paths
226 if key == "#" {
227 return path, nil
228 }
229
230 idx, err := strconv.Atoi(k)
231 if err != nil {
232 return path, err
233 }
234
235 path = cty.Path{cty.IndexStep{Key: cty.NumberIntVal(int64(idx))}}
236
237 if rest == "" {
238 return path, nil
239 }
240
241 p, err := pathFromFlatmapKeyValue(rest, ty.ElementType())
242 if err != nil {
243 return path, err
244 }
245
246 return append(path, p...), nil
247}
248
// pathFromFlatmapKeySet handles flatmap keys that descend into a set value.
// Set elements have no identity separate from their value, so no stable path
// to an individual element can be produced; a nil path marks the set as a
// whole as changed.
func pathFromFlatmapKeySet(key string, ty cty.Type) (cty.Path, error) {
	// once we hit a set, we can't return consistent paths, so just mark the
	// set as a whole changed.
	return nil, nil
}
254
255// FlatmapKeyFromPath returns the flatmap equivalent of the given cty.Path for
256// use in generating legacy style diffs.
257func FlatmapKeyFromPath(path cty.Path) string {
258 var parts []string
259
260 for _, step := range path {
261 switch step := step.(type) {
262 case cty.GetAttrStep:
263 parts = append(parts, step.Name)
264 case cty.IndexStep:
265 switch ty := step.Key.Type(); {
266 case ty == cty.String:
267 parts = append(parts, step.Key.AsString())
268 case ty == cty.Number:
269 i, _ := step.Key.AsBigFloat().Int64()
270 parts = append(parts, strconv.Itoa(int(i)))
271 }
272 }
273 }
274
275 return strings.Join(parts, ".")
276}
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go
index 0b697a5..daeb0b8 100644
--- a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values.go
@@ -6,6 +6,8 @@ import (
6 6
7 "github.com/hashicorp/hil/ast" 7 "github.com/hashicorp/hil/ast"
8 "github.com/zclconf/go-cty/cty" 8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/terraform/configs/configschema"
9) 11)
10 12
11// UnknownVariableValue is a sentinel value that can be used 13// UnknownVariableValue is a sentinel value that can be used
@@ -14,6 +16,108 @@ import (
14// unknown keys. 16// unknown keys.
15const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66" 17const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
16 18
// ConfigValueFromHCL2Block is like ConfigValueFromHCL2 but it works only for
// known object values and uses the provided block schema to perform some
// additional normalization to better mimic the shape of value that the old
// HCL1/HIL-based codepaths would've produced.
//
// In particular, it discards the collections that we use to represent nested
// blocks (other than NestingSingle) if they are empty, which better mimics
// the HCL1 behavior because HCL1 had no knowledge of the schema and so didn't
// know that an unspecified block _could_ exist.
//
// The given object value must conform to the schema's implied type or this
// function will panic or produce incorrect results.
//
// This is primarily useful for the final transition from new-style values to
// terraform.ResourceConfig before calling to a legacy provider, since
// helper/schema (the old provider SDK) is particularly sensitive to these
// subtle differences within its validation code.
func ConfigValueFromHCL2Block(v cty.Value, schema *configschema.Block) map[string]interface{} {
	if v.IsNull() {
		return nil
	}
	if !v.IsKnown() {
		panic("ConfigValueFromHCL2Block used with unknown value")
	}
	if !v.Type().IsObjectType() {
		panic(fmt.Sprintf("ConfigValueFromHCL2Block used with non-object value %#v", v))
	}

	atys := v.Type().AttributeTypes()
	ret := make(map[string]interface{})

	for name := range schema.Attributes {
		// Tolerate a value that carries only a subset of the schema's
		// attributes by skipping ones that aren't present.
		if _, exists := atys[name]; !exists {
			continue
		}

		av := v.GetAttr(name)
		if av.IsNull() {
			// Skip nulls altogether, to better mimic how HCL1 would behave
			continue
		}
		ret[name] = ConfigValueFromHCL2(av)
	}

	for name, blockS := range schema.BlockTypes {
		if _, exists := atys[name]; !exists {
			continue
		}
		bv := v.GetAttr(name)
		if !bv.IsKnown() {
			// A wholly-unknown block collapses to the unknown sentinel
			// string, matching what the HIL interpolator produced.
			ret[name] = UnknownVariableValue
			continue
		}
		if bv.IsNull() {
			continue
		}

		switch blockS.Nesting {

		case configschema.NestingSingle, configschema.NestingGroup:
			ret[name] = ConfigValueFromHCL2Block(bv, &blockS.Block)

		case configschema.NestingList, configschema.NestingSet:
			l := bv.LengthInt()
			if l == 0 {
				// skip empty collections to better mimic how HCL1 would behave
				continue
			}

			elems := make([]interface{}, 0, l)
			for it := bv.ElementIterator(); it.Next(); {
				_, ev := it.Element()
				if !ev.IsKnown() {
					elems = append(elems, UnknownVariableValue)
					continue
				}
				elems = append(elems, ConfigValueFromHCL2Block(ev, &blockS.Block))
			}
			ret[name] = elems

		case configschema.NestingMap:
			if bv.LengthInt() == 0 {
				// skip empty collections to better mimic how HCL1 would behave
				continue
			}

			elems := make(map[string]interface{})
			for it := bv.ElementIterator(); it.Next(); {
				ek, ev := it.Element()
				if !ev.IsKnown() {
					elems[ek.AsString()] = UnknownVariableValue
					continue
				}
				elems[ek.AsString()] = ConfigValueFromHCL2Block(ev, &blockS.Block)
			}
			ret[name] = elems
		}
	}

	return ret
}
120
17// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic 121// ConfigValueFromHCL2 converts a value from HCL2 (really, from the cty dynamic
18// types library that HCL2 uses) to a value type that matches what would've 122// types library that HCL2 uses) to a value type that matches what would've
19// been produced from the HCL-based interpolator for an equivalent structure. 123// been produced from the HCL-based interpolator for an equivalent structure.
@@ -73,7 +177,10 @@ func ConfigValueFromHCL2(v cty.Value) interface{} {
73 it := v.ElementIterator() 177 it := v.ElementIterator()
74 for it.Next() { 178 for it.Next() {
75 ek, ev := it.Element() 179 ek, ev := it.Element()
76 l[ek.AsString()] = ConfigValueFromHCL2(ev) 180 cv := ConfigValueFromHCL2(ev)
181 if cv != nil {
182 l[ek.AsString()] = cv
183 }
77 } 184 }
78 return l 185 return l
79 } 186 }
diff --git a/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go
new file mode 100644
index 0000000..92f0213
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/config/hcl2shim/values_equiv.go
@@ -0,0 +1,214 @@
1package hcl2shim
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// ValuesSDKEquivalent returns true if both of the given values seem equivalent
8// as far as the legacy SDK diffing code would be concerned.
9//
10// Since SDK diffing is a fuzzy, inexact operation, this function is also
11// fuzzy and inexact. It will err on the side of returning false if it
12// encounters an ambiguous situation. Ambiguity is most common in the presence
13// of sets because in practice it is impossible to exactly correlate
14// nonequal-but-equivalent set elements because they have no identity separate
15// from their value.
16//
17// This must be used _only_ for comparing values for equivalence within the
18// SDK planning code. It is only meaningful to compare the "prior state"
19// provided by Terraform Core with the "planned new state" produced by the
20// legacy SDK code via shims. In particular it is not valid to use this
21// function with their the config value or the "proposed new state" value
22// because they contain only the subset of data that Terraform Core itself is
23// able to determine.
24func ValuesSDKEquivalent(a, b cty.Value) bool {
25 if a == cty.NilVal || b == cty.NilVal {
26 // We don't generally expect nils to appear, but we'll allow them
27 // for robustness since the data structures produced by legacy SDK code
28 // can sometimes be non-ideal.
29 return a == b // equivalent if they are _both_ nil
30 }
31 if a.RawEquals(b) {
32 // Easy case. We use RawEquals because we want two unknowns to be
33 // considered equal here, whereas "Equals" would return unknown.
34 return true
35 }
36 if !a.IsKnown() || !b.IsKnown() {
37 // Two unknown values are equivalent regardless of type. A known is
38 // never equivalent to an unknown.
39 return a.IsKnown() == b.IsKnown()
40 }
41 if aZero, bZero := valuesSDKEquivalentIsNullOrZero(a), valuesSDKEquivalentIsNullOrZero(b); aZero || bZero {
42 // Two null/zero values are equivalent regardless of type. A non-zero is
43 // never equivalent to a zero.
44 return aZero == bZero
45 }
46
47 // If we get down here then we are guaranteed that both a and b are known,
48 // non-null values.
49
50 aTy := a.Type()
51 bTy := b.Type()
52 switch {
53 case aTy.IsSetType() && bTy.IsSetType():
54 return valuesSDKEquivalentSets(a, b)
55 case aTy.IsListType() && bTy.IsListType():
56 return valuesSDKEquivalentSequences(a, b)
57 case aTy.IsTupleType() && bTy.IsTupleType():
58 return valuesSDKEquivalentSequences(a, b)
59 case aTy.IsMapType() && bTy.IsMapType():
60 return valuesSDKEquivalentMappings(a, b)
61 case aTy.IsObjectType() && bTy.IsObjectType():
62 return valuesSDKEquivalentMappings(a, b)
63 case aTy == cty.Number && bTy == cty.Number:
64 return valuesSDKEquivalentNumbers(a, b)
65 default:
66 // We've now covered all the interesting cases, so anything that falls
67 // down here cannot be equivalent.
68 return false
69 }
70}
71
72// valuesSDKEquivalentIsNullOrZero returns true if the given value is either
73// null or is the "zero value" (in the SDK/Go sense) for its type.
74func valuesSDKEquivalentIsNullOrZero(v cty.Value) bool {
75 if v == cty.NilVal {
76 return true
77 }
78
79 ty := v.Type()
80 switch {
81 case !v.IsKnown():
82 return false
83 case v.IsNull():
84 return true
85
86 // After this point, v is always known and non-null
87 case ty.IsListType() || ty.IsSetType() || ty.IsMapType() || ty.IsObjectType() || ty.IsTupleType():
88 return v.LengthInt() == 0
89 case ty == cty.String:
90 return v.RawEquals(cty.StringVal(""))
91 case ty == cty.Number:
92 return v.RawEquals(cty.Zero)
93 case ty == cty.Bool:
94 return v.RawEquals(cty.False)
95 default:
96 // The above is exhaustive, but for robustness we'll consider anything
97 // else to _not_ be zero unless it is null.
98 return false
99 }
100}
101
102// valuesSDKEquivalentSets returns true only if each of the elements in a can
103// be correlated with at least one equivalent element in b and vice-versa.
104// This is a fuzzy operation that prefers to signal non-equivalence if it cannot
105// be certain that all elements are accounted for.
106func valuesSDKEquivalentSets(a, b cty.Value) bool {
107 if aLen, bLen := a.LengthInt(), b.LengthInt(); aLen != bLen {
108 return false
109 }
110
111 // Our methodology here is a little tricky, to deal with the fact that
112 // it's impossible to directly correlate two non-equal set elements because
113 // they don't have identities separate from their values.
114 // The approach is to count the number of equivalent elements each element
115 // of a has in b and vice-versa, and then return true only if each element
116 // in both sets has at least one equivalent.
117 as := a.AsValueSlice()
118 bs := b.AsValueSlice()
119 aeqs := make([]bool, len(as))
120 beqs := make([]bool, len(bs))
121 for ai, av := range as {
122 for bi, bv := range bs {
123 if ValuesSDKEquivalent(av, bv) {
124 aeqs[ai] = true
125 beqs[bi] = true
126 }
127 }
128 }
129
130 for _, eq := range aeqs {
131 if !eq {
132 return false
133 }
134 }
135 for _, eq := range beqs {
136 if !eq {
137 return false
138 }
139 }
140 return true
141}
142
143// valuesSDKEquivalentSequences decides equivalence for two sequence values
144// (lists or tuples).
145func valuesSDKEquivalentSequences(a, b cty.Value) bool {
146 as := a.AsValueSlice()
147 bs := b.AsValueSlice()
148 if len(as) != len(bs) {
149 return false
150 }
151
152 for i := range as {
153 if !ValuesSDKEquivalent(as[i], bs[i]) {
154 return false
155 }
156 }
157 return true
158}
159
160// valuesSDKEquivalentMappings decides equivalence for two mapping values
161// (maps or objects).
162func valuesSDKEquivalentMappings(a, b cty.Value) bool {
163 as := a.AsValueMap()
164 bs := b.AsValueMap()
165 if len(as) != len(bs) {
166 return false
167 }
168
169 for k, av := range as {
170 bv, ok := bs[k]
171 if !ok {
172 return false
173 }
174 if !ValuesSDKEquivalent(av, bv) {
175 return false
176 }
177 }
178 return true
179}
180
181// valuesSDKEquivalentNumbers decides equivalence for two number values based
182// on the fact that the SDK uses int and float64 representations while
183// cty (and thus Terraform Core) uses big.Float, and so we expect to lose
184// precision in the round-trip.
185//
186// This does _not_ attempt to allow for an epsilon difference that may be
187// caused by accumulated innacuracy in a float calculation, under the
188// expectation that providers generally do not actually do compuations on
189// floats and instead just pass string representations of them on verbatim
190// to remote APIs. A remote API _itself_ may introduce inaccuracy, but that's
191// a problem for the provider itself to deal with, based on its knowledge of
192// the remote system, e.g. using DiffSuppressFunc.
193func valuesSDKEquivalentNumbers(a, b cty.Value) bool {
194 if a.RawEquals(b) {
195 return true // easy
196 }
197
198 af := a.AsBigFloat()
199 bf := b.AsBigFloat()
200
201 if af.IsInt() != bf.IsInt() {
202 return false
203 }
204 if af.IsInt() && bf.IsInt() {
205 return false // a.RawEquals(b) test above is good enough for integers
206 }
207
208 // The SDK supports only int and float64, so if it's not an integer
209 // we know that only a float64-level of precision can possibly be
210 // significant.
211 af64, _ := af.Float64()
212 bf64, _ := bf.Float64()
213 return af64 == bf64
214}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
index 421edb0..6a2050c 100644
--- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
+++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
@@ -47,6 +47,20 @@ func stringSliceToVariableValue(values []string) []ast.Variable {
47 return output 47 return output
48} 48}
49 49
50// listVariableSliceToVariableValue converts a list of lists into the value
51// required to be returned from interpolation functions which return TypeList.
52func listVariableSliceToVariableValue(values [][]ast.Variable) []ast.Variable {
53 output := make([]ast.Variable, len(values))
54
55 for index, value := range values {
56 output[index] = ast.Variable{
57 Type: ast.TypeList,
58 Value: value,
59 }
60 }
61 return output
62}
63
50func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) { 64func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
51 output := make([]string, len(values)) 65 output := make([]string, len(values))
52 for index, value := range values { 66 for index, value := range values {
@@ -61,74 +75,69 @@ func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
61// Funcs is the mapping of built-in functions for configuration. 75// Funcs is the mapping of built-in functions for configuration.
62func Funcs() map[string]ast.Function { 76func Funcs() map[string]ast.Function {
63 return map[string]ast.Function{ 77 return map[string]ast.Function{
64 "abs": interpolationFuncAbs(), 78 "abs": interpolationFuncAbs(),
65 "basename": interpolationFuncBasename(), 79 "basename": interpolationFuncBasename(),
66 "base64decode": interpolationFuncBase64Decode(), 80 "base64decode": interpolationFuncBase64Decode(),
67 "base64encode": interpolationFuncBase64Encode(), 81 "base64encode": interpolationFuncBase64Encode(),
68 "base64gzip": interpolationFuncBase64Gzip(), 82 "base64gzip": interpolationFuncBase64Gzip(),
69 "base64sha256": interpolationFuncBase64Sha256(), 83 "base64sha256": interpolationFuncBase64Sha256(),
70 "base64sha512": interpolationFuncBase64Sha512(), 84 "base64sha512": interpolationFuncBase64Sha512(),
71 "bcrypt": interpolationFuncBcrypt(), 85 "bcrypt": interpolationFuncBcrypt(),
72 "ceil": interpolationFuncCeil(), 86 "ceil": interpolationFuncCeil(),
73 "chomp": interpolationFuncChomp(), 87 "chomp": interpolationFuncChomp(),
74 "cidrhost": interpolationFuncCidrHost(), 88 "cidrhost": interpolationFuncCidrHost(),
75 "cidrnetmask": interpolationFuncCidrNetmask(), 89 "cidrnetmask": interpolationFuncCidrNetmask(),
76 "cidrsubnet": interpolationFuncCidrSubnet(), 90 "cidrsubnet": interpolationFuncCidrSubnet(),
77 "coalesce": interpolationFuncCoalesce(), 91 "coalesce": interpolationFuncCoalesce(),
78 "coalescelist": interpolationFuncCoalesceList(), 92 "coalescelist": interpolationFuncCoalesceList(),
79 "compact": interpolationFuncCompact(), 93 "compact": interpolationFuncCompact(),
80 "concat": interpolationFuncConcat(), 94 "concat": interpolationFuncConcat(),
81 "contains": interpolationFuncContains(), 95 "contains": interpolationFuncContains(),
82 "dirname": interpolationFuncDirname(), 96 "dirname": interpolationFuncDirname(),
83 "distinct": interpolationFuncDistinct(), 97 "distinct": interpolationFuncDistinct(),
84 "element": interpolationFuncElement(), 98 "element": interpolationFuncElement(),
85 "chunklist": interpolationFuncChunklist(), 99 "chunklist": interpolationFuncChunklist(),
86 "file": interpolationFuncFile(), 100 "file": interpolationFuncFile(),
87 "filebase64sha256": interpolationFuncMakeFileHash(interpolationFuncBase64Sha256()), 101 "matchkeys": interpolationFuncMatchKeys(),
88 "filebase64sha512": interpolationFuncMakeFileHash(interpolationFuncBase64Sha512()), 102 "flatten": interpolationFuncFlatten(),
89 "filemd5": interpolationFuncMakeFileHash(interpolationFuncMd5()), 103 "floor": interpolationFuncFloor(),
90 "filesha1": interpolationFuncMakeFileHash(interpolationFuncSha1()), 104 "format": interpolationFuncFormat(),
91 "filesha256": interpolationFuncMakeFileHash(interpolationFuncSha256()), 105 "formatlist": interpolationFuncFormatList(),
92 "filesha512": interpolationFuncMakeFileHash(interpolationFuncSha512()), 106 "indent": interpolationFuncIndent(),
93 "matchkeys": interpolationFuncMatchKeys(), 107 "index": interpolationFuncIndex(),
94 "flatten": interpolationFuncFlatten(), 108 "join": interpolationFuncJoin(),
95 "floor": interpolationFuncFloor(), 109 "jsonencode": interpolationFuncJSONEncode(),
96 "format": interpolationFuncFormat(), 110 "length": interpolationFuncLength(),
97 "formatlist": interpolationFuncFormatList(), 111 "list": interpolationFuncList(),
98 "indent": interpolationFuncIndent(), 112 "log": interpolationFuncLog(),
99 "index": interpolationFuncIndex(), 113 "lower": interpolationFuncLower(),
100 "join": interpolationFuncJoin(), 114 "map": interpolationFuncMap(),
101 "jsonencode": interpolationFuncJSONEncode(), 115 "max": interpolationFuncMax(),
102 "length": interpolationFuncLength(), 116 "md5": interpolationFuncMd5(),
103 "list": interpolationFuncList(), 117 "merge": interpolationFuncMerge(),
104 "log": interpolationFuncLog(), 118 "min": interpolationFuncMin(),
105 "lower": interpolationFuncLower(), 119 "pathexpand": interpolationFuncPathExpand(),
106 "map": interpolationFuncMap(), 120 "pow": interpolationFuncPow(),
107 "max": interpolationFuncMax(), 121 "uuid": interpolationFuncUUID(),
108 "md5": interpolationFuncMd5(), 122 "replace": interpolationFuncReplace(),
109 "merge": interpolationFuncMerge(), 123 "reverse": interpolationFuncReverse(),
110 "min": interpolationFuncMin(), 124 "rsadecrypt": interpolationFuncRsaDecrypt(),
111 "pathexpand": interpolationFuncPathExpand(), 125 "sha1": interpolationFuncSha1(),
112 "pow": interpolationFuncPow(), 126 "sha256": interpolationFuncSha256(),
113 "uuid": interpolationFuncUUID(), 127 "sha512": interpolationFuncSha512(),
114 "replace": interpolationFuncReplace(), 128 "signum": interpolationFuncSignum(),
115 "rsadecrypt": interpolationFuncRsaDecrypt(), 129 "slice": interpolationFuncSlice(),
116 "sha1": interpolationFuncSha1(), 130 "sort": interpolationFuncSort(),
117 "sha256": interpolationFuncSha256(), 131 "split": interpolationFuncSplit(),
118 "sha512": interpolationFuncSha512(), 132 "substr": interpolationFuncSubstr(),
119 "signum": interpolationFuncSignum(), 133 "timestamp": interpolationFuncTimestamp(),
120 "slice": interpolationFuncSlice(), 134 "timeadd": interpolationFuncTimeAdd(),
121 "sort": interpolationFuncSort(), 135 "title": interpolationFuncTitle(),
122 "split": interpolationFuncSplit(), 136 "transpose": interpolationFuncTranspose(),
123 "substr": interpolationFuncSubstr(), 137 "trimspace": interpolationFuncTrimSpace(),
124 "timestamp": interpolationFuncTimestamp(), 138 "upper": interpolationFuncUpper(),
125 "timeadd": interpolationFuncTimeAdd(), 139 "urlencode": interpolationFuncURLEncode(),
126 "title": interpolationFuncTitle(), 140 "zipmap": interpolationFuncZipMap(),
127 "transpose": interpolationFuncTranspose(),
128 "trimspace": interpolationFuncTrimSpace(),
129 "upper": interpolationFuncUpper(),
130 "urlencode": interpolationFuncURLEncode(),
131 "zipmap": interpolationFuncZipMap(),
132 } 141 }
133} 142}
134 143
@@ -947,6 +956,25 @@ func interpolationFuncReplace() ast.Function {
947 } 956 }
948} 957}
949 958
959// interpolationFuncReverse implements the "reverse" function that does list reversal
960func interpolationFuncReverse() ast.Function {
961 return ast.Function{
962 ArgTypes: []ast.Type{ast.TypeList},
963 ReturnType: ast.TypeList,
964 Variadic: false,
965 Callback: func(args []interface{}) (interface{}, error) {
966 inputList := args[0].([]ast.Variable)
967
968 reversedList := make([]ast.Variable, len(inputList))
969 for idx := range inputList {
970 reversedList[len(inputList)-1-idx] = inputList[idx]
971 }
972
973 return reversedList, nil
974 },
975 }
976}
977
950func interpolationFuncLength() ast.Function { 978func interpolationFuncLength() ast.Function {
951 return ast.Function{ 979 return ast.Function{
952 ArgTypes: []ast.Type{ast.TypeAny}, 980 ArgTypes: []ast.Type{ast.TypeAny},
@@ -1731,24 +1759,3 @@ func interpolationFuncRsaDecrypt() ast.Function {
1731 }, 1759 },
1732 } 1760 }
1733} 1761}
1734
1735// interpolationFuncMakeFileHash constructs a function that hashes the contents
1736// of a file by combining the implementations of the file(...) function and
1737// a given other function that is assumed to take a single string argument and
1738// return a hash value.
1739func interpolationFuncMakeFileHash(hashFunc ast.Function) ast.Function {
1740 fileFunc := interpolationFuncFile()
1741
1742 return ast.Function{
1743 ArgTypes: []ast.Type{ast.TypeString},
1744 ReturnType: ast.TypeString,
1745 Callback: func(args []interface{}) (interface{}, error) {
1746 filename := args[0].(string)
1747 contents, err := fileFunc.Callback([]interface{}{filename})
1748 if err != nil {
1749 return nil, err
1750 }
1751 return hashFunc.Callback([]interface{}{contents})
1752 },
1753 }
1754}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage.go b/vendor/github.com/hashicorp/terraform/config/module/storage.go
index 58e3a10..7734cbc 100644
--- a/vendor/github.com/hashicorp/terraform/config/module/storage.go
+++ b/vendor/github.com/hashicorp/terraform/config/module/storage.go
@@ -7,7 +7,6 @@ import (
7 "log" 7 "log"
8 "os" 8 "os"
9 "path/filepath" 9 "path/filepath"
10 "strings"
11 10
12 getter "github.com/hashicorp/go-getter" 11 getter "github.com/hashicorp/go-getter"
13 "github.com/hashicorp/terraform/registry" 12 "github.com/hashicorp/terraform/registry"
@@ -101,21 +100,6 @@ func (s Storage) loadManifest() (moduleManifest, error) {
101 if err := json.Unmarshal(data, &manifest); err != nil { 100 if err := json.Unmarshal(data, &manifest); err != nil {
102 return manifest, err 101 return manifest, err
103 } 102 }
104
105 for i, rec := range manifest.Modules {
106 // If the path was recorded before we changed to always using a
107 // slash as separator, we delete the record from the manifest so
108 // it can be discovered again and will be recorded using a slash.
109 if strings.Contains(rec.Dir, "\\") {
110 manifest.Modules[i] = manifest.Modules[len(manifest.Modules)-1]
111 manifest.Modules = manifest.Modules[:len(manifest.Modules)-1]
112 continue
113 }
114
115 // Make sure we use the correct path separator.
116 rec.Dir = filepath.FromSlash(rec.Dir)
117 }
118
119 return manifest, nil 103 return manifest, nil
120} 104}
121 105
@@ -146,9 +130,6 @@ func (s Storage) recordModule(rec moduleRecord) error {
146 } 130 }
147 } 131 }
148 132
149 // Make sure we always use a slash separator.
150 rec.Dir = filepath.ToSlash(rec.Dir)
151
152 manifest.Modules = append(manifest.Modules, rec) 133 manifest.Modules = append(manifest.Modules, rec)
153 134
154 js, err := json.Marshal(manifest) 135 js, err := json.Marshal(manifest)
@@ -331,7 +312,7 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e
331 // we need to lookup available versions 312 // we need to lookup available versions
332 // Only on Get if it's not found, on unconditionally on Update 313 // Only on Get if it's not found, on unconditionally on Update
333 if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) { 314 if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) {
334 resp, err := s.registry.Versions(mod) 315 resp, err := s.registry.ModuleVersions(mod)
335 if err != nil { 316 if err != nil {
336 return rec, err 317 return rec, err
337 } 318 }
@@ -351,7 +332,7 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e
351 332
352 rec.Version = match.Version 333 rec.Version = match.Version
353 334
354 rec.url, err = s.registry.Location(mod, rec.Version) 335 rec.url, err = s.registry.ModuleLocation(mod, rec.Version)
355 if err != nil { 336 if err != nil {
356 return rec, err 337 return rec, err
357 } 338 }
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
index 8a55e06..0105278 100644
--- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
+++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
@@ -4,6 +4,14 @@ package config
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[ManagedResourceMode-0]
12 _ = x[DataResourceMode-1]
13}
14
7const _ResourceMode_name = "ManagedResourceModeDataResourceMode" 15const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
8 16
9var _ResourceMode_index = [...]uint8{0, 19, 35} 17var _ResourceMode_index = [...]uint8{0, 19, 35}
diff --git a/vendor/github.com/hashicorp/terraform/configs/backend.go b/vendor/github.com/hashicorp/terraform/configs/backend.go
new file mode 100644
index 0000000..6df7ddd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/backend.go
@@ -0,0 +1,55 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcldec"
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/zclconf/go-cty/cty"
8)
9
// Backend represents a "backend" block inside a "terraform" block in a module
// or file.
type Backend struct {
	// Type is the backend type name given as the block's single label.
	Type string
	// Config is the backend block's body, left undecoded because the
	// schema depends on the backend type.
	Config hcl.Body

	// TypeRange is the source range of the type label.
	TypeRange hcl.Range
	// DeclRange is the source range of the block header.
	DeclRange hcl.Range
}
19
20func decodeBackendBlock(block *hcl.Block) (*Backend, hcl.Diagnostics) {
21 return &Backend{
22 Type: block.Labels[0],
23 TypeRange: block.LabelRanges[0],
24 Config: block.Body,
25 DeclRange: block.DefRange,
26 }, nil
27}
28
// Hash produces a hash value for the receiver that covers the type and the
// portions of the config that conform to the given schema.
//
// If the config does not conform to the schema then the result is not
// meaningful for comparison since it will be based on an incomplete result.
//
// As an exception, required attributes in the schema are treated as optional
// for the purpose of hashing, so that an incomplete configuration can still
// be hashed. Other errors, such as extraneous attributes, have no such special
// case.
func (b *Backend) Hash(schema *configschema.Block) int {
	// Don't fail if required attributes are not set. Instead, we'll just
	// hash them as nulls.
	schema = schema.NoneRequired()
	spec := schema.DecoderSpec()
	// Decode errors are deliberately discarded here; a partial decode still
	// yields a value we can hash, per the doc comment above.
	val, _ := hcldec.Decode(b.Config, spec, nil)
	if val == cty.NilVal {
		// Decoding produced nothing at all, so substitute an unknown value
		// of the schema's implied type so the tuple below is well-formed.
		val = cty.UnknownVal(schema.ImpliedType())
	}

	// Hash the type together with the decoded config so that two backends
	// differing only in type still hash differently.
	toHash := cty.TupleVal([]cty.Value{
		cty.StringVal(b.Type),
		val,
	})

	return toHash.Hash()
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/compat_shim.go b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
new file mode 100644
index 0000000..66037fc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/compat_shim.go
@@ -0,0 +1,116 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcl/hclsyntax"
6 "github.com/zclconf/go-cty/cty"
7)
8
9// -------------------------------------------------------------------------
10// Functions in this file are compatibility shims intended to ease conversion
11// from the old configuration loader. Any use of these functions that makes
12// a change should generate a deprecation warning explaining to the user how
13// to update their code for new patterns.
14//
15// Shims are particularly important for any patterns that have been widely
16// documented in books, tutorials, etc. Users will still be starting from
17// these examples and we want to help them adopt the latest patterns rather
18// than leave them stranded.
19// -------------------------------------------------------------------------
20
// shimTraversalInString takes any arbitrary expression and checks if it is
// a quoted string in the native syntax. If it _is_, then it is parsed as a
// traversal and re-wrapped into a synthetic traversal expression and a
// warning is generated. Otherwise, the given expression is just returned
// verbatim.
//
// This function has no effect on expressions from the JSON syntax, since
// traversals in strings are the required pattern in that syntax.
//
// If wantKeyword is set, the generated warning diagnostic will talk about
// keywords rather than references. The behavior is otherwise unchanged, and
// the caller remains responsible for checking that the result is indeed
// a keyword, e.g. using hcl.ExprAsKeyword.
func shimTraversalInString(expr hcl.Expression, wantKeyword bool) (hcl.Expression, hcl.Diagnostics) {
	// ObjectConsKeyExpr is a special wrapper type used for keys on object
	// constructors to deal with the fact that naked identifiers are normally
	// handled as "bareword" strings rather than as variable references. Since
	// we know we're interpreting as a traversal anyway (and thus it won't
	// matter whether it's a string or an identifier) we can safely just unwrap
	// here and then process whatever we find inside as normal.
	if ocke, ok := expr.(*hclsyntax.ObjectConsKeyExpr); ok {
		expr = ocke.Wrapped
	}

	// Anything other than a native-syntax quoted string passes through
	// untouched, with no diagnostics.
	if !exprIsNativeQuotedString(expr) {
		return expr, nil
	}

	strVal, diags := expr.Value(nil)
	if diags.HasErrors() || strVal.IsNull() || !strVal.IsKnown() {
		// Since we're not even able to attempt a shim here, we'll discard
		// the diagnostics we saw so far and let the caller's own error
		// handling take care of reporting the invalid expression.
		return expr, nil
	}

	// The position handling here isn't _quite_ right because it won't
	// take into account any escape sequences in the literal string, but
	// it should be close enough for any error reporting to make sense.
	srcRange := expr.Range()
	startPos := srcRange.Start // copy
	startPos.Column++          // skip initial quote
	startPos.Byte++            // skip initial quote

	// Re-parse the string's content as an absolute traversal, anchored at
	// the position just inside the opening quote.
	traversal, tDiags := hclsyntax.ParseTraversalAbs(
		[]byte(strVal.AsString()),
		srcRange.Filename,
		startPos,
	)
	diags = append(diags, tDiags...)

	// For initial release our deprecation warnings are disabled to allow
	// a period where modules can be compatible with both old and new
	// conventions.
	// FIXME: Re-enable these deprecation warnings in a release prior to
	// Terraform 0.13 and then remove the shims altogether for 0.13.
	/*
		if wantKeyword {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagWarning,
				Summary:  "Quoted keywords are deprecated",
				Detail:   "In this context, keywords are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this keyword to silence this warning.",
				Subject:  &srcRange,
			})
		} else {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagWarning,
				Summary:  "Quoted references are deprecated",
				Detail:   "In this context, references are expected literally rather than in quotes. Previous versions of Terraform required quotes, but that usage is now deprecated. Remove the quotes surrounding this reference to silence this warning.",
				Subject:  &srcRange,
			})
		}
	*/

	// Wrap the parsed traversal back up as a synthetic expression covering
	// the original quoted string's full source range.
	return &hclsyntax.ScopeTraversalExpr{
		Traversal: traversal,
		SrcRange:  srcRange,
	}, diags
}
100
101// shimIsIgnoreChangesStar returns true if the given expression seems to be
102// a string literal whose value is "*". This is used to support a legacy
103// form of ignore_changes = all .
104//
105// This function does not itself emit any diagnostics, so it's the caller's
106// responsibility to emit a warning diagnostic when this function returns true.
107func shimIsIgnoreChangesStar(expr hcl.Expression) bool {
108 val, valDiags := expr.Value(nil)
109 if valDiags.HasErrors() {
110 return false
111 }
112 if val.Type() != cty.String || val.IsNull() || !val.IsKnown() {
113 return false
114 }
115 return val.AsString() == "*"
116}
diff --git a/vendor/github.com/hashicorp/terraform/configs/config.go b/vendor/github.com/hashicorp/terraform/configs/config.go
new file mode 100644
index 0000000..8294312
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/config.go
@@ -0,0 +1,205 @@
1package configs
2
3import (
4 "sort"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/addrs"
9)
10
// A Config is a node in the tree of modules within a configuration.
//
// The module tree is constructed by following ModuleCall instances recursively
// through the root module transitively into descendent modules.
//
// A module tree described in *this* package represents the static tree
// represented by configuration. During evaluation a static ModuleNode may
// expand into zero or more module instances depending on the use of count and
// for_each configuration attributes within each call.
type Config struct {
	// Root points to the Config for the root module within the same
	// module tree as this module. If this module _is_ the root module then
	// this is self-referential.
	Root *Config

	// Parent points to the Config for the module that directly calls
	// this module. If this is the root module then this field is nil.
	Parent *Config

	// Path is a sequence of module logical names that traverse from the root
	// module to this config. Path is empty for the root module.
	//
	// This should only be used to display paths to the end-user in rare cases
	// where we are talking about the static module tree, before module calls
	// have been resolved. In most cases, an addrs.ModuleInstance describing
	// a node in the dynamic module tree is better, since it will then include
	// any keys resulting from evaluating "count" and "for_each" arguments.
	Path addrs.Module

	// Children points to the Config for each of the direct child modules
	// called from this module. The keys in this map match the keys in
	// Module.ModuleCalls.
	Children map[string]*Config

	// Module points to the object describing the configuration for the
	// various elements (variables, resources, etc) defined by this module.
	Module *Module

	// CallRange is the source range for the header of the module block that
	// requested this module.
	//
	// This field is meaningless for the root module, where its contents are undefined.
	CallRange hcl.Range

	// SourceAddr is the source address that the referenced module was requested
	// from, as specified in configuration.
	//
	// This field is meaningless for the root module, where its contents are undefined.
	SourceAddr string

	// SourceAddrRange is the location in the configuration source where the
	// SourceAddr value was set, for use in diagnostic messages.
	//
	// This field is meaningless for the root module, where its contents are undefined.
	SourceAddrRange hcl.Range

	// Version is the specific version that was selected for this module,
	// based on version constraints given in configuration.
	//
	// This field is nil if the module was loaded from a non-registry source,
	// since versions are not supported for other sources.
	//
	// This field is meaningless for the root module, where it will always
	// be nil.
	Version *version.Version
}
77
78// NewEmptyConfig constructs a single-node configuration tree with an empty
79// root module. This is generally a pretty useless thing to do, so most callers
80// should instead use BuildConfig.
81func NewEmptyConfig() *Config {
82 ret := &Config{}
83 ret.Root = ret
84 ret.Children = make(map[string]*Config)
85 ret.Module = &Module{}
86 return ret
87}
88
89// Depth returns the number of "hops" the receiver is from the root of its
90// module tree, with the root module having a depth of zero.
91func (c *Config) Depth() int {
92 ret := 0
93 this := c
94 for this.Parent != nil {
95 ret++
96 this = this.Parent
97 }
98 return ret
99}
100
101// DeepEach calls the given function once for each module in the tree, starting
102// with the receiver.
103//
104// A parent is always called before its children and children of a particular
105// node are visited in lexicographic order by their names.
106func (c *Config) DeepEach(cb func(c *Config)) {
107 cb(c)
108
109 names := make([]string, 0, len(c.Children))
110 for name := range c.Children {
111 names = append(names, name)
112 }
113
114 for _, name := range names {
115 c.Children[name].DeepEach(cb)
116 }
117}
118
119// AllModules returns a slice of all the receiver and all of its descendent
120// nodes in the module tree, in the same order they would be visited by
121// DeepEach.
122func (c *Config) AllModules() []*Config {
123 var ret []*Config
124 c.DeepEach(func(c *Config) {
125 ret = append(ret, c)
126 })
127 return ret
128}
129
130// Descendent returns the descendent config that has the given path beneath
131// the receiver, or nil if there is no such module.
132//
133// The path traverses the static module tree, prior to any expansion to handle
134// count and for_each arguments.
135//
136// An empty path will just return the receiver, and is therefore pointless.
137func (c *Config) Descendent(path addrs.Module) *Config {
138 current := c
139 for _, name := range path {
140 current = current.Children[name]
141 if current == nil {
142 return nil
143 }
144 }
145 return current
146}
147
148// DescendentForInstance is like Descendent except that it accepts a path
149// to a particular module instance in the dynamic module graph, returning
150// the node from the static module graph that corresponds to it.
151//
152// All instances created by a particular module call share the same
153// configuration, so the keys within the given path are disregarded.
154func (c *Config) DescendentForInstance(path addrs.ModuleInstance) *Config {
155 current := c
156 for _, step := range path {
157 current = current.Children[step.Name]
158 if current == nil {
159 return nil
160 }
161 }
162 return current
163}
164
165// ProviderTypes returns the names of each distinct provider type referenced
166// in the receiving configuration.
167//
168// This is a helper for easily determining which provider types are required
169// to fully interpret the configuration, though it does not include version
170// information and so callers are expected to have already dealt with
171// provider version selection in an earlier step and have identified suitable
172// versions for each provider.
173func (c *Config) ProviderTypes() []string {
174 m := make(map[string]struct{})
175 c.gatherProviderTypes(m)
176
177 ret := make([]string, 0, len(m))
178 for k := range m {
179 ret = append(ret, k)
180 }
181 sort.Strings(ret)
182 return ret
183}
// gatherProviderTypes adds to m the provider type name of every provider
// config block and every managed/data resource in the receiver's module and,
// recursively, in all of its child modules. It is the accumulator behind
// ProviderTypes.
func (c *Config) gatherProviderTypes(m map[string]struct{}) {
	// Tolerate a nil receiver so callers can recurse without guarding.
	if c == nil {
		return
	}

	for _, pc := range c.Module.ProviderConfigs {
		m[pc.Name] = struct{}{}
	}
	for _, rc := range c.Module.ManagedResources {
		providerAddr := rc.ProviderConfigAddr()
		m[providerAddr.Type] = struct{}{}
	}
	for _, rc := range c.Module.DataResources {
		providerAddr := rc.ProviderConfigAddr()
		m[providerAddr.Type] = struct{}{}
	}

	// Must also visit our child modules, recursively.
	for _, cc := range c.Children {
		cc.gatherProviderTypes(m)
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/config_build.go b/vendor/github.com/hashicorp/terraform/configs/config_build.go
new file mode 100644
index 0000000..948b2c8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/config_build.go
@@ -0,0 +1,179 @@
1package configs
2
3import (
4 "sort"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/addrs"
9)
10
11// BuildConfig constructs a Config from a root module by loading all of its
12// descendent modules via the given ModuleWalker.
13//
14// The result is a module tree that has so far only had basic module- and
15// file-level invariants validated. If the returned diagnostics contains errors,
16// the returned module tree may be incomplete but can still be used carefully
17// for static analysis.
18func BuildConfig(root *Module, walker ModuleWalker) (*Config, hcl.Diagnostics) {
19 var diags hcl.Diagnostics
20 cfg := &Config{
21 Module: root,
22 }
23 cfg.Root = cfg // Root module is self-referential.
24 cfg.Children, diags = buildChildModules(cfg, walker)
25 return cfg, diags
26}
27
28func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, hcl.Diagnostics) {
29 var diags hcl.Diagnostics
30 ret := map[string]*Config{}
31
32 calls := parent.Module.ModuleCalls
33
34 // We'll sort the calls by their local names so that they'll appear in a
35 // predictable order in any logging that's produced during the walk.
36 callNames := make([]string, 0, len(calls))
37 for k := range calls {
38 callNames = append(callNames, k)
39 }
40 sort.Strings(callNames)
41
42 for _, callName := range callNames {
43 call := calls[callName]
44 path := make([]string, len(parent.Path)+1)
45 copy(path, parent.Path)
46 path[len(path)-1] = call.Name
47
48 req := ModuleRequest{
49 Name: call.Name,
50 Path: path,
51 SourceAddr: call.SourceAddr,
52 SourceAddrRange: call.SourceAddrRange,
53 VersionConstraint: call.Version,
54 Parent: parent,
55 CallRange: call.DeclRange,
56 }
57
58 mod, ver, modDiags := walker.LoadModule(&req)
59 diags = append(diags, modDiags...)
60 if mod == nil {
61 // nil can be returned if the source address was invalid and so
62 // nothing could be loaded whatsoever. LoadModule should've
63 // returned at least one error diagnostic in that case.
64 continue
65 }
66
67 child := &Config{
68 Parent: parent,
69 Root: parent.Root,
70 Path: path,
71 Module: mod,
72 CallRange: call.DeclRange,
73 SourceAddr: call.SourceAddr,
74 SourceAddrRange: call.SourceAddrRange,
75 Version: ver,
76 }
77
78 child.Children, modDiags = buildChildModules(child, walker)
79
80 ret[call.Name] = child
81 }
82
83 return ret, diags
84}
85
// A ModuleWalker knows how to find and load a child module given details about
// the module to be loaded and a reference to its partially-loaded parent
// Config.
type ModuleWalker interface {
	// LoadModule finds and loads a requested child module.
	//
	// If errors are detected during loading, implementations should return them
	// in the diagnostics object. If the diagnostics object contains any errors
	// then the caller will tolerate the returned module being nil or incomplete.
	// If no errors are returned, it should be non-nil and complete.
	//
	// Full validation need not have been performed but an implementation should
	// ensure that the basic file- and module-validations performed by the
	// LoadConfigDir function (valid syntax, no namespace collisions, etc) have
	// been performed before returning a module.
	//
	// The returned version is the concrete module version that was selected,
	// or nil for sources that do not support versioning.
	LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics)
}
103
// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps
// a callback function, for more convenient use of that interface.
type ModuleWalkerFunc func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics)

// LoadModule implements ModuleWalker by delegating to the wrapped function.
func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
	return f(req)
}
112
// ModuleRequest is used with the ModuleWalker interface to describe a child
// module that must be loaded.
type ModuleRequest struct {
	// Name is the "logical name" of the module call within configuration.
	// This is provided in case the name is used as part of a storage key
	// for the module, but implementations must otherwise treat it as an
	// opaque string. It is guaranteed to have already been validated as an
	// HCL identifier and UTF-8 encoded.
	Name string

	// Path is a list of logical names that traverse from the root module to
	// this module. This can be used, for example, to form a lookup key for
	// each distinct module call in a configuration, allowing for multiple
	// calls with the same name at different points in the tree.
	Path addrs.Module

	// SourceAddr is the source address string provided by the user in
	// configuration.
	SourceAddr string

	// SourceAddrRange is the source range for the SourceAddr value as it
	// was provided in configuration. This can and should be used to generate
	// diagnostics about the source address having invalid syntax, referring
	// to a non-existent object, etc.
	SourceAddrRange hcl.Range

	// VersionConstraint is the version constraint applied to the module in
	// configuration. This data structure includes the source range for
	// the constraint, which can and should be used to generate diagnostics
	// about constraint-related issues, such as constraints that eliminate all
	// available versions of a module whose source is otherwise valid.
	VersionConstraint VersionConstraint

	// Parent is the partially-constructed module tree node that the loaded
	// module will be added to. Callers may refer to any field of this
	// structure except Children, which is still under construction when
	// ModuleRequest objects are created and thus has undefined content.
	// The main reason this is provided is so that full module paths can
	// be constructed for uniqueness.
	Parent *Config

	// CallRange is the source range for the header of the "module" block
	// in configuration that prompted this request. This can be used as the
	// subject of an error diagnostic that relates to the module call itself,
	// rather than to either its source address or its version number.
	CallRange hcl.Range
}
160
161// DisabledModuleWalker is a ModuleWalker that doesn't support
162// child modules at all, and so will return an error if asked to load one.
163//
164// This is provided primarily for testing. There is no good reason to use this
165// in the main application.
166var DisabledModuleWalker ModuleWalker
167
168func init() {
169 DisabledModuleWalker = ModuleWalkerFunc(func(req *ModuleRequest) (*Module, *version.Version, hcl.Diagnostics) {
170 return nil, nil, hcl.Diagnostics{
171 {
172 Severity: hcl.DiagError,
173 Summary: "Child modules are not supported",
174 Detail: "Child module calls are not allowed in this context.",
175 Subject: &req.CallRange,
176 },
177 }
178 })
179}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go
new file mode 100644
index 0000000..ebbeb3b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/copy_dir.go
@@ -0,0 +1,125 @@
1package configload
2
3import (
4 "io"
5 "os"
6 "path/filepath"
7 "strings"
8)
9
// copyDir copies the src directory contents into dst. Both directories
// should already exist.
//
// Dot-prefixed files and directories are skipped entirely, symlinks are
// recreated (pointing at their original targets), and regular files are
// copied with their original permission bits.
func copyDir(dst, src string) error {
	// Resolve symlinks in src so that the path-prefix arithmetic below
	// operates on the real on-disk location.
	src, err := filepath.EvalSymlinks(src)
	if err != nil {
		return err
	}

	walkFn := func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Nothing to do for the root itself.
		if path == src {
			return nil
		}

		if strings.HasPrefix(filepath.Base(path), ".") {
			// Skip any dot files
			if info.IsDir() {
				return filepath.SkipDir
			} else {
				return nil
			}
		}

		// The "path" has the src prefixed to it. We need to join our
		// destination with the path without the src on it.
		dstPath := filepath.Join(dst, path[len(src):])

		// we don't want to try and copy the same file over itself.
		if eq, err := sameFile(path, dstPath); eq {
			return nil
		} else if err != nil {
			return err
		}

		// If we have a directory, make that subdirectory, then continue
		// the walk.
		if info.IsDir() {
			if path == filepath.Join(src, dst) {
				// dst is in src; don't walk it.
				// NOTE(review): this guard only matches when dst is a path
				// relative to src; if dst is absolute the comparison can
				// never be true — confirm intent against callers.
				return nil
			}

			if err := os.MkdirAll(dstPath, 0755); err != nil {
				return err
			}

			return nil
		}

		// If the current path is a symlink, recreate the symlink relative to
		// the dst directory
		if info.Mode()&os.ModeSymlink == os.ModeSymlink {
			target, err := os.Readlink(path)
			if err != nil {
				return err
			}

			return os.Symlink(target, dstPath)
		}

		// If we have a file, copy the contents. The defers below run when
		// walkFn returns, i.e. once per file, so handles are not held for
		// the whole walk.
		srcF, err := os.Open(path)
		if err != nil {
			return err
		}
		defer srcF.Close()

		dstF, err := os.Create(dstPath)
		if err != nil {
			return err
		}
		defer dstF.Close()

		if _, err := io.Copy(dstF, srcF); err != nil {
			return err
		}

		// Chmod it to match the source file's permission bits.
		return os.Chmod(dstPath, info.Mode())
	}

	return filepath.Walk(src, walkFn)
}
96
// sameFile tries to determine if two paths refer to the same file.
// If the paths don't match, we look up the inode on supported systems.
// A path that doesn't exist is treated as "not the same file" rather
// than as an error.
func sameFile(a, b string) (bool, error) {
	if a == b {
		return true, nil
	}

	aIno, err := inode(a)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	bIno, err := inode(b)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	// inode returns 0 on platforms without inode support (e.g. Windows),
	// so only a positive matching pair counts as the same file.
	if aIno > 0 && aIno == bIno {
		return true, nil
	}

	return false, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/doc.go b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go
new file mode 100644
index 0000000..8b615f9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/doc.go
@@ -0,0 +1,4 @@
1// Package configload knows how to install modules into the .terraform/modules
2// directory and to load modules from those installed locations. It is used
3// in conjunction with the LoadConfig function in the parent package.
4package configload
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/getter.go b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go
new file mode 100644
index 0000000..4a3dace
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/getter.go
@@ -0,0 +1,150 @@
1package configload
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "path/filepath"
8
9 cleanhttp "github.com/hashicorp/go-cleanhttp"
10 getter "github.com/hashicorp/go-getter"
11)
12
13// We configure our own go-getter detector and getter sets here, because
14// the set of sources we support is part of Terraform's documentation and
15// so we don't want any new sources introduced in go-getter to sneak in here
16// and work even though they aren't documented. This also insulates us from
17// any meddling that might be done by other go-getter callers linked into our
18// executable.
19
// goGetterDetectors is the curated set of source-address detectors that
// module installation supports; see the package comment above for why we
// don't use go-getter's global Detectors list.
var goGetterDetectors = []getter.Detector{
	new(getter.GitHubDetector),
	new(getter.BitBucketDetector),
	new(getter.S3Detector),
	new(getter.FileDetector),
}

// goGetterNoDetectors disables detection, for use when detection has
// already been performed separately.
var goGetterNoDetectors = []getter.Detector{}

// goGetterDecompressors maps archive filename suffixes to the decompressors
// used to unpack fetched module packages.
var goGetterDecompressors = map[string]getter.Decompressor{
	"bz2": new(getter.Bzip2Decompressor),
	"gz":  new(getter.GzipDecompressor),
	"xz":  new(getter.XzDecompressor),
	"zip": new(getter.ZipDecompressor),

	"tar.bz2":  new(getter.TarBzip2Decompressor),
	"tar.tbz2": new(getter.TarBzip2Decompressor),

	"tar.gz": new(getter.TarGzipDecompressor),
	"tgz":    new(getter.TarGzipDecompressor),

	"tar.xz": new(getter.TarXzDecompressor),
	"txz":    new(getter.TarXzDecompressor),
}

// goGetterGetters is the curated set of URL schemes we allow modules to be
// fetched from.
var goGetterGetters = map[string]getter.Getter{
	"file":  new(getter.FileGetter),
	"git":   new(getter.GitGetter),
	"hg":    new(getter.HgGetter),
	"s3":    new(getter.S3Getter),
	"http":  getterHTTPGetter,
	"https": getterHTTPGetter,
}

// getterHTTPClient is shared by all HTTP(S) fetches so connections can be
// reused across module downloads.
var getterHTTPClient = cleanhttp.DefaultClient()

// getterHTTPGetter fetches over HTTP(S), honoring ~/.netrc credentials.
var getterHTTPGetter = &getter.HttpGetter{
	Client: getterHTTPClient,
	Netrc:  true,
}
60
// A reusingGetter is a helper for the module installer that remembers
// the final resolved addresses of all of the sources it has already been
// asked to install, and will copy from a prior installation directory if
// it has the same resolved source address.
//
// The keys in a reusingGetter are resolved and trimmed source addresses
// (with a scheme always present, and without any "subdir" component),
// and the values are the paths where each source was previously installed.
type reusingGetter map[string]string
70
71// getWithGoGetter retrieves the package referenced in the given address
72// into the installation path and then returns the full path to any subdir
73// indicated in the address.
74//
75// The errors returned by this function are those surfaced by the underlying
76// go-getter library, which have very inconsistent quality as
77// end-user-actionable error messages. At this time we do not have any
78// reasonable way to improve these error messages at this layer because
79// the underlying errors are not separatelyr recognizable.
80func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) {
81 packageAddr, subDir := splitAddrSubdir(addr)
82
83 log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath)
84
85 realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors)
86 if err != nil {
87 return "", err
88 }
89
90 var realSubDir string
91 realAddr, realSubDir = splitAddrSubdir(realAddr)
92 if realSubDir != "" {
93 subDir = filepath.Join(realSubDir, subDir)
94 }
95
96 if realAddr != packageAddr {
97 log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr)
98 }
99
100 if prevDir, exists := g[realAddr]; exists {
101 log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath)
102 err := os.Mkdir(instPath, os.ModePerm)
103 if err != nil {
104 return "", fmt.Errorf("failed to create directory %s: %s", instPath, err)
105 }
106 err = copyDir(instPath, prevDir)
107 if err != nil {
108 return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err)
109 }
110 } else {
111 log.Printf("[TRACE] fetching %q to %q", realAddr, instPath)
112 client := getter.Client{
113 Src: realAddr,
114 Dst: instPath,
115 Pwd: instPath,
116
117 Mode: getter.ClientModeDir,
118
119 Detectors: goGetterNoDetectors, // we already did detection above
120 Decompressors: goGetterDecompressors,
121 Getters: goGetterGetters,
122 }
123 err = client.Get()
124 if err != nil {
125 return "", err
126 }
127 // Remember where we installed this so we might reuse this directory
128 // on subsequent calls to avoid re-downloading.
129 g[realAddr] = instPath
130 }
131
132 // Our subDir string can contain wildcards until this point, so that
133 // e.g. a subDir of * can expand to one top-level directory in a .tar.gz
134 // archive. Now that we've expanded the archive successfully we must
135 // resolve that into a concrete path.
136 var finalDir string
137 if subDir != "" {
138 finalDir, err = getter.SubdirGlob(instPath, subDir)
139 log.Printf("[TRACE] expanded %q to %q", subDir, finalDir)
140 if err != nil {
141 return "", err
142 }
143 } else {
144 finalDir = instPath
145 }
146
147 // If we got this far then we have apparently succeeded in downloading
148 // the requested object!
149 return filepath.Clean(finalDir), nil
150}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go
new file mode 100644
index 0000000..57df041
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode.go
@@ -0,0 +1,21 @@
1// +build linux darwin openbsd netbsd solaris dragonfly
2
3package configload
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
// inode returns the inode number of the file at path on POSIX systems.
// It returns an error if the file cannot be stat'd or if the platform's
// Stat result does not expose a *syscall.Stat_t.
func inode(path string) (uint64, error) {
	stat, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	if st, ok := stat.Sys().(*syscall.Stat_t); ok {
		return st.Ino, nil
	}
	return 0, fmt.Errorf("could not determine file inode")
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go
new file mode 100644
index 0000000..4dc28ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_freebsd.go
@@ -0,0 +1,21 @@
1// +build freebsd
2
3package configload
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
// inode returns the inode number of the file at the given path on
// FreeBSD, where Stat_t.Ino needs an explicit widening conversion to
// uint64. Returns an error if the file cannot be stat'd or the stat
// result is not the expected platform-specific type.
func inode(path string) (uint64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("could not determine file inode")
	}
	return uint64(st.Ino), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go
new file mode 100644
index 0000000..0d22e67
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/inode_windows.go
@@ -0,0 +1,8 @@
1// +build windows
2
3package configload
4
// inode is the Windows stand-in for the POSIX implementations: there is
// no syscall.Stat_t on Windows, so no inode is available and 0 is
// returned for every path. Callers therefore must not use inode
// equality to distinguish files on this platform.
func inode(path string) (uint64, error) {
	return 0, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go
new file mode 100644
index 0000000..416b48f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader.go
@@ -0,0 +1,150 @@
1package configload
2
3import (
4 "fmt"
5 "path/filepath"
6
7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/registry"
9 "github.com/hashicorp/terraform/svchost/disco"
10 "github.com/spf13/afero"
11)
12
// A Loader instance is the main entry-point for loading configurations via
// this package.
//
// It extends the general config-loading functionality in the parent package
// "configs" to support installation of modules from remote sources and
// loading full configurations using modules that were previously installed.
type Loader struct {
	// parser is used to read configuration source files. It is shared by
	// all load operations so its source-code cache accumulates every file
	// read, which Sources exposes for diagnostic snippets.
	parser *configs.Parser

	// modules is used to install and locate descendent modules that are
	// referenced (directly or indirectly) from the root module.
	modules moduleMgr
}

// Config is used with NewLoader to specify configuration arguments for the
// loader.
type Config struct {
	// ModulesDir is a path to a directory where descendent modules are
	// (or should be) installed. (This is usually the
	// .terraform/modules directory, in the common case where this package
	// is being loaded from the main Terraform CLI package.)
	ModulesDir string

	// Services is the service discovery client to use when locating remote
	// module registry endpoints. If this is nil then registry sources are
	// not supported, which should be true only in specialized circumstances
	// such as in tests.
	Services *disco.Disco
}
43
44// NewLoader creates and returns a loader that reads configuration from the
45// real OS filesystem.
46//
47// The loader has some internal state about the modules that are currently
48// installed, which is read from disk as part of this function. If that
49// manifest cannot be read then an error will be returned.
50func NewLoader(config *Config) (*Loader, error) {
51 fs := afero.NewOsFs()
52 parser := configs.NewParser(fs)
53 reg := registry.NewClient(config.Services, nil)
54
55 ret := &Loader{
56 parser: parser,
57 modules: moduleMgr{
58 FS: afero.Afero{Fs: fs},
59 CanInstall: true,
60 Dir: config.ModulesDir,
61 Services: config.Services,
62 Registry: reg,
63 },
64 }
65
66 err := ret.modules.readModuleManifestSnapshot()
67 if err != nil {
68 return nil, fmt.Errorf("failed to read module manifest: %s", err)
69 }
70
71 return ret, nil
72}
73
// ModulesDir returns the path to the directory where the loader will look for
// the local cache of remote module packages.
func (l *Loader) ModulesDir() string {
	return l.modules.Dir
}

// RefreshModules updates the in-memory cache of the module manifest from the
// module manifest file on disk. This is not necessary in normal use because
// module installation and configuration loading are separate steps, but it
// can be useful in tests where module installation is done as a part of
// configuration loading by a helper function.
//
// Call this function after any module installation where an existing loader
// is already alive and may be used again later.
//
// An error is returned if the manifest file cannot be read.
func (l *Loader) RefreshModules() error {
	if l == nil {
		// Nothing to do, then. (A nil receiver is tolerated so callers
		// holding an optional loader need not nil-check before refreshing.)
		return nil
	}
	return l.modules.readModuleManifestSnapshot()
}

// Parser returns the underlying parser for this loader.
//
// This is useful for loading other sorts of files than the module directories
// that a loader deals with, since then they will share the source code cache
// for this loader and can thus be shown as snippets in diagnostic messages.
func (l *Loader) Parser() *configs.Parser {
	return l.parser
}

// Sources returns the source code cache for the underlying parser of this
// loader. This is a shorthand for l.Parser().Sources().
func (l *Loader) Sources() map[string][]byte {
	return l.parser.Sources()
}

// IsConfigDir returns true if and only if the given directory contains at
// least one Terraform configuration file. This is a wrapper around calling
// the same method name on the loader's parser.
func (l *Loader) IsConfigDir(path string) bool {
	return l.parser.IsConfigDir(path)
}
119
120// ImportSources writes into the receiver's source code the given source
121// code buffers.
122//
123// This is useful in the situation where an ancillary loader is created for
124// some reason (e.g. loading config from a plan file) but the cached source
125// code from that loader must be imported into the "main" loader in order
126// to return source code snapshots in diagnostic messages.
127//
128// loader.ImportSources(otherLoader.Sources())
129func (l *Loader) ImportSources(sources map[string][]byte) {
130 p := l.Parser()
131 for name, src := range sources {
132 p.ForceFileSource(name, src)
133 }
134}
135
136// ImportSourcesFromSnapshot writes into the receiver's source code the
137// source files from the given snapshot.
138//
139// This is similar to ImportSources but knows how to unpack and flatten a
140// snapshot data structure to get the corresponding flat source file map.
141func (l *Loader) ImportSourcesFromSnapshot(snap *Snapshot) {
142 p := l.Parser()
143 for _, m := range snap.Modules {
144 baseDir := m.Dir
145 for fn, src := range m.Files {
146 fullPath := filepath.Join(baseDir, fn)
147 p.ForceFileSource(fullPath, src)
148 }
149 }
150}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
new file mode 100644
index 0000000..93a9420
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
@@ -0,0 +1,97 @@
1package configload
2
3import (
4 "fmt"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/configs"
9)
10
// LoadConfig reads the Terraform module in the given directory and uses it as the
// root module to build the static module tree that represents a configuration,
// assuming that all required descendent modules have already been installed.
//
// If error diagnostics are returned, the returned configuration may be either
// nil or incomplete. In the latter case, cautious static analysis is possible
// in spite of the errors.
//
// LoadConfig performs the basic syntax and uniqueness validations that are
// required to process the individual modules, and also detects discrepancies
// between the configuration and the currently-installed modules (see
// moduleWalkerLoad) so the user can be told to re-run "terraform init".
func (l *Loader) LoadConfig(rootDir string) (*configs.Config, hcl.Diagnostics) {
	rootMod, diags := l.parser.LoadConfigDir(rootDir)
	if rootMod == nil {
		// nil indicates the directory doesn't exist or can't be read; the
		// returned diagnostics already describe the problem.
		return nil, diags
	}

	cfg, cDiags := configs.BuildConfig(rootMod, configs.ModuleWalkerFunc(l.moduleWalkerLoad))
	diags = append(diags, cDiags...)

	return cfg, diags
}

// moduleWalkerLoad is a configs.ModuleWalkerFunc for loading modules that
// are presumed to have already been installed. A different function
// (moduleWalkerInstall) is used for installation.
func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
	// Since we're just loading here, we expect that all referenced modules
	// will be already installed and described in our manifest. However, we
	// do verify that the manifest and the configuration are in agreement
	// so that we can prompt the user to run "terraform init" if not.

	key := l.modules.manifest.ModuleKey(req.Path)
	record, exists := l.modules.manifest[key]

	if !exists {
		return nil, nil, hcl.Diagnostics{
			{
				Severity: hcl.DiagError,
				Summary:  "Module not installed",
				Detail:   "This module is not yet installed. Run \"terraform init\" to install all modules required by this configuration.",
				Subject:  &req.CallRange,
			},
		}
	}

	var diags hcl.Diagnostics

	// Check for inconsistencies between manifest and config
	if req.SourceAddr != record.SourceAddr {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Module source has changed",
			Detail:   "The source address was changed since this module was installed. Run \"terraform init\" to install all modules required by this configuration.",
			Subject:  &req.SourceAddrRange,
		})
	}
	// NOTE(review): record.Version may be nil for modules installed from
	// non-registry sources; this assumes the go-version Check method
	// tolerates a nil version — confirm against that library.
	if !req.VersionConstraint.Required.Check(record.Version) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Module version requirements have changed",
			Detail: fmt.Sprintf(
				"The version requirements have changed since this module was installed and the installed version (%s) is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.",
				record.Version,
			),
			Subject: &req.SourceAddrRange,
		})
	}

	mod, mDiags := l.parser.LoadConfigDir(record.Dir)
	diags = append(diags, mDiags...)
	if mod == nil {
		// nil specifically indicates that the directory does not exist or
		// cannot be read, so in this case we'll discard any generic diagnostics
		// returned from LoadConfigDir and produce our own context-sensitive
		// error message.
		return nil, nil, hcl.Diagnostics{
			{
				Severity: hcl.DiagError,
				Summary:  "Module not installed",
				Detail:   fmt.Sprintf("This module's local cache directory %s could not be read. Run \"terraform init\" to install all modules required by this configuration.", record.Dir),
				Subject:  &req.CallRange,
			},
		}
	}

	return mod, record.Version, diags
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go
new file mode 100644
index 0000000..44c6439
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/loader_snapshot.go
@@ -0,0 +1,504 @@
1package configload
2
3import (
4 "fmt"
5 "io"
6 "os"
7 "path/filepath"
8 "sort"
9 "time"
10
11 version "github.com/hashicorp/go-version"
12 "github.com/hashicorp/hcl2/hcl"
13 "github.com/hashicorp/terraform/configs"
14 "github.com/hashicorp/terraform/internal/modsdir"
15 "github.com/spf13/afero"
16)
17
// LoadConfigWithSnapshot is a variant of LoadConfig that also simultaneously
// creates an in-memory snapshot of the configuration files used, which can
// be later used to create a loader that may read only from this snapshot.
func (l *Loader) LoadConfigWithSnapshot(rootDir string) (*configs.Config, *Snapshot, hcl.Diagnostics) {
	rootMod, diags := l.parser.LoadConfigDir(rootDir)
	if rootMod == nil {
		// nil indicates the root directory doesn't exist or can't be read;
		// the diagnostics already describe the problem.
		return nil, nil, diags
	}

	snap := &Snapshot{
		Modules: map[string]*SnapshotModule{},
	}
	// The walker records each visited descendent module's files into snap
	// as a side effect of resolving it.
	walker := l.makeModuleWalkerSnapshot(snap)
	cfg, cDiags := configs.BuildConfig(rootMod, walker)
	diags = append(diags, cDiags...)

	// The walker only sees descendent modules, so the root module (empty
	// key, no source address or version) is added separately here.
	addDiags := l.addModuleToSnapshot(snap, "", rootDir, "", nil)
	diags = append(diags, addDiags...)

	return cfg, snap, diags
}

// NewLoaderFromSnapshot creates a Loader that reads files only from the
// given snapshot.
//
// A snapshot-based loader cannot install modules, so calling InstallModules
// on the return value will cause a panic.
//
// A snapshot-based loader also has access only to configuration files. Its
// underlying parser does not have access to other files in the native
// filesystem, such as values files. For those, either use a normal loader
// (created by NewLoader) or use the configs.Parser API directly.
func NewLoaderFromSnapshot(snap *Snapshot) *Loader {
	fs := snapshotFS{snap}
	parser := configs.NewParser(fs)

	ret := &Loader{
		parser: parser,
		modules: moduleMgr{
			FS:         afero.Afero{Fs: fs},
			CanInstall: false,
			// The manifest is synthesized from the snapshot itself rather
			// than read from any on-disk manifest file.
			manifest: snap.moduleManifest(),
		},
	}

	return ret
}
65
// Snapshot is an in-memory representation of the source files from a
// configuration, which can be used as an alternative configurations source
// for a loader with NewLoaderFromSnapshot.
//
// The primary purpose of a Snapshot is to build the configuration portion
// of a plan file (see ../../plans/planfile) so that it can later be reloaded
// and used to recover the exact configuration that the plan was built from.
type Snapshot struct {
	// Modules is a map from opaque module keys (suitable for use as directory
	// names on all supported operating systems) to the snapshot information
	// about each module. The root module is stored under the empty-string key.
	Modules map[string]*SnapshotModule
}

// NewEmptySnapshot constructs and returns a snapshot containing only an empty
// root module. This is not useful for anything except placeholders in tests.
func NewEmptySnapshot() *Snapshot {
	return &Snapshot{
		Modules: map[string]*SnapshotModule{
			"": &SnapshotModule{
				Files: map[string][]byte{},
			},
		},
	}
}

// SnapshotModule represents a single module within a Snapshot.
type SnapshotModule struct {
	// Dir is the path, relative to the root directory given when the
	// snapshot was created, where the module appears in the snapshot's
	// virtual filesystem.
	Dir string

	// Files is a map from each configuration file filename for the
	// module to a raw byte representation of the source file contents.
	Files map[string][]byte

	// SourceAddr is the source address given for this module in configuration.
	// (The struct tag renames this field to "Source" when JSON-encoded.)
	SourceAddr string `json:"Source"`

	// Version is the version of the module that is installed, or nil if
	// the module is installed from a source that does not support versions.
	// It is deliberately excluded from JSON encoding.
	Version *version.Version `json:"-"`
}
110
// moduleManifest constructs a module manifest based on the contents of
// the receiving snapshot, so that a snapshot-based loader can answer
// manifest lookups without any on-disk manifest file.
func (s *Snapshot) moduleManifest() modsdir.Manifest {
	ret := make(modsdir.Manifest)

	for k, modSnap := range s.Modules {
		ret[k] = modsdir.Record{
			Key:        k,
			Dir:        modSnap.Dir,
			SourceAddr: modSnap.SourceAddr,
			Version:    modSnap.Version,
		}
	}

	return ret
}

// makeModuleWalkerSnapshot creates a configs.ModuleWalker that will exhibit
// the same lookup behaviors as l.moduleWalkerLoad but will additionally write
// source files from the referenced modules into the given snapshot.
func (l *Loader) makeModuleWalkerSnapshot(snap *Snapshot) configs.ModuleWalker {
	return configs.ModuleWalkerFunc(
		func(req *configs.ModuleRequest) (*configs.Module, *version.Version, hcl.Diagnostics) {
			mod, v, diags := l.moduleWalkerLoad(req)
			if diags.HasErrors() {
				// Don't snapshot a module we failed to load.
				return mod, v, diags
			}

			key := l.modules.manifest.ModuleKey(req.Path)
			record, exists := l.modules.manifest[key]

			if !exists {
				// Should never happen, since otherwise moduleWalkerLoader would've
				// returned an error and we would've returned already.
				panic(fmt.Sprintf("module %s is not present in manifest", key))
			}

			addDiags := l.addModuleToSnapshot(snap, key, record.Dir, record.SourceAddr, record.Version)
			diags = append(diags, addDiags...)

			return mod, v, diags
		},
	)
}

// addModuleToSnapshot records the source files of the module in the given
// directory under the given key in the snapshot, taking file contents from
// the parser's source-code cache (which is expected to already contain
// every file of a successfully loaded module).
func (l *Loader) addModuleToSnapshot(snap *Snapshot, key string, dir string, sourceAddr string, v *version.Version) hcl.Diagnostics {
	var diags hcl.Diagnostics

	primaryFiles, overrideFiles, moreDiags := l.parser.ConfigDirFiles(dir)
	if moreDiags.HasErrors() {
		// Any diagnostics we get here should be already present
		// in diags, so it's weird if we get here but we'll allow it
		// and return a general error message in that case.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Failed to read directory for module",
			Detail:   fmt.Sprintf("The source directory %s could not be read", dir),
		})
		return diags
	}

	snapMod := &SnapshotModule{
		Dir:        dir,
		Files:      map[string][]byte{},
		SourceAddr: sourceAddr,
		Version:    v,
	}

	files := make([]string, 0, len(primaryFiles)+len(overrideFiles))
	files = append(files, primaryFiles...)
	files = append(files, overrideFiles...)
	sources := l.Sources() // should be populated with all the files we need by now
	for _, filePath := range files {
		filename := filepath.Base(filePath)
		src, exists := sources[filePath]
		if !exists {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Missing source file for snapshot",
				Detail:   fmt.Sprintf("The source code for file %s could not be found to produce a configuration snapshot.", filePath),
			})
			continue
		}
		// Files are keyed by base name only, since they all live directly
		// in the module directory.
		snapMod.Files[filepath.Clean(filename)] = src
	}

	snap.Modules[key] = snapMod

	return diags
}
201
// snapshotFS is an implementation of afero.Fs that reads from a snapshot.
//
// This is not intended as a general-purpose filesystem implementation. Instead,
// it just supports the minimal functionality required to support the
// configuration loader and parser as an implementation detail of creating
// a loader from a snapshot. All mutating operations return errors.
type snapshotFS struct {
	snap *Snapshot
}

var _ afero.Fs = snapshotFS{}

// Create always fails: the snapshot filesystem is read-only.
func (fs snapshotFS) Create(name string) (afero.File, error) {
	return nil, fmt.Errorf("cannot create file inside configuration snapshot")
}

// Mkdir always fails: the snapshot filesystem is read-only.
func (fs snapshotFS) Mkdir(name string, perm os.FileMode) error {
	return fmt.Errorf("cannot create directory inside configuration snapshot")
}

// MkdirAll always fails: the snapshot filesystem is read-only.
func (fs snapshotFS) MkdirAll(name string, perm os.FileMode) error {
	return fmt.Errorf("cannot create directories inside configuration snapshot")
}

// Open returns a read-only handle for the named configuration file, or a
// directory handle when name is exactly a module directory path. Any other
// path reports os.ErrNotExist.
func (fs snapshotFS) Open(name string) (afero.File, error) {

	// Our "filesystem" is sparsely populated only with the directories
	// mentioned by modules in our snapshot, so the high-level process
	// for opening a file is:
	// - Find the module snapshot corresponding to the containing directory
	// - Find the file within that snapshot
	// - Wrap the resulting byte slice in a snapshotFile to return
	//
	// The other possibility handled here is if the given name is for the
	// module directory itself, in which case we'll return a snapshotDir
	// instead.
	//
	// This function doesn't try to be incredibly robust in supporting
	// different permutations of paths, etc because in practice we only
	// need to support the path forms that our own loader and parser will
	// generate.

	dir := filepath.Dir(name)
	fn := filepath.Base(name)
	directDir := filepath.Clean(name)

	// First we'll check to see if this is an exact path for a module directory.
	// We need to do this first (rather than as part of the next loop below)
	// because a module in a child directory of another module can otherwise
	// appear to be a file in that parent directory.
	for _, candidate := range fs.snap.Modules {
		modDir := filepath.Clean(candidate.Dir)
		if modDir == directDir {
			// We've matched the module directory itself, so list its
			// filenames in sorted (deterministic) order.
			filenames := make([]string, 0, len(candidate.Files))
			for n := range candidate.Files {
				filenames = append(filenames, n)
			}
			sort.Strings(filenames)
			return snapshotDir{
				filenames: filenames,
			}, nil
		}
	}

	// If we get here then the given path isn't a module directory exactly, so
	// we'll treat it as a file path and try to find a module directory it
	// could be located in.
	var modSnap *SnapshotModule
	for _, candidate := range fs.snap.Modules {
		modDir := filepath.Clean(candidate.Dir)
		if modDir == dir {
			modSnap = candidate
			break
		}
	}
	if modSnap == nil {
		return nil, os.ErrNotExist
	}

	src, exists := modSnap.Files[fn]
	if !exists {
		return nil, os.ErrNotExist
	}

	return &snapshotFile{
		src: src,
	}, nil
}

// OpenFile ignores flag and perm entirely and behaves like Open, since the
// snapshot filesystem is read-only.
func (fs snapshotFS) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
	return fs.Open(name)
}

// Remove always fails: the snapshot filesystem is read-only.
func (fs snapshotFS) Remove(name string) error {
	return fmt.Errorf("cannot remove file inside configuration snapshot")
}

// RemoveAll always fails: the snapshot filesystem is read-only.
func (fs snapshotFS) RemoveAll(path string) error {
	return fmt.Errorf("cannot remove files inside configuration snapshot")
}

// Rename always fails: the snapshot filesystem is read-only.
func (fs snapshotFS) Rename(old, new string) error {
	return fmt.Errorf("cannot rename file inside configuration snapshot")
}

// Stat reports minimal file info (name and directory-ness only) by opening
// the path and checking which handle type came back. The opened handle is
// not explicitly closed, which is harmless here because Close on snapshot
// handles is a no-op (see snapshotFileStub.Close).
func (fs snapshotFS) Stat(name string) (os.FileInfo, error) {
	f, err := fs.Open(name)
	if err != nil {
		return nil, err
	}
	_, isDir := f.(snapshotDir)
	return snapshotFileInfo{
		name:  filepath.Base(name),
		isDir: isDir,
	}, nil
}

// Name identifies this afero.Fs implementation.
func (fs snapshotFS) Name() string {
	return "ConfigSnapshotFS"
}

// Chmod always fails: the snapshot filesystem is read-only.
func (fs snapshotFS) Chmod(name string, mode os.FileMode) error {
	return fmt.Errorf("cannot set file mode inside configuration snapshot")
}

// Chtimes always fails: the snapshot filesystem is read-only.
func (fs snapshotFS) Chtimes(name string, atime, mtime time.Time) error {
	return fmt.Errorf("cannot set file times inside configuration snapshot")
}
331
// snapshotFile is an afero.File that reads from a byte slice held in a
// snapshot. Only the read/seek methods are implemented here; all other
// afero.File methods come from snapshotFileStub and fail or no-op.
type snapshotFile struct {
	snapshotFileStub
	src []byte // raw file contents from the snapshot
	at  int64  // current read offset into src
}

var _ afero.File = (*snapshotFile)(nil)
339
340func (f *snapshotFile) Read(p []byte) (n int, err error) {
341 if len(p) > 0 && f.at == int64(len(f.src)) {
342 return 0, io.EOF
343 }
344 if f.at > int64(len(f.src)) {
345 return 0, io.ErrUnexpectedEOF
346 }
347 if int64(len(f.src))-f.at >= int64(len(p)) {
348 n = len(p)
349 } else {
350 n = int(int64(len(f.src)) - f.at)
351 }
352 copy(p, f.src[f.at:f.at+int64(n)])
353 f.at += int64(n)
354 return
355}
356
357func (f *snapshotFile) ReadAt(p []byte, off int64) (n int, err error) {
358 f.at = off
359 return f.Read(p)
360}
361
362func (f *snapshotFile) Seek(offset int64, whence int) (int64, error) {
363 switch whence {
364 case 0:
365 f.at = offset
366 case 1:
367 f.at += offset
368 case 2:
369 f.at = int64(len(f.src)) + offset
370 }
371 return f.at, nil
372}
373
// snapshotDir is an afero.File representing a module directory in a
// snapshot. Only the directory-listing methods are implemented here; all
// other afero.File methods come from snapshotFileStub.
type snapshotDir struct {
	snapshotFileStub
	filenames []string // sorted configuration filenames in this directory
	at        int      // iteration position for Readdirnames
}

var _ afero.File = snapshotDir{}
381
382func (f snapshotDir) Readdir(count int) ([]os.FileInfo, error) {
383 names, err := f.Readdirnames(count)
384 if err != nil {
385 return nil, err
386 }
387 ret := make([]os.FileInfo, len(names))
388 for i, name := range names {
389 ret[i] = snapshotFileInfo{
390 name: name,
391 isDir: false,
392 }
393 }
394 return ret, nil
395}
396
// Readdirnames returns up to count names from the directory listing, or
// all remaining names when count <= 0. With a positive count and nothing
// left to return, it reports io.EOF.
//
// NOTE(review): this method has a value receiver, so the advance of f.at
// is lost when the snapshotDir is held in an afero.File interface value;
// repeated positive-count calls through the interface would re-return the
// same entries. This appears harmless if directories are always listed in
// a single call — confirm before relying on paginated reads.
func (f snapshotDir) Readdirnames(count int) ([]string, error) {
	var outLen int
	names := f.filenames[f.at:]
	if count > 0 {
		if len(names) < count {
			outLen = len(names)
		} else {
			outLen = count
		}
		if len(names) == 0 {
			return nil, io.EOF
		}
	} else {
		outLen = len(names)
	}
	f.at += outLen

	return names[:outLen], nil
}
416
// snapshotFileInfo is a minimal implementation of os.FileInfo to support our
// virtual filesystem from snapshots.
type snapshotFileInfo struct {
	name  string // base name of the file or directory
	isDir bool   // true when describing a module directory
}

var _ os.FileInfo = snapshotFileInfo{}

// Name returns the base name this info was constructed with.
func (fi snapshotFileInfo) Name() string {
	return fi.name
}

// Size returns a sentinel value because the real size is not tracked.
func (fi snapshotFileInfo) Size() int64 {
	// In practice, our parser and loader never call Size
	return -1
}

// Mode reports a fully-permissive mode, since permissions are meaningless
// for an in-memory read-only snapshot.
func (fi snapshotFileInfo) Mode() os.FileMode {
	return os.ModePerm
}

// ModTime returns the current time on every call; snapshots carry no
// timestamps, so this value is not stable across calls.
func (fi snapshotFileInfo) ModTime() time.Time {
	return time.Now()
}

// IsDir reports whether this info describes a module directory.
func (fi snapshotFileInfo) IsDir() bool {
	return fi.isDir
}

// Sys returns nil; there is no underlying OS data source.
func (fi snapshotFileInfo) Sys() interface{} {
	return nil
}

// snapshotFileStub provides failing or no-op implementations of the full
// afero.File method set, for embedding in snapshotFile and snapshotDir so
// they only need to implement the methods that matter to them.
type snapshotFileStub struct{}

// Close is a deliberate no-op: snapshot handles hold no resources.
func (f snapshotFileStub) Close() error {
	return nil
}

// Read fails unless overridden by the embedding type.
func (f snapshotFileStub) Read(p []byte) (n int, err error) {
	return 0, fmt.Errorf("cannot read")
}

// ReadAt fails unless overridden by the embedding type.
func (f snapshotFileStub) ReadAt(p []byte, off int64) (n int, err error) {
	return 0, fmt.Errorf("cannot read")
}

// Seek fails unless overridden by the embedding type.
func (f snapshotFileStub) Seek(offset int64, whence int) (int64, error) {
	return 0, fmt.Errorf("cannot seek")
}

// Write delegates to WriteAt, which always fails: snapshots are read-only.
func (f snapshotFileStub) Write(p []byte) (n int, err error) {
	return f.WriteAt(p, 0)
}

// WriteAt always fails: snapshots are read-only.
func (f snapshotFileStub) WriteAt(p []byte, off int64) (n int, err error) {
	return 0, fmt.Errorf("cannot write to file in snapshot")
}

// WriteString always fails: snapshots are read-only.
func (f snapshotFileStub) WriteString(s string) (n int, err error) {
	return 0, fmt.Errorf("cannot write to file in snapshot")
}

// Name returns a placeholder; real names are tracked by snapshotFileInfo.
func (f snapshotFileStub) Name() string {
	// in practice, the loader and parser never use this
	return "<unimplemented>"
}

// Readdir fails: plain files are not directories.
func (f snapshotFileStub) Readdir(count int) ([]os.FileInfo, error) {
	return nil, fmt.Errorf("cannot use Readdir on a file")
}

// Readdirnames fails: plain files are not directories.
func (f snapshotFileStub) Readdirnames(count int) ([]string, error) {
	return nil, fmt.Errorf("cannot use Readdir on a file")
}

// Stat fails; callers should use snapshotFS.Stat instead.
func (f snapshotFileStub) Stat() (os.FileInfo, error) {
	return nil, fmt.Errorf("cannot stat")
}

// Sync is a no-op: there is nothing to flush.
func (f snapshotFileStub) Sync() error {
	return nil
}

// Truncate always fails: snapshots are read-only.
func (f snapshotFileStub) Truncate(size int64) error {
	return fmt.Errorf("cannot write to file in snapshot")
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go
new file mode 100644
index 0000000..3c410ee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/module_mgr.go
@@ -0,0 +1,76 @@
1package configload
2
3import (
4 "os"
5 "path/filepath"
6
7 "github.com/hashicorp/terraform/internal/modsdir"
8 "github.com/hashicorp/terraform/registry"
9 "github.com/hashicorp/terraform/svchost/disco"
10 "github.com/spf13/afero"
11)
12
// moduleMgr tracks the module installation directory and the manifest of
// installed modules on behalf of a Loader.
type moduleMgr struct {
	// FS is the filesystem (real or snapshot-backed) that manifest reads
	// and writes go through.
	FS afero.Afero

	// CanInstall is true for a module manager that can support installation.
	//
	// This must be set only if FS is an afero.OsFs, because the installer
	// (which uses go-getter) is not aware of the virtual filesystem
	// abstraction and will always write into the "real" filesystem.
	CanInstall bool

	// Dir is the path where descendent modules are (or will be) installed.
	Dir string

	// Services is a service discovery client that will be used to find
	// remote module registry endpoints. This object may be pre-loaded with
	// cached discovery information.
	Services *disco.Disco

	// Registry is a client for the module registry protocol, which is used
	// when a module is requested from a registry source.
	Registry *registry.Client

	// manifest tracks the currently-installed modules for this manager.
	//
	// The loader may read this. Only the installer may write to it, and
	// after a set of updates are completed the installer must call
	// writeModuleManifestSnapshot to persist a snapshot of the manifest
	// to disk for use on subsequent runs.
	manifest modsdir.Manifest
}

// manifestSnapshotPath returns the location of the manifest snapshot file
// within the module installation directory.
func (m *moduleMgr) manifestSnapshotPath() string {
	return filepath.Join(m.Dir, modsdir.ManifestSnapshotFilename)
}
47
48// readModuleManifestSnapshot loads a manifest snapshot from the filesystem.
49func (m *moduleMgr) readModuleManifestSnapshot() error {
50 r, err := m.FS.Open(m.manifestSnapshotPath())
51 if err != nil {
52 if os.IsNotExist(err) {
53 // We'll treat a missing file as an empty manifest
54 m.manifest = make(modsdir.Manifest)
55 return nil
56 }
57 return err
58 }
59
60 m.manifest, err = modsdir.ReadManifestSnapshot(r)
61 return err
62}
63
64// writeModuleManifestSnapshot writes a snapshot of the current manifest
65// to the filesystem.
66//
67// The caller must guarantee no concurrent modifications of the manifest for
68// the duration of a call to this function, or the behavior is undefined.
69func (m *moduleMgr) writeModuleManifestSnapshot() error {
70 w, err := m.FS.Create(m.manifestSnapshotPath())
71 if err != nil {
72 return err
73 }
74
75 return m.manifest.WriteSnapshot(w)
76}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go
new file mode 100644
index 0000000..594cf64
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/source_addr.go
@@ -0,0 +1,45 @@
1package configload
2
3import (
4 "strings"
5
6 "github.com/hashicorp/go-getter"
7
8 "github.com/hashicorp/terraform/registry/regsrc"
9)
10
// localSourcePrefixes are the path prefixes that mark a module source
// address as a directory on local disk, covering both Unix-style and
// Windows-style relative-path syntax.
var localSourcePrefixes = []string{
	"./",
	"../",
	".\\",
	"..\\",
}

// isLocalSourceAddr reports whether the given source address refers to a
// local filesystem path rather than a remote or registry source.
func isLocalSourceAddr(addr string) bool {
	for i := range localSourcePrefixes {
		if strings.HasPrefix(addr, localSourcePrefixes[i]) {
			return true
		}
	}
	return false
}
26
// isRegistrySourceAddr reports whether the given address parses as a
// Terraform module registry source address.
func isRegistrySourceAddr(addr string) bool {
	_, err := regsrc.ParseModuleSource(addr)
	return err == nil
}

// splitAddrSubdir splits the given address (which is assumed to be a
// registry address or go-getter-style address) into a package portion
// and a sub-directory portion.
//
// The package portion defines what should be downloaded and then the
// sub-directory portion, if present, specifies a sub-directory within
// the downloaded object (an archive, VCS repository, etc) that contains
// the module's configuration files.
//
// The subDir portion will be returned as empty if no subdir separator
// ("//") is present in the address.
func splitAddrSubdir(addr string) (packageAddr, subDir string) {
	// Delegate to go-getter, which defines the "//" subdir convention.
	return getter.SourceDirSubdir(addr)
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configload/testing.go b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go
new file mode 100644
index 0000000..86ca9d1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configload/testing.go
@@ -0,0 +1,43 @@
1package configload
2
3import (
4 "io/ioutil"
5 "os"
6 "testing"
7)
8
// NewLoaderForTests is a variant of NewLoader that is intended to be more
// convenient for unit tests.
//
// The loader's modules directory is a separate temporary directory created
// for each call. Along with the created loader, this function returns a
// cleanup function that should be called before the test completes in order
// to remove that temporary directory.
//
// In the case of any errors, t.Fatal (or similar) will be called to halt
// execution of the test, so the calling test does not need to handle errors
// itself.
func NewLoaderForTests(t *testing.T) (*Loader, func()) {
	t.Helper()

	// NOTE(review): ioutil.TempDir is deprecated in modern Go in favor of
	// os.MkdirTemp / t.TempDir; kept for compatibility with the Go version
	// this vendored code targets.
	modulesDir, err := ioutil.TempDir("", "tf-configs")
	if err != nil {
		t.Fatalf("failed to create temporary modules dir: %s", err)
		// Unreachable with a real *testing.T (Fatalf stops the test), but
		// returned defensively so callers always get a callable cleanup.
		return nil, func() {}
	}

	cleanup := func() {
		os.RemoveAll(modulesDir)
	}

	// Services is left nil, which disables registry sources; tests needing
	// a registry must construct their own loader via NewLoader.
	loader, err := NewLoader(&Config{
		ModulesDir: modulesDir,
	})
	if err != nil {
		cleanup()
		t.Fatalf("failed to create config loader: %s", err)
		return nil, func() {}
	}

	return loader, cleanup
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
new file mode 100644
index 0000000..e59f58d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
@@ -0,0 +1,274 @@
1package configschema
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
8)
9
10// CoerceValue attempts to force the given value to conform to the type
11// implied by the receiever, while also applying the same validation and
12// transformation rules that would be applied by the decoder specification
13// returned by method DecoderSpec.
14//
15// This is useful in situations where a configuration must be derived from
16// an already-decoded value. It is always better to decode directly from
17// configuration where possible since then source location information is
18// still available to produce diagnostics, but in special situations this
19// function allows a compatible result to be obtained even if the
20// configuration objects are not available.
21//
22// If the given value cannot be converted to conform to the receiving schema
23// then an error is returned describing one of possibly many problems. This
24// error may be a cty.PathError indicating a position within the nested
25// data structure where the problem applies.
26func (b *Block) CoerceValue(in cty.Value) (cty.Value, error) {
27 var path cty.Path
28 return b.coerceValue(in, path)
29}
30
31func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
32 switch {
33 case in.IsNull():
34 return cty.NullVal(b.ImpliedType()), nil
35 case !in.IsKnown():
36 return cty.UnknownVal(b.ImpliedType()), nil
37 }
38
39 ty := in.Type()
40 if !ty.IsObjectType() {
41 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("an object is required")
42 }
43
44 for name := range ty.AttributeTypes() {
45 if _, defined := b.Attributes[name]; defined {
46 continue
47 }
48 if _, defined := b.BlockTypes[name]; defined {
49 continue
50 }
51 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("unexpected attribute %q", name)
52 }
53
54 attrs := make(map[string]cty.Value)
55
56 for name, attrS := range b.Attributes {
57 var val cty.Value
58 switch {
59 case ty.HasAttribute(name):
60 val = in.GetAttr(name)
61 case attrS.Computed || attrS.Optional:
62 val = cty.NullVal(attrS.Type)
63 default:
64 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", name)
65 }
66
67 val, err := attrS.coerceValue(val, append(path, cty.GetAttrStep{Name: name}))
68 if err != nil {
69 return cty.UnknownVal(b.ImpliedType()), err
70 }
71
72 attrs[name] = val
73 }
74 for typeName, blockS := range b.BlockTypes {
75 switch blockS.Nesting {
76
77 case NestingSingle, NestingGroup:
78 switch {
79 case ty.HasAttribute(typeName):
80 var err error
81 val := in.GetAttr(typeName)
82 attrs[typeName], err = blockS.coerceValue(val, append(path, cty.GetAttrStep{Name: typeName}))
83 if err != nil {
84 return cty.UnknownVal(b.ImpliedType()), err
85 }
86 case blockS.MinItems != 1 && blockS.MaxItems != 1:
87 if blockS.Nesting == NestingGroup {
88 attrs[typeName] = blockS.EmptyValue()
89 } else {
90 attrs[typeName] = cty.NullVal(blockS.ImpliedType())
91 }
92 default:
93 // We use the word "attribute" here because we're talking about
94 // the cty sense of that word rather than the HCL sense.
95 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName)
96 }
97
98 case NestingList:
99 switch {
100 case ty.HasAttribute(typeName):
101 coll := in.GetAttr(typeName)
102
103 switch {
104 case coll.IsNull():
105 attrs[typeName] = cty.NullVal(cty.List(blockS.ImpliedType()))
106 continue
107 case !coll.IsKnown():
108 attrs[typeName] = cty.UnknownVal(cty.List(blockS.ImpliedType()))
109 continue
110 }
111
112 if !coll.CanIterateElements() {
113 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list")
114 }
115 l := coll.LengthInt()
116 if l < blockS.MinItems {
117 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
118 }
119 if l > blockS.MaxItems && blockS.MaxItems > 0 {
120 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems)
121 }
122 if l == 0 {
123 attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType())
124 continue
125 }
126 elems := make([]cty.Value, 0, l)
127 {
128 path = append(path, cty.GetAttrStep{Name: typeName})
129 for it := coll.ElementIterator(); it.Next(); {
130 var err error
131 idx, val := it.Element()
132 val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx}))
133 if err != nil {
134 return cty.UnknownVal(b.ImpliedType()), err
135 }
136 elems = append(elems, val)
137 }
138 }
139 attrs[typeName] = cty.ListVal(elems)
140 case blockS.MinItems == 0:
141 attrs[typeName] = cty.ListValEmpty(blockS.ImpliedType())
142 default:
143 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName)
144 }
145
146 case NestingSet:
147 switch {
148 case ty.HasAttribute(typeName):
149 coll := in.GetAttr(typeName)
150
151 switch {
152 case coll.IsNull():
153 attrs[typeName] = cty.NullVal(cty.Set(blockS.ImpliedType()))
154 continue
155 case !coll.IsKnown():
156 attrs[typeName] = cty.UnknownVal(cty.Set(blockS.ImpliedType()))
157 continue
158 }
159
160 if !coll.CanIterateElements() {
161 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set")
162 }
163 l := coll.LengthInt()
164 if l < blockS.MinItems {
165 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
166 }
167 if l > blockS.MaxItems && blockS.MaxItems > 0 {
168 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("too many items for attribute %q; cannot have more than %d", typeName, blockS.MaxItems)
169 }
170 if l == 0 {
171 attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType())
172 continue
173 }
174 elems := make([]cty.Value, 0, l)
175 {
176 path = append(path, cty.GetAttrStep{Name: typeName})
177 for it := coll.ElementIterator(); it.Next(); {
178 var err error
179 idx, val := it.Element()
180 val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: idx}))
181 if err != nil {
182 return cty.UnknownVal(b.ImpliedType()), err
183 }
184 elems = append(elems, val)
185 }
186 }
187 attrs[typeName] = cty.SetVal(elems)
188 case blockS.MinItems == 0:
189 attrs[typeName] = cty.SetValEmpty(blockS.ImpliedType())
190 default:
191 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("attribute %q is required", typeName)
192 }
193
194 case NestingMap:
195 switch {
196 case ty.HasAttribute(typeName):
197 coll := in.GetAttr(typeName)
198
199 switch {
200 case coll.IsNull():
201 attrs[typeName] = cty.NullVal(cty.Map(blockS.ImpliedType()))
202 continue
203 case !coll.IsKnown():
204 attrs[typeName] = cty.UnknownVal(cty.Map(blockS.ImpliedType()))
205 continue
206 }
207
208 if !coll.CanIterateElements() {
209 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map")
210 }
211 l := coll.LengthInt()
212 if l == 0 {
213 attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType())
214 continue
215 }
216 elems := make(map[string]cty.Value)
217 {
218 path = append(path, cty.GetAttrStep{Name: typeName})
219 for it := coll.ElementIterator(); it.Next(); {
220 var err error
221 key, val := it.Element()
222 if key.Type() != cty.String || key.IsNull() || !key.IsKnown() {
223 return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a map")
224 }
225 val, err = blockS.coerceValue(val, append(path, cty.IndexStep{Key: key}))
226 if err != nil {
227 return cty.UnknownVal(b.ImpliedType()), err
228 }
229 elems[key.AsString()] = val
230 }
231 }
232
233 // If the attribute values here contain any DynamicPseudoTypes,
234 // the concrete type must be an object.
235 useObject := false
236 switch {
237 case coll.Type().IsObjectType():
238 useObject = true
239 default:
240 // It's possible that we were given a map, and need to coerce it to an object
241 ety := coll.Type().ElementType()
242 for _, v := range elems {
243 if !v.Type().Equals(ety) {
244 useObject = true
245 break
246 }
247 }
248 }
249
250 if useObject {
251 attrs[typeName] = cty.ObjectVal(elems)
252 } else {
253 attrs[typeName] = cty.MapVal(elems)
254 }
255 default:
256 attrs[typeName] = cty.MapValEmpty(blockS.ImpliedType())
257 }
258
259 default:
260 // should never happen because above is exhaustive
261 panic(fmt.Errorf("unsupported nesting mode %#v", blockS.Nesting))
262 }
263 }
264
265 return cty.ObjectVal(attrs), nil
266}
267
268func (a *Attribute) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
269 val, err := convert.Convert(in, a.Type)
270 if err != nil {
271 return cty.UnknownVal(a.Type), path.NewError(err)
272 }
273 return val, nil
274}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go
new file mode 100644
index 0000000..d8f41ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go
@@ -0,0 +1,117 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5)
6
7var mapLabelNames = []string{"key"}
8
9// DecoderSpec returns a hcldec.Spec that can be used to decode a HCL Body
10// using the facilities in the hcldec package.
11//
12// The returned specification is guaranteed to return a value of the same type
13// returned by method ImpliedType, but it may contain null values if any of the
14// block attributes are defined as optional and/or computed respectively.
15func (b *Block) DecoderSpec() hcldec.Spec {
16 ret := hcldec.ObjectSpec{}
17 if b == nil {
18 return ret
19 }
20
21 for name, attrS := range b.Attributes {
22 ret[name] = attrS.decoderSpec(name)
23 }
24
25 for name, blockS := range b.BlockTypes {
26 if _, exists := ret[name]; exists {
27 // This indicates an invalid schema, since it's not valid to
28 // define both an attribute and a block type of the same name.
29 // However, we don't raise this here since it's checked by
30 // InternalValidate.
31 continue
32 }
33
34 childSpec := blockS.Block.DecoderSpec()
35
36 switch blockS.Nesting {
37 case NestingSingle, NestingGroup:
38 ret[name] = &hcldec.BlockSpec{
39 TypeName: name,
40 Nested: childSpec,
41 Required: blockS.MinItems == 1 && blockS.MaxItems >= 1,
42 }
43 if blockS.Nesting == NestingGroup {
44 ret[name] = &hcldec.DefaultSpec{
45 Primary: ret[name],
46 Default: &hcldec.LiteralSpec{
47 Value: blockS.EmptyValue(),
48 },
49 }
50 }
51 case NestingList:
52 // We prefer to use a list where possible, since it makes our
53 // implied type more complete, but if there are any
54 // dynamically-typed attributes inside we must use a tuple
55 // instead, at the expense of our type then not being predictable.
56 if blockS.Block.ImpliedType().HasDynamicTypes() {
57 ret[name] = &hcldec.BlockTupleSpec{
58 TypeName: name,
59 Nested: childSpec,
60 MinItems: blockS.MinItems,
61 MaxItems: blockS.MaxItems,
62 }
63 } else {
64 ret[name] = &hcldec.BlockListSpec{
65 TypeName: name,
66 Nested: childSpec,
67 MinItems: blockS.MinItems,
68 MaxItems: blockS.MaxItems,
69 }
70 }
71 case NestingSet:
72 // We forbid dynamically-typed attributes inside NestingSet in
73 // InternalValidate, so we don't do anything special to handle
74 // that here. (There is no set analog to tuple and object types,
75 // because cty's set implementation depends on knowing the static
76 // type in order to properly compute its internal hashes.)
77 ret[name] = &hcldec.BlockSetSpec{
78 TypeName: name,
79 Nested: childSpec,
80 MinItems: blockS.MinItems,
81 MaxItems: blockS.MaxItems,
82 }
83 case NestingMap:
84 // We prefer to use a list where possible, since it makes our
85 // implied type more complete, but if there are any
86 // dynamically-typed attributes inside we must use a tuple
87 // instead, at the expense of our type then not being predictable.
88 if blockS.Block.ImpliedType().HasDynamicTypes() {
89 ret[name] = &hcldec.BlockObjectSpec{
90 TypeName: name,
91 Nested: childSpec,
92 LabelNames: mapLabelNames,
93 }
94 } else {
95 ret[name] = &hcldec.BlockMapSpec{
96 TypeName: name,
97 Nested: childSpec,
98 LabelNames: mapLabelNames,
99 }
100 }
101 default:
102 // Invalid nesting type is just ignored. It's checked by
103 // InternalValidate.
104 continue
105 }
106 }
107
108 return ret
109}
110
111func (a *Attribute) decoderSpec(name string) hcldec.Spec {
112 return &hcldec.AttrSpec{
113 Name: name,
114 Type: a.Type,
115 Required: a.Required,
116 }
117}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/doc.go b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
index caf8d73..caf8d73 100644
--- a/vendor/github.com/hashicorp/terraform/config/configschema/doc.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/doc.go
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
new file mode 100644
index 0000000..005da56
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/empty_value.go
@@ -0,0 +1,59 @@
1package configschema
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
7// EmptyValue returns the "empty value" for the recieving block, which for
8// a block type is a non-null object where all of the attribute values are
9// the empty values of the block's attributes and nested block types.
10//
11// In other words, it returns the value that would be returned if an empty
12// block were decoded against the recieving schema, assuming that no required
13// attribute or block constraints were honored.
14func (b *Block) EmptyValue() cty.Value {
15 vals := make(map[string]cty.Value)
16 for name, attrS := range b.Attributes {
17 vals[name] = attrS.EmptyValue()
18 }
19 for name, blockS := range b.BlockTypes {
20 vals[name] = blockS.EmptyValue()
21 }
22 return cty.ObjectVal(vals)
23}
24
25// EmptyValue returns the "empty value" for the receiving attribute, which is
26// the value that would be returned if there were no definition of the attribute
27// at all, ignoring any required constraint.
28func (a *Attribute) EmptyValue() cty.Value {
29 return cty.NullVal(a.Type)
30}
31
32// EmptyValue returns the "empty value" for when there are zero nested blocks
33// present of the receiving type.
34func (b *NestedBlock) EmptyValue() cty.Value {
35 switch b.Nesting {
36 case NestingSingle:
37 return cty.NullVal(b.Block.ImpliedType())
38 case NestingGroup:
39 return b.Block.EmptyValue()
40 case NestingList:
41 if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() {
42 return cty.EmptyTupleVal
43 } else {
44 return cty.ListValEmpty(ty)
45 }
46 case NestingMap:
47 if ty := b.Block.ImpliedType(); ty.HasDynamicTypes() {
48 return cty.EmptyObjectVal
49 } else {
50 return cty.MapValEmpty(ty)
51 }
52 case NestingSet:
53 return cty.SetValEmpty(b.Block.ImpliedType())
54 default:
55 // Should never get here because the above is intended to be exhaustive,
56 // but we'll be robust and return a result nonetheless.
57 return cty.NullVal(cty.DynamicPseudoType)
58 }
59}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go
new file mode 100644
index 0000000..c0ee841
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/implied_type.go
@@ -0,0 +1,42 @@
1package configschema
2
3import (
4 "github.com/hashicorp/hcl2/hcldec"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// ImpliedType returns the cty.Type that would result from decoding a
9// configuration block using the receiving block schema.
10//
11// ImpliedType always returns a result, even if the given schema is
12// inconsistent. Code that creates configschema.Block objects should be
13// tested using the InternalValidate method to detect any inconsistencies
14// that would cause this method to fall back on defaults and assumptions.
15func (b *Block) ImpliedType() cty.Type {
16 if b == nil {
17 return cty.EmptyObject
18 }
19
20 return hcldec.ImpliedType(b.DecoderSpec())
21}
22
23// ContainsSensitive returns true if any of the attributes of the receiving
24// block or any of its descendent blocks are marked as sensitive.
25//
26// Blocks themselves cannot be sensitive as a whole -- sensitivity is a
27// per-attribute idea -- but sometimes we want to include a whole object
28// decoded from a block in some UI output, and that is safe to do only if
29// none of the contained attributes are sensitive.
30func (b *Block) ContainsSensitive() bool {
31 for _, attrS := range b.Attributes {
32 if attrS.Sensitive {
33 return true
34 }
35 }
36 for _, blockS := range b.BlockTypes {
37 if blockS.ContainsSensitive() {
38 return true
39 }
40 }
41 return false
42}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go
index 33cbe88..ebf1abb 100644
--- a/vendor/github.com/hashicorp/terraform/config/configschema/internal_validate.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/internal_validate.go
@@ -72,10 +72,23 @@ func (b *Block) internalValidate(prefix string, err error) error {
72 case blockS.MinItems < 0 || blockS.MinItems > 1: 72 case blockS.MinItems < 0 || blockS.MinItems > 1:
73 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name)) 73 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must be set to either 0 or 1 in NestingSingle mode", prefix, name))
74 } 74 }
75 case NestingGroup:
76 if blockS.MinItems != 0 || blockS.MaxItems != 0 {
77 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems cannot be used in NestingGroup mode", prefix, name))
78 }
75 case NestingList, NestingSet: 79 case NestingList, NestingSet:
76 if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 { 80 if blockS.MinItems > blockS.MaxItems && blockS.MaxItems != 0 {
77 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting)) 81 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems must be less than or equal to MaxItems in %s mode", prefix, name, blockS.Nesting))
78 } 82 }
83 if blockS.Nesting == NestingSet {
84 ety := blockS.Block.ImpliedType()
85 if ety.HasDynamicTypes() {
86 // This is not permitted because the HCL (cty) set implementation
87 // needs to know the exact type of set elements in order to
88 // properly hash them, and so can't support mixed types.
89 err = multierror.Append(err, fmt.Errorf("%s%s: NestingSet blocks may not contain attributes of cty.DynamicPseudoType", prefix, name))
90 }
91 }
79 case NestingMap: 92 case NestingMap:
80 if blockS.MinItems != 0 || blockS.MaxItems != 0 { 93 if blockS.MinItems != 0 || blockS.MaxItems != 0 {
81 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name)) 94 err = multierror.Append(err, fmt.Errorf("%s%s: MinItems and MaxItems must both be 0 in NestingMap mode", prefix, name))
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go
new file mode 100644
index 0000000..febe743
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/nestingmode_string.go
@@ -0,0 +1,28 @@
1// Code generated by "stringer -type=NestingMode"; DO NOT EDIT.
2
3package configschema
4
5import "strconv"
6
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[nestingModeInvalid-0]
	_ = x[NestingSingle-1]
	_ = x[NestingGroup-2]
	_ = x[NestingList-3]
	_ = x[NestingSet-4]
	_ = x[NestingMap-5]
}

// _NestingMode_name concatenates every mode name; _NestingMode_index holds
// the byte offsets that delimit each name within it. Both are produced by
// the stringer tool; do not edit by hand.
const _NestingMode_name = "nestingModeInvalidNestingSingleNestingGroupNestingListNestingSetNestingMap"

var _NestingMode_index = [...]uint8{0, 18, 31, 43, 54, 64, 74}

// String returns the Go name of the nesting mode constant, or a placeholder
// of the form "NestingMode(N)" for out-of-range values.
func (i NestingMode) String() string {
	if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
		return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go
new file mode 100644
index 0000000..0be3b8f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/none_required.go
@@ -0,0 +1,38 @@
1package configschema
2
3// NoneRequired returns a deep copy of the receiver with any required
4// attributes translated to optional.
5func (b *Block) NoneRequired() *Block {
6 ret := &Block{}
7
8 if b.Attributes != nil {
9 ret.Attributes = make(map[string]*Attribute, len(b.Attributes))
10 }
11 for name, attrS := range b.Attributes {
12 ret.Attributes[name] = attrS.forceOptional()
13 }
14
15 if b.BlockTypes != nil {
16 ret.BlockTypes = make(map[string]*NestedBlock, len(b.BlockTypes))
17 }
18 for name, blockS := range b.BlockTypes {
19 ret.BlockTypes[name] = blockS.noneRequired()
20 }
21
22 return ret
23}
24
25func (b *NestedBlock) noneRequired() *NestedBlock {
26 ret := *b
27 ret.Block = *(ret.Block.NoneRequired())
28 ret.MinItems = 0
29 ret.MaxItems = 0
30 return &ret
31}
32
33func (a *Attribute) forceOptional() *Attribute {
34 ret := *a
35 ret.Optional = true
36 ret.Required = false
37 return &ret
38}
diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
index 9a8ee55..5a67334 100644
--- a/vendor/github.com/hashicorp/terraform/config/configschema/schema.go
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/schema.go
@@ -28,6 +28,12 @@ type Attribute struct {
28 // Type is a type specification that the attribute's value must conform to. 28 // Type is a type specification that the attribute's value must conform to.
29 Type cty.Type 29 Type cty.Type
30 30
31 // Description is an English-language description of the purpose and
32 // usage of the attribute. A description should be concise and use only
33 // one or two sentences, leaving full definition to longer-form
34 // documentation defined elsewhere.
35 Description string
36
31 // Required, if set to true, specifies that an omitted or null value is 37 // Required, if set to true, specifies that an omitted or null value is
32 // not permitted. 38 // not permitted.
33 Required bool 39 Required bool
@@ -87,6 +93,23 @@ const (
87 // provided directly as an object value. 93 // provided directly as an object value.
88 NestingSingle 94 NestingSingle
89 95
96 // NestingGroup is similar to NestingSingle in that it calls for only a
97 // single instance of a given block type with no labels, but it additionally
98 // guarantees that its result will never be null, even if the block is
99 // absent, and instead the nested attributes and blocks will be treated
100 // as absent in that case. (Any required attributes or blocks within the
101 // nested block are not enforced unless the block is explicitly present
102 // in the configuration, so they are all effectively optional when the
103 // block is not present.)
104 //
105 // This is useful for the situation where a remote API has a feature that
106 // is always enabled but has a group of settings related to that feature
107 // that themselves have default values. By using NestingGroup instead of
108 // NestingSingle in that case, generated plans will show the block as
109 // present even when not present in configuration, thus allowing any
110 // default values within to be displayed to the user.
111 NestingGroup
112
90 // NestingList indicates that multiple blocks of the given type are 113 // NestingList indicates that multiple blocks of the given type are
91 // permitted, with no labels, and that their corresponding objects should 114 // permitted, with no labels, and that their corresponding objects should
92 // be provided in a list. 115 // be provided in a list.
diff --git a/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go
new file mode 100644
index 0000000..a41e930
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/configschema/validate_traversal.go
@@ -0,0 +1,173 @@
1package configschema
2
3import (
4 "fmt"
5 "sort"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9 "github.com/zclconf/go-cty/cty"
10
11 "github.com/hashicorp/terraform/helper/didyoumean"
12 "github.com/hashicorp/terraform/tfdiags"
13)
14
// StaticValidateTraversal checks whether the given traversal (which must be
// relative) refers to a construct in the receiving schema, returning error
// diagnostics if any problems are found.
//
// This method is "optimistic" in that it will not return errors for possible
// problems that cannot be detected statically. It is possible that a
// traversal which passed static validation will still fail when evaluated.
func (b *Block) StaticValidateTraversal(traversal hcl.Traversal) tfdiags.Diagnostics {
	if !traversal.IsRelative() {
		panic("StaticValidateTraversal on absolute traversal")
	}
	// An empty traversal refers to the block's whole object, which is valid.
	if len(traversal) == 0 {
		return nil
	}

	var diags tfdiags.Diagnostics

	// Validate only the first step here; the remainder ("after") is handled
	// recursively once we know what the first step refers to.
	next := traversal[0]
	after := traversal[1:]

	var name string
	switch step := next.(type) {
	case hcl.TraverseAttr:
		name = step.Name
	case hcl.TraverseIndex:
		// No other traversal step types are allowed directly at a block.
		// If it looks like the user was trying to use index syntax to
		// access an attribute then we'll produce a specialized message.
		key := step.Key
		if key.Type() == cty.String && key.IsKnown() && !key.IsNull() {
			maybeName := key.AsString()
			if hclsyntax.ValidIdentifier(maybeName) {
				diags = diags.Append(&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  `Invalid index operation`,
					Detail:   fmt.Sprintf(`Only attribute access is allowed here. Did you mean to access attribute %q using the dot operator?`, maybeName),
					Subject:  &step.SrcRange,
				})
				return diags
			}
		}
		// If it looks like some other kind of index then we'll use a generic error.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Invalid index operation`,
			Detail:   `Only attribute access is allowed here, using the dot operator.`,
			Subject:  &step.SrcRange,
		})
		return diags
	default:
		// No other traversal types should appear in a normal valid traversal,
		// but we'll handle this with a generic error anyway to be robust.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Invalid operation`,
			Detail:   `Only attribute access is allowed here, using the dot operator.`,
			Subject:  next.SourceRange().Ptr(),
		})
		return diags
	}

	if attrS, exists := b.Attributes[name]; exists {
		// For attribute validation we will just apply the rest of the
		// traversal to an unknown value of the attribute type and pass
		// through HCL's own errors, since we don't want to replicate all of
		// HCL's type checking rules here.
		val := cty.UnknownVal(attrS.Type)
		_, hclDiags := after.TraverseRel(val)
		diags = diags.Append(hclDiags)
		return diags
	}

	if blockS, exists := b.BlockTypes[name]; exists {
		// Nested blocks delegate to the nesting-mode-aware validator.
		moreDiags := blockS.staticValidateTraversal(name, after)
		diags = diags.Append(moreDiags)
		return diags
	}

	// If we get here then the name isn't valid at all. We'll collect up
	// all of the names that _are_ valid to use as suggestions.
	var suggestions []string
	for name := range b.Attributes {
		suggestions = append(suggestions, name)
	}
	for name := range b.BlockTypes {
		suggestions = append(suggestions, name)
	}
	// Sorting keeps the suggestion deterministic despite map iteration order.
	sort.Strings(suggestions)
	suggestion := didyoumean.NameSuggestion(name, suggestions)
	if suggestion != "" {
		suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
	}
	diags = diags.Append(&hcl.Diagnostic{
		Severity: hcl.DiagError,
		Summary:  `Unsupported attribute`,
		Detail:   fmt.Sprintf(`This object has no argument, nested block, or exported attribute named %q.%s`, name, suggestion),
		Subject:  next.SourceRange().Ptr(),
	})

	return diags
}
116
117func (b *NestedBlock) staticValidateTraversal(typeName string, traversal hcl.Traversal) tfdiags.Diagnostics {
118 if b.Nesting == NestingSingle || b.Nesting == NestingGroup {
119 // Single blocks are easy: just pass right through.
120 return b.Block.StaticValidateTraversal(traversal)
121 }
122
123 if len(traversal) == 0 {
124 // It's always valid to access a nested block's attribute directly.
125 return nil
126 }
127
128 var diags tfdiags.Diagnostics
129 next := traversal[0]
130 after := traversal[1:]
131
132 switch b.Nesting {
133
134 case NestingSet:
135 // Can't traverse into a set at all, since it does not have any keys
136 // to index with.
137 diags = diags.Append(&hcl.Diagnostic{
138 Severity: hcl.DiagError,
139 Summary: `Cannot index a set value`,
140 Detail: fmt.Sprintf(`Block type %q is represented by a set of objects, and set elements do not have addressable keys. To find elements matching specific criteria, use a "for" expression with an "if" clause.`, typeName),
141 Subject: next.SourceRange().Ptr(),
142 })
143 return diags
144
145 case NestingList:
146 if _, ok := next.(hcl.TraverseIndex); ok {
147 moreDiags := b.Block.StaticValidateTraversal(after)
148 diags = diags.Append(moreDiags)
149 } else {
150 diags = diags.Append(&hcl.Diagnostic{
151 Severity: hcl.DiagError,
152 Summary: `Invalid operation`,
153 Detail: fmt.Sprintf(`Block type %q is represented by a list of objects, so it must be indexed using a numeric key, like .%s[0].`, typeName, typeName),
154 Subject: next.SourceRange().Ptr(),
155 })
156 }
157 return diags
158
159 case NestingMap:
160 // Both attribute and index steps are valid for maps, so we'll just
161 // pass through here and let normal evaluation catch an
162 // incorrectly-typed index key later, if present.
163 moreDiags := b.Block.StaticValidateTraversal(after)
164 diags = diags.Append(moreDiags)
165 return diags
166
167 default:
168 // Invalid nesting type is just ignored. It's checked by
169 // InternalValidate. (Note that we handled NestingSingle separately
170 // back at the start of this function.)
171 return nil
172 }
173}
diff --git a/vendor/github.com/hashicorp/terraform/configs/depends_on.go b/vendor/github.com/hashicorp/terraform/configs/depends_on.go
new file mode 100644
index 0000000..b198476
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/depends_on.go
@@ -0,0 +1,23 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7func decodeDependsOn(attr *hcl.Attribute) ([]hcl.Traversal, hcl.Diagnostics) {
8 var ret []hcl.Traversal
9 exprs, diags := hcl.ExprList(attr.Expr)
10
11 for _, expr := range exprs {
12 expr, shimDiags := shimTraversalInString(expr, false)
13 diags = append(diags, shimDiags...)
14
15 traversal, travDiags := hcl.AbsTraversalForExpr(expr)
16 diags = append(diags, travDiags...)
17 if len(traversal) != 0 {
18 ret = append(ret, traversal)
19 }
20 }
21
22 return ret, diags
23}
diff --git a/vendor/github.com/hashicorp/terraform/configs/doc.go b/vendor/github.com/hashicorp/terraform/configs/doc.go
new file mode 100644
index 0000000..f01eb79
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/doc.go
@@ -0,0 +1,19 @@
1// Package configs contains types that represent Terraform configurations and
2// the different elements thereof.
3//
4// The functionality in this package can be used for some static analyses of
5// Terraform configurations, but this package generally exposes representations
6// of the configuration source code rather than the result of evaluating these
7// objects. The sibling package "lang" deals with evaluation of structures
8// and expressions in the configuration.
9//
10// Due to its close relationship with HCL, this package makes frequent use
11// of types from the HCL API, including raw HCL diagnostic messages. Such
12// diagnostics can be converted into Terraform-flavored diagnostics, if needed,
13// using functions in the sibling package tfdiags.
14//
15// The Parser type is the main entry-point into this package. The LoadConfigDir
16// method can be used to load a single module directory, and then a full
17// configuration (including any descendent modules) can be produced using
18// the top-level BuildConfig method.
19package configs
diff --git a/vendor/github.com/hashicorp/terraform/configs/module.go b/vendor/github.com/hashicorp/terraform/configs/module.go
new file mode 100644
index 0000000..250f9d3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/module.go
@@ -0,0 +1,404 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7
8 "github.com/hashicorp/terraform/addrs"
9)
10
11// Module is a container for a set of configuration constructs that are
12// evaluated within a common namespace.
13type Module struct {
14 // SourceDir is the filesystem directory that the module was loaded from.
15 //
16 // This is populated automatically only for configurations loaded with
17 // LoadConfigDir. If the parser is using a virtual filesystem then the
18 // path here will be in terms of that virtual filesystem.
19
20 // Any other caller that constructs a module directly with NewModule may
21 // assign a suitable value to this attribute before using it for other
22 // purposes. It should be treated as immutable by all consumers of Module
23 // values.
24 SourceDir string
25
26 CoreVersionConstraints []VersionConstraint
27
28 Backend *Backend
29 ProviderConfigs map[string]*Provider
30 ProviderRequirements map[string][]VersionConstraint
31
32 Variables map[string]*Variable
33 Locals map[string]*Local
34 Outputs map[string]*Output
35
36 ModuleCalls map[string]*ModuleCall
37
38 ManagedResources map[string]*Resource
39 DataResources map[string]*Resource
40}
41
42// File describes the contents of a single configuration file.
43//
44// Individual files are not usually used alone, but rather combined together
45// with other files (conventionally, those in the same directory) to produce
46// a *Module, using NewModule.
47//
48// At the level of an individual file we represent directly the structural
49// elements present in the file, without any attempt to detect conflicting
50// declarations. A File object can therefore be used for some basic static
51// analysis of individual elements, but must be built into a Module to detect
52// duplicate declarations.
53type File struct {
54 CoreVersionConstraints []VersionConstraint
55
56 Backends []*Backend
57 ProviderConfigs []*Provider
58 ProviderRequirements []*ProviderRequirement
59
60 Variables []*Variable
61 Locals []*Local
62 Outputs []*Output
63
64 ModuleCalls []*ModuleCall
65
66 ManagedResources []*Resource
67 DataResources []*Resource
68}
69
70// NewModule takes a list of primary files and a list of override files and
71// produces a *Module by combining the files together.
72//
73// If there are any conflicting declarations in the given files -- for example,
74// if the same variable name is defined twice -- then the resulting module
75// will be incomplete and error diagnostics will be returned. Careful static
76// analysis of the returned Module is still possible in this case, but the
77// module will probably not be semantically valid.
78func NewModule(primaryFiles, overrideFiles []*File) (*Module, hcl.Diagnostics) {
79 var diags hcl.Diagnostics
80 mod := &Module{
81 ProviderConfigs: map[string]*Provider{},
82 ProviderRequirements: map[string][]VersionConstraint{},
83 Variables: map[string]*Variable{},
84 Locals: map[string]*Local{},
85 Outputs: map[string]*Output{},
86 ModuleCalls: map[string]*ModuleCall{},
87 ManagedResources: map[string]*Resource{},
88 DataResources: map[string]*Resource{},
89 }
90
91 for _, file := range primaryFiles {
92 fileDiags := mod.appendFile(file)
93 diags = append(diags, fileDiags...)
94 }
95
96 for _, file := range overrideFiles {
97 fileDiags := mod.mergeFile(file)
98 diags = append(diags, fileDiags...)
99 }
100
101 return mod, diags
102}
103
104// ResourceByAddr returns the configuration for the resource with the given
105// address, or nil if there is no such resource.
106func (m *Module) ResourceByAddr(addr addrs.Resource) *Resource {
107 key := addr.String()
108 switch addr.Mode {
109 case addrs.ManagedResourceMode:
110 return m.ManagedResources[key]
111 case addrs.DataResourceMode:
112 return m.DataResources[key]
113 default:
114 return nil
115 }
116}
117
118func (m *Module) appendFile(file *File) hcl.Diagnostics {
119 var diags hcl.Diagnostics
120
121 for _, constraint := range file.CoreVersionConstraints {
122 // If there are any conflicting requirements then we'll catch them
123 // when we actually check these constraints.
124 m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
125 }
126
127 for _, b := range file.Backends {
128 if m.Backend != nil {
129 diags = append(diags, &hcl.Diagnostic{
130 Severity: hcl.DiagError,
131 Summary: "Duplicate backend configuration",
132 Detail: fmt.Sprintf("A module may have only one backend configuration. The backend was previously configured at %s.", m.Backend.DeclRange),
133 Subject: &b.DeclRange,
134 })
135 continue
136 }
137 m.Backend = b
138 }
139
140 for _, pc := range file.ProviderConfigs {
141 key := pc.moduleUniqueKey()
142 if existing, exists := m.ProviderConfigs[key]; exists {
143 if existing.Alias == "" {
144 diags = append(diags, &hcl.Diagnostic{
145 Severity: hcl.DiagError,
146 Summary: "Duplicate provider configuration",
147 Detail: fmt.Sprintf("A default (non-aliased) provider configuration for %q was already given at %s. If multiple configurations are required, set the \"alias\" argument for alternative configurations.", existing.Name, existing.DeclRange),
148 Subject: &pc.DeclRange,
149 })
150 } else {
151 diags = append(diags, &hcl.Diagnostic{
152 Severity: hcl.DiagError,
153 Summary: "Duplicate provider configuration",
154 Detail: fmt.Sprintf("A provider configuration for %q with alias %q was already given at %s. Each configuration for the same provider must have a distinct alias.", existing.Name, existing.Alias, existing.DeclRange),
155 Subject: &pc.DeclRange,
156 })
157 }
158 continue
159 }
160 m.ProviderConfigs[key] = pc
161 }
162
163 for _, reqd := range file.ProviderRequirements {
164 m.ProviderRequirements[reqd.Name] = append(m.ProviderRequirements[reqd.Name], reqd.Requirement)
165 }
166
167 for _, v := range file.Variables {
168 if existing, exists := m.Variables[v.Name]; exists {
169 diags = append(diags, &hcl.Diagnostic{
170 Severity: hcl.DiagError,
171 Summary: "Duplicate variable declaration",
172 Detail: fmt.Sprintf("A variable named %q was already declared at %s. Variable names must be unique within a module.", existing.Name, existing.DeclRange),
173 Subject: &v.DeclRange,
174 })
175 }
176 m.Variables[v.Name] = v
177 }
178
179 for _, l := range file.Locals {
180 if existing, exists := m.Locals[l.Name]; exists {
181 diags = append(diags, &hcl.Diagnostic{
182 Severity: hcl.DiagError,
183 Summary: "Duplicate local value definition",
184 Detail: fmt.Sprintf("A local value named %q was already defined at %s. Local value names must be unique within a module.", existing.Name, existing.DeclRange),
185 Subject: &l.DeclRange,
186 })
187 }
188 m.Locals[l.Name] = l
189 }
190
191 for _, o := range file.Outputs {
192 if existing, exists := m.Outputs[o.Name]; exists {
193 diags = append(diags, &hcl.Diagnostic{
194 Severity: hcl.DiagError,
195 Summary: "Duplicate output definition",
196 Detail: fmt.Sprintf("An output named %q was already defined at %s. Output names must be unique within a module.", existing.Name, existing.DeclRange),
197 Subject: &o.DeclRange,
198 })
199 }
200 m.Outputs[o.Name] = o
201 }
202
203 for _, mc := range file.ModuleCalls {
204 if existing, exists := m.ModuleCalls[mc.Name]; exists {
205 diags = append(diags, &hcl.Diagnostic{
206 Severity: hcl.DiagError,
207 Summary: "Duplicate module call",
208 Detail: fmt.Sprintf("An module call named %q was already defined at %s. Module calls must have unique names within a module.", existing.Name, existing.DeclRange),
209 Subject: &mc.DeclRange,
210 })
211 }
212 m.ModuleCalls[mc.Name] = mc
213 }
214
215 for _, r := range file.ManagedResources {
216 key := r.moduleUniqueKey()
217 if existing, exists := m.ManagedResources[key]; exists {
218 diags = append(diags, &hcl.Diagnostic{
219 Severity: hcl.DiagError,
220 Summary: fmt.Sprintf("Duplicate resource %q configuration", existing.Type),
221 Detail: fmt.Sprintf("A %s resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
222 Subject: &r.DeclRange,
223 })
224 continue
225 }
226 m.ManagedResources[key] = r
227 }
228
229 for _, r := range file.DataResources {
230 key := r.moduleUniqueKey()
231 if existing, exists := m.DataResources[key]; exists {
232 diags = append(diags, &hcl.Diagnostic{
233 Severity: hcl.DiagError,
234 Summary: fmt.Sprintf("Duplicate data %q configuration", existing.Type),
235 Detail: fmt.Sprintf("A %s data resource named %q was already declared at %s. Resource names must be unique per type in each module.", existing.Type, existing.Name, existing.DeclRange),
236 Subject: &r.DeclRange,
237 })
238 continue
239 }
240 m.DataResources[key] = r
241 }
242
243 return diags
244}
245
246func (m *Module) mergeFile(file *File) hcl.Diagnostics {
247 var diags hcl.Diagnostics
248
249 if len(file.CoreVersionConstraints) != 0 {
250 // This is a bit of a strange case for overriding since we normally
251 // would union together across multiple files anyway, but we'll
252 // allow it and have each override file clobber any existing list.
253 m.CoreVersionConstraints = nil
254 for _, constraint := range file.CoreVersionConstraints {
255 m.CoreVersionConstraints = append(m.CoreVersionConstraints, constraint)
256 }
257 }
258
259 if len(file.Backends) != 0 {
260 switch len(file.Backends) {
261 case 1:
262 m.Backend = file.Backends[0]
263 default:
264 // An override file with multiple backends is still invalid, even
265 // though it can override backends from _other_ files.
266 diags = append(diags, &hcl.Diagnostic{
267 Severity: hcl.DiagError,
268 Summary: "Duplicate backend configuration",
269 Detail: fmt.Sprintf("Each override file may have only one backend configuration. A backend was previously configured at %s.", file.Backends[0].DeclRange),
270 Subject: &file.Backends[1].DeclRange,
271 })
272 }
273 }
274
275 for _, pc := range file.ProviderConfigs {
276 key := pc.moduleUniqueKey()
277 existing, exists := m.ProviderConfigs[key]
278 if pc.Alias == "" {
279 // We allow overriding a non-existing _default_ provider configuration
280 // because the user model is that an absent provider configuration
281 // implies an empty provider configuration, which is what the user
282 // is therefore overriding here.
283 if exists {
284 mergeDiags := existing.merge(pc)
285 diags = append(diags, mergeDiags...)
286 } else {
287 m.ProviderConfigs[key] = pc
288 }
289 } else {
290 // For aliased providers, there must be a base configuration to
291 // override. This allows us to detect and report alias typos
292 // that might otherwise cause the override to not apply.
293 if !exists {
294 diags = append(diags, &hcl.Diagnostic{
295 Severity: hcl.DiagError,
296 Summary: "Missing base provider configuration for override",
297 Detail: fmt.Sprintf("There is no %s provider configuration with the alias %q. An override file can only override an aliased provider configuration that was already defined in a primary configuration file.", pc.Name, pc.Alias),
298 Subject: &pc.DeclRange,
299 })
300 continue
301 }
302 mergeDiags := existing.merge(pc)
303 diags = append(diags, mergeDiags...)
304 }
305 }
306
307 if len(file.ProviderRequirements) != 0 {
308 mergeProviderVersionConstraints(m.ProviderRequirements, file.ProviderRequirements)
309 }
310
311 for _, v := range file.Variables {
312 existing, exists := m.Variables[v.Name]
313 if !exists {
314 diags = append(diags, &hcl.Diagnostic{
315 Severity: hcl.DiagError,
316 Summary: "Missing base variable declaration to override",
317 Detail: fmt.Sprintf("There is no variable named %q. An override file can only override a variable that was already declared in a primary configuration file.", v.Name),
318 Subject: &v.DeclRange,
319 })
320 continue
321 }
322 mergeDiags := existing.merge(v)
323 diags = append(diags, mergeDiags...)
324 }
325
326 for _, l := range file.Locals {
327 existing, exists := m.Locals[l.Name]
328 if !exists {
329 diags = append(diags, &hcl.Diagnostic{
330 Severity: hcl.DiagError,
331 Summary: "Missing base local value definition to override",
332 Detail: fmt.Sprintf("There is no local value named %q. An override file can only override a local value that was already defined in a primary configuration file.", l.Name),
333 Subject: &l.DeclRange,
334 })
335 continue
336 }
337 mergeDiags := existing.merge(l)
338 diags = append(diags, mergeDiags...)
339 }
340
341 for _, o := range file.Outputs {
342 existing, exists := m.Outputs[o.Name]
343 if !exists {
344 diags = append(diags, &hcl.Diagnostic{
345 Severity: hcl.DiagError,
346 Summary: "Missing base output definition to override",
347 Detail: fmt.Sprintf("There is no output named %q. An override file can only override an output that was already defined in a primary configuration file.", o.Name),
348 Subject: &o.DeclRange,
349 })
350 continue
351 }
352 mergeDiags := existing.merge(o)
353 diags = append(diags, mergeDiags...)
354 }
355
356 for _, mc := range file.ModuleCalls {
357 existing, exists := m.ModuleCalls[mc.Name]
358 if !exists {
359 diags = append(diags, &hcl.Diagnostic{
360 Severity: hcl.DiagError,
361 Summary: "Missing module call to override",
362 Detail: fmt.Sprintf("There is no module call named %q. An override file can only override a module call that was defined in a primary configuration file.", mc.Name),
363 Subject: &mc.DeclRange,
364 })
365 continue
366 }
367 mergeDiags := existing.merge(mc)
368 diags = append(diags, mergeDiags...)
369 }
370
371 for _, r := range file.ManagedResources {
372 key := r.moduleUniqueKey()
373 existing, exists := m.ManagedResources[key]
374 if !exists {
375 diags = append(diags, &hcl.Diagnostic{
376 Severity: hcl.DiagError,
377 Summary: "Missing resource to override",
378 Detail: fmt.Sprintf("There is no %s resource named %q. An override file can only override a resource block defined in a primary configuration file.", r.Type, r.Name),
379 Subject: &r.DeclRange,
380 })
381 continue
382 }
383 mergeDiags := existing.merge(r)
384 diags = append(diags, mergeDiags...)
385 }
386
387 for _, r := range file.DataResources {
388 key := r.moduleUniqueKey()
389 existing, exists := m.DataResources[key]
390 if !exists {
391 diags = append(diags, &hcl.Diagnostic{
392 Severity: hcl.DiagError,
393 Summary: "Missing data resource to override",
394 Detail: fmt.Sprintf("There is no %s data resource named %q. An override file can only override a data block defined in a primary configuration file.", r.Type, r.Name),
395 Subject: &r.DeclRange,
396 })
397 continue
398 }
399 mergeDiags := existing.merge(r)
400 diags = append(diags, mergeDiags...)
401 }
402
403 return diags
404}
diff --git a/vendor/github.com/hashicorp/terraform/configs/module_call.go b/vendor/github.com/hashicorp/terraform/configs/module_call.go
new file mode 100644
index 0000000..8c3ba67
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/module_call.go
@@ -0,0 +1,188 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/gohcl"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9)
10
11// ModuleCall represents a "module" block in a module or file.
12type ModuleCall struct {
13 Name string
14
15 SourceAddr string
16 SourceAddrRange hcl.Range
17 SourceSet bool
18
19 Config hcl.Body
20
21 Version VersionConstraint
22
23 Count hcl.Expression
24 ForEach hcl.Expression
25
26 Providers []PassedProviderConfig
27
28 DependsOn []hcl.Traversal
29
30 DeclRange hcl.Range
31}
32
33func decodeModuleBlock(block *hcl.Block, override bool) (*ModuleCall, hcl.Diagnostics) {
34 mc := &ModuleCall{
35 Name: block.Labels[0],
36 DeclRange: block.DefRange,
37 }
38
39 schema := moduleBlockSchema
40 if override {
41 schema = schemaForOverrides(schema)
42 }
43
44 content, remain, diags := block.Body.PartialContent(schema)
45 mc.Config = remain
46
47 if !hclsyntax.ValidIdentifier(mc.Name) {
48 diags = append(diags, &hcl.Diagnostic{
49 Severity: hcl.DiagError,
50 Summary: "Invalid module instance name",
51 Detail: badIdentifierDetail,
52 Subject: &block.LabelRanges[0],
53 })
54 }
55
56 if attr, exists := content.Attributes["source"]; exists {
57 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &mc.SourceAddr)
58 diags = append(diags, valDiags...)
59 mc.SourceAddrRange = attr.Expr.Range()
60 mc.SourceSet = true
61 }
62
63 if attr, exists := content.Attributes["version"]; exists {
64 var versionDiags hcl.Diagnostics
65 mc.Version, versionDiags = decodeVersionConstraint(attr)
66 diags = append(diags, versionDiags...)
67 }
68
69 if attr, exists := content.Attributes["count"]; exists {
70 mc.Count = attr.Expr
71
72 // We currently parse this, but don't yet do anything with it.
73 diags = append(diags, &hcl.Diagnostic{
74 Severity: hcl.DiagError,
75 Summary: "Reserved argument name in module block",
76 Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
77 Subject: &attr.NameRange,
78 })
79 }
80
81 if attr, exists := content.Attributes["for_each"]; exists {
82 mc.ForEach = attr.Expr
83
84 // We currently parse this, but don't yet do anything with it.
85 diags = append(diags, &hcl.Diagnostic{
86 Severity: hcl.DiagError,
87 Summary: "Reserved argument name in module block",
88 Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
89 Subject: &attr.NameRange,
90 })
91 }
92
93 if attr, exists := content.Attributes["depends_on"]; exists {
94 deps, depsDiags := decodeDependsOn(attr)
95 diags = append(diags, depsDiags...)
96 mc.DependsOn = append(mc.DependsOn, deps...)
97
98 // We currently parse this, but don't yet do anything with it.
99 diags = append(diags, &hcl.Diagnostic{
100 Severity: hcl.DiagError,
101 Summary: "Reserved argument name in module block",
102 Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
103 Subject: &attr.NameRange,
104 })
105 }
106
107 if attr, exists := content.Attributes["providers"]; exists {
108 seen := make(map[string]hcl.Range)
109 pairs, pDiags := hcl.ExprMap(attr.Expr)
110 diags = append(diags, pDiags...)
111 for _, pair := range pairs {
112 key, keyDiags := decodeProviderConfigRef(pair.Key, "providers")
113 diags = append(diags, keyDiags...)
114 value, valueDiags := decodeProviderConfigRef(pair.Value, "providers")
115 diags = append(diags, valueDiags...)
116 if keyDiags.HasErrors() || valueDiags.HasErrors() {
117 continue
118 }
119
120 matchKey := key.String()
121 if prev, exists := seen[matchKey]; exists {
122 diags = append(diags, &hcl.Diagnostic{
123 Severity: hcl.DiagError,
124 Summary: "Duplicate provider address",
125 Detail: fmt.Sprintf("A provider configuration was already passed to %s at %s. Each child provider configuration can be assigned only once.", matchKey, prev),
126 Subject: pair.Value.Range().Ptr(),
127 })
128 continue
129 }
130
131 rng := hcl.RangeBetween(pair.Key.Range(), pair.Value.Range())
132 seen[matchKey] = rng
133 mc.Providers = append(mc.Providers, PassedProviderConfig{
134 InChild: key,
135 InParent: value,
136 })
137 }
138 }
139
140 // Reserved block types (all of them)
141 for _, block := range content.Blocks {
142 diags = append(diags, &hcl.Diagnostic{
143 Severity: hcl.DiagError,
144 Summary: "Reserved block type name in module block",
145 Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
146 Subject: &block.TypeRange,
147 })
148 }
149
150 return mc, diags
151}
152
153// PassedProviderConfig represents a provider config explicitly passed down to
154// a child module, possibly giving it a new local address in the process.
155type PassedProviderConfig struct {
156 InChild *ProviderConfigRef
157 InParent *ProviderConfigRef
158}
159
160var moduleBlockSchema = &hcl.BodySchema{
161 Attributes: []hcl.AttributeSchema{
162 {
163 Name: "source",
164 Required: true,
165 },
166 {
167 Name: "version",
168 },
169 {
170 Name: "count",
171 },
172 {
173 Name: "for_each",
174 },
175 {
176 Name: "depends_on",
177 },
178 {
179 Name: "providers",
180 },
181 },
182 Blocks: []hcl.BlockHeaderSchema{
183 // These are all reserved for future use.
184 {Type: "lifecycle"},
185 {Type: "locals"},
186 {Type: "provider", LabelNames: []string{"type"}},
187 },
188}
diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge.go b/vendor/github.com/hashicorp/terraform/configs/module_merge.go
new file mode 100644
index 0000000..12614c1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/module_merge.go
@@ -0,0 +1,247 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/zclconf/go-cty/cty"
10 "github.com/zclconf/go-cty/cty/convert"
11)
12
13// The methods in this file are used by Module.mergeFile to apply overrides
14// to our different configuration elements. These methods all follow the
15// pattern of mutating the receiver to incorporate settings from the parameter,
16// returning error diagnostics if any aspect of the parameter cannot be merged
17// into the receiver for some reason.
18//
19// User expectation is that anything _explicitly_ set in the given object
20// should take precedence over the corresponding settings in the receiver,
21// but that anything omitted in the given object should be left unchanged.
22// In some cases it may be reasonable to do a "deep merge" of certain nested
23// features, if it is possible to unambiguously correlate the nested elements
24// and their behaviors are orthogonal to each other.
25
26func (p *Provider) merge(op *Provider) hcl.Diagnostics {
27 var diags hcl.Diagnostics
28
29 if op.Version.Required != nil {
30 p.Version = op.Version
31 }
32
33 p.Config = MergeBodies(p.Config, op.Config)
34
35 return diags
36}
37
38func mergeProviderVersionConstraints(recv map[string][]VersionConstraint, ovrd []*ProviderRequirement) {
39 // Any provider name that's mentioned in the override gets nilled out in
40 // our map so that we'll rebuild it below. Any provider not mentioned is
41 // left unchanged.
42 for _, reqd := range ovrd {
43 delete(recv, reqd.Name)
44 }
45 for _, reqd := range ovrd {
46 recv[reqd.Name] = append(recv[reqd.Name], reqd.Requirement)
47 }
48}
49
50func (v *Variable) merge(ov *Variable) hcl.Diagnostics {
51 var diags hcl.Diagnostics
52
53 if ov.DescriptionSet {
54 v.Description = ov.Description
55 v.DescriptionSet = ov.DescriptionSet
56 }
57 if ov.Default != cty.NilVal {
58 v.Default = ov.Default
59 }
60 if ov.Type != cty.NilType {
61 v.Type = ov.Type
62 }
63 if ov.ParsingMode != 0 {
64 v.ParsingMode = ov.ParsingMode
65 }
66
67 // If the override file overrode type without default or vice-versa then
68 // it may have created an invalid situation, which we'll catch now by
69 // attempting to re-convert the value.
70 //
71 // Note that here we may be re-converting an already-converted base value
72 // from the base config. This will be a no-op if the type was not changed,
73 // but in particular might be user-observable in the edge case where the
74 // literal value in config could've been converted to the overridden type
75 // constraint but the converted value cannot. In practice, this situation
76 // should be rare since most of our conversions are interchangable.
77 if v.Default != cty.NilVal {
78 val, err := convert.Convert(v.Default, v.Type)
79 if err != nil {
80 // What exactly we'll say in the error message here depends on whether
81 // it was Default or Type that was overridden here.
82 switch {
83 case ov.Type != cty.NilType && ov.Default == cty.NilVal:
84 // If only the type was overridden
85 diags = append(diags, &hcl.Diagnostic{
86 Severity: hcl.DiagError,
87 Summary: "Invalid default value for variable",
88 Detail: fmt.Sprintf("Overriding this variable's type constraint has made its default value invalid: %s.", err),
89 Subject: &ov.DeclRange,
90 })
91 case ov.Type == cty.NilType && ov.Default != cty.NilVal:
92 // Only the default was overridden
93 diags = append(diags, &hcl.Diagnostic{
94 Severity: hcl.DiagError,
95 Summary: "Invalid default value for variable",
96 Detail: fmt.Sprintf("The overridden default value for this variable is not compatible with the variable's type constraint: %s.", err),
97 Subject: &ov.DeclRange,
98 })
99 default:
100 diags = append(diags, &hcl.Diagnostic{
101 Severity: hcl.DiagError,
102 Summary: "Invalid default value for variable",
103 Detail: fmt.Sprintf("This variable's default value is not compatible with its type constraint: %s.", err),
104 Subject: &ov.DeclRange,
105 })
106 }
107 } else {
108 v.Default = val
109 }
110 }
111
112 return diags
113}
114
115func (l *Local) merge(ol *Local) hcl.Diagnostics {
116 var diags hcl.Diagnostics
117
118 // Since a local is just a single expression in configuration, the
119 // override definition entirely replaces the base definition, including
120 // the source range so that we'll send the user to the right place if
121 // there is an error.
122 l.Expr = ol.Expr
123 l.DeclRange = ol.DeclRange
124
125 return diags
126}
127
128func (o *Output) merge(oo *Output) hcl.Diagnostics {
129 var diags hcl.Diagnostics
130
131 if oo.Description != "" {
132 o.Description = oo.Description
133 }
134 if oo.Expr != nil {
135 o.Expr = oo.Expr
136 }
137 if oo.SensitiveSet {
138 o.Sensitive = oo.Sensitive
139 o.SensitiveSet = oo.SensitiveSet
140 }
141
142 // We don't allow depends_on to be overridden because that is likely to
143 // cause confusing misbehavior.
144 if len(oo.DependsOn) != 0 {
145 diags = append(diags, &hcl.Diagnostic{
146 Severity: hcl.DiagError,
147 Summary: "Unsupported override",
148 Detail: "The depends_on argument may not be overridden.",
149 Subject: oo.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
150 })
151 }
152
153 return diags
154}
155
156func (mc *ModuleCall) merge(omc *ModuleCall) hcl.Diagnostics {
157 var diags hcl.Diagnostics
158
159 if omc.SourceSet {
160 mc.SourceAddr = omc.SourceAddr
161 mc.SourceAddrRange = omc.SourceAddrRange
162 mc.SourceSet = omc.SourceSet
163 }
164
165 if omc.Count != nil {
166 mc.Count = omc.Count
167 }
168
169 if omc.ForEach != nil {
170 mc.ForEach = omc.ForEach
171 }
172
173 if len(omc.Version.Required) != 0 {
174 mc.Version = omc.Version
175 }
176
177 mc.Config = MergeBodies(mc.Config, omc.Config)
178
179 // We don't allow depends_on to be overridden because that is likely to
180 // cause confusing misbehavior.
181 if len(mc.DependsOn) != 0 {
182 diags = append(diags, &hcl.Diagnostic{
183 Severity: hcl.DiagError,
184 Summary: "Unsupported override",
185 Detail: "The depends_on argument may not be overridden.",
186 Subject: mc.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
187 })
188 }
189
190 return diags
191}
192
193func (r *Resource) merge(or *Resource) hcl.Diagnostics {
194 var diags hcl.Diagnostics
195
196 if r.Mode != or.Mode {
197 // This is always a programming error, since managed and data resources
198 // are kept in separate maps in the configuration structures.
199 panic(fmt.Errorf("can't merge %s into %s", or.Mode, r.Mode))
200 }
201
202 if or.Count != nil {
203 r.Count = or.Count
204 }
205 if or.ForEach != nil {
206 r.ForEach = or.ForEach
207 }
208 if or.ProviderConfigRef != nil {
209 r.ProviderConfigRef = or.ProviderConfigRef
210 }
211 if r.Mode == addrs.ManagedResourceMode {
212 // or.Managed is always non-nil for managed resource mode
213
214 if or.Managed.Connection != nil {
215 r.Managed.Connection = or.Managed.Connection
216 }
217 if or.Managed.CreateBeforeDestroySet {
218 r.Managed.CreateBeforeDestroy = or.Managed.CreateBeforeDestroy
219 r.Managed.CreateBeforeDestroySet = or.Managed.CreateBeforeDestroySet
220 }
221 if len(or.Managed.IgnoreChanges) != 0 {
222 r.Managed.IgnoreChanges = or.Managed.IgnoreChanges
223 }
224 if or.Managed.PreventDestroySet {
225 r.Managed.PreventDestroy = or.Managed.PreventDestroy
226 r.Managed.PreventDestroySet = or.Managed.PreventDestroySet
227 }
228 if len(or.Managed.Provisioners) != 0 {
229 r.Managed.Provisioners = or.Managed.Provisioners
230 }
231 }
232
233 r.Config = MergeBodies(r.Config, or.Config)
234
235 // We don't allow depends_on to be overridden because that is likely to
236 // cause confusing misbehavior.
237 if len(or.DependsOn) != 0 {
238 diags = append(diags, &hcl.Diagnostic{
239 Severity: hcl.DiagError,
240 Summary: "Unsupported override",
241 Detail: "The depends_on argument may not be overridden.",
242 Subject: or.DependsOn[0].SourceRange().Ptr(), // the first item is the closest range we have
243 })
244 }
245
246 return diags
247}
diff --git a/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go
new file mode 100644
index 0000000..0ed561e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/module_merge_body.go
@@ -0,0 +1,143 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
7// MergeBodies creates a new HCL body that contains a combination of the
8// given base and override bodies. Attributes and blocks defined in the
9// override body take precedence over those of the same name defined in
10// the base body.
11//
12// If any block of a particular type appears in "override" then it will
13// replace _all_ of the blocks of the same type in "base" in the new
14// body.
15func MergeBodies(base, override hcl.Body) hcl.Body {
16 return mergeBody{
17 Base: base,
18 Override: override,
19 }
20}
21
// mergeBody is a hcl.Body implementation that wraps a pair of other bodies
// and allows attributes and blocks within the override to take precedence
// over those defined in the base body.
//
// This is used to deal with dynamically-processed bodies in Module.mergeFile.
// It uses a shallow-only merging strategy where direct attributes defined
// in Override will override attributes of the same name in Base, while any
// blocks defined in Override will hide all blocks of the same type in Base.
//
// This cannot possibly "do the right thing" in all cases, because we don't
// have enough information about user intent. However, this behavior is intended
// to be reasonable for simple overriding use-cases.
type mergeBody struct {
	Base     hcl.Body // lower-precedence body
	Override hcl.Body // higher-precedence body; its attributes and blocks win
}

// Compile-time assertion that mergeBody implements hcl.Body.
var _ hcl.Body = mergeBody{}
40
41func (b mergeBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
42 var diags hcl.Diagnostics
43 baseSchema := schemaWithDynamic(schema)
44 overrideSchema := schemaWithDynamic(schemaForOverrides(schema))
45
46 baseContent, _, cDiags := b.Base.PartialContent(baseSchema)
47 diags = append(diags, cDiags...)
48 overrideContent, _, cDiags := b.Override.PartialContent(overrideSchema)
49 diags = append(diags, cDiags...)
50
51 content := b.prepareContent(baseContent, overrideContent)
52
53 return content, diags
54}
55
56func (b mergeBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
57 var diags hcl.Diagnostics
58 baseSchema := schemaWithDynamic(schema)
59 overrideSchema := schemaWithDynamic(schemaForOverrides(schema))
60
61 baseContent, baseRemain, cDiags := b.Base.PartialContent(baseSchema)
62 diags = append(diags, cDiags...)
63 overrideContent, overrideRemain, cDiags := b.Override.PartialContent(overrideSchema)
64 diags = append(diags, cDiags...)
65
66 content := b.prepareContent(baseContent, overrideContent)
67
68 remain := MergeBodies(baseRemain, overrideRemain)
69
70 return content, remain, diags
71}
72
// prepareContent combines the already-decoded contents of the base and
// override bodies into a single hcl.BodyContent, applying the shallow
// override semantics described on mergeBody.
func (b mergeBody) prepareContent(base *hcl.BodyContent, override *hcl.BodyContent) *hcl.BodyContent {
	content := &hcl.BodyContent{
		Attributes: make(hcl.Attributes),
	}

	// For attributes we just assign from each map in turn and let the override
	// map clobber any matching entries from base.
	for k, a := range base.Attributes {
		content.Attributes[k] = a
	}
	for k, a := range override.Attributes {
		content.Attributes[k] = a
	}

	// Things are a little more interesting for blocks because they arrive
	// as a flat list. Our merging semantics call for us to suppress blocks
	// from base if at least one block of the same type appears in override.
	// We explicitly do not try to correlate and deeply merge nested blocks,
	// since we don't have enough context here to infer user intent.

	overriddenBlockTypes := make(map[string]bool)
	for _, block := range override.Blocks {
		if block.Type == "dynamic" {
			// A "dynamic" block stands in for blocks of the type named by
			// its first label, so that's the type it overrides.
			overriddenBlockTypes[block.Labels[0]] = true
			continue
		}
		overriddenBlockTypes[block.Type] = true
	}
	for _, block := range base.Blocks {
		// We skip over dynamic blocks whose type label is an overridden type
		// but note that below we do still leave them as dynamic blocks in
		// the result because expanding the dynamic blocks that are left is
		// done much later during the core graph walks, where we can safely
		// evaluate the expressions.
		if block.Type == "dynamic" && overriddenBlockTypes[block.Labels[0]] {
			continue
		}
		if overriddenBlockTypes[block.Type] {
			continue
		}
		content.Blocks = append(content.Blocks, block)
	}
	// All override blocks appear in the result, after the surviving base blocks.
	for _, block := range override.Blocks {
		content.Blocks = append(content.Blocks, block)
	}

	return content
}
121
122func (b mergeBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
123 var diags hcl.Diagnostics
124 ret := make(hcl.Attributes)
125
126 baseAttrs, aDiags := b.Base.JustAttributes()
127 diags = append(diags, aDiags...)
128 overrideAttrs, aDiags := b.Override.JustAttributes()
129 diags = append(diags, aDiags...)
130
131 for k, a := range baseAttrs {
132 ret[k] = a
133 }
134 for k, a := range overrideAttrs {
135 ret[k] = a
136 }
137
138 return ret, diags
139}
140
// MissingItemRange returns a source range to report missing required items
// against. We delegate to the base body, since that is the primary
// definition location.
func (b mergeBody) MissingItemRange() hcl.Range {
	return b.Base.MissingItemRange()
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/named_values.go b/vendor/github.com/hashicorp/terraform/configs/named_values.go
new file mode 100644
index 0000000..6f6b469
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/named_values.go
@@ -0,0 +1,364 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/ext/typeexpr"
7 "github.com/hashicorp/hcl2/gohcl"
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/hcl2/hcl/hclsyntax"
10 "github.com/zclconf/go-cty/cty"
11 "github.com/zclconf/go-cty/cty/convert"
12
13 "github.com/hashicorp/terraform/addrs"
14)
15
// A consistent detail message for all "not a valid identifier" diagnostics.
const badIdentifierDetail = "A name must start with a letter and may contain only letters, digits, underscores, and dashes."

// Variable represents a "variable" block in a module or file.
type Variable struct {
	Name        string              // label of the variable block
	Description string              // optional human-oriented description
	Default     cty.Value           // default value, already converted to Type when Type is known
	Type        cty.Type            // type constraint; cty.NilType when unset in an override file
	ParsingMode VariableParsingMode // how string-only inputs (CLI/env) are parsed

	// DescriptionSet records whether "description" was explicitly present,
	// so override merging can distinguish unset from empty.
	DescriptionSet bool

	DeclRange hcl.Range // source range of the block header, for diagnostics
}
31
// decodeVariableBlock decodes a single "variable" block into a Variable.
//
// When override is true the block comes from an override file, so the Type
// and ParsingMode defaults are left as zero values; merging later uses the
// zero values to recognize which arguments were actually set.
func decodeVariableBlock(block *hcl.Block, override bool) (*Variable, hcl.Diagnostics) {
	v := &Variable{
		Name:      block.Labels[0],
		DeclRange: block.DefRange,
	}

	// Unless we're building an override, we'll set some defaults
	// which we might override with attributes below. We leave these
	// as zero-value in the override case so we can recognize whether
	// or not they are set when we merge.
	if !override {
		v.Type = cty.DynamicPseudoType
		v.ParsingMode = VariableParseLiteral
	}

	content, diags := block.Body.Content(variableBlockSchema)

	if !hclsyntax.ValidIdentifier(v.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid variable name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[0],
		})
	}

	// Don't allow declaration of variables that would conflict with the
	// reserved attribute and block type names in a "module" block, since
	// these won't be usable for child modules.
	for _, attr := range moduleBlockSchema.Attributes {
		if attr.Name == v.Name {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid variable name",
				Detail:   fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", attr.Name),
				Subject:  &block.LabelRanges[0],
			})
		}
	}
	for _, blockS := range moduleBlockSchema.Blocks {
		if blockS.Type == v.Name {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary:  "Invalid variable name",
				Detail:   fmt.Sprintf("The variable name %q is reserved due to its special meaning inside module blocks.", blockS.Type),
				Subject:  &block.LabelRanges[0],
			})
		}
	}

	if attr, exists := content.Attributes["description"]; exists {
		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &v.Description)
		diags = append(diags, valDiags...)
		v.DescriptionSet = true
	}

	// "type" must be decoded before "default" below, since the default is
	// converted to the declared type.
	if attr, exists := content.Attributes["type"]; exists {
		ty, parseMode, tyDiags := decodeVariableType(attr.Expr)
		diags = append(diags, tyDiags...)
		v.Type = ty
		v.ParsingMode = parseMode
	}

	if attr, exists := content.Attributes["default"]; exists {
		val, valDiags := attr.Expr.Value(nil)
		diags = append(diags, valDiags...)

		// Convert the default to the expected type so we can catch invalid
		// defaults early and allow later code to assume validity.
		// Note that this depends on us having already processed any "type"
		// attribute above.
		// However, we can't do this if we're in an override file where
		// the type might not be set; we'll catch that during merge.
		if v.Type != cty.NilType {
			var err error
			val, err = convert.Convert(val, v.Type)
			if err != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid default value for variable",
					Detail:   fmt.Sprintf("This default value is not compatible with the variable's type constraint: %s.", err),
					Subject:  attr.Expr.Range().Ptr(),
				})
				// Placeholder so later code still has a value to work with.
				val = cty.DynamicVal
			}
		}

		v.Default = val
	}

	return v, diags
}
124
// decodeVariableType interprets the expression given for a variable "type"
// argument, returning both the resulting type constraint and the parsing
// mode to use for string-only value sources (CLI args, environment vars).
func decodeVariableType(expr hcl.Expression) (cty.Type, VariableParsingMode, hcl.Diagnostics) {
	if exprIsNativeQuotedString(expr) {
		// Here we're accepting the pre-0.12 form of variable type argument where
		// the string values "string", "list" and "map" are accepted has a hint
		// about the type used primarily for deciding how to parse values
		// given on the command line and in environment variables.
		// Only the native syntax ends up in this codepath; we handle the
		// JSON syntax (which is, of course, quoted even in the new format)
		// in the normal codepath below.
		val, diags := expr.Value(nil)
		if diags.HasErrors() {
			return cty.DynamicPseudoType, VariableParseHCL, diags
		}
		str := val.AsString()
		switch str {
		case "string":
			return cty.String, VariableParseLiteral, diags
		case "list":
			return cty.List(cty.DynamicPseudoType), VariableParseHCL, diags
		case "map":
			return cty.Map(cty.DynamicPseudoType), VariableParseHCL, diags
		default:
			return cty.DynamicPseudoType, VariableParseHCL, hcl.Diagnostics{{
				Severity: hcl.DiagError,
				Summary:  "Invalid legacy variable type hint",
				Detail:   `The legacy variable type hint form, using a quoted string, allows only the values "string", "list", and "map". To provide a full type expression, remove the surrounding quotes and give the type expression directly.`,
				Subject:  expr.Range().Ptr(),
			}}
		}
	}

	// First we'll deal with some shorthand forms that the HCL-level type
	// expression parser doesn't include. These both emulate pre-0.12 behavior
	// of allowing a list or map of any element type as long as all of the
	// elements are consistent. This is the same as list(any) or map(any).
	switch hcl.ExprAsKeyword(expr) {
	case "list":
		return cty.List(cty.DynamicPseudoType), VariableParseHCL, nil
	case "map":
		return cty.Map(cty.DynamicPseudoType), VariableParseHCL, nil
	}

	ty, diags := typeexpr.TypeConstraint(expr)
	if diags.HasErrors() {
		return cty.DynamicPseudoType, VariableParseHCL, diags
	}

	switch {
	case ty.IsPrimitiveType():
		// Primitive types use literal parsing.
		return ty, VariableParseLiteral, diags
	default:
		// Everything else uses HCL parsing
		return ty, VariableParseHCL, diags
	}
}
181
// VariableParsingMode defines how values of a particular variable given by
// text-only mechanisms (command line arguments and environment variables)
// should be parsed to produce the final value.
type VariableParsingMode rune

// VariableParseLiteral is a variable parsing mode that just takes the given
// string directly as a cty.String value.
const VariableParseLiteral VariableParsingMode = 'L'

// VariableParseHCL is a variable parsing mode that attempts to parse the given
// string as an HCL expression and returns the result.
const VariableParseHCL VariableParsingMode = 'H'
194
195// Parse uses the receiving parsing mode to process the given variable value
196// string, returning the result along with any diagnostics.
197//
198// A VariableParsingMode does not know the expected type of the corresponding
199// variable, so it's the caller's responsibility to attempt to convert the
200// result to the appropriate type and return to the user any diagnostics that
201// conversion may produce.
202//
203// The given name is used to create a synthetic filename in case any diagnostics
204// must be generated about the given string value. This should be the name
205// of the root module variable whose value will be populated from the given
206// string.
207//
208// If the returned diagnostics has errors, the returned value may not be
209// valid.
210func (m VariableParsingMode) Parse(name, value string) (cty.Value, hcl.Diagnostics) {
211 switch m {
212 case VariableParseLiteral:
213 return cty.StringVal(value), nil
214 case VariableParseHCL:
215 fakeFilename := fmt.Sprintf("<value for var.%s>", name)
216 expr, diags := hclsyntax.ParseExpression([]byte(value), fakeFilename, hcl.Pos{Line: 1, Column: 1})
217 if diags.HasErrors() {
218 return cty.DynamicVal, diags
219 }
220 val, valDiags := expr.Value(nil)
221 diags = append(diags, valDiags...)
222 return val, diags
223 default:
224 // Should never happen
225 panic(fmt.Errorf("Parse called on invalid VariableParsingMode %#v", m))
226 }
227}
228
// Output represents an "output" block in a module or file.
type Output struct {
	Name        string          // label of the output block
	Description string          // optional human-oriented description
	Expr        hcl.Expression  // the "value" expression, evaluated later
	DependsOn   []hcl.Traversal // explicit dependencies from "depends_on"
	Sensitive   bool            // whether the value should be hidden in UI output

	// These record whether the corresponding arguments were explicitly
	// present, so override merging can distinguish unset from false/empty.
	DescriptionSet bool
	SensitiveSet   bool

	DeclRange hcl.Range // source range of the block header, for diagnostics
}
242
// decodeOutputBlock decodes a single "output" block into an Output.
//
// When override is true, required arguments (notably "value") are relaxed
// so the block can be partially specified in an override file.
func decodeOutputBlock(block *hcl.Block, override bool) (*Output, hcl.Diagnostics) {
	o := &Output{
		Name:      block.Labels[0],
		DeclRange: block.DefRange,
	}

	schema := outputBlockSchema
	if override {
		schema = schemaForOverrides(schema)
	}

	content, diags := block.Body.Content(schema)

	if !hclsyntax.ValidIdentifier(o.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Invalid output name",
			Detail:   badIdentifierDetail,
			Subject:  &block.LabelRanges[0],
		})
	}

	if attr, exists := content.Attributes["description"]; exists {
		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Description)
		diags = append(diags, valDiags...)
		o.DescriptionSet = true
	}

	if attr, exists := content.Attributes["value"]; exists {
		// Keep the expression unevaluated; it is evaluated much later.
		o.Expr = attr.Expr
	}

	if attr, exists := content.Attributes["sensitive"]; exists {
		valDiags := gohcl.DecodeExpression(attr.Expr, nil, &o.Sensitive)
		diags = append(diags, valDiags...)
		o.SensitiveSet = true
	}

	if attr, exists := content.Attributes["depends_on"]; exists {
		deps, depsDiags := decodeDependsOn(attr)
		diags = append(diags, depsDiags...)
		o.DependsOn = append(o.DependsOn, deps...)
	}

	return o, diags
}
289
// Local represents a single entry from a "locals" block in a module or file.
// The "locals" block itself is not represented, because it serves only to
// provide context for us to interpret its contents.
type Local struct {
	Name string         // attribute name within the locals block
	Expr hcl.Expression // the value expression, evaluated later

	DeclRange hcl.Range // source range of the attribute, for diagnostics
}
299
300func decodeLocalsBlock(block *hcl.Block) ([]*Local, hcl.Diagnostics) {
301 attrs, diags := block.Body.JustAttributes()
302 if len(attrs) == 0 {
303 return nil, diags
304 }
305
306 locals := make([]*Local, 0, len(attrs))
307 for name, attr := range attrs {
308 if !hclsyntax.ValidIdentifier(name) {
309 diags = append(diags, &hcl.Diagnostic{
310 Severity: hcl.DiagError,
311 Summary: "Invalid local value name",
312 Detail: badIdentifierDetail,
313 Subject: &attr.NameRange,
314 })
315 }
316
317 locals = append(locals, &Local{
318 Name: name,
319 Expr: attr.Expr,
320 DeclRange: attr.Range,
321 })
322 }
323 return locals, diags
324}
325
326// Addr returns the address of the local value declared by the receiver,
327// relative to its containing module.
328func (l *Local) Addr() addrs.LocalValue {
329 return addrs.LocalValue{
330 Name: l.Name,
331 }
332}
333
334var variableBlockSchema = &hcl.BodySchema{
335 Attributes: []hcl.AttributeSchema{
336 {
337 Name: "description",
338 },
339 {
340 Name: "default",
341 },
342 {
343 Name: "type",
344 },
345 },
346}
347
348var outputBlockSchema = &hcl.BodySchema{
349 Attributes: []hcl.AttributeSchema{
350 {
351 Name: "description",
352 },
353 {
354 Name: "value",
355 Required: true,
356 },
357 {
358 Name: "depends_on",
359 },
360 {
361 Name: "sensitive",
362 },
363 },
364}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser.go b/vendor/github.com/hashicorp/terraform/configs/parser.go
new file mode 100644
index 0000000..8176fa1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser.go
@@ -0,0 +1,100 @@
1package configs
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hclparse"
9 "github.com/spf13/afero"
10)
11
// Parser is the main interface to read configuration files and other related
// files from disk.
//
// It retains a cache of all files that are loaded so that they can be used
// to create source code snippets in diagnostics, etc.
type Parser struct {
	fs afero.Afero      // filesystem abstraction all reads go through
	p  *hclparse.Parser // underlying HCL parser, which caches file sources
}
21
22// NewParser creates and returns a new Parser that reads files from the given
23// filesystem. If a nil filesystem is passed then the system's "real" filesystem
24// will be used, via afero.OsFs.
25func NewParser(fs afero.Fs) *Parser {
26 if fs == nil {
27 fs = afero.OsFs{}
28 }
29
30 return &Parser{
31 fs: afero.Afero{Fs: fs},
32 p: hclparse.NewParser(),
33 }
34}
35
36// LoadHCLFile is a low-level method that reads the file at the given path,
37// parses it, and returns the hcl.Body representing its root. In many cases
38// it is better to use one of the other Load*File methods on this type,
39// which additionally decode the root body in some way and return a higher-level
40// construct.
41//
42// If the file cannot be read at all -- e.g. because it does not exist -- then
43// this method will return a nil body and error diagnostics. In this case
44// callers may wish to ignore the provided error diagnostics and produce
45// a more context-sensitive error instead.
46//
47// The file will be parsed using the HCL native syntax unless the filename
48// ends with ".json", in which case the HCL JSON syntax will be used.
49func (p *Parser) LoadHCLFile(path string) (hcl.Body, hcl.Diagnostics) {
50 src, err := p.fs.ReadFile(path)
51
52 if err != nil {
53 return nil, hcl.Diagnostics{
54 {
55 Severity: hcl.DiagError,
56 Summary: "Failed to read file",
57 Detail: fmt.Sprintf("The file %q could not be read.", path),
58 },
59 }
60 }
61
62 var file *hcl.File
63 var diags hcl.Diagnostics
64 switch {
65 case strings.HasSuffix(path, ".json"):
66 file, diags = p.p.ParseJSON(src, path)
67 default:
68 file, diags = p.p.ParseHCL(src, path)
69 }
70
71 // If the returned file or body is nil, then we'll return a non-nil empty
72 // body so we'll meet our contract that nil means an error reading the file.
73 if file == nil || file.Body == nil {
74 return hcl.EmptyBody(), diags
75 }
76
77 return file.Body, diags
78}
79
// Sources returns a map of the cached source buffers for all files that
// have been loaded through this parser, with source filenames (as requested
// when each file was opened) as the keys.
func (p *Parser) Sources() map[string][]byte {
	// Delegates directly to the underlying hclparse cache.
	return p.p.Sources()
}
86
// ForceFileSource artificially adds source code to the cache of file sources,
// as if it had been loaded from the given filename.
//
// This should be used only in special situations where configuration is loaded
// some other way. Most callers should load configuration via methods of
// Parser, which will update the sources cache automatically.
func (p *Parser) ForceFileSource(filename string, src []byte) {
	// We'll make a synthetic hcl.File here just so we can reuse the
	// existing cache. Only the Bytes matter; the body is left empty.
	p.p.AddFile(filename, &hcl.File{
		Body:  hcl.EmptyBody(),
		Bytes: src,
	})
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config.go b/vendor/github.com/hashicorp/terraform/configs/parser_config.go
new file mode 100644
index 0000000..7f2ff27
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser_config.go
@@ -0,0 +1,247 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
// LoadConfigFile reads the file at the given path and parses it as a config
// file.
//
// If the file cannot be read -- for example, if it does not exist -- then
// a nil *File will be returned along with error diagnostics. Callers may wish
// to disregard the returned diagnostics in this case and instead generate
// their own error message(s) with additional context.
//
// If the returned diagnostics has errors when a non-nil map is returned
// then the map may be incomplete but should be valid enough for careful
// static analysis.
//
// This method wraps LoadHCLFile, and so it inherits the syntax selection
// behaviors documented for that method.
func (p *Parser) LoadConfigFile(path string) (*File, hcl.Diagnostics) {
	return p.loadConfigFile(path, false)
}
24
// LoadConfigFileOverride is the same as LoadConfigFile except that it relaxes
// certain required attribute constraints in order to interpret the given
// file as an overrides file.
func (p *Parser) LoadConfigFileOverride(path string) (*File, hcl.Diagnostics) {
	return p.loadConfigFile(path, true)
}
31
// loadConfigFile is the shared implementation of LoadConfigFile and
// LoadConfigFileOverride. It parses the file at the given path and
// dispatches each top-level block to its specific decoder, collecting
// the results into a *File.
func (p *Parser) loadConfigFile(path string, override bool) (*File, hcl.Diagnostics) {

	body, diags := p.LoadHCLFile(path)
	if body == nil {
		// LoadHCLFile's contract: nil body means the file couldn't be read.
		return nil, diags
	}

	file := &File{}

	var reqDiags hcl.Diagnostics
	file.CoreVersionConstraints, reqDiags = sniffCoreVersionRequirements(body)
	diags = append(diags, reqDiags...)

	content, contentDiags := body.Content(configFileSchema)
	diags = append(diags, contentDiags...)

	for _, block := range content.Blocks {
		switch block.Type {

		case "terraform":
			content, contentDiags := block.Body.Content(terraformBlockSchema)
			diags = append(diags, contentDiags...)

			// We ignore the "terraform_version" attribute here because
			// sniffCoreVersionRequirements already dealt with that above.

			for _, innerBlock := range content.Blocks {
				switch innerBlock.Type {

				case "backend":
					backendCfg, cfgDiags := decodeBackendBlock(innerBlock)
					diags = append(diags, cfgDiags...)
					if backendCfg != nil {
						file.Backends = append(file.Backends, backendCfg)
					}

				case "required_providers":
					reqs, reqsDiags := decodeRequiredProvidersBlock(innerBlock)
					diags = append(diags, reqsDiags...)
					file.ProviderRequirements = append(file.ProviderRequirements, reqs...)

				default:
					// Should never happen because the above cases should be exhaustive
					// for all block type names in our schema.
					continue

				}
			}

		case "provider":
			cfg, cfgDiags := decodeProviderBlock(block)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.ProviderConfigs = append(file.ProviderConfigs, cfg)
			}

		case "variable":
			cfg, cfgDiags := decodeVariableBlock(block, override)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.Variables = append(file.Variables, cfg)
			}

		case "locals":
			defs, defsDiags := decodeLocalsBlock(block)
			diags = append(diags, defsDiags...)
			file.Locals = append(file.Locals, defs...)

		case "output":
			cfg, cfgDiags := decodeOutputBlock(block, override)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.Outputs = append(file.Outputs, cfg)
			}

		case "module":
			cfg, cfgDiags := decodeModuleBlock(block, override)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.ModuleCalls = append(file.ModuleCalls, cfg)
			}

		case "resource":
			cfg, cfgDiags := decodeResourceBlock(block)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.ManagedResources = append(file.ManagedResources, cfg)
			}

		case "data":
			cfg, cfgDiags := decodeDataBlock(block)
			diags = append(diags, cfgDiags...)
			if cfg != nil {
				file.DataResources = append(file.DataResources, cfg)
			}

		default:
			// Should never happen because the above cases should be exhaustive
			// for all block type names in our schema.
			continue

		}
	}

	return file, diags
}
138
139// sniffCoreVersionRequirements does minimal parsing of the given body for
140// "terraform" blocks with "required_version" attributes, returning the
141// requirements found.
142//
143// This is intended to maximize the chance that we'll be able to read the
144// requirements (syntax errors notwithstanding) even if the config file contains
145// constructs that might've been added in future Terraform versions
146//
147// This is a "best effort" sort of method which will return constraints it is
148// able to find, but may return no constraints at all if the given body is
149// so invalid that it cannot be decoded at all.
150func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagnostics) {
151 rootContent, _, diags := body.PartialContent(configFileVersionSniffRootSchema)
152
153 var constraints []VersionConstraint
154
155 for _, block := range rootContent.Blocks {
156 content, _, blockDiags := block.Body.PartialContent(configFileVersionSniffBlockSchema)
157 diags = append(diags, blockDiags...)
158
159 attr, exists := content.Attributes["required_version"]
160 if !exists {
161 continue
162 }
163
164 constraint, constraintDiags := decodeVersionConstraint(attr)
165 diags = append(diags, constraintDiags...)
166 if !constraintDiags.HasErrors() {
167 constraints = append(constraints, constraint)
168 }
169 }
170
171 return constraints, diags
172}
173
174// configFileSchema is the schema for the top-level of a config file. We use
175// the low-level HCL API for this level so we can easily deal with each
176// block type separately with its own decoding logic.
177var configFileSchema = &hcl.BodySchema{
178 Blocks: []hcl.BlockHeaderSchema{
179 {
180 Type: "terraform",
181 },
182 {
183 Type: "provider",
184 LabelNames: []string{"name"},
185 },
186 {
187 Type: "variable",
188 LabelNames: []string{"name"},
189 },
190 {
191 Type: "locals",
192 },
193 {
194 Type: "output",
195 LabelNames: []string{"name"},
196 },
197 {
198 Type: "module",
199 LabelNames: []string{"name"},
200 },
201 {
202 Type: "resource",
203 LabelNames: []string{"type", "name"},
204 },
205 {
206 Type: "data",
207 LabelNames: []string{"type", "name"},
208 },
209 },
210}
211
212// terraformBlockSchema is the schema for a top-level "terraform" block in
213// a configuration file.
214var terraformBlockSchema = &hcl.BodySchema{
215 Attributes: []hcl.AttributeSchema{
216 {
217 Name: "required_version",
218 },
219 },
220 Blocks: []hcl.BlockHeaderSchema{
221 {
222 Type: "backend",
223 LabelNames: []string{"type"},
224 },
225 {
226 Type: "required_providers",
227 },
228 },
229}
230
231// configFileVersionSniffRootSchema is a schema for sniffCoreVersionRequirements
232var configFileVersionSniffRootSchema = &hcl.BodySchema{
233 Blocks: []hcl.BlockHeaderSchema{
234 {
235 Type: "terraform",
236 },
237 },
238}
239
240// configFileVersionSniffBlockSchema is a schema for sniffCoreVersionRequirements
241var configFileVersionSniffBlockSchema = &hcl.BodySchema{
242 Attributes: []hcl.AttributeSchema{
243 {
244 Name: "required_version",
245 },
246 },
247}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
new file mode 100644
index 0000000..3014cb4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
@@ -0,0 +1,142 @@
1package configs
2
3import (
4 "fmt"
5 "path/filepath"
6 "strings"
7
8 "github.com/hashicorp/hcl2/hcl"
9)
10
// LoadConfigDir reads the .tf and .tf.json files in the given directory
// as config files (using LoadConfigFile) and then combines these files into
// a single Module.
//
// If this method returns nil, that indicates that the given directory does not
// exist at all or could not be opened for some reason. Callers may wish to
// detect this case and ignore the returned diagnostics so that they can
// produce a more context-aware error message in that case.
//
// If this method returns a non-nil module while error diagnostics are returned
// then the module may be incomplete but can be used carefully for static
// analysis.
//
// This file does not consider a directory with no files to be an error, and
// will simply return an empty module in that case. Callers should first call
// Parser.IsConfigDir if they wish to recognize that situation.
//
// .tf files are parsed using the HCL native syntax while .tf.json files are
// parsed using the HCL JSON syntax.
func (p *Parser) LoadConfigDir(path string) (*Module, hcl.Diagnostics) {
	primaryPaths, overridePaths, diags := p.dirFiles(path)
	if diags.HasErrors() {
		// Directory missing/unreadable: nil module per the contract above.
		return nil, diags
	}

	// Primary files are loaded first, then override files layered on top.
	primary, fDiags := p.loadFiles(primaryPaths, false)
	diags = append(diags, fDiags...)
	override, fDiags := p.loadFiles(overridePaths, true)
	diags = append(diags, fDiags...)

	mod, modDiags := NewModule(primary, override)
	diags = append(diags, modDiags...)

	mod.SourceDir = path

	return mod, diags
}
48
// ConfigDirFiles returns lists of the primary and override configuration
// files in the given directory.
//
// If the given directory does not exist or cannot be read, error diagnostics
// are returned. If errors are returned, the resulting lists may be incomplete.
//
// NOTE(review): this method has a value receiver while every other Parser
// method uses a pointer receiver — confirm whether this inconsistency is
// intentional upstream before changing it.
func (p Parser) ConfigDirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) {
	return p.dirFiles(dir)
}
57
58// IsConfigDir determines whether the given path refers to a directory that
59// exists and contains at least one Terraform config file (with a .tf or
60// .tf.json extension.)
61func (p *Parser) IsConfigDir(path string) bool {
62 primaryPaths, overridePaths, _ := p.dirFiles(path)
63 return (len(primaryPaths) + len(overridePaths)) > 0
64}
65
66func (p *Parser) loadFiles(paths []string, override bool) ([]*File, hcl.Diagnostics) {
67 var files []*File
68 var diags hcl.Diagnostics
69
70 for _, path := range paths {
71 var f *File
72 var fDiags hcl.Diagnostics
73 if override {
74 f, fDiags = p.LoadConfigFileOverride(path)
75 } else {
76 f, fDiags = p.LoadConfigFile(path)
77 }
78 diags = append(diags, fDiags...)
79 if f != nil {
80 files = append(files, f)
81 }
82 }
83
84 return files, diags
85}
86
// dirFiles scans the given directory and partitions the Terraform
// configuration file names it finds into primary files and override files
// ("override.<ext>" or "*_override.<ext>"), returning full paths.
func (p *Parser) dirFiles(dir string) (primary, override []string, diags hcl.Diagnostics) {
	infos, err := p.fs.ReadDir(dir)
	if err != nil {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  "Failed to read module directory",
			Detail:   fmt.Sprintf("Module directory %s does not exist or cannot be read.", dir),
		})
		return
	}

	for _, info := range infos {
		if info.IsDir() {
			// We only care about files
			continue
		}

		name := info.Name()
		ext := fileExt(name)
		if ext == "" || IsIgnoredFile(name) {
			// Not a Terraform config file, or an editor artifact; skip.
			continue
		}

		baseName := name[:len(name)-len(ext)] // strip extension
		isOverride := baseName == "override" || strings.HasSuffix(baseName, "_override")

		fullPath := filepath.Join(dir, name)
		if isOverride {
			override = append(override, fullPath)
		} else {
			primary = append(primary, fullPath)
		}
	}

	return
}
123
// fileExt returns the Terraform configuration extension of the given
// path, or a blank string if it is not a recognized extension.
func fileExt(path string) string {
	// The two suffix sets are disjoint (".tf" vs ".tf.json"), so the
	// order of these checks does not affect the result.
	switch {
	case strings.HasSuffix(path, ".tf.json"):
		return ".tf.json"
	case strings.HasSuffix(path, ".tf"):
		return ".tf"
	default:
		return ""
	}
}
135
// IsIgnoredFile returns true if the given filename (which must not have a
// directory path ahead of it) should be ignored as e.g. an editor swap file.
func IsIgnoredFile(name string) bool {
	if strings.HasPrefix(name, ".") {
		return true // Unix-like hidden files
	}
	if strings.HasSuffix(name, "~") {
		return true // vim backup/swap files
	}
	// emacs lock/auto-save files look like "#name#"
	return strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/parser_values.go b/vendor/github.com/hashicorp/terraform/configs/parser_values.go
new file mode 100644
index 0000000..b7f1c1c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/parser_values.go
@@ -0,0 +1,43 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// LoadValuesFile reads the file at the given path and parses it as a "values
9// file", which is an HCL config file whose top-level attributes are treated
10// as arbitrary key.value pairs.
11//
12// If the file cannot be read -- for example, if it does not exist -- then
13// a nil map will be returned along with error diagnostics. Callers may wish
14// to disregard the returned diagnostics in this case and instead generate
15// their own error message(s) with additional context.
16//
17// If the returned diagnostics has errors when a non-nil map is returned
18// then the map may be incomplete but should be valid enough for careful
19// static analysis.
20//
21// This method wraps LoadHCLFile, and so it inherits the syntax selection
22// behaviors documented for that method.
23func (p *Parser) LoadValuesFile(path string) (map[string]cty.Value, hcl.Diagnostics) {
24 body, diags := p.LoadHCLFile(path)
25 if body == nil {
26 return nil, diags
27 }
28
29 vals := make(map[string]cty.Value)
30 attrs, attrDiags := body.JustAttributes()
31 diags = append(diags, attrDiags...)
32 if attrs == nil {
33 return vals, diags
34 }
35
36 for name, attr := range attrs {
37 val, valDiags := attr.Expr.Value(nil)
38 diags = append(diags, valDiags...)
39 vals[name] = val
40 }
41
42 return vals, diags
43}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provider.go b/vendor/github.com/hashicorp/terraform/configs/provider.go
new file mode 100644
index 0000000..d01d5cf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/provider.go
@@ -0,0 +1,144 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/gohcl"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9
10 "github.com/hashicorp/terraform/addrs"
11)
12
// Provider represents a "provider" block in a module or file. A provider
// block is a provider configuration, and there can be zero or more
// configurations for each actual provider.
type Provider struct {
	Name string // provider type name, from the block's single label
	NameRange hcl.Range // source range of the name label
	Alias string // optional configuration alias; empty if none
	AliasRange *hcl.Range // nil if no alias set

	// Version is the constraint given via the "version" argument, if any.
	Version VersionConstraint

	// Config is the remaining block body, to be decoded later against the
	// provider's own configuration schema.
	Config hcl.Body

	// DeclRange is the source range of the block header, for diagnostics.
	DeclRange hcl.Range
}
28
29func decodeProviderBlock(block *hcl.Block) (*Provider, hcl.Diagnostics) {
30 content, config, diags := block.Body.PartialContent(providerBlockSchema)
31
32 provider := &Provider{
33 Name: block.Labels[0],
34 NameRange: block.LabelRanges[0],
35 Config: config,
36 DeclRange: block.DefRange,
37 }
38
39 if attr, exists := content.Attributes["alias"]; exists {
40 valDiags := gohcl.DecodeExpression(attr.Expr, nil, &provider.Alias)
41 diags = append(diags, valDiags...)
42 provider.AliasRange = attr.Expr.Range().Ptr()
43
44 if !hclsyntax.ValidIdentifier(provider.Alias) {
45 diags = append(diags, &hcl.Diagnostic{
46 Severity: hcl.DiagError,
47 Summary: "Invalid provider configuration alias",
48 Detail: fmt.Sprintf("An alias must be a valid name. %s", badIdentifierDetail),
49 })
50 }
51 }
52
53 if attr, exists := content.Attributes["version"]; exists {
54 var versionDiags hcl.Diagnostics
55 provider.Version, versionDiags = decodeVersionConstraint(attr)
56 diags = append(diags, versionDiags...)
57 }
58
59 // Reserved attribute names
60 for _, name := range []string{"count", "depends_on", "for_each", "source"} {
61 if attr, exists := content.Attributes[name]; exists {
62 diags = append(diags, &hcl.Diagnostic{
63 Severity: hcl.DiagError,
64 Summary: "Reserved argument name in provider block",
65 Detail: fmt.Sprintf("The provider argument name %q is reserved for use by Terraform in a future version.", name),
66 Subject: &attr.NameRange,
67 })
68 }
69 }
70
71 // Reserved block types (all of them)
72 for _, block := range content.Blocks {
73 diags = append(diags, &hcl.Diagnostic{
74 Severity: hcl.DiagError,
75 Summary: "Reserved block type name in provider block",
76 Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
77 Subject: &block.TypeRange,
78 })
79 }
80
81 return provider, diags
82}
83
84// Addr returns the address of the receiving provider configuration, relative
85// to its containing module.
86func (p *Provider) Addr() addrs.ProviderConfig {
87 return addrs.ProviderConfig{
88 Type: p.Name,
89 Alias: p.Alias,
90 }
91}
92
93func (p *Provider) moduleUniqueKey() string {
94 if p.Alias != "" {
95 return fmt.Sprintf("%s.%s", p.Name, p.Alias)
96 }
97 return p.Name
98}
99
// ProviderRequirement represents a declaration of a dependency on a particular
// provider version without actually configuring that provider. This is used in
// child modules that expect a provider to be passed in from their parent.
type ProviderRequirement struct {
	Name string // provider type name, from the attribute name
	Requirement VersionConstraint // the declared version constraint
}
107
108func decodeRequiredProvidersBlock(block *hcl.Block) ([]*ProviderRequirement, hcl.Diagnostics) {
109 attrs, diags := block.Body.JustAttributes()
110 var reqs []*ProviderRequirement
111 for name, attr := range attrs {
112 req, reqDiags := decodeVersionConstraint(attr)
113 diags = append(diags, reqDiags...)
114 if !diags.HasErrors() {
115 reqs = append(reqs, &ProviderRequirement{
116 Name: name,
117 Requirement: req,
118 })
119 }
120 }
121 return reqs, diags
122}
123
124var providerBlockSchema = &hcl.BodySchema{
125 Attributes: []hcl.AttributeSchema{
126 {
127 Name: "alias",
128 },
129 {
130 Name: "version",
131 },
132
133 // Attribute names reserved for future expansion.
134 {Name: "count"},
135 {Name: "depends_on"},
136 {Name: "for_each"},
137 {Name: "source"},
138 },
139 Blocks: []hcl.BlockHeaderSchema{
140 // _All_ of these are reserved for future expansion.
141 {Type: "lifecycle"},
142 {Type: "locals"},
143 },
144}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioner.go b/vendor/github.com/hashicorp/terraform/configs/provisioner.go
new file mode 100644
index 0000000..b031dd0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/provisioner.go
@@ -0,0 +1,150 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7)
8
// Provisioner represents a "provisioner" block when used within a
// "resource" block in a module or file.
type Provisioner struct {
	Type string // provisioner type, from the block's single label
	Config hcl.Body // remaining body, decoded later against the provisioner schema
	Connection *Connection // nil if no nested connection block
	When ProvisionerWhen // when to run; defaults to ProvisionerWhenCreate
	OnFailure ProvisionerOnFailure // failure behavior; defaults to ProvisionerOnFailureFail

	DeclRange hcl.Range // source range of the block header
	TypeRange hcl.Range // source range of the type label
}
21
22func decodeProvisionerBlock(block *hcl.Block) (*Provisioner, hcl.Diagnostics) {
23 pv := &Provisioner{
24 Type: block.Labels[0],
25 TypeRange: block.LabelRanges[0],
26 DeclRange: block.DefRange,
27 When: ProvisionerWhenCreate,
28 OnFailure: ProvisionerOnFailureFail,
29 }
30
31 content, config, diags := block.Body.PartialContent(provisionerBlockSchema)
32 pv.Config = config
33
34 if attr, exists := content.Attributes["when"]; exists {
35 expr, shimDiags := shimTraversalInString(attr.Expr, true)
36 diags = append(diags, shimDiags...)
37
38 switch hcl.ExprAsKeyword(expr) {
39 case "create":
40 pv.When = ProvisionerWhenCreate
41 case "destroy":
42 pv.When = ProvisionerWhenDestroy
43 default:
44 diags = append(diags, &hcl.Diagnostic{
45 Severity: hcl.DiagError,
46 Summary: "Invalid \"when\" keyword",
47 Detail: "The \"when\" argument requires one of the following keywords: create or destroy.",
48 Subject: expr.Range().Ptr(),
49 })
50 }
51 }
52
53 if attr, exists := content.Attributes["on_failure"]; exists {
54 expr, shimDiags := shimTraversalInString(attr.Expr, true)
55 diags = append(diags, shimDiags...)
56
57 switch hcl.ExprAsKeyword(expr) {
58 case "continue":
59 pv.OnFailure = ProvisionerOnFailureContinue
60 case "fail":
61 pv.OnFailure = ProvisionerOnFailureFail
62 default:
63 diags = append(diags, &hcl.Diagnostic{
64 Severity: hcl.DiagError,
65 Summary: "Invalid \"on_failure\" keyword",
66 Detail: "The \"on_failure\" argument requires one of the following keywords: continue or fail.",
67 Subject: attr.Expr.Range().Ptr(),
68 })
69 }
70 }
71
72 var seenConnection *hcl.Block
73 for _, block := range content.Blocks {
74 switch block.Type {
75
76 case "connection":
77 if seenConnection != nil {
78 diags = append(diags, &hcl.Diagnostic{
79 Severity: hcl.DiagError,
80 Summary: "Duplicate connection block",
81 Detail: fmt.Sprintf("This provisioner already has a connection block at %s.", seenConnection.DefRange),
82 Subject: &block.DefRange,
83 })
84 continue
85 }
86 seenConnection = block
87
88 //conn, connDiags := decodeConnectionBlock(block)
89 //diags = append(diags, connDiags...)
90 pv.Connection = &Connection{
91 Config: block.Body,
92 DeclRange: block.DefRange,
93 }
94
95 default:
96 // Any other block types are ones we've reserved for future use,
97 // so they get a generic message.
98 diags = append(diags, &hcl.Diagnostic{
99 Severity: hcl.DiagError,
100 Summary: "Reserved block type name in provisioner block",
101 Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
102 Subject: &block.TypeRange,
103 })
104 }
105 }
106
107 return pv, diags
108}
109
// Connection represents a "connection" block when used within either a
// "resource" or "provisioner" block in a module or file.
type Connection struct {
	// Config is the raw block body; it is decoded later against the
	// appropriate connection schema.
	Config hcl.Body

	// DeclRange is the source range of the block header, for diagnostics.
	DeclRange hcl.Range
}
117
// ProvisionerWhen is an enum for valid values for when to run provisioners.
type ProvisionerWhen int

//go:generate stringer -type ProvisionerWhen

const (
	ProvisionerWhenInvalid ProvisionerWhen = iota // zero value; no valid keyword decoded
	ProvisionerWhenCreate // run when the resource is created (the default)
	ProvisionerWhenDestroy // run when the resource is destroyed
)
128
// ProvisionerOnFailure is an enum for valid values for on_failure options
// for provisioners.
type ProvisionerOnFailure int

//go:generate stringer -type ProvisionerOnFailure

const (
	ProvisionerOnFailureInvalid ProvisionerOnFailure = iota // zero value; no valid keyword decoded
	ProvisionerOnFailureContinue // ignore the failure and continue
	ProvisionerOnFailureFail // abort on failure (the default)
)
140
141var provisionerBlockSchema = &hcl.BodySchema{
142 Attributes: []hcl.AttributeSchema{
143 {Name: "when"},
144 {Name: "on_failure"},
145 },
146 Blocks: []hcl.BlockHeaderSchema{
147 {Type: "connection"},
148 {Type: "lifecycle"}, // reserved for future use
149 },
150}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go
new file mode 100644
index 0000000..7ff5a6e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/provisioneronfailure_string.go
@@ -0,0 +1,25 @@
1// Code generated by "stringer -type ProvisionerOnFailure"; DO NOT EDIT.
2
3package configs
4
5import "strconv"
6
// Compile-time guard: fails to build if the enum constant values drift from
// what this generated file encodes.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ProvisionerOnFailureInvalid-0]
	_ = x[ProvisionerOnFailureContinue-1]
	_ = x[ProvisionerOnFailureFail-2]
}

// _ProvisionerOnFailure_name concatenates all value names; _index holds the
// byte offsets delimiting each name within it.
const _ProvisionerOnFailure_name = "ProvisionerOnFailureInvalidProvisionerOnFailureContinueProvisionerOnFailureFail"

var _ProvisionerOnFailure_index = [...]uint8{0, 27, 55, 79}

// String implements fmt.Stringer for ProvisionerOnFailure.
// NOTE(review): this file is generated by stringer; regenerate it rather
// than hand-editing if the enum changes.
func (i ProvisionerOnFailure) String() string {
	if i < 0 || i >= ProvisionerOnFailure(len(_ProvisionerOnFailure_index)-1) {
		return "ProvisionerOnFailure(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ProvisionerOnFailure_name[_ProvisionerOnFailure_index[i]:_ProvisionerOnFailure_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go
new file mode 100644
index 0000000..9f21b3a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/provisionerwhen_string.go
@@ -0,0 +1,25 @@
1// Code generated by "stringer -type ProvisionerWhen"; DO NOT EDIT.
2
3package configs
4
5import "strconv"
6
// Compile-time guard: fails to build if the enum constant values drift from
// what this generated file encodes.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ProvisionerWhenInvalid-0]
	_ = x[ProvisionerWhenCreate-1]
	_ = x[ProvisionerWhenDestroy-2]
}

// _ProvisionerWhen_name concatenates all value names; _index holds the byte
// offsets delimiting each name within it.
const _ProvisionerWhen_name = "ProvisionerWhenInvalidProvisionerWhenCreateProvisionerWhenDestroy"

var _ProvisionerWhen_index = [...]uint8{0, 22, 43, 65}

// String implements fmt.Stringer for ProvisionerWhen.
// NOTE(review): this file is generated by stringer; regenerate it rather
// than hand-editing if the enum changes.
func (i ProvisionerWhen) String() string {
	if i < 0 || i >= ProvisionerWhen(len(_ProvisionerWhen_index)-1) {
		return "ProvisionerWhen(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ProvisionerWhen_name[_ProvisionerWhen_index[i]:_ProvisionerWhen_index[i+1]]
}
diff --git a/vendor/github.com/hashicorp/terraform/configs/resource.go b/vendor/github.com/hashicorp/terraform/configs/resource.go
new file mode 100644
index 0000000..de1a343
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/resource.go
@@ -0,0 +1,486 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/gohcl"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/hcl2/hcl/hclsyntax"
9
10 "github.com/hashicorp/terraform/addrs"
11)
12
// Resource represents a "resource" or "data" block in a module or file.
type Resource struct {
	Mode addrs.ResourceMode // managed ("resource") or data ("data")
	Name string // second block label
	Type string // first block label: the resource type
	Config hcl.Body // remaining body, decoded later against the provider schema
	Count hcl.Expression // unevaluated "count" expression, if any
	ForEach hcl.Expression // unevaluated "for_each" expression, if any

	// ProviderConfigRef is the explicit "provider" argument, if given;
	// nil means the default provider for the resource type is used.
	ProviderConfigRef *ProviderConfigRef

	DependsOn []hcl.Traversal

	// Managed is populated only for Mode = addrs.ManagedResourceMode,
	// containing the additional fields that apply to managed resources.
	// For all other resource modes, this field is nil.
	Managed *ManagedResource

	DeclRange hcl.Range // source range of the block header
	TypeRange hcl.Range // source range of the type label
}
34
// ManagedResource represents a "resource" block in a module or file.
type ManagedResource struct {
	Connection *Connection // nil when no resource-level connection block is present
	Provisioners []*Provisioner

	// Lifecycle settings, from the nested "lifecycle" block.
	CreateBeforeDestroy bool
	PreventDestroy bool
	IgnoreChanges []hcl.Traversal
	IgnoreAllChanges bool

	// The *Set flags record whether the corresponding argument was written
	// explicitly, distinguishing an explicit "false" from absence.
	CreateBeforeDestroySet bool
	PreventDestroySet bool
}
48
49func (r *Resource) moduleUniqueKey() string {
50 return r.Addr().String()
51}
52
53// Addr returns a resource address for the receiver that is relative to the
54// resource's containing module.
55func (r *Resource) Addr() addrs.Resource {
56 return addrs.Resource{
57 Mode: r.Mode,
58 Type: r.Type,
59 Name: r.Name,
60 }
61}
62
63// ProviderConfigAddr returns the address for the provider configuration
64// that should be used for this resource. This function implements the
65// default behavior of extracting the type from the resource type name if
66// an explicit "provider" argument was not provided.
67func (r *Resource) ProviderConfigAddr() addrs.ProviderConfig {
68 if r.ProviderConfigRef == nil {
69 return r.Addr().DefaultProviderConfig()
70 }
71
72 return addrs.ProviderConfig{
73 Type: r.ProviderConfigRef.Name,
74 Alias: r.ProviderConfigRef.Alias,
75 }
76}
77
// decodeResourceBlock decodes a managed "resource" block. The two block
// labels give the resource type and resource name respectively. The result
// is always non-nil; when the returned diagnostics contain errors it may be
// incomplete but remains valid enough for careful static analysis.
func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
	r := &Resource{
		Mode: addrs.ManagedResourceMode,
		Type: block.Labels[0],
		Name: block.Labels[1],
		DeclRange: block.DefRange,
		TypeRange: block.LabelRanges[0],
		Managed: &ManagedResource{},
	}

	// PartialContent leaves the resource-type-specific arguments in
	// "remain", to be decoded later against the provider's schema.
	content, remain, diags := block.Body.PartialContent(resourceBlockSchema)
	r.Config = remain

	if !hclsyntax.ValidIdentifier(r.Type) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary: "Invalid resource type name",
			Detail: badIdentifierDetail,
			Subject: &block.LabelRanges[0],
		})
	}
	if !hclsyntax.ValidIdentifier(r.Name) {
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary: "Invalid resource name",
			Detail: badIdentifierDetail,
			Subject: &block.LabelRanges[1],
		})
	}

	if attr, exists := content.Attributes["count"]; exists {
		// Stored unevaluated; count is resolved during evaluation.
		r.Count = attr.Expr
	}

	if attr, exists := content.Attributes["for_each"]; exists {
		r.ForEach = attr.Expr
		// We currently parse this, but don't yet do anything with it.
		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary: "Reserved argument name in resource block",
			Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
			Subject: &attr.NameRange,
		})
	}

	if attr, exists := content.Attributes["provider"]; exists {
		var providerDiags hcl.Diagnostics
		r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider")
		diags = append(diags, providerDiags...)
	}

	if attr, exists := content.Attributes["depends_on"]; exists {
		deps, depsDiags := decodeDependsOn(attr)
		diags = append(diags, depsDiags...)
		r.DependsOn = append(r.DependsOn, deps...)
	}

	// At most one "lifecycle" and one "connection" block are permitted;
	// any other nested block type is reserved for future use.
	var seenLifecycle *hcl.Block
	var seenConnection *hcl.Block
	for _, block := range content.Blocks {
		switch block.Type {
		case "lifecycle":
			if seenLifecycle != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary: "Duplicate lifecycle block",
					Detail: fmt.Sprintf("This resource already has a lifecycle block at %s.", seenLifecycle.DefRange),
					Subject: &block.DefRange,
				})
				continue
			}
			seenLifecycle = block

			lcContent, lcDiags := block.Body.Content(resourceLifecycleBlockSchema)
			diags = append(diags, lcDiags...)

			if attr, exists := lcContent.Attributes["create_before_destroy"]; exists {
				valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.CreateBeforeDestroy)
				diags = append(diags, valDiags...)
				r.Managed.CreateBeforeDestroySet = true
			}

			if attr, exists := lcContent.Attributes["prevent_destroy"]; exists {
				valDiags := gohcl.DecodeExpression(attr.Expr, nil, &r.Managed.PreventDestroy)
				diags = append(diags, valDiags...)
				r.Managed.PreventDestroySet = true
			}

			if attr, exists := lcContent.Attributes["ignore_changes"]; exists {

				// ignore_changes can either be a list of relative traversals
				// or it can be just the keyword "all" to ignore changes to this
				// resource entirely.
				// ignore_changes = [ami, instance_type]
				// ignore_changes = all
				// We also allow two legacy forms for compatibility with earlier
				// versions:
				// ignore_changes = ["ami", "instance_type"]
				// ignore_changes = ["*"]

				kw := hcl.ExprAsKeyword(attr.Expr)

				switch {
				case kw == "all":
					r.Managed.IgnoreAllChanges = true
				default:
					exprs, listDiags := hcl.ExprList(attr.Expr)
					diags = append(diags, listDiags...)

					var ignoreAllRange hcl.Range

					for _, expr := range exprs {

						// our expr might be the literal string "*", which
						// we accept as a deprecated way of saying "all".
						if shimIsIgnoreChangesStar(expr) {
							r.Managed.IgnoreAllChanges = true
							ignoreAllRange = expr.Range()
							diags = append(diags, &hcl.Diagnostic{
								Severity: hcl.DiagWarning,
								Summary: "Deprecated ignore_changes wildcard",
								Detail: "The [\"*\"] form of ignore_changes wildcard is deprecated. Use \"ignore_changes = all\" to ignore changes to all attributes.",
								Subject: attr.Expr.Range().Ptr(),
							})
							continue
						}

						expr, shimDiags := shimTraversalInString(expr, false)
						diags = append(diags, shimDiags...)

						traversal, travDiags := hcl.RelTraversalForExpr(expr)
						diags = append(diags, travDiags...)
						if len(traversal) != 0 {
							r.Managed.IgnoreChanges = append(r.Managed.IgnoreChanges, traversal)
						}
					}

					// The wildcard and specific traversals are mutually
					// exclusive; mixing them is rejected.
					if r.Managed.IgnoreAllChanges && len(r.Managed.IgnoreChanges) != 0 {
						diags = append(diags, &hcl.Diagnostic{
							Severity: hcl.DiagError,
							Summary: "Invalid ignore_changes ruleset",
							Detail: "Cannot mix wildcard string \"*\" with non-wildcard references.",
							Subject: &ignoreAllRange,
							Context: attr.Expr.Range().Ptr(),
						})
					}

				}

			}

		case "connection":
			if seenConnection != nil {
				diags = append(diags, &hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary: "Duplicate connection block",
					Detail: fmt.Sprintf("This resource already has a connection block at %s.", seenConnection.DefRange),
					Subject: &block.DefRange,
				})
				continue
			}
			seenConnection = block

			// The connection body is kept raw; it is decoded later once the
			// relevant schema is known.
			r.Managed.Connection = &Connection{
				Config: block.Body,
				DeclRange: block.DefRange,
			}

		case "provisioner":
			pv, pvDiags := decodeProvisionerBlock(block)
			diags = append(diags, pvDiags...)
			if pv != nil {
				r.Managed.Provisioners = append(r.Managed.Provisioners, pv)
			}

		default:
			// Any other block types are ones we've reserved for future use,
			// so they get a generic message.
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary: "Reserved block type name in resource block",
				Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
				Subject: &block.TypeRange,
			})
		}
	}

	return r, diags
}
267
268func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
269 r := &Resource{
270 Mode: addrs.DataResourceMode,
271 Type: block.Labels[0],
272 Name: block.Labels[1],
273 DeclRange: block.DefRange,
274 TypeRange: block.LabelRanges[0],
275 }
276
277 content, remain, diags := block.Body.PartialContent(dataBlockSchema)
278 r.Config = remain
279
280 if !hclsyntax.ValidIdentifier(r.Type) {
281 diags = append(diags, &hcl.Diagnostic{
282 Severity: hcl.DiagError,
283 Summary: "Invalid data source name",
284 Detail: badIdentifierDetail,
285 Subject: &block.LabelRanges[0],
286 })
287 }
288 if !hclsyntax.ValidIdentifier(r.Name) {
289 diags = append(diags, &hcl.Diagnostic{
290 Severity: hcl.DiagError,
291 Summary: "Invalid data resource name",
292 Detail: badIdentifierDetail,
293 Subject: &block.LabelRanges[1],
294 })
295 }
296
297 if attr, exists := content.Attributes["count"]; exists {
298 r.Count = attr.Expr
299 }
300
301 if attr, exists := content.Attributes["for_each"]; exists {
302 r.ForEach = attr.Expr
303 // We currently parse this, but don't yet do anything with it.
304 diags = append(diags, &hcl.Diagnostic{
305 Severity: hcl.DiagError,
306 Summary: "Reserved argument name in module block",
307 Detail: fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
308 Subject: &attr.NameRange,
309 })
310 }
311
312 if attr, exists := content.Attributes["provider"]; exists {
313 var providerDiags hcl.Diagnostics
314 r.ProviderConfigRef, providerDiags = decodeProviderConfigRef(attr.Expr, "provider")
315 diags = append(diags, providerDiags...)
316 }
317
318 if attr, exists := content.Attributes["depends_on"]; exists {
319 deps, depsDiags := decodeDependsOn(attr)
320 diags = append(diags, depsDiags...)
321 r.DependsOn = append(r.DependsOn, deps...)
322 }
323
324 for _, block := range content.Blocks {
325 // All of the block types we accept are just reserved for future use, but some get a specialized error message.
326 switch block.Type {
327 case "lifecycle":
328 diags = append(diags, &hcl.Diagnostic{
329 Severity: hcl.DiagError,
330 Summary: "Unsupported lifecycle block",
331 Detail: "Data resources do not have lifecycle settings, so a lifecycle block is not allowed.",
332 Subject: &block.DefRange,
333 })
334 default:
335 diags = append(diags, &hcl.Diagnostic{
336 Severity: hcl.DiagError,
337 Summary: "Reserved block type name in data block",
338 Detail: fmt.Sprintf("The block type name %q is reserved for use by Terraform in a future version.", block.Type),
339 Subject: &block.TypeRange,
340 })
341 }
342 }
343
344 return r, diags
345}
346
// ProviderConfigRef is a reference to a provider configuration, as given in
// a "provider" argument (e.g. provider = aws.west).
type ProviderConfigRef struct {
	Name string // provider type name
	NameRange hcl.Range // source range of the name part
	Alias string // configuration alias; empty if none given
	AliasRange *hcl.Range // nil if alias not set
}
353
// decodeProviderConfigRef decodes an expression of the form "type" or
// "type.alias" into a ProviderConfigRef. argName names the argument being
// decoded, for use in error messages. A nil result is returned when the
// expression cannot be understood as a provider reference at all.
func decodeProviderConfigRef(expr hcl.Expression, argName string) (*ProviderConfigRef, hcl.Diagnostics) {
	var diags hcl.Diagnostics

	// Unwrap the legacy quoted form (e.g. "aws.west") via the shim so the
	// traversal analysis below sees a bare reference.
	var shimDiags hcl.Diagnostics
	expr, shimDiags = shimTraversalInString(expr, false)
	diags = append(diags, shimDiags...)

	traversal, travDiags := hcl.AbsTraversalForExpr(expr)

	// AbsTraversalForExpr produces only generic errors, so we'll discard
	// the errors given and produce our own with extra context. If we didn't
	// get any errors then we might still have warnings, though.
	if !travDiags.HasErrors() {
		diags = append(diags, travDiags...)
	}

	if len(traversal) < 1 || len(traversal) > 2 {
		// A provider reference was given as a string literal in the legacy
		// configuration language and there are lots of examples out there
		// showing that usage, so we'll sniff for that situation here and
		// produce a specialized error message for it to help users find
		// the new correct form.
		if exprIsNativeQuotedString(expr) {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary: "Invalid provider configuration reference",
				Detail: "A provider configuration reference must not be given in quotes.",
				Subject: expr.Range().Ptr(),
			})
			return nil, diags
		}

		diags = append(diags, &hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary: "Invalid provider configuration reference",
			Detail: fmt.Sprintf("The %s argument requires a provider type name, optionally followed by a period and then a configuration alias.", argName),
			Subject: expr.Range().Ptr(),
		})
		return nil, diags
	}

	ret := &ProviderConfigRef{
		Name: traversal.RootName(),
		NameRange: traversal[0].SourceRange(),
	}

	if len(traversal) > 1 {
		// A second traversal step, when present, must be a plain attribute
		// access naming the alias (e.g. aws.west).
		aliasStep, ok := traversal[1].(hcl.TraverseAttr)
		if !ok {
			diags = append(diags, &hcl.Diagnostic{
				Severity: hcl.DiagError,
				Summary: "Invalid provider configuration reference",
				Detail: "Provider name must either stand alone or be followed by a period and then a configuration alias.",
				Subject: traversal[1].SourceRange().Ptr(),
			})
			return ret, diags
		}

		ret.Alias = aliasStep.Name
		ret.AliasRange = aliasStep.SourceRange().Ptr()
	}

	return ret, diags
}
418
419// Addr returns the provider config address corresponding to the receiving
420// config reference.
421//
422// This is a trivial conversion, essentially just discarding the source
423// location information and keeping just the addressing information.
424func (r *ProviderConfigRef) Addr() addrs.ProviderConfig {
425 return addrs.ProviderConfig{
426 Type: r.Name,
427 Alias: r.Alias,
428 }
429}
430
431func (r *ProviderConfigRef) String() string {
432 if r == nil {
433 return "<nil>"
434 }
435 if r.Alias != "" {
436 return fmt.Sprintf("%s.%s", r.Name, r.Alias)
437 }
438 return r.Name
439}
440
441var commonResourceAttributes = []hcl.AttributeSchema{
442 {
443 Name: "count",
444 },
445 {
446 Name: "for_each",
447 },
448 {
449 Name: "provider",
450 },
451 {
452 Name: "depends_on",
453 },
454}
455
// resourceBlockSchema describes the parts of a managed "resource" block
// handled directly by decodeResourceBlock; everything else remains for the
// provider's resource type schema.
var resourceBlockSchema = &hcl.BodySchema{
	Attributes: commonResourceAttributes,
	Blocks: []hcl.BlockHeaderSchema{
		{Type: "locals"}, // reserved for future use
		{Type: "lifecycle"},
		{Type: "connection"},
		{Type: "provisioner", LabelNames: []string{"type"}},
	},
}
465
// dataBlockSchema describes the parts of a "data" block handled directly by
// decodeDataBlock; everything else remains for the provider's schema.
var dataBlockSchema = &hcl.BodySchema{
	Attributes: commonResourceAttributes,
	Blocks: []hcl.BlockHeaderSchema{
		{Type: "lifecycle"}, // reserved for future use
		{Type: "locals"}, // reserved for future use
	},
}
473
474var resourceLifecycleBlockSchema = &hcl.BodySchema{
475 Attributes: []hcl.AttributeSchema{
476 {
477 Name: "create_before_destroy",
478 },
479 {
480 Name: "prevent_destroy",
481 },
482 {
483 Name: "ignore_changes",
484 },
485 },
486}
diff --git a/vendor/github.com/hashicorp/terraform/configs/synth_body.go b/vendor/github.com/hashicorp/terraform/configs/synth_body.go
new file mode 100644
index 0000000..3ae1bff
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/synth_body.go
@@ -0,0 +1,118 @@
1package configs
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/hcl2/hcl/hclsyntax"
8 "github.com/zclconf/go-cty/cty"
9)
10
11// SynthBody produces a synthetic hcl.Body that behaves as if it had attributes
12// corresponding to the elements given in the values map.
13//
14// This is useful in situations where, for example, values provided on the
15// command line can override values given in configuration, using MergeBodies.
16//
17// The given filename is used in case any diagnostics are returned. Since
18// the created body is synthetic, it is likely that this will not be a "real"
19// filename. For example, if from a command line argument it could be
20// a representation of that argument's name, such as "-var=...".
21func SynthBody(filename string, values map[string]cty.Value) hcl.Body {
22 return synthBody{
23 Filename: filename,
24 Values: values,
25 }
26}
27
// synthBody is the hcl.Body implementation returned by SynthBody.
type synthBody struct {
	Filename string // used for all synthetic source ranges
	Values map[string]cty.Value // the attribute values this body exposes
}
32
33func (b synthBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
34 content, remain, diags := b.PartialContent(schema)
35 remainS := remain.(synthBody)
36 for name := range remainS.Values {
37 diags = append(diags, &hcl.Diagnostic{
38 Severity: hcl.DiagError,
39 Summary: "Unsupported attribute",
40 Detail: fmt.Sprintf("An attribute named %q is not expected here.", name),
41 Subject: b.synthRange().Ptr(),
42 })
43 }
44 return content, diags
45}
46
47func (b synthBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
48 var diags hcl.Diagnostics
49 content := &hcl.BodyContent{
50 Attributes: make(hcl.Attributes),
51 MissingItemRange: b.synthRange(),
52 }
53
54 remainValues := make(map[string]cty.Value)
55 for attrName, val := range b.Values {
56 remainValues[attrName] = val
57 }
58
59 for _, attrS := range schema.Attributes {
60 delete(remainValues, attrS.Name)
61 val, defined := b.Values[attrS.Name]
62 if !defined {
63 if attrS.Required {
64 diags = append(diags, &hcl.Diagnostic{
65 Severity: hcl.DiagError,
66 Summary: "Missing required attribute",
67 Detail: fmt.Sprintf("The attribute %q is required, but no definition was found.", attrS.Name),
68 Subject: b.synthRange().Ptr(),
69 })
70 }
71 continue
72 }
73 content.Attributes[attrS.Name] = b.synthAttribute(attrS.Name, val)
74 }
75
76 // We just ignore blocks altogether, because this body type never has
77 // nested blocks.
78
79 remain := synthBody{
80 Filename: b.Filename,
81 Values: remainValues,
82 }
83
84 return content, remain, diags
85}
86
87func (b synthBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
88 ret := make(hcl.Attributes)
89 for name, val := range b.Values {
90 ret[name] = b.synthAttribute(name, val)
91 }
92 return ret, nil
93}
94
// MissingItemRange implements hcl.Body, returning the synthetic source range
// since this body has no real source location.
func (b synthBody) MissingItemRange() hcl.Range {
	return b.synthRange()
}
98
99func (b synthBody) synthAttribute(name string, val cty.Value) *hcl.Attribute {
100 rng := b.synthRange()
101 return &hcl.Attribute{
102 Name: name,
103 Expr: &hclsyntax.LiteralValueExpr{
104 Val: val,
105 SrcRange: rng,
106 },
107 NameRange: rng,
108 Range: rng,
109 }
110}
111
112func (b synthBody) synthRange() hcl.Range {
113 return hcl.Range{
114 Filename: b.Filename,
115 Start: hcl.Pos{Line: 1, Column: 1},
116 End: hcl.Pos{Line: 1, Column: 1},
117 }
118}
diff --git a/vendor/github.com/hashicorp/terraform/configs/util.go b/vendor/github.com/hashicorp/terraform/configs/util.go
new file mode 100644
index 0000000..5fbde43
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/util.go
@@ -0,0 +1,63 @@
1package configs
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcl/hclsyntax"
6)
7
8// exprIsNativeQuotedString determines whether the given expression looks like
9// it's a quoted string in the HCL native syntax.
10//
11// This should be used sparingly only for situations where our legacy HCL
12// decoding would've expected a keyword or reference in quotes but our new
13// decoding expects the keyword or reference to be provided directly as
14// an identifier-based expression.
15func exprIsNativeQuotedString(expr hcl.Expression) bool {
16 _, ok := expr.(*hclsyntax.TemplateExpr)
17 return ok
18}
19
20// schemaForOverrides takes a *hcl.BodySchema and produces a new one that is
21// equivalent except that any required attributes are forced to not be required.
22//
23// This is useful for dealing with "override" config files, which are allowed
24// to omit things that they don't wish to override from the main configuration.
25//
26// The returned schema may have some pointers in common with the given schema,
27// so neither the given schema nor the returned schema should be modified after
28// using this function in order to avoid confusion.
29//
30// Overrides are rarely used, so it's recommended to just create the override
31// schema on the fly only when it's needed, rather than storing it in a global
32// variable as we tend to do for a primary schema.
33func schemaForOverrides(schema *hcl.BodySchema) *hcl.BodySchema {
34 ret := &hcl.BodySchema{
35 Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)),
36 Blocks: schema.Blocks,
37 }
38
39 for i, attrS := range schema.Attributes {
40 ret.Attributes[i] = attrS
41 ret.Attributes[i].Required = false
42 }
43
44 return ret
45}
46
47// schemaWithDynamic takes a *hcl.BodySchema and produces a new one that
48// is equivalent except that it accepts an additional block type "dynamic" with
49// a single label, used to recognize usage of the HCL dynamic block extension.
50func schemaWithDynamic(schema *hcl.BodySchema) *hcl.BodySchema {
51 ret := &hcl.BodySchema{
52 Attributes: schema.Attributes,
53 Blocks: make([]hcl.BlockHeaderSchema, len(schema.Blocks), len(schema.Blocks)+1),
54 }
55
56 copy(ret.Blocks, schema.Blocks)
57 ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{
58 Type: "dynamic",
59 LabelNames: []string{"type"},
60 })
61
62 return ret
63}
diff --git a/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go
new file mode 100644
index 0000000..204efd1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/variable_type_hint.go
@@ -0,0 +1,45 @@
1package configs
2
3// VariableTypeHint is an enumeration used for the Variable.TypeHint field,
4// which is an incompletely-specified type for the variable which is used
5// as a hint for whether a value provided in an ambiguous context (on the
6// command line or in an environment variable) should be taken literally as a
7// string or parsed as an HCL expression to produce a data structure.
8//
9// The type hint is applied to runtime values as well, but since it does not
10// accurately describe a precise type it is not fully-sufficient to infer
11// the dynamic type of a value passed through a variable.
12//
13// These hints use inaccurate terminology for historical reasons. Full details
14// are in the documentation for each constant in this enumeration, but in
15// summary:
16//
17// TypeHintString requires a primitive type
18// TypeHintList requires a type that could be converted to a tuple
19// TypeHintMap requires a type that could be converted to an object
20type VariableTypeHint rune
21
22//go:generate stringer -type VariableTypeHint
23
24// TypeHintNone indicates the absence of a type hint. Values specified in
25// ambiguous contexts will be treated as literal strings, as if TypeHintString
26// were selected, but no runtime value checks will be applied. This is a
27// reasonable type hint for a module that is never intended to be used at the
28// top-level of a configuration, since descendant modules never receive values
29// from ambiguous contexts.
30const TypeHintNone VariableTypeHint = 0
31
32// TypeHintString indicates that a value provided in an ambiguous context
33// should be treated as a literal string, and additionally requires that the
34// runtime value for the variable is of a primitive type (string, number, bool).
35const TypeHintString VariableTypeHint = 'S'
36
37// TypeHintList indicates that a value provided in an ambiguous context should
38// be treated as an HCL expression, and additionally requires that the
39// runtime value for the variable is of a tuple, list, or set type.
40const TypeHintList VariableTypeHint = 'L'
41
42// TypeHintMap indicates that a value provided in an ambiguous context should
43// be treated as an HCL expression, and additionally requires that the
44// runtime value for the variable is of an object or map type.
45const TypeHintMap VariableTypeHint = 'M'
diff --git a/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go
new file mode 100644
index 0000000..2b50428
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/variabletypehint_string.go
@@ -0,0 +1,39 @@
1// Code generated by "stringer -type VariableTypeHint"; DO NOT EDIT.
2
3package configs
4
5import "strconv"
6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[TypeHintNone-0]
12 _ = x[TypeHintString-83]
13 _ = x[TypeHintList-76]
14 _ = x[TypeHintMap-77]
15}
16
17const (
18 _VariableTypeHint_name_0 = "TypeHintNone"
19 _VariableTypeHint_name_1 = "TypeHintListTypeHintMap"
20 _VariableTypeHint_name_2 = "TypeHintString"
21)
22
23var (
24 _VariableTypeHint_index_1 = [...]uint8{0, 12, 23}
25)
26
27func (i VariableTypeHint) String() string {
28 switch {
29 case i == 0:
30 return _VariableTypeHint_name_0
31 case 76 <= i && i <= 77:
32 i -= 76
33 return _VariableTypeHint_name_1[_VariableTypeHint_index_1[i]:_VariableTypeHint_index_1[i+1]]
34 case i == 83:
35 return _VariableTypeHint_name_2
36 default:
37 return "VariableTypeHint(" + strconv.FormatInt(int64(i), 10) + ")"
38 }
39}
diff --git a/vendor/github.com/hashicorp/terraform/configs/version_constraint.go b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
new file mode 100644
index 0000000..7aa19ef
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/configs/version_constraint.go
@@ -0,0 +1,64 @@
1package configs
2
3import (
4 "fmt"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/convert"
10)
11
12// VersionConstraint represents a version constraint on some resource
13// (e.g. Terraform Core, a provider, a module, ...) that carries with it
14// a source range so that a helpful diagnostic can be printed in the event
15// that a particular constraint does not match.
16type VersionConstraint struct {
17 Required version.Constraints
18 DeclRange hcl.Range
19}
20
21func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagnostics) {
22 ret := VersionConstraint{
23 DeclRange: attr.Range,
24 }
25
26 val, diags := attr.Expr.Value(nil)
27 if diags.HasErrors() {
28 return ret, diags
29 }
30 var err error
31 val, err = convert.Convert(val, cty.String)
32 if err != nil {
33 diags = append(diags, &hcl.Diagnostic{
34 Severity: hcl.DiagError,
35 Summary: "Invalid version constraint",
36 Detail: fmt.Sprintf("A string value is required for %s.", attr.Name),
37 Subject: attr.Expr.Range().Ptr(),
38 })
39 return ret, diags
40 }
41
42 if val.IsNull() {
43 // A null version constraint is strange, but we'll just treat it
44 // like an empty constraint set.
45 return ret, diags
46 }
47
48 constraintStr := val.AsString()
49 constraints, err := version.NewConstraint(constraintStr)
50 if err != nil {
51 // NewConstraint doesn't return user-friendly errors, so we'll just
52 // ignore the provided error and produce our own generic one.
53 diags = append(diags, &hcl.Diagnostic{
54 Severity: hcl.DiagError,
55 Summary: "Invalid version constraint",
56 Detail: "This string does not use correct version constraint syntax.", // Not very actionable :(
57 Subject: attr.Expr.Range().Ptr(),
58 })
59 return ret, diags
60 }
61
62 ret.Required = constraints
63 return ret, diags
64}
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
index b7eb10c..77c67ef 100644
--- a/vendor/github.com/hashicorp/terraform/dag/dag.go
+++ b/vendor/github.com/hashicorp/terraform/dag/dag.go
@@ -5,6 +5,8 @@ import (
5 "sort" 5 "sort"
6 "strings" 6 "strings"
7 7
8 "github.com/hashicorp/terraform/tfdiags"
9
8 "github.com/hashicorp/go-multierror" 10 "github.com/hashicorp/go-multierror"
9) 11)
10 12
@@ -15,7 +17,7 @@ type AcyclicGraph struct {
15} 17}
16 18
17// WalkFunc is the callback used for walking the graph. 19// WalkFunc is the callback used for walking the graph.
18type WalkFunc func(Vertex) error 20type WalkFunc func(Vertex) tfdiags.Diagnostics
19 21
20// DepthWalkFunc is a walk function that also receives the current depth of the 22// DepthWalkFunc is a walk function that also receives the current depth of the
21// walk as an argument 23// walk as an argument
@@ -161,9 +163,9 @@ func (g *AcyclicGraph) Cycles() [][]Vertex {
161} 163}
162 164
163// Walk walks the graph, calling your callback as each node is visited. 165// Walk walks the graph, calling your callback as each node is visited.
164// This will walk nodes in parallel if it can. Because the walk is done 166// This will walk nodes in parallel if it can. The resulting diagnostics
165// in parallel, the error returned will be a multierror. 167// contains problems from all graphs visited, in no particular order.
166func (g *AcyclicGraph) Walk(cb WalkFunc) error { 168func (g *AcyclicGraph) Walk(cb WalkFunc) tfdiags.Diagnostics {
167 defer g.debug.BeginOperation(typeWalk, "").End("") 169 defer g.debug.BeginOperation(typeWalk, "").End("")
168 170
169 w := &Walker{Callback: cb, Reverse: true} 171 w := &Walker{Callback: cb, Reverse: true}
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
index f03b100..1c926c2 100644
--- a/vendor/github.com/hashicorp/terraform/dag/walk.go
+++ b/vendor/github.com/hashicorp/terraform/dag/walk.go
@@ -2,12 +2,11 @@ package dag
2 2
3import ( 3import (
4 "errors" 4 "errors"
5 "fmt"
6 "log" 5 "log"
7 "sync" 6 "sync"
8 "time" 7 "time"
9 8
10 "github.com/hashicorp/go-multierror" 9 "github.com/hashicorp/terraform/tfdiags"
11) 10)
12 11
13// Walker is used to walk every vertex of a graph in parallel. 12// Walker is used to walk every vertex of a graph in parallel.
@@ -54,10 +53,15 @@ type Walker struct {
54 // if new vertices are added. 53 // if new vertices are added.
55 wait sync.WaitGroup 54 wait sync.WaitGroup
56 55
57 // errMap contains the errors recorded so far for execution. Reading 56 // diagsMap contains the diagnostics recorded so far for execution,
58 // and writing should hold errLock. 57 // and upstreamFailed contains all the vertices whose problems were
59 errMap map[Vertex]error 58 // caused by upstream failures, and thus whose diagnostics should be
60 errLock sync.Mutex 59 // excluded from the final set.
60 //
61 // Readers and writers of either map must hold diagsLock.
62 diagsMap map[Vertex]tfdiags.Diagnostics
63 upstreamFailed map[Vertex]struct{}
64 diagsLock sync.Mutex
61} 65}
62 66
63type walkerVertex struct { 67type walkerVertex struct {
@@ -98,31 +102,30 @@ type walkerVertex struct {
98// user-returned error. 102// user-returned error.
99var errWalkUpstream = errors.New("upstream dependency failed") 103var errWalkUpstream = errors.New("upstream dependency failed")
100 104
101// Wait waits for the completion of the walk and returns any errors ( 105// Wait waits for the completion of the walk and returns diagnostics describing
102// in the form of a multierror) that occurred. Update should be called 106// any problems that arose. Update should be called to populate the walk with
103// to populate the walk with vertices and edges prior to calling this. 107// vertices and edges prior to calling this.
104// 108//
105// Wait will return as soon as all currently known vertices are complete. 109// Wait will return as soon as all currently known vertices are complete.
106// If you plan on calling Update with more vertices in the future, you 110// If you plan on calling Update with more vertices in the future, you
107// should not call Wait until after this is done. 111// should not call Wait until after this is done.
108func (w *Walker) Wait() error { 112func (w *Walker) Wait() tfdiags.Diagnostics {
109 // Wait for completion 113 // Wait for completion
110 w.wait.Wait() 114 w.wait.Wait()
111 115
112 // Grab the error lock 116 var diags tfdiags.Diagnostics
113 w.errLock.Lock() 117 w.diagsLock.Lock()
114 defer w.errLock.Unlock() 118 for v, vDiags := range w.diagsMap {
115 119 if _, upstream := w.upstreamFailed[v]; upstream {
116 // Build the error 120 // Ignore diagnostics for nodes that had failed upstreams, since
117 var result error 121 // the downstream diagnostics are likely to be redundant.
118 for v, err := range w.errMap { 122 continue
119 if err != nil && err != errWalkUpstream {
120 result = multierror.Append(result, fmt.Errorf(
121 "%s: %s", VertexName(v), err))
122 } 123 }
124 diags = diags.Append(vDiags)
123 } 125 }
126 w.diagsLock.Unlock()
124 127
125 return result 128 return diags
126} 129}
127 130
128// Update updates the currently executing walk with the given graph. 131// Update updates the currently executing walk with the given graph.
@@ -136,6 +139,7 @@ func (w *Walker) Wait() error {
136// Multiple Updates can be called in parallel. Update can be called at any 139// Multiple Updates can be called in parallel. Update can be called at any
137// time during a walk. 140// time during a walk.
138func (w *Walker) Update(g *AcyclicGraph) { 141func (w *Walker) Update(g *AcyclicGraph) {
142 log.Print("[TRACE] dag/walk: updating graph")
139 var v, e *Set 143 var v, e *Set
140 if g != nil { 144 if g != nil {
141 v, e = g.vertices, g.edges 145 v, e = g.vertices, g.edges
@@ -381,25 +385,34 @@ func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
381 } 385 }
382 386
383 // Run our callback or note that our upstream failed 387 // Run our callback or note that our upstream failed
384 var err error 388 var diags tfdiags.Diagnostics
389 var upstreamFailed bool
385 if depsSuccess { 390 if depsSuccess {
386 log.Printf("[TRACE] dag/walk: walking %q", VertexName(v)) 391 log.Printf("[TRACE] dag/walk: visiting %q", VertexName(v))
387 err = w.Callback(v) 392 diags = w.Callback(v)
388 } else { 393 } else {
389 log.Printf("[TRACE] dag/walk: upstream errored, not walking %q", VertexName(v)) 394 log.Printf("[TRACE] dag/walk: upstream of %q errored, so skipping", VertexName(v))
390 err = errWalkUpstream 395 // This won't be displayed to the user because we'll set upstreamFailed,
396 // but we need to ensure there's at least one error in here so that
397 // the failures will cascade downstream.
398 diags = diags.Append(errors.New("upstream dependencies failed"))
399 upstreamFailed = true
391 } 400 }
392 401
393 // Record the error 402 // Record the result (we must do this after execution because we mustn't
394 if err != nil { 403 // hold diagsLock while visiting a vertex.)
395 w.errLock.Lock() 404 w.diagsLock.Lock()
396 defer w.errLock.Unlock() 405 if w.diagsMap == nil {
397 406 w.diagsMap = make(map[Vertex]tfdiags.Diagnostics)
398 if w.errMap == nil { 407 }
399 w.errMap = make(map[Vertex]error) 408 w.diagsMap[v] = diags
400 } 409 if w.upstreamFailed == nil {
401 w.errMap[v] = err 410 w.upstreamFailed = make(map[Vertex]struct{})
402 } 411 }
412 if upstreamFailed {
413 w.upstreamFailed[v] = struct{}{}
414 }
415 w.diagsLock.Unlock()
403} 416}
404 417
405func (w *Walker) waitDeps( 418func (w *Walker) waitDeps(
@@ -407,6 +420,7 @@ func (w *Walker) waitDeps(
407 deps map[Vertex]<-chan struct{}, 420 deps map[Vertex]<-chan struct{},
408 doneCh chan<- bool, 421 doneCh chan<- bool,
409 cancelCh <-chan struct{}) { 422 cancelCh <-chan struct{}) {
423
410 // For each dependency given to us, wait for it to complete 424 // For each dependency given to us, wait for it to complete
411 for dep, depCh := range deps { 425 for dep, depCh := range deps {
412 DepSatisfied: 426 DepSatisfied:
@@ -423,17 +437,17 @@ func (w *Walker) waitDeps(
423 return 437 return
424 438
425 case <-time.After(time.Second * 5): 439 case <-time.After(time.Second * 5):
426 log.Printf("[TRACE] dag/walk: vertex %q, waiting for: %q", 440 log.Printf("[TRACE] dag/walk: vertex %q is waiting for %q",
427 VertexName(v), VertexName(dep)) 441 VertexName(v), VertexName(dep))
428 } 442 }
429 } 443 }
430 } 444 }
431 445
432 // Dependencies satisfied! We need to check if any errored 446 // Dependencies satisfied! We need to check if any errored
433 w.errLock.Lock() 447 w.diagsLock.Lock()
434 defer w.errLock.Unlock() 448 defer w.diagsLock.Unlock()
435 for dep, _ := range deps { 449 for dep := range deps {
436 if w.errMap[dep] != nil { 450 if w.diagsMap[dep].HasErrors() {
437 // One of our dependencies failed, so return false 451 // One of our dependencies failed, so return false
438 doneCh <- false 452 doneCh <- false
439 return 453 return
diff --git a/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go
new file mode 100644
index 0000000..54899bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/didyoumean/name_suggestion.go
@@ -0,0 +1,24 @@
1package didyoumean
2
3import (
4 "github.com/agext/levenshtein"
5)
6
7// NameSuggestion tries to find a name from the given slice of suggested names
8// that is close to the given name and returns it if found. If no suggestion
9// is close enough, returns the empty string.
10//
11// The suggestions are tried in order, so earlier suggestions take precedence
12// if the given string is similar to two or more suggestions.
13//
14// This function is intended to be used with a relatively-small number of
15// suggestions. It's not optimized for hundreds or thousands of them.
16func NameSuggestion(given string, suggestions []string) string {
17 for _, suggestion := range suggestions {
18 dist := levenshtein.Distance(given, suggestion, nil)
19 if dist < 3 { // threshold determined experimentally
20 return suggestion
21 }
22 }
23 return ""
24}
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go b/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go
new file mode 100644
index 0000000..82b5937
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/doc.go
@@ -0,0 +1,6 @@
1// Package plugin contains types and functions to help Terraform plugins
2// implement the plugin rpc interface.
3// The primary Provider type will be responsible for converting from the grpc
4// wire protocol to the types and methods known to the provider
5// implementations.
6package plugin
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
new file mode 100644
index 0000000..510f47f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
@@ -0,0 +1,1338 @@
1package plugin
2
3import (
4 "encoding/json"
5 "errors"
6 "fmt"
7 "log"
8 "strconv"
9
10 "github.com/zclconf/go-cty/cty"
11 ctyconvert "github.com/zclconf/go-cty/cty/convert"
12 "github.com/zclconf/go-cty/cty/msgpack"
13 context "golang.org/x/net/context"
14
15 "github.com/hashicorp/terraform/config/hcl2shim"
16 "github.com/hashicorp/terraform/configs/configschema"
17 "github.com/hashicorp/terraform/helper/schema"
18 proto "github.com/hashicorp/terraform/internal/tfplugin5"
19 "github.com/hashicorp/terraform/plugin/convert"
20 "github.com/hashicorp/terraform/terraform"
21)
22
23const newExtraKey = "_new_extra_shim"
24
25// NewGRPCProviderServerShim wraps a terraform.ResourceProvider in a
26// proto.ProviderServer implementation. If the provided provider is not a
27// *schema.Provider, this will return nil.
28func NewGRPCProviderServerShim(p terraform.ResourceProvider) *GRPCProviderServer {
29 sp, ok := p.(*schema.Provider)
30 if !ok {
31 return nil
32 }
33
34 return &GRPCProviderServer{
35 provider: sp,
36 }
37}
38
39// GRPCProviderServer handles the server, or plugin side of the rpc connection.
40type GRPCProviderServer struct {
41 provider *schema.Provider
42}
43
44func (s *GRPCProviderServer) GetSchema(_ context.Context, req *proto.GetProviderSchema_Request) (*proto.GetProviderSchema_Response, error) {
45 // Here we are certain that the provider is being called through grpc, so
46 // make sure the feature flag for helper/schema is set
47 schema.SetProto5()
48
49 resp := &proto.GetProviderSchema_Response{
50 ResourceSchemas: make(map[string]*proto.Schema),
51 DataSourceSchemas: make(map[string]*proto.Schema),
52 }
53
54 resp.Provider = &proto.Schema{
55 Block: convert.ConfigSchemaToProto(s.getProviderSchemaBlock()),
56 }
57
58 for typ, res := range s.provider.ResourcesMap {
59 resp.ResourceSchemas[typ] = &proto.Schema{
60 Version: int64(res.SchemaVersion),
61 Block: convert.ConfigSchemaToProto(res.CoreConfigSchema()),
62 }
63 }
64
65 for typ, dat := range s.provider.DataSourcesMap {
66 resp.DataSourceSchemas[typ] = &proto.Schema{
67 Version: int64(dat.SchemaVersion),
68 Block: convert.ConfigSchemaToProto(dat.CoreConfigSchema()),
69 }
70 }
71
72 return resp, nil
73}
74
75func (s *GRPCProviderServer) getProviderSchemaBlock() *configschema.Block {
76 return schema.InternalMap(s.provider.Schema).CoreConfigSchema()
77}
78
79func (s *GRPCProviderServer) getResourceSchemaBlock(name string) *configschema.Block {
80 res := s.provider.ResourcesMap[name]
81 return res.CoreConfigSchema()
82}
83
84func (s *GRPCProviderServer) getDatasourceSchemaBlock(name string) *configschema.Block {
85 dat := s.provider.DataSourcesMap[name]
86 return dat.CoreConfigSchema()
87}
88
89func (s *GRPCProviderServer) PrepareProviderConfig(_ context.Context, req *proto.PrepareProviderConfig_Request) (*proto.PrepareProviderConfig_Response, error) {
90 resp := &proto.PrepareProviderConfig_Response{}
91
92 schemaBlock := s.getProviderSchemaBlock()
93
94 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
95 if err != nil {
96 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
97 return resp, nil
98 }
99
100 // lookup any required, top-level attributes that are Null, and see if we
101 // have a Default value available.
102 configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) {
103 // we're only looking for top-level attributes
104 if len(path) != 1 {
105 return val, nil
106 }
107
108 // nothing to do if we already have a value
109 if !val.IsNull() {
110 return val, nil
111 }
112
113 // get the Schema definition for this attribute
114 getAttr, ok := path[0].(cty.GetAttrStep)
115 // these should all exist, but just ignore anything strange
116 if !ok {
117 return val, nil
118 }
119
120 attrSchema := s.provider.Schema[getAttr.Name]
121 // continue to ignore anything that doesn't match
122 if attrSchema == nil {
123 return val, nil
124 }
125
126 // this is deprecated, so don't set it
127 if attrSchema.Deprecated != "" || attrSchema.Removed != "" {
128 return val, nil
129 }
130
131 // find a default value if it exists
132 def, err := attrSchema.DefaultValue()
133 if err != nil {
134 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error getting default for %q: %s", getAttr.Name, err))
135 return val, err
136 }
137
138 // no default
139 if def == nil {
140 return val, nil
141 }
142
143 // create a cty.Value and make sure it's the correct type
144 tmpVal := hcl2shim.HCL2ValueFromConfigValue(def)
145
146 // helper/schema used to allow setting "" to a bool
147 if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) {
148 // return a warning about the conversion
149 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, "provider set empty string as default value for bool "+getAttr.Name)
150 tmpVal = cty.False
151 }
152
153 val, err = ctyconvert.Convert(tmpVal, val.Type())
154 if err != nil {
155 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, fmt.Errorf("error setting default for %q: %s", getAttr.Name, err))
156 }
157
158 return val, err
159 })
160 if err != nil {
161 // any error here was already added to the diagnostics
162 return resp, nil
163 }
164
165 configVal, err = schemaBlock.CoerceValue(configVal)
166 if err != nil {
167 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
168 return resp, nil
169 }
170
171 // Ensure there are no nulls that will cause helper/schema to panic.
172 if err := validateConfigNulls(configVal, nil); err != nil {
173 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
174 return resp, nil
175 }
176
177 config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)
178
179 warns, errs := s.provider.Validate(config)
180 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))
181
182 preparedConfigMP, err := msgpack.Marshal(configVal, schemaBlock.ImpliedType())
183 if err != nil {
184 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
185 return resp, nil
186 }
187
188 resp.PreparedConfig = &proto.DynamicValue{Msgpack: preparedConfigMP}
189
190 return resp, nil
191}
192
193func (s *GRPCProviderServer) ValidateResourceTypeConfig(_ context.Context, req *proto.ValidateResourceTypeConfig_Request) (*proto.ValidateResourceTypeConfig_Response, error) {
194 resp := &proto.ValidateResourceTypeConfig_Response{}
195
196 schemaBlock := s.getResourceSchemaBlock(req.TypeName)
197
198 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
199 if err != nil {
200 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
201 return resp, nil
202 }
203
204 config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)
205
206 warns, errs := s.provider.ValidateResource(req.TypeName, config)
207 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))
208
209 return resp, nil
210}
211
212func (s *GRPCProviderServer) ValidateDataSourceConfig(_ context.Context, req *proto.ValidateDataSourceConfig_Request) (*proto.ValidateDataSourceConfig_Response, error) {
213 resp := &proto.ValidateDataSourceConfig_Response{}
214
215 schemaBlock := s.getDatasourceSchemaBlock(req.TypeName)
216
217 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
218 if err != nil {
219 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
220 return resp, nil
221 }
222
223 // Ensure there are no nulls that will cause helper/schema to panic.
224 if err := validateConfigNulls(configVal, nil); err != nil {
225 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
226 return resp, nil
227 }
228
229 config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)
230
231 warns, errs := s.provider.ValidateDataSource(req.TypeName, config)
232 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))
233
234 return resp, nil
235}
236
237func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.UpgradeResourceState_Request) (*proto.UpgradeResourceState_Response, error) {
238 resp := &proto.UpgradeResourceState_Response{}
239
240 res := s.provider.ResourcesMap[req.TypeName]
241 schemaBlock := s.getResourceSchemaBlock(req.TypeName)
242
243 version := int(req.Version)
244
245 jsonMap := map[string]interface{}{}
246 var err error
247
248 switch {
249 // We first need to upgrade a flatmap state if it exists.
250 // There should never be both a JSON and Flatmap state in the request.
251 case len(req.RawState.Flatmap) > 0:
252 jsonMap, version, err = s.upgradeFlatmapState(version, req.RawState.Flatmap, res)
253 if err != nil {
254 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
255 return resp, nil
256 }
257 // if there's a JSON state, we need to decode it.
258 case len(req.RawState.Json) > 0:
259 err = json.Unmarshal(req.RawState.Json, &jsonMap)
260 if err != nil {
261 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
262 return resp, nil
263 }
264 default:
265 log.Println("[DEBUG] no state provided to upgrade")
266 return resp, nil
267 }
268
269 // complete the upgrade of the JSON states
270 jsonMap, err = s.upgradeJSONState(version, jsonMap, res)
271 if err != nil {
272 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
273 return resp, nil
274 }
275
276 // The provider isn't required to clean out removed fields
277 s.removeAttributes(jsonMap, schemaBlock.ImpliedType())
278
279 // now we need to turn the state into the default json representation, so
280 // that it can be re-decoded using the actual schema.
281 val, err := schema.JSONMapToStateValue(jsonMap, schemaBlock)
282 if err != nil {
283 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
284 return resp, nil
285 }
286
287 // encode the final state to the expected msgpack format
288 newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType())
289 if err != nil {
290 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
291 return resp, nil
292 }
293
294 resp.UpgradedState = &proto.DynamicValue{Msgpack: newStateMP}
295 return resp, nil
296}
297
298// upgradeFlatmapState takes a legacy flatmap state, upgrades it using Migrate
299// state if necessary, and converts it to the new JSON state format decoded as a
300// map[string]interface{}.
301// upgradeFlatmapState returns the json map along with the corresponding schema
302// version.
303func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]string, res *schema.Resource) (map[string]interface{}, int, error) {
304 // this will be the version we've upgraded to, defaulting to the given
305 // version in case no migration was called.
306 upgradedVersion := version
307
308 // first determine if we need to call the legacy MigrateState func
309 requiresMigrate := version < res.SchemaVersion
310
311 schemaType := res.CoreConfigSchema().ImpliedType()
312
313 // if there are any StateUpgraders, then we need to only compare
314 // against the first version there
315 if len(res.StateUpgraders) > 0 {
316 requiresMigrate = version < res.StateUpgraders[0].Version
317 }
318
319 if requiresMigrate {
320 if res.MigrateState == nil {
321 return nil, 0, errors.New("cannot upgrade state, missing MigrateState function")
322 }
323
324 is := &terraform.InstanceState{
325 ID: m["id"],
326 Attributes: m,
327 Meta: map[string]interface{}{
328 "schema_version": strconv.Itoa(version),
329 },
330 }
331
332 is, err := res.MigrateState(version, is, s.provider.Meta())
333 if err != nil {
334 return nil, 0, err
335 }
336
337 // re-assign the map in case there was a copy made, making sure to keep
338 // the ID
339 m := is.Attributes
340 m["id"] = is.ID
341
342 // if there are further upgraders, then we've only updated that far
343 if len(res.StateUpgraders) > 0 {
344 schemaType = res.StateUpgraders[0].Type
345 upgradedVersion = res.StateUpgraders[0].Version
346 }
347 } else {
348 // the schema version may be newer than the MigrateState functions
349 // handled and older than the current, but still stored in the flatmap
350 // form. If that's the case, we need to find the correct schema type to
351 // convert the state.
352 for _, upgrader := range res.StateUpgraders {
353 if upgrader.Version == version {
354 schemaType = upgrader.Type
355 break
356 }
357 }
358 }
359
360 // now we know the state is up to the latest version that handled the
361 // flatmap format state. Now we can upgrade the format and continue from
362 // there.
363 newConfigVal, err := hcl2shim.HCL2ValueFromFlatmap(m, schemaType)
364 if err != nil {
365 return nil, 0, err
366 }
367
368 jsonMap, err := schema.StateValueToJSONMap(newConfigVal, schemaType)
369 return jsonMap, upgradedVersion, err
370}
371
372func (s *GRPCProviderServer) upgradeJSONState(version int, m map[string]interface{}, res *schema.Resource) (map[string]interface{}, error) {
373 var err error
374
375 for _, upgrader := range res.StateUpgraders {
376 if version != upgrader.Version {
377 continue
378 }
379
380 m, err = upgrader.Upgrade(m, s.provider.Meta())
381 if err != nil {
382 return nil, err
383 }
384 version++
385 }
386
387 return m, nil
388}
389
// removeAttributes removes any attributes no longer present in the schema
// from the decoded JSON state (in place), so that the json can be correctly
// decoded into the current schema's implied type.
func (s *GRPCProviderServer) removeAttributes(v interface{}, ty cty.Type) {
	// we're only concerned with finding maps that correspond to object
	// attributes
	switch v := v.(type) {
	case []interface{}:
		// If these aren't blocks the next call will be a noop
		if ty.IsListType() || ty.IsSetType() {
			eTy := ty.ElementType()
			for _, eV := range v {
				s.removeAttributes(eV, eTy)
			}
		}
		return
	case map[string]interface{}:
		// map blocks aren't yet supported, but handle this just in case
		if ty.IsMapType() {
			eTy := ty.ElementType()
			for _, eV := range v {
				s.removeAttributes(eV, eTy)
			}
			return
		}

		// a dynamic type can hold anything, so there is nothing to prune
		if ty == cty.DynamicPseudoType {
			log.Printf("[DEBUG] ignoring dynamic block: %#v\n", v)
			return
		}

		if !ty.IsObjectType() {
			// This shouldn't happen, and will fail to decode further on, so
			// there's no need to handle it here.
			log.Printf("[WARN] unexpected type %#v for map in json state", ty)
			return
		}

		attrTypes := ty.AttributeTypes()
		for attr, attrV := range v {
			attrTy, ok := attrTypes[attr]
			if !ok {
				// attribute was removed from the schema; deleting a map
				// entry during range is safe in Go
				log.Printf("[DEBUG] attribute %q no longer present in schema", attr)
				delete(v, attr)
				continue
			}

			s.removeAttributes(attrV, attrTy)
		}
	}
}
440
441func (s *GRPCProviderServer) Stop(_ context.Context, _ *proto.Stop_Request) (*proto.Stop_Response, error) {
442 resp := &proto.Stop_Response{}
443
444 err := s.provider.Stop()
445 if err != nil {
446 resp.Error = err.Error()
447 }
448
449 return resp, nil
450}
451
452func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_Request) (*proto.Configure_Response, error) {
453 resp := &proto.Configure_Response{}
454
455 schemaBlock := s.getProviderSchemaBlock()
456
457 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
458 if err != nil {
459 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
460 return resp, nil
461 }
462
463 s.provider.TerraformVersion = req.TerraformVersion
464
465 // Ensure there are no nulls that will cause helper/schema to panic.
466 if err := validateConfigNulls(configVal, nil); err != nil {
467 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
468 return resp, nil
469 }
470
471 config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)
472 err = s.provider.Configure(config)
473 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
474
475 return resp, nil
476}
477
// ReadResource refreshes a resource's state through the legacy provider
// Refresh call and returns the result in the new protocol's msgpack encoding.
func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) {
	resp := &proto.ReadResource_Response{}

	res := s.provider.ResourcesMap[req.TypeName]
	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	// decode the current state from the wire format
	stateVal, err := msgpack.Unmarshal(req.CurrentState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// shim the cty value into a legacy flatmap InstanceState for the old
	// Refresh API
	instanceState, err := res.ShimInstanceStateFromValue(stateVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	if newInstanceState == nil || newInstanceState.ID == "" {
		// The old provider API used an empty id to signal that the remote
		// object appears to have been deleted, but our new protocol expects
		// to see a null value (in the cty sense) in that case.
		newStateMP, err := msgpack.Marshal(cty.NullVal(schemaBlock.ImpliedType()), schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		}
		resp.NewState = &proto.DynamicValue{
			Msgpack: newStateMP,
		}
		return resp, nil
	}

	// helper/schema should always copy the ID over, but do it again just to be safe
	newInstanceState.Attributes["id"] = newInstanceState.ID

	// convert the refreshed flatmap attributes back into a cty value
	newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(newInstanceState.Attributes, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// reconcile null/empty discrepancies against the prior state, and carry
	// the timeouts block over since helper/schema strips it
	newStateVal = normalizeNullValues(newStateVal, stateVal, false)
	newStateVal = copyTimeoutValues(newStateVal, stateVal)

	newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	resp.NewState = &proto.DynamicValue{
		Msgpack: newStateMP,
	}

	return resp, nil
}
540
541func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.PlanResourceChange_Request) (*proto.PlanResourceChange_Response, error) {
542 resp := &proto.PlanResourceChange_Response{}
543
544 // This is a signal to Terraform Core that we're doing the best we can to
545 // shim the legacy type system of the SDK onto the Terraform type system
546 // but we need it to cut us some slack. This setting should not be taken
547 // forward to any new SDK implementations, since setting it prevents us
548 // from catching certain classes of provider bug that can lead to
549 // confusing downstream errors.
550 resp.LegacyTypeSystem = true
551
552 res := s.provider.ResourcesMap[req.TypeName]
553 schemaBlock := s.getResourceSchemaBlock(req.TypeName)
554
555 priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType())
556 if err != nil {
557 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
558 return resp, nil
559 }
560
561 create := priorStateVal.IsNull()
562
563 proposedNewStateVal, err := msgpack.Unmarshal(req.ProposedNewState.Msgpack, schemaBlock.ImpliedType())
564 if err != nil {
565 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
566 return resp, nil
567 }
568
569 // We don't usually plan destroys, but this can return early in any case.
570 if proposedNewStateVal.IsNull() {
571 resp.PlannedState = req.ProposedNewState
572 return resp, nil
573 }
574
575 info := &terraform.InstanceInfo{
576 Type: req.TypeName,
577 }
578
579 priorState, err := res.ShimInstanceStateFromValue(priorStateVal)
580 if err != nil {
581 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
582 return resp, nil
583 }
584 priorPrivate := make(map[string]interface{})
585 if len(req.PriorPrivate) > 0 {
586 if err := json.Unmarshal(req.PriorPrivate, &priorPrivate); err != nil {
587 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
588 return resp, nil
589 }
590 }
591
592 priorState.Meta = priorPrivate
593
594 // Ensure there are no nulls that will cause helper/schema to panic.
595 if err := validateConfigNulls(proposedNewStateVal, nil); err != nil {
596 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
597 return resp, nil
598 }
599
600 // turn the proposed state into a legacy configuration
601 cfg := terraform.NewResourceConfigShimmed(proposedNewStateVal, schemaBlock)
602
603 diff, err := s.provider.SimpleDiff(info, priorState, cfg)
604 if err != nil {
605 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
606 return resp, nil
607 }
608
609 // if this is a new instance, we need to make sure ID is going to be computed
610 if create {
611 if diff == nil {
612 diff = terraform.NewInstanceDiff()
613 }
614
615 diff.Attributes["id"] = &terraform.ResourceAttrDiff{
616 NewComputed: true,
617 }
618 }
619
620 if diff == nil || len(diff.Attributes) == 0 {
621 // schema.Provider.Diff returns nil if it ends up making a diff with no
622 // changes, but our new interface wants us to return an actual change
623 // description that _shows_ there are no changes. This is always the
624 // prior state, because we force a diff above if this is a new instance.
625 resp.PlannedState = req.PriorState
626 return resp, nil
627 }
628
629 if priorState == nil {
630 priorState = &terraform.InstanceState{}
631 }
632
633 // now we need to apply the diff to the prior state, so get the planned state
634 plannedAttrs, err := diff.Apply(priorState.Attributes, schemaBlock)
635
636 plannedStateVal, err := hcl2shim.HCL2ValueFromFlatmap(plannedAttrs, schemaBlock.ImpliedType())
637 if err != nil {
638 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
639 return resp, nil
640 }
641
642 plannedStateVal, err = schemaBlock.CoerceValue(plannedStateVal)
643 if err != nil {
644 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
645 return resp, nil
646 }
647
648 plannedStateVal = normalizeNullValues(plannedStateVal, proposedNewStateVal, false)
649
650 if err != nil {
651 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
652 return resp, nil
653 }
654
655 plannedStateVal = copyTimeoutValues(plannedStateVal, proposedNewStateVal)
656
657 // The old SDK code has some imprecisions that cause it to sometimes
658 // generate differences that the SDK itself does not consider significant
659 // but Terraform Core would. To avoid producing weird do-nothing diffs
660 // in that case, we'll check if the provider as produced something we
661 // think is "equivalent" to the prior state and just return the prior state
662 // itself if so, thus ensuring that Terraform Core will treat this as
663 // a no-op. See the docs for ValuesSDKEquivalent for some caveats on its
664 // accuracy.
665 forceNoChanges := false
666 if hcl2shim.ValuesSDKEquivalent(priorStateVal, plannedStateVal) {
667 plannedStateVal = priorStateVal
668 forceNoChanges = true
669 }
670
671 // if this was creating the resource, we need to set any remaining computed
672 // fields
673 if create {
674 plannedStateVal = SetUnknowns(plannedStateVal, schemaBlock)
675 }
676
677 plannedMP, err := msgpack.Marshal(plannedStateVal, schemaBlock.ImpliedType())
678 if err != nil {
679 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
680 return resp, nil
681 }
682 resp.PlannedState = &proto.DynamicValue{
683 Msgpack: plannedMP,
684 }
685
686 // Now we need to store any NewExtra values, which are where any actual
687 // StateFunc modified config fields are hidden.
688 privateMap := diff.Meta
689 if privateMap == nil {
690 privateMap = map[string]interface{}{}
691 }
692
693 newExtra := map[string]interface{}{}
694
695 for k, v := range diff.Attributes {
696 if v.NewExtra != nil {
697 newExtra[k] = v.NewExtra
698 }
699 }
700 privateMap[newExtraKey] = newExtra
701
702 // the Meta field gets encoded into PlannedPrivate
703 plannedPrivate, err := json.Marshal(privateMap)
704 if err != nil {
705 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
706 return resp, nil
707 }
708 resp.PlannedPrivate = plannedPrivate
709
710 // collect the attributes that require instance replacement, and convert
711 // them to cty.Paths.
712 var requiresNew []string
713 if !forceNoChanges {
714 for attr, d := range diff.Attributes {
715 if d.RequiresNew {
716 requiresNew = append(requiresNew, attr)
717 }
718 }
719 }
720
721 // If anything requires a new resource already, or the "id" field indicates
722 // that we will be creating a new resource, then we need to add that to
723 // RequiresReplace so that core can tell if the instance is being replaced
724 // even if changes are being suppressed via "ignore_changes".
725 id := plannedStateVal.GetAttr("id")
726 if len(requiresNew) > 0 || id.IsNull() || !id.IsKnown() {
727 requiresNew = append(requiresNew, "id")
728 }
729
730 requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schemaBlock.ImpliedType())
731 if err != nil {
732 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
733 return resp, nil
734 }
735
736 // convert these to the protocol structures
737 for _, p := range requiresReplace {
738 resp.RequiresReplace = append(resp.RequiresReplace, pathToAttributePath(p))
739 }
740
741 return resp, nil
742}
743
// ApplyResourceChange applies a planned change by re-deriving a legacy
// InstanceDiff and passing it through the old provider Apply call, returning
// the resulting new state.
func (s *GRPCProviderServer) ApplyResourceChange(_ context.Context, req *proto.ApplyResourceChange_Request) (*proto.ApplyResourceChange_Response, error) {
	resp := &proto.ApplyResourceChange_Response{
		// Start with the existing state as a fallback
		NewState: req.PriorState,
	}

	res := s.provider.ResourcesMap[req.TypeName]
	schemaBlock := s.getResourceSchemaBlock(req.TypeName)

	priorStateVal, err := msgpack.Unmarshal(req.PriorState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	plannedStateVal, err := msgpack.Unmarshal(req.PlannedState.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	info := &terraform.InstanceInfo{
		Type: req.TypeName,
	}

	// shim the prior state into the legacy flatmap form for Apply
	priorState, err := res.ShimInstanceStateFromValue(priorStateVal)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// PlannedPrivate carries the diff Meta (including NewExtra values)
	// produced during plan
	private := make(map[string]interface{})
	if len(req.PlannedPrivate) > 0 {
		if err := json.Unmarshal(req.PlannedPrivate, &private); err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	}

	var diff *terraform.InstanceDiff
	destroy := false

	// a null state means we are destroying the instance
	if plannedStateVal.IsNull() {
		destroy = true
		diff = &terraform.InstanceDiff{
			Attributes: make(map[string]*terraform.ResourceAttrDiff),
			Meta:       make(map[string]interface{}),
			Destroy:    true,
		}
	} else {
		// re-derive the diff from the planned value; StateFuncs and
		// CustomizeDiffs were already applied during plan, so strip them
		diff, err = schema.DiffFromValues(priorStateVal, plannedStateVal, stripResourceModifiers(res))
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
	}

	if diff == nil {
		diff = &terraform.InstanceDiff{
			Attributes: make(map[string]*terraform.ResourceAttrDiff),
			Meta:       make(map[string]interface{}),
		}
	}

	// add NewExtra Fields that may have been stored in the private data
	if newExtra := private[newExtraKey]; newExtra != nil {
		for k, v := range newExtra.(map[string]interface{}) {
			d := diff.Attributes[k]

			if d == nil {
				d = &terraform.ResourceAttrDiff{}
			}

			d.NewExtra = v
			diff.Attributes[k] = d
		}
	}

	if private != nil {
		diff.Meta = private
	}

	for k, d := range diff.Attributes {
		// We need to turn off any RequiresNew. There could be attributes
		// without changes in here inserted by helper/schema, but if they have
		// RequiresNew then the state will be dropped from the ResourceData.
		d.RequiresNew = false

		// Drop any "removed" attributes that don't actually exist in the
		// prior state, or helper/schema will confuse itself
		if d.NewRemoved {
			if _, ok := priorState.Attributes[k]; !ok {
				delete(diff.Attributes, k)
			}
		}
	}

	newInstanceState, err := s.provider.Apply(info, priorState, diff)
	// we record the error here, but continue processing any returned state.
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
	}
	newStateVal := cty.NullVal(schemaBlock.ImpliedType())

	// Always return a null value for destroy.
	// While this is usually indicated by a nil state, check for missing ID or
	// attributes in the case of a provider failure.
	if destroy || newInstanceState == nil || newInstanceState.Attributes == nil || newInstanceState.ID == "" {
		newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}
		resp.NewState = &proto.DynamicValue{
			Msgpack: newStateMP,
		}
		return resp, nil
	}

	// We keep the null val if we destroyed the resource, otherwise build the
	// entire object, even if the new state was nil.
	newStateVal, err = schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// reconcile null/empty discrepancies against the planned value, and
	// restore the timeouts block that helper/schema strips
	newStateVal = normalizeNullValues(newStateVal, plannedStateVal, true)

	newStateVal = copyTimeoutValues(newStateVal, plannedStateVal)

	newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.NewState = &proto.DynamicValue{
		Msgpack: newStateMP,
	}

	// the legacy InstanceState Meta is carried over in the Private field
	meta, err := json.Marshal(newInstanceState.Meta)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.Private = meta

	// This is a signal to Terraform Core that we're doing the best we can to
	// shim the legacy type system of the SDK onto the Terraform type system
	// but we need it to cut us some slack. This setting should not be taken
	// forward to any new SDK implementations, since setting it prevents us
	// from catching certain classes of provider bug that can lead to
	// confusing downstream errors.
	resp.LegacyTypeSystem = true

	return resp, nil
}
902
// ImportResourceState runs the provider's importer for the requested ID and
// converts each resulting legacy InstanceState into a protocol
// ImportedResource.
func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.ImportResourceState_Request) (*proto.ImportResourceState_Response, error) {
	resp := &proto.ImportResourceState_Response{}

	info := &terraform.InstanceInfo{
		Type: req.TypeName,
	}

	newInstanceStates, err := s.provider.ImportState(info, req.Id)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	for _, is := range newInstanceStates {
		// copy the ID again just to be sure it wasn't missed
		is.Attributes["id"] = is.ID

		// importers may return instances of other resource types; fall back
		// to the requested type when none is specified
		resourceType := is.Ephemeral.Type
		if resourceType == "" {
			resourceType = req.TypeName
		}

		schemaBlock := s.getResourceSchemaBlock(resourceType)
		newStateVal, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}

		newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}

		// the legacy InstanceState Meta is carried in the Private field
		meta, err := json.Marshal(is.Meta)
		if err != nil {
			resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
			return resp, nil
		}

		importedResource := &proto.ImportResourceState_ImportedResource{
			TypeName: resourceType,
			State: &proto.DynamicValue{
				Msgpack: newStateMP,
			},
			Private: meta,
		}

		resp.ImportedResources = append(resp.ImportedResources, importedResource)
	}

	return resp, nil
}
957
// ReadDataSource reads a data source by building a legacy diff with
// ReadDataDiff and then applying it with ReadDataApply, mirroring the old
// provider behavior.
func (s *GRPCProviderServer) ReadDataSource(_ context.Context, req *proto.ReadDataSource_Request) (*proto.ReadDataSource_Response, error) {
	resp := &proto.ReadDataSource_Response{}

	schemaBlock := s.getDatasourceSchemaBlock(req.TypeName)

	configVal, err := msgpack.Unmarshal(req.Config.Msgpack, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	info := &terraform.InstanceInfo{
		Type: req.TypeName,
	}

	// Ensure there are no nulls that will cause helper/schema to panic.
	if err := validateConfigNulls(configVal, nil); err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	config := terraform.NewResourceConfigShimmed(configVal, schemaBlock)

	// we need to still build the diff separately with the Read method to match
	// the old behavior
	diff, err := s.provider.ReadDataDiff(info, config)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// now we can get the new complete data source
	newInstanceState, err := s.provider.ReadDataApply(info, diff)
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	newStateVal, err := schema.StateValueFromInstanceState(newInstanceState, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}

	// carry the timeouts block over from the config, since helper/schema
	// strips it from the state
	newStateVal = copyTimeoutValues(newStateVal, configVal)

	newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
	if err != nil {
		resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
		return resp, nil
	}
	resp.State = &proto.DynamicValue{
		Msgpack: newStateMP,
	}
	return resp, nil
}
1014
1015func pathToAttributePath(path cty.Path) *proto.AttributePath {
1016 var steps []*proto.AttributePath_Step
1017
1018 for _, step := range path {
1019 switch s := step.(type) {
1020 case cty.GetAttrStep:
1021 steps = append(steps, &proto.AttributePath_Step{
1022 Selector: &proto.AttributePath_Step_AttributeName{
1023 AttributeName: s.Name,
1024 },
1025 })
1026 case cty.IndexStep:
1027 ty := s.Key.Type()
1028 switch ty {
1029 case cty.Number:
1030 i, _ := s.Key.AsBigFloat().Int64()
1031 steps = append(steps, &proto.AttributePath_Step{
1032 Selector: &proto.AttributePath_Step_ElementKeyInt{
1033 ElementKeyInt: i,
1034 },
1035 })
1036 case cty.String:
1037 steps = append(steps, &proto.AttributePath_Step{
1038 Selector: &proto.AttributePath_Step_ElementKeyString{
1039 ElementKeyString: s.Key.AsString(),
1040 },
1041 })
1042 }
1043 }
1044 }
1045
1046 return &proto.AttributePath{Steps: steps}
1047}
1048
1049// helper/schema throws away timeout values from the config and stores them in
1050// the Private/Meta fields. we need to copy those values into the planned state
1051// so that core doesn't see a perpetual diff with the timeout block.
1052func copyTimeoutValues(to cty.Value, from cty.Value) cty.Value {
1053 // if `to` is null we are planning to remove it altogether.
1054 if to.IsNull() {
1055 return to
1056 }
1057 toAttrs := to.AsValueMap()
1058 // We need to remove the key since the hcl2shims will add a non-null block
1059 // because we can't determine if a single block was null from the flatmapped
1060 // values. This needs to conform to the correct schema for marshaling, so
1061 // change the value to null rather than deleting it from the object map.
1062 timeouts, ok := toAttrs[schema.TimeoutsConfigKey]
1063 if ok {
1064 toAttrs[schema.TimeoutsConfigKey] = cty.NullVal(timeouts.Type())
1065 }
1066
1067 // if from is null then there are no timeouts to copy
1068 if from.IsNull() {
1069 return cty.ObjectVal(toAttrs)
1070 }
1071
1072 fromAttrs := from.AsValueMap()
1073 timeouts, ok = fromAttrs[schema.TimeoutsConfigKey]
1074
1075 // timeouts shouldn't be unknown, but don't copy possibly invalid values either
1076 if !ok || timeouts.IsNull() || !timeouts.IsWhollyKnown() {
1077 // no timeouts block to copy
1078 return cty.ObjectVal(toAttrs)
1079 }
1080
1081 toAttrs[schema.TimeoutsConfigKey] = timeouts
1082
1083 return cty.ObjectVal(toAttrs)
1084}
1085
1086// stripResourceModifiers takes a *schema.Resource and returns a deep copy with all
1087// StateFuncs and CustomizeDiffs removed. This will be used during apply to
1088// create a diff from a planned state where the diff modifications have already
1089// been applied.
1090func stripResourceModifiers(r *schema.Resource) *schema.Resource {
1091 if r == nil {
1092 return nil
1093 }
1094 // start with a shallow copy
1095 newResource := new(schema.Resource)
1096 *newResource = *r
1097
1098 newResource.CustomizeDiff = nil
1099 newResource.Schema = map[string]*schema.Schema{}
1100
1101 for k, s := range r.Schema {
1102 newResource.Schema[k] = stripSchema(s)
1103 }
1104
1105 return newResource
1106}
1107
1108func stripSchema(s *schema.Schema) *schema.Schema {
1109 if s == nil {
1110 return nil
1111 }
1112 // start with a shallow copy
1113 newSchema := new(schema.Schema)
1114 *newSchema = *s
1115
1116 newSchema.StateFunc = nil
1117
1118 switch e := newSchema.Elem.(type) {
1119 case *schema.Schema:
1120 newSchema.Elem = stripSchema(e)
1121 case *schema.Resource:
1122 newSchema.Elem = stripResourceModifiers(e)
1123 }
1124
1125 return newSchema
1126}
1127
// Zero values and empty containers may be interchanged by the apply process.
// When there is a discrepancy between src and dst value being null or empty,
// prefer the src value. This takes a little more liberty with set types, since
// we can't correlate modified set values. In the case of sets, if the src set
// was wholly known we assume the value was correctly applied and copy that
// entirely to the new value.
// While apply prefers the src value, during plan we prefer dst whenever there
// is an unknown or a set is involved, since the plan can alter the value
// however it sees fit. This however means that a CustomizeDiffFunction may not
// be able to change a null to an empty value or vice versa, but that should be
// very uncommon nor was it reliable before 0.12 either.
func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value {
	ty := dst.Type()
	if !src.IsNull() && !src.IsKnown() {
		// Return src during plan to retain unknown interpolated placeholders,
		// which could be lost if we're only updating a resource. If this is a
		// read scenario, then there shouldn't be any unknowns at all.
		if dst.IsNull() && !apply {
			return src
		}
		return dst
	}

	// Handle null/empty changes for collections during apply.
	// A change between null and empty values prefers src to make sure the state
	// is consistent between plan and apply.
	if ty.IsCollectionType() && apply {
		dstEmpty := !dst.IsNull() && dst.IsKnown() && dst.LengthInt() == 0
		srcEmpty := !src.IsNull() && src.IsKnown() && src.LengthInt() == 0

		if (src.IsNull() && dstEmpty) || (srcEmpty && dst.IsNull()) {
			return src
		}
	}

	// from here on src must be known and non-null, and dst known, for the
	// per-type comparisons below to be valid
	if src.IsNull() || !src.IsKnown() || !dst.IsKnown() {
		return dst
	}

	switch {
	case ty.IsMapType(), ty.IsObjectType():
		var dstMap map[string]cty.Value
		if !dst.IsNull() {
			dstMap = dst.AsValueMap()
		}
		if dstMap == nil {
			dstMap = map[string]cty.Value{}
		}

		srcMap := src.AsValueMap()
		for key, v := range srcMap {
			dstVal, ok := dstMap[key]
			if !ok && apply && ty.IsMapType() {
				// don't transfer old map values to dst during apply
				continue
			}

			if dstVal == cty.NilVal {
				if !apply && ty.IsMapType() {
					// let plan shape this map however it wants
					continue
				}
				dstVal = cty.NullVal(v.Type())
			}

			dstMap[key] = normalizeNullValues(dstVal, v, apply)
		}

		// you can't call MapVal/ObjectVal with empty maps, but nothing was
		// copied in anyway. If the dst is nil, and the src is known, assume the
		// src is correct.
		if len(dstMap) == 0 {
			if dst.IsNull() && src.IsWhollyKnown() && apply {
				return src
			}
			return dst
		}

		if ty.IsMapType() {
			// helper/schema will populate an optional+computed map with
			// unknowns which we have to fixup here.
			// It would be preferable to simply prevent any known value from
			// becoming unknown, but concessions have to be made to retain the
			// broken legacy behavior when possible.
			for k, srcVal := range srcMap {
				if !srcVal.IsNull() && srcVal.IsKnown() {
					dstVal, ok := dstMap[k]
					if !ok {
						continue
					}

					if !dstVal.IsNull() && !dstVal.IsKnown() {
						dstMap[k] = srcVal
					}
				}
			}

			return cty.MapVal(dstMap)
		}

		return cty.ObjectVal(dstMap)

	case ty.IsSetType():
		// If the original was wholly known, then we expect that is what the
		// provider applied. The apply process loses too much information to
		// reliably re-create the set.
		if src.IsWhollyKnown() && apply {
			return src
		}

	case ty.IsListType(), ty.IsTupleType():
		// If the dst is null, and the src is known, then we lost an empty value
		// so take the original.
		if dst.IsNull() {
			if src.IsWhollyKnown() && src.LengthInt() == 0 && apply {
				return src
			}

			// if dst is null and src only contains unknown values, then we lost
			// those during a read or plan.
			if !apply && !src.IsNull() {
				allUnknown := true
				for _, v := range src.AsValueSlice() {
					if v.IsKnown() {
						allUnknown = false
						break
					}
				}
				if allUnknown {
					return src
				}
			}

			return dst
		}

		// if the lengths are identical, then iterate over each element in succession.
		srcLen := src.LengthInt()
		dstLen := dst.LengthInt()
		if srcLen == dstLen && srcLen > 0 {
			srcs := src.AsValueSlice()
			dsts := dst.AsValueSlice()

			for i := 0; i < srcLen; i++ {
				dsts[i] = normalizeNullValues(dsts[i], srcs[i], apply)
			}

			if ty.IsTupleType() {
				return cty.TupleVal(dsts)
			}
			return cty.ListVal(dsts)
		}

	case ty.IsPrimitiveType():
		// a known applied primitive wins over a value lost to null
		if dst.IsNull() && src.IsWhollyKnown() && apply {
			return src
		}
	}

	return dst
}
1289
// validateConfigNulls checks a config value for unsupported nulls before
// attempting to shim the value. While null values can mostly be ignored in the
// configuration, since they're not supported in HCL1, the case where a null
// appears in a list-like attribute (list, set, tuple) will present a nil value
// to helper/schema which can panic. Return an error to the user in this case,
// indicating the attribute with the null value.
func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic {
	var diags []*proto.Diagnostic
	// null or unknown values cannot contain nested nulls we care about
	if v.IsNull() || !v.IsKnown() {
		return diags
	}

	switch {
	case v.Type().IsListType() || v.Type().IsSetType() || v.Type().IsTupleType():
		it := v.ElementIterator()
		for it.Next() {
			kv, ev := it.Element()
			if ev.IsNull() {
				diags = append(diags, &proto.Diagnostic{
					Severity:  proto.Diagnostic_ERROR,
					Summary:   "Null value found in list",
					Detail:    "Null values are not allowed for this attribute value.",
					Attribute: convert.PathToAttributePath(append(path, cty.IndexStep{Key: kv})),
				})
				continue
			}

			// recurse into non-null elements
			d := validateConfigNulls(ev, append(path, cty.IndexStep{Key: kv}))
			diags = convert.AppendProtoDiag(diags, d)
		}

	case v.Type().IsMapType() || v.Type().IsObjectType():
		it := v.ElementIterator()
		for it.Next() {
			kv, ev := it.Element()
			// maps are addressed by index key, objects by attribute name
			var step cty.PathStep
			switch {
			case v.Type().IsMapType():
				step = cty.IndexStep{Key: kv}
			case v.Type().IsObjectType():
				step = cty.GetAttrStep{Name: kv.AsString()}
			}
			d := validateConfigNulls(ev, append(path, step))
			diags = convert.AppendProtoDiag(diags, d)
		}
	}

	return diags
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go
new file mode 100644
index 0000000..14494e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provisioner.go
@@ -0,0 +1,147 @@
1package plugin
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/helper/schema"
7 proto "github.com/hashicorp/terraform/internal/tfplugin5"
8 "github.com/hashicorp/terraform/plugin/convert"
9 "github.com/hashicorp/terraform/terraform"
10 "github.com/zclconf/go-cty/cty"
11 ctyconvert "github.com/zclconf/go-cty/cty/convert"
12 "github.com/zclconf/go-cty/cty/msgpack"
13 context "golang.org/x/net/context"
14)
15
// NewGRPCProvisionerServerShim wraps a terraform.ResourceProvisioner in a
// proto.ProvisionerServer implementation. If the provided provisioner is not a
// *schema.Provisioner, this will return nil.
func NewGRPCProvisionerServerShim(p terraform.ResourceProvisioner) *GRPCProvisionerServer {
	// Only *schema.Provisioner carries the schema information the gRPC
	// protocol requires, so other implementations cannot be shimmed.
	sp, ok := p.(*schema.Provisioner)
	if !ok {
		return nil
	}
	return &GRPCProvisionerServer{
		provisioner: sp,
	}
}
28
// GRPCProvisionerServer adapts a legacy *schema.Provisioner to the gRPC
// provisioner protocol, serving GetSchema, ValidateProvisionerConfig,
// ProvisionResource, and Stop requests.
type GRPCProvisionerServer struct {
	// provisioner is the wrapped legacy provisioner all requests delegate to.
	provisioner *schema.Provisioner
}
32
33func (s *GRPCProvisionerServer) GetSchema(_ context.Context, req *proto.GetProvisionerSchema_Request) (*proto.GetProvisionerSchema_Response, error) {
34 resp := &proto.GetProvisionerSchema_Response{}
35
36 resp.Provisioner = &proto.Schema{
37 Block: convert.ConfigSchemaToProto(schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()),
38 }
39
40 return resp, nil
41}
42
43func (s *GRPCProvisionerServer) ValidateProvisionerConfig(_ context.Context, req *proto.ValidateProvisionerConfig_Request) (*proto.ValidateProvisionerConfig_Response, error) {
44 resp := &proto.ValidateProvisionerConfig_Response{}
45
46 cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()
47
48 configVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType())
49 if err != nil {
50 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
51 return resp, nil
52 }
53
54 config := terraform.NewResourceConfigShimmed(configVal, cfgSchema)
55
56 warns, errs := s.provisioner.Validate(config)
57 resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, convert.WarnsAndErrsToProto(warns, errs))
58
59 return resp, nil
60}
61
62// stringMapFromValue converts a cty.Value to a map[stirng]string.
63// This will panic if the val is not a cty.Map(cty.String).
64func stringMapFromValue(val cty.Value) map[string]string {
65 m := map[string]string{}
66 if val.IsNull() || !val.IsKnown() {
67 return m
68 }
69
70 for it := val.ElementIterator(); it.Next(); {
71 ak, av := it.Element()
72 name := ak.AsString()
73
74 if !av.IsKnown() || av.IsNull() {
75 continue
76 }
77
78 av, _ = ctyconvert.Convert(av, cty.String)
79 m[name] = av.AsString()
80 }
81
82 return m
83}
84
// uiOutput implements the terraform.UIOutput interface to adapt the grpc
// stream to the legacy Provisioner.Apply method.
type uiOutput struct {
	// srv is the open provision stream; each Output call is sent as one
	// streamed response message.
	srv proto.Provisioner_ProvisionResourceServer
}
90
91func (o uiOutput) Output(s string) {
92 err := o.srv.Send(&proto.ProvisionResource_Response{
93 Output: s,
94 })
95 if err != nil {
96 log.Printf("[ERROR] %s", err)
97 }
98}
99
100func (s *GRPCProvisionerServer) ProvisionResource(req *proto.ProvisionResource_Request, srv proto.Provisioner_ProvisionResourceServer) error {
101 // We send back a diagnostics over the stream if there was a
102 // provisioner-side problem.
103 srvResp := &proto.ProvisionResource_Response{}
104
105 cfgSchema := schema.InternalMap(s.provisioner.Schema).CoreConfigSchema()
106 cfgVal, err := msgpack.Unmarshal(req.Config.Msgpack, cfgSchema.ImpliedType())
107 if err != nil {
108 srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
109 srv.Send(srvResp)
110 return nil
111 }
112 resourceConfig := terraform.NewResourceConfigShimmed(cfgVal, cfgSchema)
113
114 connVal, err := msgpack.Unmarshal(req.Connection.Msgpack, cty.Map(cty.String))
115 if err != nil {
116 srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
117 srv.Send(srvResp)
118 return nil
119 }
120
121 conn := stringMapFromValue(connVal)
122
123 instanceState := &terraform.InstanceState{
124 Ephemeral: terraform.EphemeralState{
125 ConnInfo: conn,
126 },
127 Meta: make(map[string]interface{}),
128 }
129
130 err = s.provisioner.Apply(uiOutput{srv}, instanceState, resourceConfig)
131 if err != nil {
132 srvResp.Diagnostics = convert.AppendProtoDiag(srvResp.Diagnostics, err)
133 srv.Send(srvResp)
134 }
135 return nil
136}
137
138func (s *GRPCProvisionerServer) Stop(_ context.Context, req *proto.Stop_Request) (*proto.Stop_Response, error) {
139 resp := &proto.Stop_Response{}
140
141 err := s.provisioner.Stop()
142 if err != nil {
143 resp.Error = err.Error()
144 }
145
146 return resp, nil
147}
diff --git a/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go b/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go
new file mode 100644
index 0000000..64a6784
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/plugin/unknown.go
@@ -0,0 +1,131 @@
1package plugin
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/zclconf/go-cty/cty"
8)
9
// SetUnknowns takes a cty.Value, and compares it to the schema setting any null
// values which are computed to unknown.
func SetUnknowns(val cty.Value, schema *configschema.Block) cty.Value {
	// Unknown values cannot be inspected further; return them as-is.
	if !val.IsKnown() {
		return val
	}

	// If the object was null, we still need to handle the top level attributes
	// which might be computed, but we don't need to expand the blocks.
	if val.IsNull() {
		objMap := map[string]cty.Value{}
		allNull := true
		for name, attr := range schema.Attributes {
			switch {
			case attr.Computed:
				objMap[name] = cty.UnknownVal(attr.Type)
				allNull = false
			default:
				objMap[name] = cty.NullVal(attr.Type)
			}
		}

		// If this object has no unknown attributes, then we can leave it null.
		if allNull {
			return val
		}

		return cty.ObjectVal(objMap)
	}

	valMap := val.AsValueMap()
	newVals := make(map[string]cty.Value)

	// Computed attributes that are currently null become unknown; all other
	// attributes pass through unchanged.
	for name, attr := range schema.Attributes {
		v := valMap[name]

		if attr.Computed && v.IsNull() {
			newVals[name] = cty.UnknownVal(attr.Type)
			continue
		}

		newVals[name] = v
	}

	for name, blockS := range schema.BlockTypes {
		blockVal := valMap[name]
		// Null or unknown nested blocks are kept as-is; there is nothing to
		// expand inside them.
		if blockVal.IsNull() || !blockVal.IsKnown() {
			newVals[name] = blockVal
			continue
		}

		blockValType := blockVal.Type()
		blockElementType := blockS.Block.ImpliedType()

		// This switches on the value type here, so we can correctly switch
		// between Tuples/Lists and Maps/Objects.
		switch {
		case blockS.Nesting == configschema.NestingSingle || blockS.Nesting == configschema.NestingGroup:
			// NestingSingle is the only exception here, where we treat the
			// block directly as an object
			newVals[name] = SetUnknowns(blockVal, &blockS.Block)

		case blockValType.IsSetType(), blockValType.IsListType(), blockValType.IsTupleType():
			listVals := blockVal.AsValueSlice()
			newListVals := make([]cty.Value, 0, len(listVals))

			for _, v := range listVals {
				newListVals = append(newListVals, SetUnknowns(v, &blockS.Block))
			}

			// Rebuild the collection with the same kind as the input; empty
			// sets and lists need the element type spelled out explicitly.
			switch {
			case blockValType.IsSetType():
				switch len(newListVals) {
				case 0:
					newVals[name] = cty.SetValEmpty(blockElementType)
				default:
					newVals[name] = cty.SetVal(newListVals)
				}
			case blockValType.IsListType():
				switch len(newListVals) {
				case 0:
					newVals[name] = cty.ListValEmpty(blockElementType)
				default:
					newVals[name] = cty.ListVal(newListVals)
				}
			case blockValType.IsTupleType():
				newVals[name] = cty.TupleVal(newListVals)
			}

		case blockValType.IsMapType(), blockValType.IsObjectType():
			mapVals := blockVal.AsValueMap()
			newMapVals := make(map[string]cty.Value)

			for k, v := range mapVals {
				newMapVals[k] = SetUnknowns(v, &blockS.Block)
			}

			switch {
			case blockValType.IsMapType():
				switch len(newMapVals) {
				case 0:
					newVals[name] = cty.MapValEmpty(blockElementType)
				default:
					newVals[name] = cty.MapVal(newMapVals)
				}
			case blockValType.IsObjectType():
				if len(newMapVals) == 0 {
					// We need to populate empty values to make a valid object.
					for attr, ty := range blockElementType.AttributeTypes() {
						newMapVals[attr] = cty.NullVal(ty)
					}
				}
				newVals[name] = cty.ObjectVal(newMapVals)
			}

		default:
			panic(fmt.Sprintf("failed to set unknown values for nested block %q:%#v", name, blockValType))
		}
	}

	return cty.ObjectVal(newVals)
}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go
new file mode 100644
index 0000000..0742e99
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/grpc_test_provider.go
@@ -0,0 +1,43 @@
1package resource
2
3import (
4 "context"
5 "net"
6 "time"
7
8 "github.com/hashicorp/terraform/helper/plugin"
9 proto "github.com/hashicorp/terraform/internal/tfplugin5"
10 tfplugin "github.com/hashicorp/terraform/plugin"
11 "github.com/hashicorp/terraform/providers"
12 "github.com/hashicorp/terraform/terraform"
13 "google.golang.org/grpc"
14 "google.golang.org/grpc/test/bufconn"
15)
16
17// GRPCTestProvider takes a legacy ResourceProvider, wraps it in the new GRPC
18// shim and starts it in a grpc server using an inmem connection. It returns a
19// GRPCClient for this new server to test the shimmed resource provider.
20func GRPCTestProvider(rp terraform.ResourceProvider) providers.Interface {
21 listener := bufconn.Listen(256 * 1024)
22 grpcServer := grpc.NewServer()
23
24 p := plugin.NewGRPCProviderServerShim(rp)
25 proto.RegisterProviderServer(grpcServer, p)
26
27 go grpcServer.Serve(listener)
28
29 conn, err := grpc.Dial("", grpc.WithDialer(func(string, time.Duration) (net.Conn, error) {
30 return listener.Dial()
31 }), grpc.WithInsecure())
32 if err != nil {
33 panic(err)
34 }
35
36 var pp tfplugin.GRPCProviderPlugin
37 client, _ := pp.GRPCClient(context.Background(), nil, conn)
38
39 grpcClient := client.(*tfplugin.GRPCProvider)
40 grpcClient.TestServer = grpcServer
41
42 return grpcClient
43}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
index c34e21b..88a8396 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/state.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
@@ -38,7 +38,7 @@ type StateChangeConf struct {
38// specified in the configuration using the specified Refresh() func, 38// specified in the configuration using the specified Refresh() func,
39// waiting the number of seconds specified in the timeout configuration. 39// waiting the number of seconds specified in the timeout configuration.
40// 40//
41// If the Refresh function returns a error, exit immediately with that error. 41// If the Refresh function returns an error, exit immediately with that error.
42// 42//
43// If the Refresh function returns a state other than the Target state or one 43// If the Refresh function returns a state other than the Target state or one
44// listed in Pending, return immediately with an error. 44// listed in Pending, return immediately with an error.
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go
new file mode 100644
index 0000000..b2aff99
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go
@@ -0,0 +1,163 @@
1package resource
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/zclconf/go-cty/cty"
8
9 "github.com/hashicorp/terraform/config/hcl2shim"
10 "github.com/hashicorp/terraform/helper/schema"
11
12 "github.com/hashicorp/terraform/states"
13 "github.com/hashicorp/terraform/terraform"
14)
15
// shimNewState takes a new *states.State and reverts it to a legacy state for
// the provider ACC tests.
func shimNewState(newState *states.State, providers map[string]terraform.ResourceProvider) (*terraform.State, error) {
	state := terraform.NewState()

	// in the odd case of a nil state, let the helper packages handle it
	if newState == nil {
		return nil, nil
	}

	for _, newMod := range newState.Modules {
		mod := state.AddModule(newMod.Addr)

		for name, out := range newMod.OutputValues {
			// The legacy state only distinguishes string/list/map output
			// types; other types leave the type string empty.
			outputType := ""
			val := hcl2shim.ConfigValueFromHCL2(out.Value)
			ty := out.Value.Type()
			switch {
			case ty == cty.String:
				outputType = "string"
			case ty.IsTupleType() || ty.IsListType():
				outputType = "list"
			case ty.IsMapType():
				outputType = "map"
			}

			mod.Outputs[name] = &terraform.OutputState{
				Type:      outputType,
				Value:     val,
				Sensitive: out.Sensitive,
			}
		}

		for _, res := range newMod.Resources {
			resType := res.Addr.Type
			providerType := res.ProviderConfig.ProviderConfig.Type

			// The resource schema is needed to decode the stored attributes.
			resource := getResource(providers, providerType, res.Addr)

			for key, i := range res.Instances {
				flatmap, err := shimmedAttributes(i.Current, resource)
				if err != nil {
					return nil, fmt.Errorf("error decoding state for %q: %s", resType, err)
				}

				resState := &terraform.ResourceState{
					Type: resType,
					Primary: &terraform.InstanceState{
						ID:         flatmap["id"],
						Attributes: flatmap,
						Tainted:    i.Current.Status == states.ObjectTainted,
					},
					Provider: res.ProviderConfig.String(),
				}
				// A zero schema version is implicit in the legacy state, so
				// only non-zero versions are recorded in Meta.
				if i.Current.SchemaVersion != 0 {
					resState.Primary.Meta = map[string]interface{}{
						"schema_version": i.Current.SchemaVersion,
					}
				}

				for _, dep := range i.Current.Dependencies {
					resState.Dependencies = append(resState.Dependencies, dep.String())
				}

				// convert the indexes to the old style flatmap indexes
				idx := ""
				switch key.(type) {
				case addrs.IntKey:
					// don't add numeric index values to resources with a count of 0
					if len(res.Instances) > 1 {
						idx = fmt.Sprintf(".%d", key)
					}
				case addrs.StringKey:
					idx = "." + key.String()
				}

				mod.Resources[res.Addr.String()+idx] = resState

				// add any deposed instances
				for _, dep := range i.Deposed {
					flatmap, err := shimmedAttributes(dep, resource)
					if err != nil {
						return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err)
					}

					deposed := &terraform.InstanceState{
						ID:         flatmap["id"],
						Attributes: flatmap,
						Tainted:    dep.Status == states.ObjectTainted,
					}
					if dep.SchemaVersion != 0 {
						deposed.Meta = map[string]interface{}{
							"schema_version": dep.SchemaVersion,
						}
					}

					resState.Deposed = append(resState.Deposed, deposed)
				}
			}
		}
	}

	return state, nil
}
119
120func getResource(providers map[string]terraform.ResourceProvider, providerName string, addr addrs.Resource) *schema.Resource {
121 p := providers[providerName]
122 if p == nil {
123 panic(fmt.Sprintf("provider %q not found in test step", providerName))
124 }
125
126 // this is only for tests, so should only see schema.Providers
127 provider := p.(*schema.Provider)
128
129 switch addr.Mode {
130 case addrs.ManagedResourceMode:
131 resource := provider.ResourcesMap[addr.Type]
132 if resource != nil {
133 return resource
134 }
135 case addrs.DataResourceMode:
136 resource := provider.DataSourcesMap[addr.Type]
137 if resource != nil {
138 return resource
139 }
140 }
141
142 panic(fmt.Sprintf("resource %s not found in test step", addr.Type))
143}
144
145func shimmedAttributes(instance *states.ResourceInstanceObjectSrc, res *schema.Resource) (map[string]string, error) {
146 flatmap := instance.AttrsFlat
147 if flatmap != nil {
148 return flatmap, nil
149 }
150
151 // if we have json attrs, they need to be decoded
152 rio, err := instance.Decode(res.CoreConfigSchema().ImpliedType())
153 if err != nil {
154 return nil, err
155 }
156
157 instanceState, err := res.ShimInstanceStateFromValue(rio.Value)
158 if err != nil {
159 return nil, err
160 }
161
162 return instanceState.Attributes, nil
163}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
index b97673f..aa7454d 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
@@ -1,6 +1,7 @@
1package resource 1package resource
2 2
3import ( 3import (
4 "bytes"
4 "flag" 5 "flag"
5 "fmt" 6 "fmt"
6 "io" 7 "io"
@@ -18,9 +19,18 @@ import (
18 "github.com/hashicorp/errwrap" 19 "github.com/hashicorp/errwrap"
19 "github.com/hashicorp/go-multierror" 20 "github.com/hashicorp/go-multierror"
20 "github.com/hashicorp/logutils" 21 "github.com/hashicorp/logutils"
21 "github.com/hashicorp/terraform/config/module" 22 "github.com/mitchellh/colorstring"
23
24 "github.com/hashicorp/terraform/addrs"
25 "github.com/hashicorp/terraform/command/format"
26 "github.com/hashicorp/terraform/configs"
27 "github.com/hashicorp/terraform/configs/configload"
22 "github.com/hashicorp/terraform/helper/logging" 28 "github.com/hashicorp/terraform/helper/logging"
29 "github.com/hashicorp/terraform/internal/initwd"
30 "github.com/hashicorp/terraform/providers"
31 "github.com/hashicorp/terraform/states"
23 "github.com/hashicorp/terraform/terraform" 32 "github.com/hashicorp/terraform/terraform"
33 "github.com/hashicorp/terraform/tfdiags"
24) 34)
25 35
26// flagSweep is a flag available when running tests on the command line. It 36// flagSweep is a flag available when running tests on the command line. It
@@ -373,6 +383,10 @@ type TestStep struct {
373 // be refreshed and don't matter. 383 // be refreshed and don't matter.
374 ImportStateVerify bool 384 ImportStateVerify bool
375 ImportStateVerifyIgnore []string 385 ImportStateVerifyIgnore []string
386
387 // provider s is used internally to maintain a reference to the
388 // underlying providers during the tests
389 providers map[string]terraform.ResourceProvider
376} 390}
377 391
378// Set to a file mask in sprintf format where %s is test name 392// Set to a file mask in sprintf format where %s is test name
@@ -467,10 +481,22 @@ func Test(t TestT, c TestCase) {
467 c.PreCheck() 481 c.PreCheck()
468 } 482 }
469 483
484 // get instances of all providers, so we can use the individual
485 // resources to shim the state during the tests.
486 providers := make(map[string]terraform.ResourceProvider)
487 for name, pf := range testProviderFactories(c) {
488 p, err := pf()
489 if err != nil {
490 t.Fatal(err)
491 }
492 providers[name] = p
493 }
494
470 providerResolver, err := testProviderResolver(c) 495 providerResolver, err := testProviderResolver(c)
471 if err != nil { 496 if err != nil {
472 t.Fatal(err) 497 t.Fatal(err)
473 } 498 }
499
474 opts := terraform.ContextOpts{ProviderResolver: providerResolver} 500 opts := terraform.ContextOpts{ProviderResolver: providerResolver}
475 501
476 // A single state variable to track the lifecycle, starting with no state 502 // A single state variable to track the lifecycle, starting with no state
@@ -481,6 +507,10 @@ func Test(t TestT, c TestCase) {
481 idRefresh := c.IDRefreshName != "" 507 idRefresh := c.IDRefreshName != ""
482 errored := false 508 errored := false
483 for i, step := range c.Steps { 509 for i, step := range c.Steps {
510 // insert the providers into the step so we can get the resources for
511 // shimming the state
512 step.providers = providers
513
484 var err error 514 var err error
485 log.Printf("[DEBUG] Test: Executing step %d", i) 515 log.Printf("[DEBUG] Test: Executing step %d", i)
486 516
@@ -535,8 +565,7 @@ func Test(t TestT, c TestCase) {
535 } 565 }
536 } else { 566 } else {
537 errored = true 567 errored = true
538 t.Error(fmt.Sprintf( 568 t.Error(fmt.Sprintf("Step %d error: %s", i, detailedErrorMessage(err)))
539 "Step %d error: %s", i, err))
540 break 569 break
541 } 570 }
542 } 571 }
@@ -591,6 +620,7 @@ func Test(t TestT, c TestCase) {
591 Destroy: true, 620 Destroy: true,
592 PreventDiskCleanup: lastStep.PreventDiskCleanup, 621 PreventDiskCleanup: lastStep.PreventDiskCleanup,
593 PreventPostDestroyRefresh: c.PreventPostDestroyRefresh, 622 PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
623 providers: providers,
594 } 624 }
595 625
596 log.Printf("[WARN] Test: Executing destroy step") 626 log.Printf("[WARN] Test: Executing destroy step")
@@ -620,39 +650,50 @@ func testProviderConfig(c TestCase) string {
620 return strings.Join(lines, "") 650 return strings.Join(lines, "")
621} 651}
622 652
623// testProviderResolver is a helper to build a ResourceProviderResolver 653// testProviderFactories combines the fixed Providers and
624// with pre instantiated ResourceProviders, so that we can reset them for the 654// ResourceProviderFactory functions into a single map of
625// test, while only calling the factory function once. 655// ResourceProviderFactory functions.
626// Any errors are stored so that they can be returned by the factory in 656func testProviderFactories(c TestCase) map[string]terraform.ResourceProviderFactory {
627// terraform to match non-test behavior. 657 ctxProviders := make(map[string]terraform.ResourceProviderFactory)
628func testProviderResolver(c TestCase) (terraform.ResourceProviderResolver, error) { 658 for k, pf := range c.ProviderFactories {
629 ctxProviders := c.ProviderFactories 659 ctxProviders[k] = pf
630 if ctxProviders == nil {
631 ctxProviders = make(map[string]terraform.ResourceProviderFactory)
632 } 660 }
633 661
634 // add any fixed providers 662 // add any fixed providers
635 for k, p := range c.Providers { 663 for k, p := range c.Providers {
636 ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p) 664 ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
637 } 665 }
666 return ctxProviders
667}
668
669// testProviderResolver is a helper to build a ResourceProviderResolver
670// with pre instantiated ResourceProviders, so that we can reset them for the
671// test, while only calling the factory function once.
672// Any errors are stored so that they can be returned by the factory in
673// terraform to match non-test behavior.
674func testProviderResolver(c TestCase) (providers.Resolver, error) {
675 ctxProviders := testProviderFactories(c)
676
677 // wrap the old provider factories in the test grpc server so they can be
678 // called from terraform.
679 newProviders := make(map[string]providers.Factory)
638 680
639 // reset the providers if needed
640 for k, pf := range ctxProviders { 681 for k, pf := range ctxProviders {
641 // we can ignore any errors here, if we don't have a provider to reset 682 factory := pf // must copy to ensure each closure sees its own value
642 // the error will be handled later 683 newProviders[k] = func() (providers.Interface, error) {
643 p, err := pf() 684 p, err := factory()
644 if err != nil {
645 return nil, err
646 }
647 if p, ok := p.(TestProvider); ok {
648 err := p.TestReset()
649 if err != nil { 685 if err != nil {
650 return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err) 686 return nil, err
651 } 687 }
688
689 // The provider is wrapped in a GRPCTestProvider so that it can be
690 // passed back to terraform core as a providers.Interface, rather
691 // than the legacy ResourceProvider.
692 return GRPCTestProvider(p), nil
652 } 693 }
653 } 694 }
654 695
655 return terraform.ResourceProviderResolverFixed(ctxProviders), nil 696 return providers.ResolverFixed(newProviders), nil
656} 697}
657 698
658// UnitTest is a helper to force the acceptance testing harness to run in the 699// UnitTest is a helper to force the acceptance testing harness to run in the
@@ -670,33 +711,40 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
670 return nil 711 return nil
671 } 712 }
672 713
673 name := fmt.Sprintf("%s.foo", r.Type) 714 addr := addrs.Resource{
715 Mode: addrs.ManagedResourceMode,
716 Type: r.Type,
717 Name: "foo",
718 }.Instance(addrs.NoKey)
719 absAddr := addr.Absolute(addrs.RootModuleInstance)
674 720
675 // Build the state. The state is just the resource with an ID. There 721 // Build the state. The state is just the resource with an ID. There
676 // are no attributes. We only set what is needed to perform a refresh. 722 // are no attributes. We only set what is needed to perform a refresh.
677 state := terraform.NewState() 723 state := states.NewState()
678 state.RootModule().Resources[name] = &terraform.ResourceState{ 724 state.RootModule().SetResourceInstanceCurrent(
679 Type: r.Type, 725 addr,
680 Primary: &terraform.InstanceState{ 726 &states.ResourceInstanceObjectSrc{
681 ID: r.Primary.ID, 727 AttrsFlat: r.Primary.Attributes,
728 Status: states.ObjectReady,
682 }, 729 },
683 } 730 addrs.ProviderConfig{Type: "placeholder"}.Absolute(addrs.RootModuleInstance),
731 )
684 732
685 // Create the config module. We use the full config because Refresh 733 // Create the config module. We use the full config because Refresh
686 // doesn't have access to it and we may need things like provider 734 // doesn't have access to it and we may need things like provider
687 // configurations. The initial implementation of id-only checks used 735 // configurations. The initial implementation of id-only checks used
688 // an empty config module, but that caused the aforementioned problems. 736 // an empty config module, but that caused the aforementioned problems.
689 mod, err := testModule(opts, step) 737 cfg, err := testConfig(opts, step)
690 if err != nil { 738 if err != nil {
691 return err 739 return err
692 } 740 }
693 741
694 // Initialize the context 742 // Initialize the context
695 opts.Module = mod 743 opts.Config = cfg
696 opts.State = state 744 opts.State = state
697 ctx, err := terraform.NewContext(&opts) 745 ctx, ctxDiags := terraform.NewContext(&opts)
698 if err != nil { 746 if ctxDiags.HasErrors() {
699 return err 747 return ctxDiags.Err()
700 } 748 }
701 if diags := ctx.Validate(); len(diags) > 0 { 749 if diags := ctx.Validate(); len(diags) > 0 {
702 if diags.HasErrors() { 750 if diags.HasErrors() {
@@ -707,20 +755,20 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
707 } 755 }
708 756
709 // Refresh! 757 // Refresh!
710 state, err = ctx.Refresh() 758 state, refreshDiags := ctx.Refresh()
711 if err != nil { 759 if refreshDiags.HasErrors() {
712 return fmt.Errorf("Error refreshing: %s", err) 760 return refreshDiags.Err()
713 } 761 }
714 762
715 // Verify attribute equivalence. 763 // Verify attribute equivalence.
716 actualR := state.RootModule().Resources[name] 764 actualR := state.ResourceInstance(absAddr)
717 if actualR == nil { 765 if actualR == nil {
718 return fmt.Errorf("Resource gone!") 766 return fmt.Errorf("Resource gone!")
719 } 767 }
720 if actualR.Primary == nil { 768 if actualR.Current == nil {
721 return fmt.Errorf("Resource has no primary instance") 769 return fmt.Errorf("Resource has no primary instance")
722 } 770 }
723 actual := actualR.Primary.Attributes 771 actual := actualR.Current.AttrsFlat
724 expected := r.Primary.Attributes 772 expected := r.Primary.Attributes
725 // Remove fields we're ignoring 773 // Remove fields we're ignoring
726 for _, v := range c.IDRefreshIgnore { 774 for _, v := range c.IDRefreshIgnore {
@@ -756,15 +804,14 @@ func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r
756 return nil 804 return nil
757} 805}
758 806
759func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error) { 807func testConfig(opts terraform.ContextOpts, step TestStep) (*configs.Config, error) {
760 if step.PreConfig != nil { 808 if step.PreConfig != nil {
761 step.PreConfig() 809 step.PreConfig()
762 } 810 }
763 811
764 cfgPath, err := ioutil.TempDir("", "tf-test") 812 cfgPath, err := ioutil.TempDir("", "tf-test")
765 if err != nil { 813 if err != nil {
766 return nil, fmt.Errorf( 814 return nil, fmt.Errorf("Error creating temporary directory for config: %s", err)
767 "Error creating temporary directory for config: %s", err)
768 } 815 }
769 816
770 if step.PreventDiskCleanup { 817 if step.PreventDiskCleanup {
@@ -773,38 +820,38 @@ func testModule(opts terraform.ContextOpts, step TestStep) (*module.Tree, error)
773 defer os.RemoveAll(cfgPath) 820 defer os.RemoveAll(cfgPath)
774 } 821 }
775 822
776 // Write the configuration 823 // Write the main configuration file
777 cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf")) 824 err = ioutil.WriteFile(filepath.Join(cfgPath, "main.tf"), []byte(step.Config), os.ModePerm)
778 if err != nil { 825 if err != nil {
779 return nil, fmt.Errorf( 826 return nil, fmt.Errorf("Error creating temporary file for config: %s", err)
780 "Error creating temporary file for config: %s", err)
781 } 827 }
782 828
783 _, err = io.Copy(cfgF, strings.NewReader(step.Config)) 829 // Create directory for our child modules, if any.
784 cfgF.Close() 830 modulesDir := filepath.Join(cfgPath, ".modules")
831 err = os.Mkdir(modulesDir, os.ModePerm)
785 if err != nil { 832 if err != nil {
786 return nil, fmt.Errorf( 833 return nil, fmt.Errorf("Error creating child modules directory: %s", err)
787 "Error creating temporary file for config: %s", err)
788 } 834 }
789 835
790 // Parse the configuration 836 inst := initwd.NewModuleInstaller(modulesDir, nil)
791 mod, err := module.NewTreeModule("", cfgPath) 837 _, installDiags := inst.InstallModules(cfgPath, true, initwd.ModuleInstallHooksImpl{})
792 if err != nil { 838 if installDiags.HasErrors() {
793 return nil, fmt.Errorf( 839 return nil, installDiags.Err()
794 "Error loading configuration: %s", err)
795 } 840 }
796 841
797 // Load the modules 842 loader, err := configload.NewLoader(&configload.Config{
798 modStorage := &module.Storage{ 843 ModulesDir: modulesDir,
799 StorageDir: filepath.Join(cfgPath, ".tfmodules"), 844 })
800 Mode: module.GetModeGet,
801 }
802 err = mod.Load(modStorage)
803 if err != nil { 845 if err != nil {
804 return nil, fmt.Errorf("Error downloading modules: %s", err) 846 return nil, fmt.Errorf("failed to create config loader: %s", err)
847 }
848
849 config, configDiags := loader.LoadConfig(cfgPath)
850 if configDiags.HasErrors() {
851 return nil, configDiags
805 } 852 }
806 853
807 return mod, nil 854 return config, nil
808} 855}
809 856
810func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) { 857func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
@@ -881,8 +928,9 @@ func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
881// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with 928// TestCheckModuleResourceAttrSet - as per TestCheckResourceAttrSet but with
882// support for non-root modules 929// support for non-root modules
883func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc { 930func TestCheckModuleResourceAttrSet(mp []string, name string, key string) TestCheckFunc {
931 mpt := addrs.Module(mp).UnkeyedInstanceShim()
884 return func(s *terraform.State) error { 932 return func(s *terraform.State) error {
885 is, err := modulePathPrimaryInstanceState(s, mp, name) 933 is, err := modulePathPrimaryInstanceState(s, mpt, name)
886 if err != nil { 934 if err != nil {
887 return err 935 return err
888 } 936 }
@@ -915,8 +963,9 @@ func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
915// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with 963// TestCheckModuleResourceAttr - as per TestCheckResourceAttr but with
916// support for non-root modules 964// support for non-root modules
917func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc { 965func TestCheckModuleResourceAttr(mp []string, name string, key string, value string) TestCheckFunc {
966 mpt := addrs.Module(mp).UnkeyedInstanceShim()
918 return func(s *terraform.State) error { 967 return func(s *terraform.State) error {
919 is, err := modulePathPrimaryInstanceState(s, mp, name) 968 is, err := modulePathPrimaryInstanceState(s, mpt, name)
920 if err != nil { 969 if err != nil {
921 return err 970 return err
922 } 971 }
@@ -926,7 +975,19 @@ func TestCheckModuleResourceAttr(mp []string, name string, key string, value str
926} 975}
927 976
928func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error { 977func testCheckResourceAttr(is *terraform.InstanceState, name string, key string, value string) error {
978 // Empty containers may be elided from the state.
979 // If the intent here is to check for an empty container, allow the key to
980 // also be non-existent.
981 emptyCheck := false
982 if value == "0" && (strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
983 emptyCheck = true
984 }
985
929 if v, ok := is.Attributes[key]; !ok || v != value { 986 if v, ok := is.Attributes[key]; !ok || v != value {
987 if emptyCheck && !ok {
988 return nil
989 }
990
930 if !ok { 991 if !ok {
931 return fmt.Errorf("%s: Attribute '%s' not found", name, key) 992 return fmt.Errorf("%s: Attribute '%s' not found", name, key)
932 } 993 }
@@ -957,8 +1018,9 @@ func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
957// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with 1018// TestCheckModuleNoResourceAttr - as per TestCheckNoResourceAttr but with
958// support for non-root modules 1019// support for non-root modules
959func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc { 1020func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestCheckFunc {
1021 mpt := addrs.Module(mp).UnkeyedInstanceShim()
960 return func(s *terraform.State) error { 1022 return func(s *terraform.State) error {
961 is, err := modulePathPrimaryInstanceState(s, mp, name) 1023 is, err := modulePathPrimaryInstanceState(s, mpt, name)
962 if err != nil { 1024 if err != nil {
963 return err 1025 return err
964 } 1026 }
@@ -968,7 +1030,20 @@ func TestCheckModuleNoResourceAttr(mp []string, name string, key string) TestChe
968} 1030}
969 1031
970func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error { 1032func testCheckNoResourceAttr(is *terraform.InstanceState, name string, key string) error {
971 if _, ok := is.Attributes[key]; ok { 1033 // Empty containers may sometimes be included in the state.
1034 // If the intent here is to check for an empty container, allow the value to
1035 // also be "0".
1036 emptyCheck := false
1037 if strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%") {
1038 emptyCheck = true
1039 }
1040
1041 val, exists := is.Attributes[key]
1042 if emptyCheck && val == "0" {
1043 return nil
1044 }
1045
1046 if exists {
972 return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key) 1047 return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
973 } 1048 }
974 1049
@@ -991,8 +1066,9 @@ func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
991// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with 1066// TestModuleMatchResourceAttr - as per TestMatchResourceAttr but with
992// support for non-root modules 1067// support for non-root modules
993func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc { 1068func TestModuleMatchResourceAttr(mp []string, name string, key string, r *regexp.Regexp) TestCheckFunc {
1069 mpt := addrs.Module(mp).UnkeyedInstanceShim()
994 return func(s *terraform.State) error { 1070 return func(s *terraform.State) error {
995 is, err := modulePathPrimaryInstanceState(s, mp, name) 1071 is, err := modulePathPrimaryInstanceState(s, mpt, name)
996 if err != nil { 1072 if err != nil {
997 return err 1073 return err
998 } 1074 }
@@ -1052,13 +1128,15 @@ func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string
1052// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with 1128// TestCheckModuleResourceAttrPair - as per TestCheckResourceAttrPair but with
1053// support for non-root modules 1129// support for non-root modules
1054func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc { 1130func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirst string, mpSecond []string, nameSecond string, keySecond string) TestCheckFunc {
1131 mptFirst := addrs.Module(mpFirst).UnkeyedInstanceShim()
1132 mptSecond := addrs.Module(mpSecond).UnkeyedInstanceShim()
1055 return func(s *terraform.State) error { 1133 return func(s *terraform.State) error {
1056 isFirst, err := modulePathPrimaryInstanceState(s, mpFirst, nameFirst) 1134 isFirst, err := modulePathPrimaryInstanceState(s, mptFirst, nameFirst)
1057 if err != nil { 1135 if err != nil {
1058 return err 1136 return err
1059 } 1137 }
1060 1138
1061 isSecond, err := modulePathPrimaryInstanceState(s, mpSecond, nameSecond) 1139 isSecond, err := modulePathPrimaryInstanceState(s, mptSecond, nameSecond)
1062 if err != nil { 1140 if err != nil {
1063 return err 1141 return err
1064 } 1142 }
@@ -1068,14 +1146,32 @@ func TestCheckModuleResourceAttrPair(mpFirst []string, nameFirst string, keyFirs
1068} 1146}
1069 1147
1070func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error { 1148func testCheckResourceAttrPair(isFirst *terraform.InstanceState, nameFirst string, keyFirst string, isSecond *terraform.InstanceState, nameSecond string, keySecond string) error {
1071 vFirst, ok := isFirst.Attributes[keyFirst] 1149 vFirst, okFirst := isFirst.Attributes[keyFirst]
1072 if !ok { 1150 vSecond, okSecond := isSecond.Attributes[keySecond]
1073 return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst) 1151
1152 // Container count values of 0 should not be relied upon, and not reliably
1153 // maintained by helper/schema. For the purpose of tests, consider unset and
1154 // 0 to be equal.
1155 if len(keyFirst) > 2 && len(keySecond) > 2 && keyFirst[len(keyFirst)-2:] == keySecond[len(keySecond)-2:] &&
1156 (strings.HasSuffix(keyFirst, ".#") || strings.HasSuffix(keyFirst, ".%")) {
1157 // they have the same suffix, and it is a collection count key.
1158 if vFirst == "0" || vFirst == "" {
1159 okFirst = false
1160 }
1161 if vSecond == "0" || vSecond == "" {
1162 okSecond = false
1163 }
1074 } 1164 }
1075 1165
1076 vSecond, ok := isSecond.Attributes[keySecond] 1166 if okFirst != okSecond {
1077 if !ok { 1167 if !okFirst {
1078 return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond) 1168 return fmt.Errorf("%s: Attribute %q not set, but %q is set in %s as %q", nameFirst, keyFirst, keySecond, nameSecond, vSecond)
1169 }
1170 return fmt.Errorf("%s: Attribute %q is %q, but %q is not set in %s", nameFirst, keyFirst, vFirst, keySecond, nameSecond)
1171 }
1172 if !(okFirst || okSecond) {
1173 // If they both don't exist then they are equally unset, so that's okay.
1174 return nil
1079 } 1175 }
1080 1176
1081 if vFirst != vSecond { 1177 if vFirst != vSecond {
@@ -1163,7 +1259,7 @@ func modulePrimaryInstanceState(s *terraform.State, ms *terraform.ModuleState, n
1163 1259
1164// modulePathPrimaryInstanceState returns the primary instance state for the 1260// modulePathPrimaryInstanceState returns the primary instance state for the
1165// given resource name in a given module path. 1261// given resource name in a given module path.
1166func modulePathPrimaryInstanceState(s *terraform.State, mp []string, name string) (*terraform.InstanceState, error) { 1262func modulePathPrimaryInstanceState(s *terraform.State, mp addrs.ModuleInstance, name string) (*terraform.InstanceState, error) {
1167 ms := s.ModuleByPath(mp) 1263 ms := s.ModuleByPath(mp)
1168 if ms == nil { 1264 if ms == nil {
1169 return nil, fmt.Errorf("No module found at: %s", mp) 1265 return nil, fmt.Errorf("No module found at: %s", mp)
@@ -1178,3 +1274,47 @@ func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceS
1178 ms := s.RootModule() 1274 ms := s.RootModule()
1179 return modulePrimaryInstanceState(s, ms, name) 1275 return modulePrimaryInstanceState(s, ms, name)
1180} 1276}
1277
1278// operationError is a specialized implementation of error used to describe
1279// failures during one of the several operations performed for a particular
1280// test case.
1281type operationError struct {
1282 OpName string
1283 Diags tfdiags.Diagnostics
1284}
1285
1286func newOperationError(opName string, diags tfdiags.Diagnostics) error {
1287 return operationError{opName, diags}
1288}
1289
1290// Error returns a terse error string containing just the basic diagnostic
1291// messages, for situations where normal Go error behavior is appropriate.
1292func (err operationError) Error() string {
1293 return fmt.Sprintf("errors during %s: %s", err.OpName, err.Diags.Err().Error())
1294}
1295
1296// ErrorDetail is like Error except it includes verbosely-rendered diagnostics
1297// similar to what would come from a normal Terraform run, which include
1298// additional context not included in Error().
1299func (err operationError) ErrorDetail() string {
1300 var buf bytes.Buffer
1301 fmt.Fprintf(&buf, "errors during %s:", err.OpName)
1302 clr := &colorstring.Colorize{Disable: true, Colors: colorstring.DefaultColors}
1303 for _, diag := range err.Diags {
1304 diagStr := format.Diagnostic(diag, nil, clr, 78)
1305 buf.WriteByte('\n')
1306 buf.WriteString(diagStr)
1307 }
1308 return buf.String()
1309}
1310
1311// detailedErrorMessage is a helper for calling ErrorDetail on an error if
1312// it is an operationError or just taking Error otherwise.
1313func detailedErrorMessage(err error) string {
1314 switch tErr := err.(type) {
1315 case operationError:
1316 return tErr.ErrorDetail()
1317 default:
1318 return err.Error()
1319 }
1320}
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
index 033f126..311fdb6 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
@@ -1,13 +1,23 @@
1package resource 1package resource
2 2
3import ( 3import (
4 "bufio"
5 "bytes"
4 "errors" 6 "errors"
5 "fmt" 7 "fmt"
6 "log" 8 "log"
9 "sort"
7 "strings" 10 "strings"
8 11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/config"
14 "github.com/hashicorp/terraform/config/hcl2shim"
15 "github.com/hashicorp/terraform/states"
16
9 "github.com/hashicorp/errwrap" 17 "github.com/hashicorp/errwrap"
18 "github.com/hashicorp/terraform/plans"
10 "github.com/hashicorp/terraform/terraform" 19 "github.com/hashicorp/terraform/terraform"
20 "github.com/hashicorp/terraform/tfdiags"
11) 21)
12 22
13// testStepConfig runs a config-mode test step 23// testStepConfig runs a config-mode test step
@@ -18,69 +28,79 @@ func testStepConfig(
18 return testStep(opts, state, step) 28 return testStep(opts, state, step)
19} 29}
20 30
21func testStep( 31func testStep(opts terraform.ContextOpts, state *terraform.State, step TestStep) (*terraform.State, error) {
22 opts terraform.ContextOpts,
23 state *terraform.State,
24 step TestStep) (*terraform.State, error) {
25 // Pre-taint any resources that have been defined in Taint, as long as this
26 // is not a destroy step.
27 if !step.Destroy { 32 if !step.Destroy {
28 if err := testStepTaint(state, step); err != nil { 33 if err := testStepTaint(state, step); err != nil {
29 return state, err 34 return state, err
30 } 35 }
31 } 36 }
32 37
33 mod, err := testModule(opts, step) 38 cfg, err := testConfig(opts, step)
34 if err != nil { 39 if err != nil {
35 return state, err 40 return state, err
36 } 41 }
37 42
43 var stepDiags tfdiags.Diagnostics
44
38 // Build the context 45 // Build the context
39 opts.Module = mod 46 opts.Config = cfg
40 opts.State = state 47 opts.State, err = terraform.ShimLegacyState(state)
41 opts.Destroy = step.Destroy
42 ctx, err := terraform.NewContext(&opts)
43 if err != nil { 48 if err != nil {
44 return state, fmt.Errorf("Error initializing context: %s", err) 49 return nil, err
50 }
51
52 opts.Destroy = step.Destroy
53 ctx, stepDiags := terraform.NewContext(&opts)
54 if stepDiags.HasErrors() {
55 return state, fmt.Errorf("Error initializing context: %s", stepDiags.Err())
45 } 56 }
46 if diags := ctx.Validate(); len(diags) > 0 { 57 if stepDiags := ctx.Validate(); len(stepDiags) > 0 {
47 if diags.HasErrors() { 58 if stepDiags.HasErrors() {
48 return nil, errwrap.Wrapf("config is invalid: {{err}}", diags.Err()) 59 return state, errwrap.Wrapf("config is invalid: {{err}}", stepDiags.Err())
49 } 60 }
50 61
51 log.Printf("[WARN] Config warnings:\n%s", diags) 62 log.Printf("[WARN] Config warnings:\n%s", stepDiags)
52 } 63 }
53 64
54 // Refresh! 65 // Refresh!
55 state, err = ctx.Refresh() 66 newState, stepDiags := ctx.Refresh()
67 // shim the state first so the test can check the state on errors
68
69 state, err = shimNewState(newState, step.providers)
56 if err != nil { 70 if err != nil {
57 return state, fmt.Errorf( 71 return nil, err
58 "Error refreshing: %s", err) 72 }
73 if stepDiags.HasErrors() {
74 return state, newOperationError("refresh", stepDiags)
59 } 75 }
60 76
61 // If this step is a PlanOnly step, skip over this first Plan and subsequent 77 // If this step is a PlanOnly step, skip over this first Plan and subsequent
62 // Apply, and use the follow up Plan that checks for perpetual diffs 78 // Apply, and use the follow up Plan that checks for perpetual diffs
63 if !step.PlanOnly { 79 if !step.PlanOnly {
64 // Plan! 80 // Plan!
65 if p, err := ctx.Plan(); err != nil { 81 if p, stepDiags := ctx.Plan(); stepDiags.HasErrors() {
66 return state, fmt.Errorf( 82 return state, newOperationError("plan", stepDiags)
67 "Error planning: %s", err)
68 } else { 83 } else {
69 log.Printf("[WARN] Test: Step plan: %s", p) 84 log.Printf("[WARN] Test: Step plan: %s", legacyPlanComparisonString(newState, p.Changes))
70 } 85 }
71 86
72 // We need to keep a copy of the state prior to destroying 87 // We need to keep a copy of the state prior to destroying
73 // such that destroy steps can verify their behaviour in the check 88 // such that destroy steps can verify their behavior in the check
74 // function 89 // function
75 stateBeforeApplication := state.DeepCopy() 90 stateBeforeApplication := state.DeepCopy()
76 91
77 // Apply! 92 // Apply the diff, creating real resources.
78 state, err = ctx.Apply() 93 newState, stepDiags = ctx.Apply()
94 // shim the state first so the test can check the state on errors
95 state, err = shimNewState(newState, step.providers)
79 if err != nil { 96 if err != nil {
80 return state, fmt.Errorf("Error applying: %s", err) 97 return nil, err
98 }
99 if stepDiags.HasErrors() {
100 return state, newOperationError("apply", stepDiags)
81 } 101 }
82 102
83 // Check! Excitement! 103 // Run any configured checks
84 if step.Check != nil { 104 if step.Check != nil {
85 if step.Destroy { 105 if step.Destroy {
86 if err := step.Check(stateBeforeApplication); err != nil { 106 if err := step.Check(stateBeforeApplication); err != nil {
@@ -96,31 +116,35 @@ func testStep(
96 116
97 // Now, verify that Plan is now empty and we don't have a perpetual diff issue 117 // Now, verify that Plan is now empty and we don't have a perpetual diff issue
98 // We do this with TWO plans. One without a refresh. 118 // We do this with TWO plans. One without a refresh.
99 var p *terraform.Plan 119 var p *plans.Plan
100 if p, err = ctx.Plan(); err != nil { 120 if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
101 return state, fmt.Errorf("Error on follow-up plan: %s", err) 121 return state, newOperationError("follow-up plan", stepDiags)
102 } 122 }
103 if p.Diff != nil && !p.Diff.Empty() { 123 if !p.Changes.Empty() {
104 if step.ExpectNonEmptyPlan { 124 if step.ExpectNonEmptyPlan {
105 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) 125 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
106 } else { 126 } else {
107 return state, fmt.Errorf( 127 return state, fmt.Errorf(
108 "After applying this step, the plan was not empty:\n\n%s", p) 128 "After applying this step, the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
109 } 129 }
110 } 130 }
111 131
112 // And another after a Refresh. 132 // And another after a Refresh.
113 if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) { 133 if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
114 state, err = ctx.Refresh() 134 newState, stepDiags = ctx.Refresh()
135 if stepDiags.HasErrors() {
136 return state, newOperationError("follow-up refresh", stepDiags)
137 }
138
139 state, err = shimNewState(newState, step.providers)
115 if err != nil { 140 if err != nil {
116 return state, fmt.Errorf( 141 return nil, err
117 "Error on follow-up refresh: %s", err)
118 } 142 }
119 } 143 }
120 if p, err = ctx.Plan(); err != nil { 144 if p, stepDiags = ctx.Plan(); stepDiags.HasErrors() {
121 return state, fmt.Errorf("Error on second follow-up plan: %s", err) 145 return state, newOperationError("second follow-up refresh", stepDiags)
122 } 146 }
123 empty := p.Diff == nil || p.Diff.Empty() 147 empty := p.Changes.Empty()
124 148
125 // Data resources are tricky because they legitimately get instantiated 149 // Data resources are tricky because they legitimately get instantiated
126 // during refresh so that they will be already populated during the 150 // during refresh so that they will be already populated during the
@@ -128,35 +152,28 @@ func testStep(
128 // config we'll end up wanting to destroy them again here. This is 152 // config we'll end up wanting to destroy them again here. This is
129 // acceptable and expected, and we'll treat it as "empty" for the 153 // acceptable and expected, and we'll treat it as "empty" for the
130 // sake of this testing. 154 // sake of this testing.
131 if step.Destroy { 155 if step.Destroy && !empty {
132 empty = true 156 empty = true
133 157 for _, change := range p.Changes.Resources {
134 for _, moduleDiff := range p.Diff.Modules { 158 if change.Addr.Resource.Resource.Mode != addrs.DataResourceMode {
135 for k, instanceDiff := range moduleDiff.Resources { 159 empty = false
136 if !strings.HasPrefix(k, "data.") { 160 break
137 empty = false
138 break
139 }
140
141 if !instanceDiff.Destroy {
142 empty = false
143 }
144 } 161 }
145 } 162 }
146 } 163 }
147 164
148 if !empty { 165 if !empty {
149 if step.ExpectNonEmptyPlan { 166 if step.ExpectNonEmptyPlan {
150 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p) 167 log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
151 } else { 168 } else {
152 return state, fmt.Errorf( 169 return state, fmt.Errorf(
153 "After applying this step and refreshing, "+ 170 "After applying this step and refreshing, "+
154 "the plan was not empty:\n\n%s", p) 171 "the plan was not empty:\n\n%s", legacyPlanComparisonString(newState, p.Changes))
155 } 172 }
156 } 173 }
157 174
158 // Made it here, but expected a non-empty plan, fail! 175 // Made it here, but expected a non-empty plan, fail!
159 if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) { 176 if step.ExpectNonEmptyPlan && empty {
160 return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!") 177 return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
161 } 178 }
162 179
@@ -164,6 +181,213 @@ func testStep(
164 return state, nil 181 return state, nil
165} 182}
166 183
184// legacyPlanComparisonString produces a string representation of the changes
185// from a plan and a given state togther, as was formerly produced by the
186// String method of terraform.Plan.
187//
188// This is here only for compatibility with existing tests that predate our
189// new plan and state types, and should not be used in new tests. Instead, use
190// a library like "cmp" to do a deep equality and diff on the two
191// data structures.
192func legacyPlanComparisonString(state *states.State, changes *plans.Changes) string {
193 return fmt.Sprintf(
194 "DIFF:\n\n%s\n\nSTATE:\n\n%s",
195 legacyDiffComparisonString(changes),
196 state.String(),
197 )
198}
199
200// legacyDiffComparisonString produces a string representation of the changes
201// from a planned changes object, as was formerly produced by the String method
202// of terraform.Diff.
203//
204// This is here only for compatibility with existing tests that predate our
205// new plan types, and should not be used in new tests. Instead, use a library
206// like "cmp" to do a deep equality check and diff on the two data structures.
207func legacyDiffComparisonString(changes *plans.Changes) string {
208 // The old string representation of a plan was grouped by module, but
209 // our new plan structure is not grouped in that way and so we'll need
210 // to preprocess it in order to produce that grouping.
211 type ResourceChanges struct {
212 Current *plans.ResourceInstanceChangeSrc
213 Deposed map[states.DeposedKey]*plans.ResourceInstanceChangeSrc
214 }
215 byModule := map[string]map[string]*ResourceChanges{}
216 resourceKeys := map[string][]string{}
217 requiresReplace := map[string][]string{}
218 var moduleKeys []string
219 for _, rc := range changes.Resources {
220 if rc.Action == plans.NoOp {
221 // We won't mention no-op changes here at all, since the old plan
222 // model we are emulating here didn't have such a concept.
223 continue
224 }
225 moduleKey := rc.Addr.Module.String()
226 if _, exists := byModule[moduleKey]; !exists {
227 moduleKeys = append(moduleKeys, moduleKey)
228 byModule[moduleKey] = make(map[string]*ResourceChanges)
229 }
230 resourceKey := rc.Addr.Resource.String()
231 if _, exists := byModule[moduleKey][resourceKey]; !exists {
232 resourceKeys[moduleKey] = append(resourceKeys[moduleKey], resourceKey)
233 byModule[moduleKey][resourceKey] = &ResourceChanges{
234 Deposed: make(map[states.DeposedKey]*plans.ResourceInstanceChangeSrc),
235 }
236 }
237
238 if rc.DeposedKey == states.NotDeposed {
239 byModule[moduleKey][resourceKey].Current = rc
240 } else {
241 byModule[moduleKey][resourceKey].Deposed[rc.DeposedKey] = rc
242 }
243
244 rr := []string{}
245 for _, p := range rc.RequiredReplace.List() {
246 rr = append(rr, hcl2shim.FlatmapKeyFromPath(p))
247 }
248 requiresReplace[resourceKey] = rr
249 }
250 sort.Strings(moduleKeys)
251 for _, ks := range resourceKeys {
252 sort.Strings(ks)
253 }
254
255 var buf bytes.Buffer
256
257 for _, moduleKey := range moduleKeys {
258 rcs := byModule[moduleKey]
259 var mBuf bytes.Buffer
260
261 for _, resourceKey := range resourceKeys[moduleKey] {
262 rc := rcs[resourceKey]
263
264 forceNewAttrs := requiresReplace[resourceKey]
265
266 crud := "UPDATE"
267 if rc.Current != nil {
268 switch rc.Current.Action {
269 case plans.DeleteThenCreate:
270 crud = "DESTROY/CREATE"
271 case plans.CreateThenDelete:
272 crud = "CREATE/DESTROY"
273 case plans.Delete:
274 crud = "DESTROY"
275 case plans.Create:
276 crud = "CREATE"
277 }
278 } else {
279 // We must be working on a deposed object then, in which
280 // case destroying is the only possible action.
281 crud = "DESTROY"
282 }
283
284 extra := ""
285 if rc.Current == nil && len(rc.Deposed) > 0 {
286 extra = " (deposed only)"
287 }
288
289 fmt.Fprintf(
290 &mBuf, "%s: %s%s\n",
291 crud, resourceKey, extra,
292 )
293
294 attrNames := map[string]bool{}
295 var oldAttrs map[string]string
296 var newAttrs map[string]string
297 if rc.Current != nil {
298 if before := rc.Current.Before; before != nil {
299 ty, err := before.ImpliedType()
300 if err == nil {
301 val, err := before.Decode(ty)
302 if err == nil {
303 oldAttrs = hcl2shim.FlatmapValueFromHCL2(val)
304 for k := range oldAttrs {
305 attrNames[k] = true
306 }
307 }
308 }
309 }
310 if after := rc.Current.After; after != nil {
311 ty, err := after.ImpliedType()
312 if err == nil {
313 val, err := after.Decode(ty)
314 if err == nil {
315 newAttrs = hcl2shim.FlatmapValueFromHCL2(val)
316 for k := range newAttrs {
317 attrNames[k] = true
318 }
319 }
320 }
321 }
322 }
323 if oldAttrs == nil {
324 oldAttrs = make(map[string]string)
325 }
326 if newAttrs == nil {
327 newAttrs = make(map[string]string)
328 }
329
330 attrNamesOrder := make([]string, 0, len(attrNames))
331 keyLen := 0
332 for n := range attrNames {
333 attrNamesOrder = append(attrNamesOrder, n)
334 if len(n) > keyLen {
335 keyLen = len(n)
336 }
337 }
338 sort.Strings(attrNamesOrder)
339
340 for _, attrK := range attrNamesOrder {
341 v := newAttrs[attrK]
342 u := oldAttrs[attrK]
343
344 if v == config.UnknownVariableValue {
345 v = "<computed>"
346 }
347 // NOTE: we don't support <sensitive> here because we would
348 // need schema to do that. Excluding sensitive values
349 // is now done at the UI layer, and so should not be tested
350 // at the core layer.
351
352 updateMsg := ""
353
354 // This may not be as precise as in the old diff, as it matches
355 // everything under the attribute that was originally marked as
356 // ForceNew, but should help make it easier to determine what
357 // caused replacement here.
358 for _, k := range forceNewAttrs {
359 if strings.HasPrefix(attrK, k) {
360 updateMsg = " (forces new resource)"
361 break
362 }
363 }
364
365 fmt.Fprintf(
366 &mBuf, " %s:%s %#v => %#v%s\n",
367 attrK,
368 strings.Repeat(" ", keyLen-len(attrK)),
369 u, v,
370 updateMsg,
371 )
372 }
373 }
374
375 if moduleKey == "" { // root module
376 buf.Write(mBuf.Bytes())
377 buf.WriteByte('\n')
378 continue
379 }
380
381 fmt.Fprintf(&buf, "%s:\n", moduleKey)
382 s := bufio.NewScanner(&mBuf)
383 for s.Scan() {
384 buf.WriteString(fmt.Sprintf(" %s\n", s.Text()))
385 }
386 }
387
388 return buf.String()
389}
390
167func testStepTaint(state *terraform.State, step TestStep) error { 391func testStepTaint(state *terraform.State, step TestStep) error {
168 for _, p := range step.Taint { 392 for _, p := range step.Taint {
169 m := state.RootModule() 393 m := state.RootModule()
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
index 94fef3c..e1b7aea 100644
--- a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
+++ b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
@@ -7,6 +7,12 @@ import (
7 "strings" 7 "strings"
8 8
9 "github.com/davecgh/go-spew/spew" 9 "github.com/davecgh/go-spew/spew"
10 "github.com/hashicorp/hcl2/hcl"
11 "github.com/hashicorp/hcl2/hcl/hclsyntax"
12
13 "github.com/hashicorp/terraform/addrs"
14 "github.com/hashicorp/terraform/helper/schema"
15 "github.com/hashicorp/terraform/states"
10 "github.com/hashicorp/terraform/terraform" 16 "github.com/hashicorp/terraform/terraform"
11) 17)
12 18
@@ -15,6 +21,7 @@ func testStepImportState(
15 opts terraform.ContextOpts, 21 opts terraform.ContextOpts,
16 state *terraform.State, 22 state *terraform.State,
17 step TestStep) (*terraform.State, error) { 23 step TestStep) (*terraform.State, error) {
24
18 // Determine the ID to import 25 // Determine the ID to import
19 var importId string 26 var importId string
20 switch { 27 switch {
@@ -41,33 +48,53 @@ func testStepImportState(
41 48
42 // Setup the context. We initialize with an empty state. We use the 49 // Setup the context. We initialize with an empty state. We use the
43 // full config for provider configurations. 50 // full config for provider configurations.
44 mod, err := testModule(opts, step) 51 cfg, err := testConfig(opts, step)
45 if err != nil { 52 if err != nil {
46 return state, err 53 return state, err
47 } 54 }
48 55
49 opts.Module = mod 56 opts.Config = cfg
50 opts.State = terraform.NewState() 57
51 ctx, err := terraform.NewContext(&opts) 58 // import tests start with empty state
52 if err != nil { 59 opts.State = states.NewState()
53 return state, err 60
61 ctx, stepDiags := terraform.NewContext(&opts)
62 if stepDiags.HasErrors() {
63 return state, stepDiags.Err()
54 } 64 }
55 65
56 // Do the import! 66 // The test step provides the resource address as a string, so we need
57 newState, err := ctx.Import(&terraform.ImportOpts{ 67 // to parse it to get an addrs.AbsResourceAddress to pass in to the
68 // import method.
69 traversal, hclDiags := hclsyntax.ParseTraversalAbs([]byte(step.ResourceName), "", hcl.Pos{})
70 if hclDiags.HasErrors() {
71 return nil, hclDiags
72 }
73 importAddr, stepDiags := addrs.ParseAbsResourceInstance(traversal)
74 if stepDiags.HasErrors() {
75 return nil, stepDiags.Err()
76 }
77
78 // Do the import
79 importedState, stepDiags := ctx.Import(&terraform.ImportOpts{
58 // Set the module so that any provider config is loaded 80 // Set the module so that any provider config is loaded
59 Module: mod, 81 Config: cfg,
60 82
61 Targets: []*terraform.ImportTarget{ 83 Targets: []*terraform.ImportTarget{
62 &terraform.ImportTarget{ 84 &terraform.ImportTarget{
63 Addr: step.ResourceName, 85 Addr: importAddr,
64 ID: importId, 86 ID: importId,
65 }, 87 },
66 }, 88 },
67 }) 89 })
90 if stepDiags.HasErrors() {
91 log.Printf("[ERROR] Test: ImportState failure: %s", stepDiags.Err())
92 return state, stepDiags.Err()
93 }
94
95 newState, err := shimNewState(importedState, step.providers)
68 if err != nil { 96 if err != nil {
69 log.Printf("[ERROR] Test: ImportState failure: %s", err) 97 return nil, err
70 return state, err
71 } 98 }
72 99
73 // Go through the new state and verify 100 // Go through the new state and verify
@@ -75,7 +102,9 @@ func testStepImportState(
75 var states []*terraform.InstanceState 102 var states []*terraform.InstanceState
76 for _, r := range newState.RootModule().Resources { 103 for _, r := range newState.RootModule().Resources {
77 if r.Primary != nil { 104 if r.Primary != nil {
78 states = append(states, r.Primary) 105 is := r.Primary.DeepCopy()
106 is.Ephemeral.Type = r.Type // otherwise the check function cannot see the type
107 states = append(states, is)
79 } 108 }
80 } 109 }
81 if err := step.ImportStateCheck(states); err != nil { 110 if err := step.ImportStateCheck(states); err != nil {
@@ -102,30 +131,84 @@ func testStepImportState(
102 r.Primary.ID) 131 r.Primary.ID)
103 } 132 }
104 133
134 // We'll try our best to find the schema for this resource type
135 // so we can ignore Removed fields during validation. If we fail
136 // to find the schema then we won't ignore them and so the test
137 // will need to rely on explicit ImportStateVerifyIgnore, though
138 // this shouldn't happen in any reasonable case.
139 var rsrcSchema *schema.Resource
140 if providerAddr, diags := addrs.ParseAbsProviderConfigStr(r.Provider); !diags.HasErrors() {
141 providerType := providerAddr.ProviderConfig.Type
142 if provider, ok := step.providers[providerType]; ok {
143 if provider, ok := provider.(*schema.Provider); ok {
144 rsrcSchema = provider.ResourcesMap[r.Type]
145 }
146 }
147 }
148
149 // don't add empty flatmapped containers, so we can more easily
150 // compare the attributes
151 skipEmpty := func(k, v string) bool {
152 if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") {
153 if v == "0" {
154 return true
155 }
156 }
157 return false
158 }
159
105 // Compare their attributes 160 // Compare their attributes
106 actual := make(map[string]string) 161 actual := make(map[string]string)
107 for k, v := range r.Primary.Attributes { 162 for k, v := range r.Primary.Attributes {
163 if skipEmpty(k, v) {
164 continue
165 }
108 actual[k] = v 166 actual[k] = v
109 } 167 }
168
110 expected := make(map[string]string) 169 expected := make(map[string]string)
111 for k, v := range oldR.Primary.Attributes { 170 for k, v := range oldR.Primary.Attributes {
171 if skipEmpty(k, v) {
172 continue
173 }
112 expected[k] = v 174 expected[k] = v
113 } 175 }
114 176
115 // Remove fields we're ignoring 177 // Remove fields we're ignoring
116 for _, v := range step.ImportStateVerifyIgnore { 178 for _, v := range step.ImportStateVerifyIgnore {
117 for k, _ := range actual { 179 for k := range actual {
118 if strings.HasPrefix(k, v) { 180 if strings.HasPrefix(k, v) {
119 delete(actual, k) 181 delete(actual, k)
120 } 182 }
121 } 183 }
122 for k, _ := range expected { 184 for k := range expected {
123 if strings.HasPrefix(k, v) { 185 if strings.HasPrefix(k, v) {
124 delete(expected, k) 186 delete(expected, k)
125 } 187 }
126 } 188 }
127 } 189 }
128 190
191 // Also remove any attributes that are marked as "Removed" in the
192 // schema, if we have a schema to check that against.
193 if rsrcSchema != nil {
194 for k := range actual {
195 for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) {
196 if schema.Removed != "" {
197 delete(actual, k)
198 break
199 }
200 }
201 }
202 for k := range expected {
203 for _, schema := range rsrcSchema.SchemasForFlatmapPath(k) {
204 if schema.Removed != "" {
205 delete(expected, k)
206 break
207 }
208 }
209 }
210 }
211
129 if !reflect.DeepEqual(actual, expected) { 212 if !reflect.DeepEqual(actual, expected) {
130 // Determine only the different attributes 213 // Determine only the different attributes
131 for k, v := range expected { 214 for k, v := range expected {
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
index 57fbba7..c8d8ae2 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
@@ -2,8 +2,15 @@ package schema
2 2
3import ( 3import (
4 "context" 4 "context"
5 "fmt"
5 6
7 "github.com/hashicorp/terraform/tfdiags"
8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/terraform/config/hcl2shim"
11 "github.com/hashicorp/terraform/configs/configschema"
6 "github.com/hashicorp/terraform/terraform" 12 "github.com/hashicorp/terraform/terraform"
13 ctyconvert "github.com/zclconf/go-cty/cty/convert"
7) 14)
8 15
9// Backend represents a partial backend.Backend implementation and simplifies 16// Backend represents a partial backend.Backend implementation and simplifies
@@ -38,41 +45,123 @@ func FromContextBackendConfig(ctx context.Context) *ResourceData {
38 return ctx.Value(backendConfigKey).(*ResourceData) 45 return ctx.Value(backendConfigKey).(*ResourceData)
39} 46}
40 47
41func (b *Backend) Input( 48func (b *Backend) ConfigSchema() *configschema.Block {
42 input terraform.UIInput, 49 // This is an alias of CoreConfigSchema just to implement the
43 c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) { 50 // backend.Backend interface.
51 return b.CoreConfigSchema()
52}
53
54func (b *Backend) PrepareConfig(configVal cty.Value) (cty.Value, tfdiags.Diagnostics) {
44 if b == nil { 55 if b == nil {
45 return c, nil 56 return configVal, nil
46 } 57 }
58 var diags tfdiags.Diagnostics
59 var err error
47 60
48 return schemaMap(b.Schema).Input(input, c) 61 // In order to use Transform below, this needs to be filled out completely
49} 62 // according the schema.
63 configVal, err = b.CoreConfigSchema().CoerceValue(configVal)
64 if err != nil {
65 return configVal, diags.Append(err)
66 }
50 67
51func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) { 68 // lookup any required, top-level attributes that are Null, and see if we
52 if b == nil { 69 // have a Default value available.
53 return nil, nil 70 configVal, err = cty.Transform(configVal, func(path cty.Path, val cty.Value) (cty.Value, error) {
71 // we're only looking for top-level attributes
72 if len(path) != 1 {
73 return val, nil
74 }
75
76 // nothing to do if we already have a value
77 if !val.IsNull() {
78 return val, nil
79 }
80
81 // get the Schema definition for this attribute
82 getAttr, ok := path[0].(cty.GetAttrStep)
83 // these should all exist, but just ignore anything strange
84 if !ok {
85 return val, nil
86 }
87
88 attrSchema := b.Schema[getAttr.Name]
89 // continue to ignore anything that doesn't match
90 if attrSchema == nil {
91 return val, nil
92 }
93
94 // this is deprecated, so don't set it
95 if attrSchema.Deprecated != "" || attrSchema.Removed != "" {
96 return val, nil
97 }
98
99 // find a default value if it exists
100 def, err := attrSchema.DefaultValue()
101 if err != nil {
102 diags = diags.Append(fmt.Errorf("error getting default for %q: %s", getAttr.Name, err))
103 return val, err
104 }
105
106 // no default
107 if def == nil {
108 return val, nil
109 }
110
111 // create a cty.Value and make sure it's the correct type
112 tmpVal := hcl2shim.HCL2ValueFromConfigValue(def)
113
114 // helper/schema used to allow setting "" to a bool
115 if val.Type() == cty.Bool && tmpVal.RawEquals(cty.StringVal("")) {
116 // return a warning about the conversion
117 diags = diags.Append("provider set empty string as default value for bool " + getAttr.Name)
118 tmpVal = cty.False
119 }
120
121 val, err = ctyconvert.Convert(tmpVal, val.Type())
122 if err != nil {
123 diags = diags.Append(fmt.Errorf("error setting default for %q: %s", getAttr.Name, err))
124 }
125
126 return val, err
127 })
128 if err != nil {
129 // any error here was already added to the diagnostics
130 return configVal, diags
54 } 131 }
55 132
56 return schemaMap(b.Schema).Validate(c) 133 shimRC := b.shimConfig(configVal)
134 warns, errs := schemaMap(b.Schema).Validate(shimRC)
135 for _, warn := range warns {
136 diags = diags.Append(tfdiags.SimpleWarning(warn))
137 }
138 for _, err := range errs {
139 diags = diags.Append(err)
140 }
141 return configVal, diags
57} 142}
58 143
59func (b *Backend) Configure(c *terraform.ResourceConfig) error { 144func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics {
60 if b == nil { 145 if b == nil {
61 return nil 146 return nil
62 } 147 }
63 148
149 var diags tfdiags.Diagnostics
64 sm := schemaMap(b.Schema) 150 sm := schemaMap(b.Schema)
151 shimRC := b.shimConfig(obj)
65 152
66 // Get a ResourceData for this configuration. To do this, we actually 153 // Get a ResourceData for this configuration. To do this, we actually
67 // generate an intermediary "diff" although that is never exposed. 154 // generate an intermediary "diff" although that is never exposed.
68 diff, err := sm.Diff(nil, c, nil, nil) 155 diff, err := sm.Diff(nil, shimRC, nil, nil, true)
69 if err != nil { 156 if err != nil {
70 return err 157 diags = diags.Append(err)
158 return diags
71 } 159 }
72 160
73 data, err := sm.Data(nil, diff) 161 data, err := sm.Data(nil, diff)
74 if err != nil { 162 if err != nil {
75 return err 163 diags = diags.Append(err)
164 return diags
76 } 165 }
77 b.config = data 166 b.config = data
78 167
@@ -80,11 +169,28 @@ func (b *Backend) Configure(c *terraform.ResourceConfig) error {
80 err = b.ConfigureFunc(context.WithValue( 169 err = b.ConfigureFunc(context.WithValue(
81 context.Background(), backendConfigKey, data)) 170 context.Background(), backendConfigKey, data))
82 if err != nil { 171 if err != nil {
83 return err 172 diags = diags.Append(err)
173 return diags
84 } 174 }
85 } 175 }
86 176
87 return nil 177 return diags
178}
179
180// shimConfig turns a new-style cty.Value configuration (which must be of
181// an object type) into a minimal old-style *terraform.ResourceConfig object
182// that should be populated enough to appease the not-yet-updated functionality
183// in this package. This should be removed once everything is updated.
184func (b *Backend) shimConfig(obj cty.Value) *terraform.ResourceConfig {
185 shimMap, ok := hcl2shim.ConfigValueFromHCL2(obj).(map[string]interface{})
186 if !ok {
187 // If the configVal was nil, we still want a non-nil map here.
188 shimMap = map[string]interface{}{}
189 }
190 return &terraform.ResourceConfig{
191 Config: shimMap,
192 Raw: shimMap,
193 }
88} 194}
89 195
90// Config returns the configuration. This is available after Configure is 196// Config returns the configuration. This is available after Configure is
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
index bf952f6..875677e 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/core_schema.go
@@ -3,7 +3,7 @@ package schema
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config/configschema" 6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/zclconf/go-cty/cty" 7 "github.com/zclconf/go-cty/cty"
8) 8)
9 9
@@ -39,14 +39,42 @@ func (m schemaMap) CoreConfigSchema() *configschema.Block {
39 ret.Attributes[name] = schema.coreConfigSchemaAttribute() 39 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
40 continue 40 continue
41 } 41 }
42 switch schema.Elem.(type) { 42 if schema.Type == TypeMap {
43 case *Schema: 43 // For TypeMap in particular, it isn't valid for Elem to be a
44 // *Resource (since that would be ambiguous in flatmap) and
45 // so Elem is treated as a TypeString schema if so. This matches
46 // how the field readers treat this situation, for compatibility
47 // with configurations targeting Terraform 0.11 and earlier.
48 if _, isResource := schema.Elem.(*Resource); isResource {
49 sch := *schema // shallow copy
50 sch.Elem = &Schema{
51 Type: TypeString,
52 }
53 ret.Attributes[name] = sch.coreConfigSchemaAttribute()
54 continue
55 }
56 }
57 switch schema.ConfigMode {
58 case SchemaConfigModeAttr:
44 ret.Attributes[name] = schema.coreConfigSchemaAttribute() 59 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
45 case *Resource: 60 case SchemaConfigModeBlock:
46 ret.BlockTypes[name] = schema.coreConfigSchemaBlock() 61 ret.BlockTypes[name] = schema.coreConfigSchemaBlock()
47 default: 62 default: // SchemaConfigModeAuto, or any other invalid value
48 // Should never happen for a valid schema 63 if schema.Computed && !schema.Optional {
49 panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem)) 64 // Computed-only schemas are always handled as attributes,
65 // because they never appear in configuration.
66 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
67 continue
68 }
69 switch schema.Elem.(type) {
70 case *Schema, ValueType:
71 ret.Attributes[name] = schema.coreConfigSchemaAttribute()
72 case *Resource:
73 ret.BlockTypes[name] = schema.coreConfigSchemaBlock()
74 default:
75 // Should never happen for a valid schema
76 panic(fmt.Errorf("invalid Schema.Elem %#v; need *Schema or *Resource", schema.Elem))
77 }
50 } 78 }
51 } 79 }
52 80
@@ -58,12 +86,42 @@ func (m schemaMap) CoreConfigSchema() *configschema.Block {
58// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections 86// Elem is an instance of Schema. Use coreConfigSchemaBlock for collections
59// whose elem is a whole resource. 87// whose elem is a whole resource.
60func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute { 88func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
89 // The Schema.DefaultFunc capability adds some extra weirdness here since
90 // it can be combined with "Required: true" to create a sitution where
91 // required-ness is conditional. Terraform Core doesn't share this concept,
92 // so we must sniff for this possibility here and conditionally turn
93 // off the "Required" flag if it looks like the DefaultFunc is going
94 // to provide a value.
95 // This is not 100% true to the original interface of DefaultFunc but
96 // works well enough for the EnvDefaultFunc and MultiEnvDefaultFunc
97 // situations, which are the main cases we care about.
98 //
99 // Note that this also has a consequence for commands that return schema
100 // information for documentation purposes: running those for certain
101 // providers will produce different results depending on which environment
102 // variables are set. We accept that weirdness in order to keep this
103 // interface to core otherwise simple.
104 reqd := s.Required
105 opt := s.Optional
106 if reqd && s.DefaultFunc != nil {
107 v, err := s.DefaultFunc()
108 // We can't report errors from here, so we'll instead just force
109 // "Required" to false and let the provider try calling its
110 // DefaultFunc again during the validate step, where it can then
111 // return the error.
112 if err != nil || (err == nil && v != nil) {
113 reqd = false
114 opt = true
115 }
116 }
117
61 return &configschema.Attribute{ 118 return &configschema.Attribute{
62 Type: s.coreConfigSchemaType(), 119 Type: s.coreConfigSchemaType(),
63 Optional: s.Optional, 120 Optional: opt,
64 Required: s.Required, 121 Required: reqd,
65 Computed: s.Computed, 122 Computed: s.Computed,
66 Sensitive: s.Sensitive, 123 Sensitive: s.Sensitive,
124 Description: s.Description,
67 } 125 }
68} 126}
69 127
@@ -72,7 +130,7 @@ func (s *Schema) coreConfigSchemaAttribute() *configschema.Attribute {
72// of Resource, and will panic otherwise. 130// of Resource, and will panic otherwise.
73func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock { 131func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
74 ret := &configschema.NestedBlock{} 132 ret := &configschema.NestedBlock{}
75 if nested := s.Elem.(*Resource).CoreConfigSchema(); nested != nil { 133 if nested := s.Elem.(*Resource).coreConfigSchema(); nested != nil {
76 ret.Block = *nested 134 ret.Block = *nested
77 } 135 }
78 switch s.Type { 136 switch s.Type {
@@ -95,6 +153,20 @@ func (s *Schema) coreConfigSchemaBlock() *configschema.NestedBlock {
95 // blocks, but we can fake it by requiring at least one item. 153 // blocks, but we can fake it by requiring at least one item.
96 ret.MinItems = 1 154 ret.MinItems = 1
97 } 155 }
156 if s.Optional && s.MinItems > 0 {
157 // Historically helper/schema would ignore MinItems if Optional were
158 // set, so we must mimic this behavior here to ensure that providers
159 // relying on that undocumented behavior can continue to operate as
160 // they did before.
161 ret.MinItems = 0
162 }
163 if s.Computed && !s.Optional {
164 // MinItems/MaxItems are meaningless for computed nested blocks, since
165 // they are never set by the user anyway. This ensures that we'll never
166 // generate weird errors about them.
167 ret.MinItems = 0
168 ret.MaxItems = 0
169 }
98 170
99 return ret 171 return ret
100} 172}
@@ -117,11 +189,16 @@ func (s *Schema) coreConfigSchemaType() cty.Type {
117 switch set := s.Elem.(type) { 189 switch set := s.Elem.(type) {
118 case *Schema: 190 case *Schema:
119 elemType = set.coreConfigSchemaType() 191 elemType = set.coreConfigSchemaType()
192 case ValueType:
193 // This represents a mistake in the provider code, but it's a
194 // common one so we'll just shim it.
195 elemType = (&Schema{Type: set}).coreConfigSchemaType()
120 case *Resource: 196 case *Resource:
121 // In practice we don't actually use this for normal schema 197 // By default we construct a NestedBlock in this case, but this
122 // construction because we construct a NestedBlock in that 198 // behavior is selected either for computed-only schemas or
123 // case instead. See schemaMap.CoreConfigSchema. 199 // when ConfigMode is explicitly SchemaConfigModeBlock.
124 elemType = set.CoreConfigSchema().ImpliedType() 200 // See schemaMap.CoreConfigSchema for the exact rules.
201 elemType = set.coreConfigSchema().ImpliedType()
125 default: 202 default:
126 if set != nil { 203 if set != nil {
127 // Should never happen for a valid schema 204 // Should never happen for a valid schema
@@ -148,8 +225,85 @@ func (s *Schema) coreConfigSchemaType() cty.Type {
148 } 225 }
149} 226}
150 227
151// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema 228// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema on
152// on the resource's schema. 229// the resource's schema. CoreConfigSchema adds the implicitly required "id"
230// attribute for top level resources if it doesn't exist.
153func (r *Resource) CoreConfigSchema() *configschema.Block { 231func (r *Resource) CoreConfigSchema() *configschema.Block {
232 block := r.coreConfigSchema()
233
234 if block.Attributes == nil {
235 block.Attributes = map[string]*configschema.Attribute{}
236 }
237
238 // Add the implicitly required "id" field if it doesn't exist
239 if block.Attributes["id"] == nil {
240 block.Attributes["id"] = &configschema.Attribute{
241 Type: cty.String,
242 Optional: true,
243 Computed: true,
244 }
245 }
246
247 _, timeoutsAttr := block.Attributes[TimeoutsConfigKey]
248 _, timeoutsBlock := block.BlockTypes[TimeoutsConfigKey]
249
250 // Insert configured timeout values into the schema, as long as the schema
251 // didn't define anything else by that name.
252 if r.Timeouts != nil && !timeoutsAttr && !timeoutsBlock {
253 timeouts := configschema.Block{
254 Attributes: map[string]*configschema.Attribute{},
255 }
256
257 if r.Timeouts.Create != nil {
258 timeouts.Attributes[TimeoutCreate] = &configschema.Attribute{
259 Type: cty.String,
260 Optional: true,
261 }
262 }
263
264 if r.Timeouts.Read != nil {
265 timeouts.Attributes[TimeoutRead] = &configschema.Attribute{
266 Type: cty.String,
267 Optional: true,
268 }
269 }
270
271 if r.Timeouts.Update != nil {
272 timeouts.Attributes[TimeoutUpdate] = &configschema.Attribute{
273 Type: cty.String,
274 Optional: true,
275 }
276 }
277
278 if r.Timeouts.Delete != nil {
279 timeouts.Attributes[TimeoutDelete] = &configschema.Attribute{
280 Type: cty.String,
281 Optional: true,
282 }
283 }
284
285 if r.Timeouts.Default != nil {
286 timeouts.Attributes[TimeoutDefault] = &configschema.Attribute{
287 Type: cty.String,
288 Optional: true,
289 }
290 }
291
292 block.BlockTypes[TimeoutsConfigKey] = &configschema.NestedBlock{
293 Nesting: configschema.NestingSingle,
294 Block: timeouts,
295 }
296 }
297
298 return block
299}
300
301func (r *Resource) coreConfigSchema() *configschema.Block {
302 return schemaMap(r.Schema).CoreConfigSchema()
303}
304
305// CoreConfigSchema is a convenient shortcut for calling CoreConfigSchema
306// on the backends's schema.
307func (r *Backend) CoreConfigSchema() *configschema.Block {
154 return schemaMap(r.Schema).CoreConfigSchema() 308 return schemaMap(r.Schema).CoreConfigSchema()
155} 309}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
index b80b223..2a66a06 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
@@ -3,6 +3,7 @@ package schema
3import ( 3import (
4 "fmt" 4 "fmt"
5 "strconv" 5 "strconv"
6 "strings"
6) 7)
7 8
8// FieldReaders are responsible for decoding fields out of data into 9// FieldReaders are responsible for decoding fields out of data into
@@ -41,6 +42,13 @@ func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
41 return s.ZeroValue() 42 return s.ZeroValue()
42} 43}
43 44
45// SchemasForFlatmapPath tries its best to find a sequence of schemas that
46// the given dot-delimited attribute path traverses through.
47func SchemasForFlatmapPath(path string, schemaMap map[string]*Schema) []*Schema {
48 parts := strings.Split(path, ".")
49 return addrToSchema(parts, schemaMap)
50}
51
44// addrToSchema finds the final element schema for the given address 52// addrToSchema finds the final element schema for the given address
45// and the given schema. It returns all the schemas that led to the final 53// and the given schema. It returns all the schemas that led to the final
46// schema. These are in order of the address (out to in). 54// schema. These are in order of the address (out to in).
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
index 55a301d..808375c 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
@@ -2,6 +2,7 @@ package schema
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
5 "strconv" 6 "strconv"
6 "strings" 7 "strings"
7 "sync" 8 "sync"
@@ -93,6 +94,22 @@ func (r *ConfigFieldReader) readField(
93 } 94 }
94 } 95 }
95 96
97 if protoVersion5 {
98 switch schema.Type {
99 case TypeList, TypeSet, TypeMap, typeObject:
100 // Check if the value itself is unknown.
101 // The new protocol shims will add unknown values to this list of
102 // ComputedKeys. This is the only way we have to indicate that a
103 // collection is unknown in the config
104 for _, unknown := range r.Config.ComputedKeys {
105 if k == unknown {
106 log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k)
107 return FieldReadResult{Computed: true, Exists: true}, nil
108 }
109 }
110 }
111 }
112
96 switch schema.Type { 113 switch schema.Type {
97 case TypeBool, TypeFloat, TypeInt, TypeString: 114 case TypeBool, TypeFloat, TypeInt, TypeString:
98 return r.readPrimitive(k, schema) 115 return r.readPrimitive(k, schema)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
index d558a5b..ae35b4a 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
@@ -174,6 +174,9 @@ func (r *DiffFieldReader) readPrimitive(
174 174
175func (r *DiffFieldReader) readSet( 175func (r *DiffFieldReader) readSet(
176 address []string, schema *Schema) (FieldReadResult, error) { 176 address []string, schema *Schema) (FieldReadResult, error) {
177 // copy address to ensure we don't modify the argument
178 address = append([]string(nil), address...)
179
177 prefix := strings.Join(address, ".") + "." 180 prefix := strings.Join(address, ".") + "."
178 181
179 // Create the set that will be our result 182 // Create the set that will be our result
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
index 054efe0..53f73b7 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
@@ -98,6 +98,9 @@ func (r *MapFieldReader) readPrimitive(
98 98
99func (r *MapFieldReader) readSet( 99func (r *MapFieldReader) readSet(
100 address []string, schema *Schema) (FieldReadResult, error) { 100 address []string, schema *Schema) (FieldReadResult, error) {
101 // copy address to ensure we don't modify the argument
102 address = append([]string(nil), address...)
103
101 // Get the number of elements in the list 104 // Get the number of elements in the list
102 countRaw, err := r.readPrimitive( 105 countRaw, err := r.readPrimitive(
103 append(address, "#"), &Schema{Type: TypeInt}) 106 append(address, "#"), &Schema{Type: TypeInt})
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
index 814c7ba..c09358b 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
@@ -297,13 +297,14 @@ func (w *MapFieldWriter) setSet(
297 // we get the proper order back based on the hash code. 297 // we get the proper order back based on the hash code.
298 if v := reflect.ValueOf(value); v.Kind() == reflect.Slice { 298 if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
299 // Build a temp *ResourceData to use for the conversion 299 // Build a temp *ResourceData to use for the conversion
300 tempAddr := addr[len(addr)-1:]
300 tempSchema := *schema 301 tempSchema := *schema
301 tempSchema.Type = TypeList 302 tempSchema.Type = TypeList
302 tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema} 303 tempSchemaMap := map[string]*Schema{tempAddr[0]: &tempSchema}
303 tempW := &MapFieldWriter{Schema: tempSchemaMap} 304 tempW := &MapFieldWriter{Schema: tempSchemaMap}
304 305
305 // Set the entire list, this lets us get sane values out of it 306 // Set the entire list, this lets us get sane values out of it
306 if err := tempW.WriteField(addr, value); err != nil { 307 if err := tempW.WriteField(tempAddr, value); err != nil {
307 return err 308 return err
308 } 309 }
309 310
@@ -319,7 +320,7 @@ func (w *MapFieldWriter) setSet(
319 } 320 }
320 for i := 0; i < v.Len(); i++ { 321 for i := 0; i < v.Len(); i++ {
321 is := strconv.FormatInt(int64(i), 10) 322 is := strconv.FormatInt(int64(i), 10)
322 result, err := tempR.ReadField(append(addrCopy, is)) 323 result, err := tempR.ReadField(append(tempAddr, is))
323 if err != nil { 324 if err != nil {
324 return err 325 return err
325 } 326 }
@@ -340,6 +341,11 @@ func (w *MapFieldWriter) setSet(
340 // problems when the old data isn't wiped first. 341 // problems when the old data isn't wiped first.
341 w.clearTree(addr) 342 w.clearTree(addr)
342 343
344 if value.(*Set) == nil {
345 w.result[k+".#"] = "0"
346 return nil
347 }
348
343 for code, elem := range value.(*Set).m { 349 for code, elem := range value.(*Set).m {
344 if err := w.set(append(addrCopy, code), elem); err != nil { 350 if err := w.set(append(addrCopy, code), elem); err != nil {
345 return err 351 return err
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
index 38cd8c7..0184d7b 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
@@ -4,6 +4,18 @@ package schema
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[getSourceState-1]
12 _ = x[getSourceConfig-2]
13 _ = x[getSourceDiff-4]
14 _ = x[getSourceSet-8]
15 _ = x[getSourceExact-16]
16 _ = x[getSourceLevelMask-15]
17}
18
7const ( 19const (
8 _getSource_name_0 = "getSourceStategetSourceConfig" 20 _getSource_name_0 = "getSourceStategetSourceConfig"
9 _getSource_name_1 = "getSourceDiff" 21 _getSource_name_1 = "getSourceDiff"
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
index 6cd325d..9702447 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
@@ -9,7 +9,7 @@ import (
9 9
10 "github.com/hashicorp/go-multierror" 10 "github.com/hashicorp/go-multierror"
11 "github.com/hashicorp/terraform/config" 11 "github.com/hashicorp/terraform/config"
12 "github.com/hashicorp/terraform/config/configschema" 12 "github.com/hashicorp/terraform/configs/configschema"
13 "github.com/hashicorp/terraform/terraform" 13 "github.com/hashicorp/terraform/terraform"
14) 14)
15 15
@@ -64,6 +64,8 @@ type Provider struct {
64 stopCtx context.Context 64 stopCtx context.Context
65 stopCtxCancel context.CancelFunc 65 stopCtxCancel context.CancelFunc
66 stopOnce sync.Once 66 stopOnce sync.Once
67
68 TerraformVersion string
67} 69}
68 70
69// ConfigureFunc is the function used to configure a Provider. 71// ConfigureFunc is the function used to configure a Provider.
@@ -251,7 +253,7 @@ func (p *Provider) Configure(c *terraform.ResourceConfig) error {
251 253
252 // Get a ResourceData for this configuration. To do this, we actually 254 // Get a ResourceData for this configuration. To do this, we actually
253 // generate an intermediary "diff" although that is never exposed. 255 // generate an intermediary "diff" although that is never exposed.
254 diff, err := sm.Diff(nil, c, nil, p.meta) 256 diff, err := sm.Diff(nil, c, nil, p.meta, true)
255 if err != nil { 257 if err != nil {
256 return err 258 return err
257 } 259 }
@@ -296,6 +298,20 @@ func (p *Provider) Diff(
296 return r.Diff(s, c, p.meta) 298 return r.Diff(s, c, p.meta)
297} 299}
298 300
301// SimpleDiff is used by the new protocol wrappers to get a diff that doesn't
302// attempt to calculate ignore_changes.
303func (p *Provider) SimpleDiff(
304 info *terraform.InstanceInfo,
305 s *terraform.InstanceState,
306 c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
307 r, ok := p.ResourcesMap[info.Type]
308 if !ok {
309 return nil, fmt.Errorf("unknown resource type: %s", info.Type)
310 }
311
312 return r.simpleDiff(s, c, p.meta)
313}
314
299// Refresh implementation of terraform.ResourceProvider interface. 315// Refresh implementation of terraform.ResourceProvider interface.
300func (p *Provider) Refresh( 316func (p *Provider) Refresh(
301 info *terraform.InstanceInfo, 317 info *terraform.InstanceInfo,
@@ -311,7 +327,7 @@ func (p *Provider) Refresh(
311// Resources implementation of terraform.ResourceProvider interface. 327// Resources implementation of terraform.ResourceProvider interface.
312func (p *Provider) Resources() []terraform.ResourceType { 328func (p *Provider) Resources() []terraform.ResourceType {
313 keys := make([]string, 0, len(p.ResourcesMap)) 329 keys := make([]string, 0, len(p.ResourcesMap))
314 for k, _ := range p.ResourcesMap { 330 for k := range p.ResourcesMap {
315 keys = append(keys, k) 331 keys = append(keys, k)
316 } 332 }
317 sort.Strings(keys) 333 sort.Strings(keys)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
index a8d42db..637e221 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
@@ -8,6 +8,7 @@ import (
8 8
9 "github.com/hashicorp/go-multierror" 9 "github.com/hashicorp/go-multierror"
10 "github.com/hashicorp/terraform/config" 10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/configs/configschema"
11 "github.com/hashicorp/terraform/terraform" 12 "github.com/hashicorp/terraform/terraform"
12) 13)
13 14
@@ -121,6 +122,11 @@ func (p *Provisioner) Stop() error {
121 return nil 122 return nil
122} 123}
123 124
125// GetConfigSchema implementation of terraform.ResourceProvisioner interface.
126func (p *Provisioner) GetConfigSchema() (*configschema.Block, error) {
127 return schemaMap(p.Schema).CoreConfigSchema(), nil
128}
129
124// Apply implementation of terraform.ResourceProvisioner interface. 130// Apply implementation of terraform.ResourceProvisioner interface.
125func (p *Provisioner) Apply( 131func (p *Provisioner) Apply(
126 o terraform.UIOutput, 132 o terraform.UIOutput,
@@ -146,7 +152,7 @@ func (p *Provisioner) Apply(
146 } 152 }
147 153
148 sm := schemaMap(p.ConnSchema) 154 sm := schemaMap(p.ConnSchema)
149 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil) 155 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil, true)
150 if err != nil { 156 if err != nil {
151 return err 157 return err
152 } 158 }
@@ -160,7 +166,7 @@ func (p *Provisioner) Apply(
160 // Build the configuration data. Doing this requires making a "diff" 166 // Build the configuration data. Doing this requires making a "diff"
161 // even though that's never used. We use that just to get the correct types. 167 // even though that's never used. We use that just to get the correct types.
162 configMap := schemaMap(p.Schema) 168 configMap := schemaMap(p.Schema)
163 diff, err := configMap.Diff(nil, c, nil, nil) 169 diff, err := configMap.Diff(nil, c, nil, nil, true)
164 if err != nil { 170 if err != nil {
165 return err 171 return err
166 } 172 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
index d3be2d6..b5e3065 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
@@ -8,6 +8,7 @@ import (
8 8
9 "github.com/hashicorp/terraform/config" 9 "github.com/hashicorp/terraform/config"
10 "github.com/hashicorp/terraform/terraform" 10 "github.com/hashicorp/terraform/terraform"
11 "github.com/zclconf/go-cty/cty"
11) 12)
12 13
13// Resource represents a thing in Terraform that has a set of configurable 14// Resource represents a thing in Terraform that has a set of configurable
@@ -44,6 +45,12 @@ type Resource struct {
44 // their Versioning at any integer >= 1 45 // their Versioning at any integer >= 1
45 SchemaVersion int 46 SchemaVersion int
46 47
48 // MigrateState is deprecated and any new changes to a resource's schema
49 // should be handled by StateUpgraders. Existing MigrateState implementations
50 // should remain for compatibility with existing state. MigrateState will
51 // still be called if the stored SchemaVersion is less than the
52 // first version of the StateUpgraders.
53 //
47 // MigrateState is responsible for updating an InstanceState with an old 54 // MigrateState is responsible for updating an InstanceState with an old
48 // version to the format expected by the current version of the Schema. 55 // version to the format expected by the current version of the Schema.
49 // 56 //
@@ -56,6 +63,18 @@ type Resource struct {
56 // needs to make any remote API calls. 63 // needs to make any remote API calls.
57 MigrateState StateMigrateFunc 64 MigrateState StateMigrateFunc
58 65
66 // StateUpgraders contains the functions responsible for upgrading an
67 // existing state with an old schema version to a newer schema. It is
68 // called specifically by Terraform when the stored schema version is less
69 // than the current SchemaVersion of the Resource.
70 //
71 // StateUpgraders map specific schema versions to a StateUpgrader
72 // function. The registered versions are expected to be ordered,
73 // consecutive values. The initial value may be greater than 0 to account
74 // for legacy schemas that weren't recorded and can be handled by
75 // MigrateState.
76 StateUpgraders []StateUpgrader
77
59 // The functions below are the CRUD operations for this resource. 78 // The functions below are the CRUD operations for this resource.
60 // 79 //
61 // The only optional operation is Update. If Update is not implemented, 80 // The only optional operation is Update. If Update is not implemented,
@@ -136,6 +155,27 @@ type Resource struct {
136 Timeouts *ResourceTimeout 155 Timeouts *ResourceTimeout
137} 156}
138 157
158// ShimInstanceStateFromValue converts a cty.Value to a
159// terraform.InstanceState.
160func (r *Resource) ShimInstanceStateFromValue(state cty.Value) (*terraform.InstanceState, error) {
161 // Get the raw shimmed value. While this is correct, the set hashes don't
162 // match those from the Schema.
163 s := terraform.NewInstanceStateShimmedFromValue(state, r.SchemaVersion)
164
165 // We now rebuild the state through the ResourceData, so that the set indexes
166 // match what helper/schema expects.
167 data, err := schemaMap(r.Schema).Data(s, nil)
168 if err != nil {
169 return nil, err
170 }
171
172 s = data.State()
173 if s == nil {
174 s = &terraform.InstanceState{}
175 }
176 return s, nil
177}
178
139// See Resource documentation. 179// See Resource documentation.
140type CreateFunc func(*ResourceData, interface{}) error 180type CreateFunc func(*ResourceData, interface{}) error
141 181
@@ -155,6 +195,27 @@ type ExistsFunc func(*ResourceData, interface{}) (bool, error)
155type StateMigrateFunc func( 195type StateMigrateFunc func(
156 int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error) 196 int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
157 197
198type StateUpgrader struct {
199 // Version is the version schema that this Upgrader will handle, converting
200 // it to Version+1.
201 Version int
202
203 // Type describes the schema that this function can upgrade. Type is
204 // required to decode the schema if the state was stored in a legacy
205 // flatmap format.
206 Type cty.Type
207
208 // Upgrade takes the JSON encoded state and the provider meta value, and
209 // upgrades the state one single schema version. The provided state is
210 // deocded into the default json types using a map[string]interface{}. It
211 // is up to the StateUpgradeFunc to ensure that the returned value can be
212 // encoded using the new schema.
213 Upgrade StateUpgradeFunc
214}
215
216// See StateUpgrader
217type StateUpgradeFunc func(rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error)
218
158// See Resource documentation. 219// See Resource documentation.
159type CustomizeDiffFunc func(*ResourceDiff, interface{}) error 220type CustomizeDiffFunc func(*ResourceDiff, interface{}) error
160 221
@@ -247,7 +308,7 @@ func (r *Resource) Diff(
247 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err) 308 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
248 } 309 }
249 310
250 instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta) 311 instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, true)
251 if err != nil { 312 if err != nil {
252 return instanceDiff, err 313 return instanceDiff, err
253 } 314 }
@@ -263,6 +324,45 @@ func (r *Resource) Diff(
263 return instanceDiff, err 324 return instanceDiff, err
264} 325}
265 326
327func (r *Resource) simpleDiff(
328 s *terraform.InstanceState,
329 c *terraform.ResourceConfig,
330 meta interface{}) (*terraform.InstanceDiff, error) {
331
332 t := &ResourceTimeout{}
333 err := t.ConfigDecode(r, c)
334
335 if err != nil {
336 return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
337 }
338
339 instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false)
340 if err != nil {
341 return instanceDiff, err
342 }
343
344 if instanceDiff == nil {
345 log.Printf("[DEBUG] Instance Diff is nil in SimpleDiff()")
346 return nil, err
347 }
348
349 // Make sure the old value is set in each of the instance diffs.
350 // This was done by the RequiresNew logic in the full legacy Diff.
351 for k, attr := range instanceDiff.Attributes {
352 if attr == nil {
353 continue
354 }
355 if s != nil {
356 attr.Old = s.Attributes[k]
357 }
358 }
359
360 if err := t.DiffEncode(instanceDiff); err != nil {
361 log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
362 }
363 return instanceDiff, err
364}
365
266// Validate validates the resource configuration against the schema. 366// Validate validates the resource configuration against the schema.
267func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) { 367func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
268 warns, errs := schemaMap(r.Schema).Validate(c) 368 warns, errs := schemaMap(r.Schema).Validate(c)
@@ -300,8 +400,11 @@ func (r *Resource) ReadDataApply(
300 return r.recordCurrentSchemaVersion(state), err 400 return r.recordCurrentSchemaVersion(state), err
301} 401}
302 402
303// Refresh refreshes the state of the resource. 403// RefreshWithoutUpgrade reads the instance state, but does not call
304func (r *Resource) Refresh( 404// MigrateState or the StateUpgraders, since those are now invoked in a
405// separate API call.
406// RefreshWithoutUpgrade is part of the new plugin shims.
407func (r *Resource) RefreshWithoutUpgrade(
305 s *terraform.InstanceState, 408 s *terraform.InstanceState,
306 meta interface{}) (*terraform.InstanceState, error) { 409 meta interface{}) (*terraform.InstanceState, error) {
307 // If the ID is already somehow blank, it doesn't exist 410 // If the ID is already somehow blank, it doesn't exist
@@ -335,12 +438,60 @@ func (r *Resource) Refresh(
335 } 438 }
336 } 439 }
337 440
338 needsMigration, stateSchemaVersion := r.checkSchemaVersion(s) 441 data, err := schemaMap(r.Schema).Data(s, nil)
339 if needsMigration && r.MigrateState != nil { 442 data.timeouts = &rt
340 s, err := r.MigrateState(stateSchemaVersion, s, meta) 443 if err != nil {
444 return s, err
445 }
446
447 err = r.Read(data, meta)
448 state := data.State()
449 if state != nil && state.ID == "" {
450 state = nil
451 }
452
453 return r.recordCurrentSchemaVersion(state), err
454}
455
456// Refresh refreshes the state of the resource.
457func (r *Resource) Refresh(
458 s *terraform.InstanceState,
459 meta interface{}) (*terraform.InstanceState, error) {
460 // If the ID is already somehow blank, it doesn't exist
461 if s.ID == "" {
462 return nil, nil
463 }
464
465 rt := ResourceTimeout{}
466 if _, ok := s.Meta[TimeoutKey]; ok {
467 if err := rt.StateDecode(s); err != nil {
468 log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
469 }
470 }
471
472 if r.Exists != nil {
473 // Make a copy of data so that if it is modified it doesn't
474 // affect our Read later.
475 data, err := schemaMap(r.Schema).Data(s, nil)
476 data.timeouts = &rt
477
341 if err != nil { 478 if err != nil {
342 return s, err 479 return s, err
343 } 480 }
481
482 exists, err := r.Exists(data, meta)
483 if err != nil {
484 return s, err
485 }
486 if !exists {
487 return nil, nil
488 }
489 }
490
491 // there may be new StateUpgraders that need to be run
492 s, err := r.upgradeState(s, meta)
493 if err != nil {
494 return s, err
344 } 495 }
345 496
346 data, err := schemaMap(r.Schema).Data(s, nil) 497 data, err := schemaMap(r.Schema).Data(s, nil)
@@ -358,6 +509,71 @@ func (r *Resource) Refresh(
358 return r.recordCurrentSchemaVersion(state), err 509 return r.recordCurrentSchemaVersion(state), err
359} 510}
360 511
512func (r *Resource) upgradeState(s *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) {
513 var err error
514
515 needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
516 migrate := needsMigration && r.MigrateState != nil
517
518 if migrate {
519 s, err = r.MigrateState(stateSchemaVersion, s, meta)
520 if err != nil {
521 return s, err
522 }
523 }
524
525 if len(r.StateUpgraders) == 0 {
526 return s, nil
527 }
528
529 // If we ran MigrateState, then the stateSchemaVersion value is no longer
530 // correct. We can expect the first upgrade function to be the correct
531 // schema type version.
532 if migrate {
533 stateSchemaVersion = r.StateUpgraders[0].Version
534 }
535
536 schemaType := r.CoreConfigSchema().ImpliedType()
537 // find the expected type to convert the state
538 for _, upgrader := range r.StateUpgraders {
539 if stateSchemaVersion == upgrader.Version {
540 schemaType = upgrader.Type
541 }
542 }
543
544 // StateUpgraders only operate on the new JSON format state, so the state
545 // need to be converted.
546 stateVal, err := StateValueFromInstanceState(s, schemaType)
547 if err != nil {
548 return nil, err
549 }
550
551 jsonState, err := StateValueToJSONMap(stateVal, schemaType)
552 if err != nil {
553 return nil, err
554 }
555
556 for _, upgrader := range r.StateUpgraders {
557 if stateSchemaVersion != upgrader.Version {
558 continue
559 }
560
561 jsonState, err = upgrader.Upgrade(jsonState, meta)
562 if err != nil {
563 return nil, err
564 }
565 stateSchemaVersion++
566 }
567
568 // now we need to re-flatmap the new state
569 stateVal, err = JSONMapToStateValue(jsonState, r.CoreConfigSchema())
570 if err != nil {
571 return nil, err
572 }
573
574 return r.ShimInstanceStateFromValue(stateVal)
575}
576
361// InternalValidate should be called to validate the structure 577// InternalValidate should be called to validate the structure
362// of the resource. 578// of the resource.
363// 579//
@@ -437,6 +653,31 @@ func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error
437 } 653 }
438 } 654 }
439 655
656 lastVersion := -1
657 for _, u := range r.StateUpgraders {
658 if lastVersion >= 0 && u.Version-lastVersion > 1 {
659 return fmt.Errorf("missing schema version between %d and %d", lastVersion, u.Version)
660 }
661
662 if u.Version >= r.SchemaVersion {
663 return fmt.Errorf("StateUpgrader version %d is >= current version %d", u.Version, r.SchemaVersion)
664 }
665
666 if !u.Type.IsObjectType() {
667 return fmt.Errorf("StateUpgrader %d type is not cty.Object", u.Version)
668 }
669
670 if u.Upgrade == nil {
671 return fmt.Errorf("StateUpgrader %d missing StateUpgradeFunc", u.Version)
672 }
673
674 lastVersion = u.Version
675 }
676
677 if lastVersion >= 0 && lastVersion != r.SchemaVersion-1 {
678 return fmt.Errorf("missing StateUpgrader between %d and %d", lastVersion, r.SchemaVersion)
679 }
680
440 // Data source 681 // Data source
441 if r.isTopLevel() && !writable { 682 if r.isTopLevel() && !writable {
442 tsm = schemaMap(r.Schema) 683 tsm = schemaMap(r.Schema)
@@ -513,6 +754,13 @@ func (r *Resource) TestResourceData() *ResourceData {
513 } 754 }
514} 755}
515 756
757// SchemasForFlatmapPath tries its best to find a sequence of schemas that
758// the given dot-delimited attribute path traverses through in the schema
759// of the receiving Resource.
760func (r *Resource) SchemasForFlatmapPath(path string) []*Schema {
761 return SchemasForFlatmapPath(path, r.Schema)
762}
763
516// Returns true if the resource is "top level" i.e. not a sub-resource. 764// Returns true if the resource is "top level" i.e. not a sub-resource.
517func (r *Resource) isTopLevel() bool { 765func (r *Resource) isTopLevel() bool {
518 // TODO: This is a heuristic; replace with a definitive attribute? 766 // TODO: This is a heuristic; replace with a definitive attribute?
@@ -538,7 +786,15 @@ func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
538 } 786 }
539 787
540 stateSchemaVersion, _ := strconv.Atoi(rawString) 788 stateSchemaVersion, _ := strconv.Atoi(rawString)
541 return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion 789
790 // Don't run MigrateState if the version is handled by a StateUpgrader,
791 // since StateMigrateFuncs are not required to handle unknown versions
792 maxVersion := r.SchemaVersion
793 if len(r.StateUpgraders) > 0 {
794 maxVersion = r.StateUpgraders[0].Version
795 }
796
797 return stateSchemaVersion < maxVersion, stateSchemaVersion
542} 798}
543 799
544func (r *Resource) recordCurrentSchemaVersion( 800func (r *Resource) recordCurrentSchemaVersion(
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
index 6cc01ee..1c39070 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
@@ -52,6 +52,8 @@ type getResult struct {
52// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary 52// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary
53// values, bypassing schema. This MUST NOT be used in normal circumstances - 53// values, bypassing schema. This MUST NOT be used in normal circumstances -
54// it exists only to support the remote_state data source. 54// it exists only to support the remote_state data source.
55//
56// Deprecated: Fully define schema attributes and use Set() instead.
55func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) { 57func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
56 d.once.Do(d.init) 58 d.once.Do(d.init)
57 59
@@ -219,10 +221,16 @@ func (d *ResourceData) Id() string {
219 221
220 if d.state != nil { 222 if d.state != nil {
221 result = d.state.ID 223 result = d.state.ID
224 if result == "" {
225 result = d.state.Attributes["id"]
226 }
222 } 227 }
223 228
224 if d.newState != nil { 229 if d.newState != nil {
225 result = d.newState.ID 230 result = d.newState.ID
231 if result == "" {
232 result = d.newState.Attributes["id"]
233 }
226 } 234 }
227 235
228 return result 236 return result
@@ -246,6 +254,18 @@ func (d *ResourceData) ConnInfo() map[string]string {
246func (d *ResourceData) SetId(v string) { 254func (d *ResourceData) SetId(v string) {
247 d.once.Do(d.init) 255 d.once.Do(d.init)
248 d.newState.ID = v 256 d.newState.ID = v
257
258 // once we transition away from the legacy state types, "id" will no longer
259 // be a special field, and will become a normal attribute.
260 // set the attribute normally
261 d.setWriter.unsafeWriteField("id", v)
262
263 // Make sure the newState is also set, otherwise the old value
264 // may get precedence.
265 if d.newState.Attributes == nil {
266 d.newState.Attributes = map[string]string{}
267 }
268 d.newState.Attributes["id"] = v
249} 269}
250 270
251// SetConnInfo sets the connection info for a resource. 271// SetConnInfo sets the connection info for a resource.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go
index 7db3dec..47b5481 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go
@@ -367,7 +367,7 @@ func (d *ResourceDiff) Get(key string) interface{} {
367} 367}
368 368
369// GetChange gets the change between the state and diff, checking first to see 369// GetChange gets the change between the state and diff, checking first to see
370// if a overridden diff exists. 370// if an overridden diff exists.
371// 371//
372// This implementation differs from ResourceData's in the way that we first get 372// This implementation differs from ResourceData's in the way that we first get
373// results from the exact levels for the new diff, then from state and diff as 373// results from the exact levels for the new diff, then from state and diff as
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
index 445819f..9e422c1 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
@@ -5,6 +5,7 @@ import (
5 "log" 5 "log"
6 "time" 6 "time"
7 7
8 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/terraform" 9 "github.com/hashicorp/terraform/terraform"
9 "github.com/mitchellh/copystructure" 10 "github.com/mitchellh/copystructure"
10) 11)
@@ -62,55 +63,70 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig)
62 } 63 }
63 64
64 if raw, ok := c.Config[TimeoutsConfigKey]; ok { 65 if raw, ok := c.Config[TimeoutsConfigKey]; ok {
65 if configTimeouts, ok := raw.([]map[string]interface{}); ok { 66 var rawTimeouts []map[string]interface{}
66 for _, timeoutValues := range configTimeouts { 67 switch raw := raw.(type) {
67 // loop through each Timeout given in the configuration and validate they 68 case map[string]interface{}:
68 // the Timeout defined in the resource 69 rawTimeouts = append(rawTimeouts, raw)
69 for timeKey, timeValue := range timeoutValues { 70 case []map[string]interface{}:
70 // validate that we're dealing with the normal CRUD actions 71 rawTimeouts = raw
71 var found bool 72 case string:
72 for _, key := range timeoutKeys() { 73 if raw == config.UnknownVariableValue {
73 if timeKey == key { 74 // Timeout is not defined in the config
74 found = true 75 // Defaults will be used instead
75 break 76 return nil
76 } 77 } else {
77 } 78 log.Printf("[ERROR] Invalid timeout value: %q", raw)
79 return fmt.Errorf("Invalid Timeout value found")
80 }
81 default:
82 log.Printf("[ERROR] Invalid timeout structure: %#v", raw)
83 return fmt.Errorf("Invalid Timeout structure found")
84 }
78 85
79 if !found { 86 for _, timeoutValues := range rawTimeouts {
80 return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey) 87 for timeKey, timeValue := range timeoutValues {
88 // validate that we're dealing with the normal CRUD actions
89 var found bool
90 for _, key := range timeoutKeys() {
91 if timeKey == key {
92 found = true
93 break
81 } 94 }
95 }
82 96
83 // Get timeout 97 if !found {
84 rt, err := time.ParseDuration(timeValue.(string)) 98 return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
85 if err != nil { 99 }
86 return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
87 }
88 100
89 var timeout *time.Duration 101 // Get timeout
90 switch timeKey { 102 rt, err := time.ParseDuration(timeValue.(string))
91 case TimeoutCreate: 103 if err != nil {
92 timeout = t.Create 104 return fmt.Errorf("Error parsing %q timeout: %s", timeKey, err)
93 case TimeoutUpdate: 105 }
94 timeout = t.Update
95 case TimeoutRead:
96 timeout = t.Read
97 case TimeoutDelete:
98 timeout = t.Delete
99 case TimeoutDefault:
100 timeout = t.Default
101 }
102 106
103 // If the resource has not delcared this in the definition, then error 107 var timeout *time.Duration
104 // with an unsupported message 108 switch timeKey {
105 if timeout == nil { 109 case TimeoutCreate:
106 return unsupportedTimeoutKeyError(timeKey) 110 timeout = t.Create
107 } 111 case TimeoutUpdate:
112 timeout = t.Update
113 case TimeoutRead:
114 timeout = t.Read
115 case TimeoutDelete:
116 timeout = t.Delete
117 case TimeoutDefault:
118 timeout = t.Default
119 }
108 120
109 *timeout = rt 121 // If the resource has not delcared this in the definition, then error
122 // with an unsupported message
123 if timeout == nil {
124 return unsupportedTimeoutKeyError(timeKey)
110 } 125 }
126
127 *timeout = rt
111 } 128 }
112 } else { 129 return nil
113 log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
114 } 130 }
115 } 131 }
116 132
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
index 0ea5aad..6a3c15a 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
@@ -12,6 +12,7 @@
12package schema 12package schema
13 13
14import ( 14import (
15 "context"
15 "fmt" 16 "fmt"
16 "os" 17 "os"
17 "reflect" 18 "reflect"
@@ -19,7 +20,9 @@ import (
19 "sort" 20 "sort"
20 "strconv" 21 "strconv"
21 "strings" 22 "strings"
23 "sync"
22 24
25 "github.com/hashicorp/terraform/config"
23 "github.com/hashicorp/terraform/terraform" 26 "github.com/hashicorp/terraform/terraform"
24 "github.com/mitchellh/copystructure" 27 "github.com/mitchellh/copystructure"
25 "github.com/mitchellh/mapstructure" 28 "github.com/mitchellh/mapstructure"
@@ -31,6 +34,27 @@ const PanicOnErr = "TF_SCHEMA_PANIC_ON_ERROR"
31// type used for schema package context keys 34// type used for schema package context keys
32type contextKey string 35type contextKey string
33 36
37var (
38 protoVersionMu sync.Mutex
39 protoVersion5 = false
40)
41
42func isProto5() bool {
43 protoVersionMu.Lock()
44 defer protoVersionMu.Unlock()
45 return protoVersion5
46
47}
48
49// SetProto5 enables a feature flag for any internal changes required required
50// to work with the new plugin protocol. This should not be called by
51// provider.
52func SetProto5() {
53 protoVersionMu.Lock()
54 defer protoVersionMu.Unlock()
55 protoVersion5 = true
56}
57
34// Schema is used to describe the structure of a value. 58// Schema is used to describe the structure of a value.
35// 59//
36// Read the documentation of the struct elements for important details. 60// Read the documentation of the struct elements for important details.
@@ -51,6 +75,26 @@ type Schema struct {
51 // 75 //
52 Type ValueType 76 Type ValueType
53 77
78 // ConfigMode allows for overriding the default behaviors for mapping
79 // schema entries onto configuration constructs.
80 //
81 // By default, the Elem field is used to choose whether a particular
82 // schema is represented in configuration as an attribute or as a nested
83 // block; if Elem is a *schema.Resource then it's a block and it's an
84 // attribute otherwise.
85 //
86 // If Elem is *schema.Resource then setting ConfigMode to
87 // SchemaConfigModeAttr will force it to be represented in configuration
88 // as an attribute, which means that the Computed flag can be used to
89 // provide default elements when the argument isn't set at all, while still
90 // allowing the user to force zero elements by explicitly assigning an
91 // empty list.
92 //
93 // When Computed is set without Optional, the attribute is not settable
94 // in configuration at all and so SchemaConfigModeAttr is the automatic
95 // behavior, and SchemaConfigModeBlock is not permitted.
96 ConfigMode SchemaConfigMode
97
54 // If one of these is set, then this item can come from the configuration. 98 // If one of these is set, then this item can come from the configuration.
55 // Both cannot be set. If Optional is set, the value is optional. If 99 // Both cannot be set. If Optional is set, the value is optional. If
56 // Required is set, the value is required. 100 // Required is set, the value is required.
@@ -123,7 +167,8 @@ type Schema struct {
123 // The following fields are only set for a TypeList, TypeSet, or TypeMap. 167 // The following fields are only set for a TypeList, TypeSet, or TypeMap.
124 // 168 //
125 // Elem represents the element type. For a TypeMap, it must be a *Schema 169 // Elem represents the element type. For a TypeMap, it must be a *Schema
126 // with a Type of TypeString, otherwise it may be either a *Schema or a 170 // with a Type that is one of the primitives: TypeString, TypeBool,
171 // TypeInt, or TypeFloat. Otherwise it may be either a *Schema or a
127 // *Resource. If it is *Schema, the element type is just a simple value. 172 // *Resource. If it is *Schema, the element type is just a simple value.
128 // If it is *Resource, the element type is a complex structure, 173 // If it is *Resource, the element type is a complex structure,
129 // potentially with its own lifecycle. 174 // potentially with its own lifecycle.
@@ -141,13 +186,17 @@ type Schema struct {
141 // used to wrap a complex structure, however less than one instance would 186 // used to wrap a complex structure, however less than one instance would
142 // cause instability. 187 // cause instability.
143 // 188 //
144 // PromoteSingle, if true, will allow single elements to be standalone 189 // If the field Optional is set to true then MinItems is ignored and thus
145 // and promote them to a list. For example "foo" would be promoted to 190 // effectively zero.
146 // ["foo"] automatically. This is primarily for legacy reasons and the 191 MaxItems int
147 // ambiguity is not recommended for new usage. Promotion is only allowed 192 MinItems int
148 // for primitive element types. 193
149 MaxItems int 194 // PromoteSingle originally allowed for a single element to be assigned
150 MinItems int 195 // where a primitive list was expected, but this no longer works from
196 // Terraform v0.12 onwards (Terraform Core will require a list to be set
197 // regardless of what this is set to) and so only applies to Terraform v0.11
198 // and earlier, and so should be used only to retain this functionality
199 // for those still using v0.11 with a provider that formerly used this.
151 PromoteSingle bool 200 PromoteSingle bool
152 201
153 // The following fields are only valid for a TypeSet type. 202 // The following fields are only valid for a TypeSet type.
@@ -189,7 +238,8 @@ type Schema struct {
189 // guaranteed to be of the proper Schema type, and it can yield warnings or 238 // guaranteed to be of the proper Schema type, and it can yield warnings or
190 // errors based on inspection of that value. 239 // errors based on inspection of that value.
191 // 240 //
192 // ValidateFunc currently only works for primitive types. 241 // ValidateFunc is honored only when the schema's Type is set to TypeInt,
242 // TypeFloat, TypeString, TypeBool, or TypeMap. It is ignored for all other types.
193 ValidateFunc SchemaValidateFunc 243 ValidateFunc SchemaValidateFunc
194 244
195 // Sensitive ensures that the attribute's value does not get displayed in 245 // Sensitive ensures that the attribute's value does not get displayed in
@@ -199,6 +249,17 @@ type Schema struct {
199 Sensitive bool 249 Sensitive bool
200} 250}
201 251
252// SchemaConfigMode is used to influence how a schema item is mapped into a
253// corresponding configuration construct, using the ConfigMode field of
254// Schema.
255type SchemaConfigMode int
256
257const (
258 SchemaConfigModeAuto SchemaConfigMode = iota
259 SchemaConfigModeAttr
260 SchemaConfigModeBlock
261)
262
202// SchemaDiffSuppressFunc is a function which can be used to determine 263// SchemaDiffSuppressFunc is a function which can be used to determine
203// whether a detected diff on a schema element is "valid" or not, and 264// whether a detected diff on a schema element is "valid" or not, and
204// suppress it from the plan if necessary. 265// suppress it from the plan if necessary.
@@ -364,6 +425,11 @@ func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *t
364 return d 425 return d
365} 426}
366 427
428// InternalMap is used to aid in the transition to the new schema types and
429// protocol. The name is not meant to convey any usefulness, as this is not to
430// be used directly by any providers.
431type InternalMap = schemaMap
432
367// schemaMap is a wrapper that adds nice functions on top of schemas. 433// schemaMap is a wrapper that adds nice functions on top of schemas.
368type schemaMap map[string]*Schema 434type schemaMap map[string]*Schema
369 435
@@ -404,7 +470,8 @@ func (m schemaMap) Diff(
404 s *terraform.InstanceState, 470 s *terraform.InstanceState,
405 c *terraform.ResourceConfig, 471 c *terraform.ResourceConfig,
406 customizeDiff CustomizeDiffFunc, 472 customizeDiff CustomizeDiffFunc,
407 meta interface{}) (*terraform.InstanceDiff, error) { 473 meta interface{},
474 handleRequiresNew bool) (*terraform.InstanceDiff, error) {
408 result := new(terraform.InstanceDiff) 475 result := new(terraform.InstanceDiff)
409 result.Attributes = make(map[string]*terraform.ResourceAttrDiff) 476 result.Attributes = make(map[string]*terraform.ResourceAttrDiff)
410 477
@@ -450,82 +517,85 @@ func (m schemaMap) Diff(
450 } 517 }
451 } 518 }
452 519
453 // If the diff requires a new resource, then we recompute the diff 520 if handleRequiresNew {
454 // so we have the complete new resource diff, and preserve the 521 // If the diff requires a new resource, then we recompute the diff
455 // RequiresNew fields where necessary so the user knows exactly what 522 // so we have the complete new resource diff, and preserve the
456 // caused that. 523 // RequiresNew fields where necessary so the user knows exactly what
457 if result.RequiresNew() { 524 // caused that.
458 // Create the new diff 525 if result.RequiresNew() {
459 result2 := new(terraform.InstanceDiff) 526 // Create the new diff
460 result2.Attributes = make(map[string]*terraform.ResourceAttrDiff) 527 result2 := new(terraform.InstanceDiff)
461 528 result2.Attributes = make(map[string]*terraform.ResourceAttrDiff)
462 // Preserve the DestroyTainted flag
463 result2.DestroyTainted = result.DestroyTainted
464 529
465 // Reset the data to not contain state. We have to call init() 530 // Preserve the DestroyTainted flag
466 // again in order to reset the FieldReaders. 531 result2.DestroyTainted = result.DestroyTainted
467 d.state = nil
468 d.init()
469 532
470 // Perform the diff again 533 // Reset the data to not contain state. We have to call init()
471 for k, schema := range m { 534 // again in order to reset the FieldReaders.
472 err := m.diff(k, schema, result2, d, false) 535 d.state = nil
473 if err != nil { 536 d.init()
474 return nil, err
475 }
476 }
477 537
478 // Re-run customization 538 // Perform the diff again
479 if !result2.DestroyTainted && customizeDiff != nil { 539 for k, schema := range m {
480 mc := m.DeepCopy() 540 err := m.diff(k, schema, result2, d, false)
481 rd := newResourceDiff(mc, c, d.state, result2)
482 if err := customizeDiff(rd, meta); err != nil {
483 return nil, err
484 }
485 for _, k := range rd.UpdatedKeys() {
486 err := m.diff(k, mc[k], result2, rd, false)
487 if err != nil { 541 if err != nil {
488 return nil, err 542 return nil, err
489 } 543 }
490 } 544 }
491 }
492 545
493 // Force all the fields to not force a new since we know what we 546 // Re-run customization
494 // want to force new. 547 if !result2.DestroyTainted && customizeDiff != nil {
495 for k, attr := range result2.Attributes { 548 mc := m.DeepCopy()
496 if attr == nil { 549 rd := newResourceDiff(mc, c, d.state, result2)
497 continue 550 if err := customizeDiff(rd, meta); err != nil {
551 return nil, err
552 }
553 for _, k := range rd.UpdatedKeys() {
554 err := m.diff(k, mc[k], result2, rd, false)
555 if err != nil {
556 return nil, err
557 }
558 }
498 } 559 }
499 560
500 if attr.RequiresNew { 561 // Force all the fields to not force a new since we know what we
501 attr.RequiresNew = false 562 // want to force new.
502 } 563 for k, attr := range result2.Attributes {
564 if attr == nil {
565 continue
566 }
503 567
504 if s != nil { 568 if attr.RequiresNew {
505 attr.Old = s.Attributes[k] 569 attr.RequiresNew = false
506 } 570 }
507 }
508 571
509 // Now copy in all the requires new diffs... 572 if s != nil {
510 for k, attr := range result.Attributes { 573 attr.Old = s.Attributes[k]
511 if attr == nil { 574 }
512 continue
513 } 575 }
514 576
515 newAttr, ok := result2.Attributes[k] 577 // Now copy in all the requires new diffs...
516 if !ok { 578 for k, attr := range result.Attributes {
517 newAttr = attr 579 if attr == nil {
518 } 580 continue
581 }
519 582
520 if attr.RequiresNew { 583 newAttr, ok := result2.Attributes[k]
521 newAttr.RequiresNew = true 584 if !ok {
585 newAttr = attr
586 }
587
588 if attr.RequiresNew {
589 newAttr.RequiresNew = true
590 }
591
592 result2.Attributes[k] = newAttr
522 } 593 }
523 594
524 result2.Attributes[k] = newAttr 595 // And set the diff!
596 result = result2
525 } 597 }
526 598
527 // And set the diff!
528 result = result2
529 } 599 }
530 600
531 // Go through and detect all of the ComputedWhens now that we've 601 // Go through and detect all of the ComputedWhens now that we've
@@ -611,6 +681,10 @@ func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) {
611// from a unit test (and not in user-path code) to verify that a schema 681// from a unit test (and not in user-path code) to verify that a schema
612// is properly built. 682// is properly built.
613func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error { 683func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
684 return m.internalValidate(topSchemaMap, false)
685}
686
687func (m schemaMap) internalValidate(topSchemaMap schemaMap, attrsOnly bool) error {
614 if topSchemaMap == nil { 688 if topSchemaMap == nil {
615 topSchemaMap = m 689 topSchemaMap = m
616 } 690 }
@@ -631,6 +705,34 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
631 return fmt.Errorf("%s: One of optional, required, or computed must be set", k) 705 return fmt.Errorf("%s: One of optional, required, or computed must be set", k)
632 } 706 }
633 707
708 computedOnly := v.Computed && !v.Optional
709
710 switch v.ConfigMode {
711 case SchemaConfigModeBlock:
712 if _, ok := v.Elem.(*Resource); !ok {
713 return fmt.Errorf("%s: ConfigMode of block is allowed only when Elem is *schema.Resource", k)
714 }
715 if attrsOnly {
716 return fmt.Errorf("%s: ConfigMode of block cannot be used in child of schema with ConfigMode of attribute", k)
717 }
718 if computedOnly {
719 return fmt.Errorf("%s: ConfigMode of block cannot be used for computed schema", k)
720 }
721 case SchemaConfigModeAttr:
722 // anything goes
723 case SchemaConfigModeAuto:
724 // Since "Auto" for Elem: *Resource would create a nested block,
725 // and that's impossible inside an attribute, we require it to be
726 // explicitly overridden as mode "Attr" for clarity.
727 if _, ok := v.Elem.(*Resource); ok {
728 if attrsOnly {
729 return fmt.Errorf("%s: in *schema.Resource with ConfigMode of attribute, so must also have ConfigMode of attribute", k)
730 }
731 }
732 default:
733 return fmt.Errorf("%s: invalid ConfigMode value", k)
734 }
735
634 if v.Computed && v.Default != nil { 736 if v.Computed && v.Default != nil {
635 return fmt.Errorf("%s: Default must be nil if computed", k) 737 return fmt.Errorf("%s: Default must be nil if computed", k)
636 } 738 }
@@ -695,7 +797,9 @@ func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
695 797
696 switch t := v.Elem.(type) { 798 switch t := v.Elem.(type) {
697 case *Resource: 799 case *Resource:
698 if err := t.InternalValidate(topSchemaMap, true); err != nil { 800 attrsOnly := attrsOnly || v.ConfigMode == SchemaConfigModeAttr
801
802 if err := schemaMap(t.Schema).internalValidate(topSchemaMap, attrsOnly); err != nil {
699 return err 803 return err
700 } 804 }
701 case *Schema: 805 case *Schema:
@@ -785,10 +889,19 @@ func (m schemaMap) diff(
785 for attrK, attrV := range unsupressedDiff.Attributes { 889 for attrK, attrV := range unsupressedDiff.Attributes {
786 switch rd := d.(type) { 890 switch rd := d.(type) {
787 case *ResourceData: 891 case *ResourceData:
788 if schema.DiffSuppressFunc != nil && 892 if schema.DiffSuppressFunc != nil && attrV != nil &&
789 attrV != nil &&
790 schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) { 893 schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, rd) {
791 continue 894 // If this attr diff is suppressed, we may still need it in the
895 // overall diff if it's contained within a set. Rather than
896 // dropping the diff, make it a NOOP.
897 if !all {
898 continue
899 }
900
901 attrV = &terraform.ResourceAttrDiff{
902 Old: attrV.Old,
903 New: attrV.Old,
904 }
792 } 905 }
793 } 906 }
794 diff.Attributes[attrK] = attrV 907 diff.Attributes[attrK] = attrV
@@ -1171,7 +1284,7 @@ func (m schemaMap) diffString(
1171 return fmt.Errorf("%s: %s", k, err) 1284 return fmt.Errorf("%s: %s", k, err)
1172 } 1285 }
1173 1286
1174 if os == ns && !all { 1287 if os == ns && !all && !computed {
1175 // They're the same value. If there old value is not blank or we 1288 // They're the same value. If there old value is not blank or we
1176 // have an ID, then return right away since we're already setup. 1289 // have an ID, then return right away since we're already setup.
1177 if os != "" || d.Id() != "" { 1290 if os != "" || d.Id() != "" {
@@ -1179,7 +1292,7 @@ func (m schemaMap) diffString(
1179 } 1292 }
1180 1293
1181 // Otherwise, only continue if we're computed 1294 // Otherwise, only continue if we're computed
1182 if !schema.Computed && !computed { 1295 if !schema.Computed {
1183 return nil 1296 return nil
1184 } 1297 }
1185 } 1298 }
@@ -1210,7 +1323,7 @@ func (m schemaMap) inputString(
1210 input terraform.UIInput, 1323 input terraform.UIInput,
1211 k string, 1324 k string,
1212 schema *Schema) (interface{}, error) { 1325 schema *Schema) (interface{}, error) {
1213 result, err := input.Input(&terraform.InputOpts{ 1326 result, err := input.Input(context.Background(), &terraform.InputOpts{
1214 Id: k, 1327 Id: k,
1215 Query: k, 1328 Query: k,
1216 Description: schema.Description, 1329 Description: schema.Description,
@@ -1252,6 +1365,13 @@ func (m schemaMap) validate(
1252 "%q: this field cannot be set", k)} 1365 "%q: this field cannot be set", k)}
1253 } 1366 }
1254 1367
1368 if raw == config.UnknownVariableValue {
1369 // If the value is unknown then we can't validate it yet.
1370 // In particular, this avoids spurious type errors where downstream
1371 // validation code sees UnknownVariableValue as being just a string.
1372 return nil, nil
1373 }
1374
1255 err := m.validateConflictingAttributes(k, schema, c) 1375 err := m.validateConflictingAttributes(k, schema, c)
1256 if err != nil { 1376 if err != nil {
1257 return nil, []error{err} 1377 return nil, []error{err}
@@ -1269,10 +1389,15 @@ func (m schemaMap) validateConflictingAttributes(
1269 return nil 1389 return nil
1270 } 1390 }
1271 1391
1272 for _, conflicting_key := range schema.ConflictsWith { 1392 for _, conflictingKey := range schema.ConflictsWith {
1273 if _, ok := c.Get(conflicting_key); ok { 1393 if raw, ok := c.Get(conflictingKey); ok {
1394 if raw == config.UnknownVariableValue {
1395 // An unknown value might become unset (null) once known, so
1396 // we must defer validation until it's known.
1397 continue
1398 }
1274 return fmt.Errorf( 1399 return fmt.Errorf(
1275 "%q: conflicts with %s", k, conflicting_key) 1400 "%q: conflicts with %s", k, conflictingKey)
1276 } 1401 }
1277 } 1402 }
1278 1403
@@ -1284,6 +1409,13 @@ func (m schemaMap) validateList(
1284 raw interface{}, 1409 raw interface{},
1285 schema *Schema, 1410 schema *Schema,
1286 c *terraform.ResourceConfig) ([]string, []error) { 1411 c *terraform.ResourceConfig) ([]string, []error) {
1412 // first check if the list is wholly unknown
1413 if s, ok := raw.(string); ok {
1414 if s == config.UnknownVariableValue {
1415 return nil, nil
1416 }
1417 }
1418
1287 // We use reflection to verify the slice because you can't 1419 // We use reflection to verify the slice because you can't
1288 // case to []interface{} unless the slice is exactly that type. 1420 // case to []interface{} unless the slice is exactly that type.
1289 rawV := reflect.ValueOf(raw) 1421 rawV := reflect.ValueOf(raw)
@@ -1355,6 +1487,13 @@ func (m schemaMap) validateMap(
1355 raw interface{}, 1487 raw interface{},
1356 schema *Schema, 1488 schema *Schema,
1357 c *terraform.ResourceConfig) ([]string, []error) { 1489 c *terraform.ResourceConfig) ([]string, []error) {
1490 // first check if the list is wholly unknown
1491 if s, ok := raw.(string); ok {
1492 if s == config.UnknownVariableValue {
1493 return nil, nil
1494 }
1495 }
1496
1358 // We use reflection to verify the slice because you can't 1497 // We use reflection to verify the slice because you can't
1359 // case to []interface{} unless the slice is exactly that type. 1498 // case to []interface{} unless the slice is exactly that type.
1360 rawV := reflect.ValueOf(raw) 1499 rawV := reflect.ValueOf(raw)
@@ -1556,12 +1695,25 @@ func (m schemaMap) validatePrimitive(
1556 } 1695 }
1557 decoded = n 1696 decoded = n
1558 case TypeInt: 1697 case TypeInt:
1559 // Verify that we can parse this as an int 1698 switch {
1560 var n int 1699 case isProto5():
1561 if err := mapstructure.WeakDecode(raw, &n); err != nil { 1700 // We need to verify the type precisely, because WeakDecode will
1562 return nil, []error{fmt.Errorf("%s: %s", k, err)} 1701 // decode a float as an integer.
1702
1703 // the config shims only use int for integral number values
1704 if v, ok := raw.(int); ok {
1705 decoded = v
1706 } else {
1707 return nil, []error{fmt.Errorf("%s: must be a whole number, got %v", k, raw)}
1708 }
1709 default:
1710 // Verify that we can parse this as an int
1711 var n int
1712 if err := mapstructure.WeakDecode(raw, &n); err != nil {
1713 return nil, []error{fmt.Errorf("%s: %s", k, err)}
1714 }
1715 decoded = n
1563 } 1716 }
1564 decoded = n
1565 case TypeFloat: 1717 case TypeFloat:
1566 // Verify that we can parse this as an int 1718 // Verify that we can parse this as an int
1567 var n float64 1719 var n float64
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
index cba2890..8ee89e4 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/set.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
@@ -198,6 +198,16 @@ func (s *Set) add(item interface{}, computed bool) string {
198 code := s.hash(item) 198 code := s.hash(item)
199 if computed { 199 if computed {
200 code = "~" + code 200 code = "~" + code
201
202 if isProto5() {
203 tmpCode := code
204 count := 0
205 for _, exists := s.m[tmpCode]; exists; _, exists = s.m[tmpCode] {
206 count++
207 tmpCode = fmt.Sprintf("%s%d", code, count)
208 }
209 code = tmpCode
210 }
201 } 211 }
202 212
203 if _, ok := s.m[code]; !ok { 213 if _, ok := s.m[code]; !ok {
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/shims.go b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go
new file mode 100644
index 0000000..203d017
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/shims.go
@@ -0,0 +1,115 @@
1package schema
2
3import (
4 "encoding/json"
5
6 "github.com/zclconf/go-cty/cty"
7 ctyjson "github.com/zclconf/go-cty/cty/json"
8
9 "github.com/hashicorp/terraform/config"
10 "github.com/hashicorp/terraform/configs/configschema"
11 "github.com/hashicorp/terraform/terraform"
12)
13
14// DiffFromValues takes the current state and desired state as cty.Values and
15// derives a terraform.InstanceDiff to give to the legacy providers. This is
16// used to take the states provided by the new ApplyResourceChange method and
17// convert them to a state+diff required for the legacy Apply method.
18func DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) {
19 return diffFromValues(prior, planned, res, nil)
20}
21
22// diffFromValues takes an additional CustomizeDiffFunc, so we can generate our
23// test fixtures from the legacy tests. In the new provider protocol the diff
24// only needs to be created for the apply operation, and any customizations
25// have already been done.
26func diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) {
27 instanceState, err := res.ShimInstanceStateFromValue(prior)
28 if err != nil {
29 return nil, err
30 }
31
32 configSchema := res.CoreConfigSchema()
33
34 cfg := terraform.NewResourceConfigShimmed(planned, configSchema)
35 removeConfigUnknowns(cfg.Config)
36 removeConfigUnknowns(cfg.Raw)
37
38 diff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false)
39 if err != nil {
40 return nil, err
41 }
42
43 return diff, err
44}
45
46// During apply the only unknown values are those which are to be computed by
47// the resource itself. These may have been marked as unknown config values, and
48// need to be removed to prevent the UnknownVariableValue from appearing the diff.
49func removeConfigUnknowns(cfg map[string]interface{}) {
50 for k, v := range cfg {
51 switch v := v.(type) {
52 case string:
53 if v == config.UnknownVariableValue {
54 delete(cfg, k)
55 }
56 case []interface{}:
57 for _, i := range v {
58 if m, ok := i.(map[string]interface{}); ok {
59 removeConfigUnknowns(m)
60 }
61 }
62 case map[string]interface{}:
63 removeConfigUnknowns(v)
64 }
65 }
66}
67
68// ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to
69// get a new cty.Value state. This is used to convert the diff returned from
70// the legacy provider Diff method to the state required for the new
71// PlanResourceChange method.
72func ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) {
73 return d.ApplyToValue(base, schema)
74}
75
76// StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON
77// encoding.
78func StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) {
79 js, err := ctyjson.Marshal(val, ty)
80 if err != nil {
81 return nil, err
82 }
83
84 var m map[string]interface{}
85 if err := json.Unmarshal(js, &m); err != nil {
86 return nil, err
87 }
88
89 return m, nil
90}
91
92// JSONMapToStateValue takes a generic json map[string]interface{} and converts it
93// to the specific type, ensuring that the values conform to the schema.
94func JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) {
95 var val cty.Value
96
97 js, err := json.Marshal(m)
98 if err != nil {
99 return val, err
100 }
101
102 val, err = ctyjson.Unmarshal(js, block.ImpliedType())
103 if err != nil {
104 return val, err
105 }
106
107 return block.CoerceValue(val)
108}
109
110// StateValueFromInstanceState converts a terraform.InstanceState to a
111// cty.Value as described by the provided cty.Type, and maintains the resource
112// ID as the "id" attribute.
113func StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) {
114 return is.AttrsAsObjectValue(ty)
115}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
index da754ac..a367a1f 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
@@ -18,7 +18,7 @@ func TestResourceDataRaw(
18 } 18 }
19 19
20 sm := schemaMap(schema) 20 sm := schemaMap(schema)
21 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil) 21 diff, err := sm.Diff(nil, terraform.NewResourceConfig(c), nil, nil, true)
22 if err != nil { 22 if err != nil {
23 t.Fatalf("err: %s", err) 23 t.Fatalf("err: %s", err)
24 } 24 }
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
index 3bc3ac4..914ca32 100644
--- a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
+++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
@@ -4,6 +4,21 @@ package schema
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[TypeInvalid-0]
12 _ = x[TypeBool-1]
13 _ = x[TypeInt-2]
14 _ = x[TypeFloat-3]
15 _ = x[TypeString-4]
16 _ = x[TypeList-5]
17 _ = x[TypeMap-6]
18 _ = x[TypeSet-7]
19 _ = x[typeObject-8]
20}
21
7const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" 22const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
8 23
9var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} 24var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go
new file mode 100644
index 0000000..a9b8f98
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config.go
@@ -0,0 +1,123 @@
1package earlyconfig
2
3import (
4 "fmt"
5 "sort"
6
7 version "github.com/hashicorp/go-version"
8 "github.com/hashicorp/terraform-config-inspect/tfconfig"
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/moduledeps"
11 "github.com/hashicorp/terraform/plugin/discovery"
12 "github.com/hashicorp/terraform/tfdiags"
13)
14
15// A Config is a node in the tree of modules within a configuration.
16//
17// The module tree is constructed by following ModuleCall instances recursively
18// through the root module transitively into descendent modules.
19type Config struct {
20 // RootModule points to the Config for the root module within the same
21 // module tree as this module. If this module _is_ the root module then
22 // this is self-referential.
23 Root *Config
24
25 // ParentModule points to the Config for the module that directly calls
26 // this module. If this is the root module then this field is nil.
27 Parent *Config
28
29 // Path is a sequence of module logical names that traverse from the root
30 // module to this config. Path is empty for the root module.
31 //
32 // This should only be used to display paths to the end-user in rare cases
33 // where we are talking about the static module tree, before module calls
34 // have been resolved. In most cases, an addrs.ModuleInstance describing
35 // a node in the dynamic module tree is better, since it will then include
36 // any keys resulting from evaluating "count" and "for_each" arguments.
37 Path addrs.Module
38
39 // ChildModules points to the Config for each of the direct child modules
40 // called from this module. The keys in this map match the keys in
41 // Module.ModuleCalls.
42 Children map[string]*Config
43
44 // Module points to the object describing the configuration for the
45 // various elements (variables, resources, etc) defined by this module.
46 Module *tfconfig.Module
47
48 // CallPos is the source position for the header of the module block that
49 // requested this module.
50 //
51 // This field is meaningless for the root module, where its contents are undefined.
52 CallPos tfconfig.SourcePos
53
54 // SourceAddr is the source address that the referenced module was requested
55 // from, as specified in configuration.
56 //
57 // This field is meaningless for the root module, where its contents are undefined.
58 SourceAddr string
59
60 // Version is the specific version that was selected for this module,
61 // based on version constraints given in configuration.
62 //
63 // This field is nil if the module was loaded from a non-registry source,
64 // since versions are not supported for other sources.
65 //
66 // This field is meaningless for the root module, where it will always
67 // be nil.
68 Version *version.Version
69}
70
71// ProviderDependencies returns the provider dependencies for the recieving
72// config, including all of its descendent modules.
73func (c *Config) ProviderDependencies() (*moduledeps.Module, tfdiags.Diagnostics) {
74 var diags tfdiags.Diagnostics
75
76 var name string
77 if len(c.Path) > 0 {
78 name = c.Path[len(c.Path)-1]
79 }
80
81 ret := &moduledeps.Module{
82 Name: name,
83 }
84
85 providers := make(moduledeps.Providers)
86 for name, reqs := range c.Module.RequiredProviders {
87 inst := moduledeps.ProviderInstance(name)
88 var constraints version.Constraints
89 for _, reqStr := range reqs {
90 if reqStr != "" {
91 constraint, err := version.NewConstraint(reqStr)
92 if err != nil {
93 diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{
94 Severity: tfconfig.DiagError,
95 Summary: "Invalid provider version constraint",
96 Detail: fmt.Sprintf("Invalid version constraint %q for provider %s.", reqStr, name),
97 }))
98 continue
99 }
100 constraints = append(constraints, constraint...)
101 }
102 }
103 providers[inst] = moduledeps.ProviderDependency{
104 Constraints: discovery.NewConstraints(constraints),
105 Reason: moduledeps.ProviderDependencyExplicit,
106 }
107 }
108 ret.Providers = providers
109
110 childNames := make([]string, 0, len(c.Children))
111 for name := range c.Children {
112 childNames = append(childNames, name)
113 }
114 sort.Strings(childNames)
115
116 for _, name := range childNames {
117 child, childDiags := c.Children[name].ProviderDependencies()
118 ret.Children = append(ret.Children, child)
119 diags = diags.Append(childDiags)
120 }
121
122 return ret, diags
123}
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go
new file mode 100644
index 0000000..770d5df
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/config_build.go
@@ -0,0 +1,144 @@
1package earlyconfig
2
3import (
4 "fmt"
5 "sort"
6 "strings"
7
8 version "github.com/hashicorp/go-version"
9 "github.com/hashicorp/terraform-config-inspect/tfconfig"
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/tfdiags"
12)
13
14// BuildConfig constructs a Config from a root module by loading all of its
15// descendent modules via the given ModuleWalker.
16func BuildConfig(root *tfconfig.Module, walker ModuleWalker) (*Config, tfdiags.Diagnostics) {
17 var diags tfdiags.Diagnostics
18 cfg := &Config{
19 Module: root,
20 }
21 cfg.Root = cfg // Root module is self-referential.
22 cfg.Children, diags = buildChildModules(cfg, walker)
23 return cfg, diags
24}
25
26func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config, tfdiags.Diagnostics) {
27 var diags tfdiags.Diagnostics
28 ret := map[string]*Config{}
29 calls := parent.Module.ModuleCalls
30
31 // We'll sort the calls by their local names so that they'll appear in a
32 // predictable order in any logging that's produced during the walk.
33 callNames := make([]string, 0, len(calls))
34 for k := range calls {
35 callNames = append(callNames, k)
36 }
37 sort.Strings(callNames)
38
39 for _, callName := range callNames {
40 call := calls[callName]
41 path := make([]string, len(parent.Path)+1)
42 copy(path, parent.Path)
43 path[len(path)-1] = call.Name
44
45 var vc version.Constraints
46 if strings.TrimSpace(call.Version) != "" {
47 var err error
48 vc, err = version.NewConstraint(call.Version)
49 if err != nil {
50 diags = diags.Append(wrapDiagnostic(tfconfig.Diagnostic{
51 Severity: tfconfig.DiagError,
52 Summary: "Invalid version constraint",
53 Detail: fmt.Sprintf("Module %q (declared at %s line %d) has invalid version constraint %q: %s.", callName, call.Pos.Filename, call.Pos.Line, call.Version, err),
54 }))
55 continue
56 }
57 }
58
59 req := ModuleRequest{
60 Name: call.Name,
61 Path: path,
62 SourceAddr: call.Source,
63 VersionConstraints: vc,
64 Parent: parent,
65 CallPos: call.Pos,
66 }
67
68 mod, ver, modDiags := walker.LoadModule(&req)
69 diags = append(diags, modDiags...)
70 if mod == nil {
71 // nil can be returned if the source address was invalid and so
72 // nothing could be loaded whatsoever. LoadModule should've
73 // returned at least one error diagnostic in that case.
74 continue
75 }
76
77 child := &Config{
78 Parent: parent,
79 Root: parent.Root,
80 Path: path,
81 Module: mod,
82 CallPos: call.Pos,
83 SourceAddr: call.Source,
84 Version: ver,
85 }
86
87 child.Children, modDiags = buildChildModules(child, walker)
88 diags = diags.Append(modDiags)
89
90 ret[call.Name] = child
91 }
92
93 return ret, diags
94}
95
96// ModuleRequest is used as part of the ModuleWalker interface used with
97// function BuildConfig.
98type ModuleRequest struct {
99 // Name is the "logical name" of the module call within configuration.
100 // This is provided in case the name is used as part of a storage key
101 // for the module, but implementations must otherwise treat it as an
102 // opaque string. It is guaranteed to have already been validated as an
103 // HCL identifier and UTF-8 encoded.
104 Name string
105
106 // Path is a list of logical names that traverse from the root module to
107 // this module. This can be used, for example, to form a lookup key for
108 // each distinct module call in a configuration, allowing for multiple
109 // calls with the same name at different points in the tree.
110 Path addrs.Module
111
112 // SourceAddr is the source address string provided by the user in
113 // configuration.
114 SourceAddr string
115
116 // VersionConstraint is the version constraint applied to the module in
117 // configuration.
118 VersionConstraints version.Constraints
119
120 // Parent is the partially-constructed module tree node that the loaded
121 // module will be added to. Callers may refer to any field of this
122 // structure except Children, which is still under construction when
123 // ModuleRequest objects are created and thus has undefined content.
124 // The main reason this is provided is so that full module paths can
125 // be constructed for uniqueness.
126 Parent *Config
127
128 // CallRange is the source position for the header of the "module" block
129 // in configuration that prompted this request.
130 CallPos tfconfig.SourcePos
131}
132
133// ModuleWalker is an interface used with BuildConfig.
134type ModuleWalker interface {
135 LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics)
136}
137
138// ModuleWalkerFunc is an implementation of ModuleWalker that directly wraps
139// a callback function, for more convenient use of that interface.
140type ModuleWalkerFunc func(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics)
141
142func (f ModuleWalkerFunc) LoadModule(req *ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
143 return f(req)
144}
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go
new file mode 100644
index 0000000..9b2fd7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/diagnostics.go
@@ -0,0 +1,78 @@
1package earlyconfig
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform-config-inspect/tfconfig"
7 "github.com/hashicorp/terraform/tfdiags"
8)
9
10func wrapDiagnostics(diags tfconfig.Diagnostics) tfdiags.Diagnostics {
11 ret := make(tfdiags.Diagnostics, len(diags))
12 for i, diag := range diags {
13 ret[i] = wrapDiagnostic(diag)
14 }
15 return ret
16}
17
18func wrapDiagnostic(diag tfconfig.Diagnostic) tfdiags.Diagnostic {
19 return wrappedDiagnostic{
20 d: diag,
21 }
22}
23
24type wrappedDiagnostic struct {
25 d tfconfig.Diagnostic
26}
27
28func (d wrappedDiagnostic) Severity() tfdiags.Severity {
29 switch d.d.Severity {
30 case tfconfig.DiagError:
31 return tfdiags.Error
32 case tfconfig.DiagWarning:
33 return tfdiags.Warning
34 default:
35 // Should never happen since there are no other severities
36 return 0
37 }
38}
39
40func (d wrappedDiagnostic) Description() tfdiags.Description {
41 // Since the inspect library doesn't produce precise source locations,
42 // we include the position information as part of the error message text.
43 // See the comment inside method "Source" for more information.
44 switch {
45 case d.d.Pos == nil:
46 return tfdiags.Description{
47 Summary: d.d.Summary,
48 Detail: d.d.Detail,
49 }
50 case d.d.Detail != "":
51 return tfdiags.Description{
52 Summary: d.d.Summary,
53 Detail: fmt.Sprintf("On %s line %d: %s", d.d.Pos.Filename, d.d.Pos.Line, d.d.Detail),
54 }
55 default:
56 return tfdiags.Description{
57 Summary: fmt.Sprintf("%s (on %s line %d)", d.d.Summary, d.d.Pos.Filename, d.d.Pos.Line),
58 }
59 }
60}
61
62func (d wrappedDiagnostic) Source() tfdiags.Source {
63 // Since the inspect library is constrained by the lowest common denominator
64 // between legacy HCL and modern HCL, it only returns ranges at whole-line
65 // granularity, and that isn't sufficient to populate a tfdiags.Source
66 // and so we'll just omit ranges altogether and include the line number in
67 // the Description text.
68 //
69 // Callers that want to return nicer errors should consider reacting to
70 // earlyconfig errors by attempting a follow-up parse with the normal
71 // config loader, which can produce more precise source location
72 // information.
73 return tfdiags.Source{}
74}
75
76func (d wrappedDiagnostic) FromExpr() *tfdiags.FromExpr {
77 return nil
78}
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go
new file mode 100644
index 0000000..a9cf10f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/doc.go
@@ -0,0 +1,20 @@
1// Package earlyconfig is a specialized alternative to the top-level "configs"
2// package that does only shallow processing of configuration and is therefore
3// able to be much more liberal than the full config loader in what it accepts.
4//
5// In particular, it can accept both current and legacy HCL syntax, and it
6// ignores top-level blocks that it doesn't recognize. These two characteristics
7// make this package ideal for dependency-checking use-cases so that we are
8// more likely to be able to return an error message about an explicit
9// incompatibility than to return a less-actionable message about a construct
10// not being supported.
11//
12// However, its liberal approach also means it should be used sparingly. It
13// exists primarily for "terraform init", so that it is able to detect
14// incompatibilities more robustly when installing dependencies. For most
15// other use-cases, use the "configs" and "configs/configload" packages.
16//
17// Package earlyconfig is a wrapper around the terraform-config-inspect
18// codebase, adding to it just some helper functionality for Terraform's own
19// use-cases.
20package earlyconfig
diff --git a/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go
new file mode 100644
index 0000000..d2d6287
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/earlyconfig/module.go
@@ -0,0 +1,13 @@
1package earlyconfig
2
3import (
4 "github.com/hashicorp/terraform-config-inspect/tfconfig"
5 "github.com/hashicorp/terraform/tfdiags"
6)
7
8// LoadModule loads some top-level metadata for the module in the given
9// directory.
10func LoadModule(dir string) (*tfconfig.Module, tfdiags.Diagnostics) {
11 mod, diags := tfconfig.LoadModule(dir)
12 return mod, wrapDiagnostics(diags)
13}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go b/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go
new file mode 100644
index 0000000..7096ff7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/copy_dir.go
@@ -0,0 +1,125 @@
1package initwd
2
3import (
4 "io"
5 "os"
6 "path/filepath"
7 "strings"
8)
9
10// copyDir copies the src directory contents into dst. Both directories
11// should already exist.
12func copyDir(dst, src string) error {
13 src, err := filepath.EvalSymlinks(src)
14 if err != nil {
15 return err
16 }
17
18 walkFn := func(path string, info os.FileInfo, err error) error {
19 if err != nil {
20 return err
21 }
22
23 if path == src {
24 return nil
25 }
26
27 if strings.HasPrefix(filepath.Base(path), ".") {
28 // Skip any dot files
29 if info.IsDir() {
30 return filepath.SkipDir
31 } else {
32 return nil
33 }
34 }
35
36 // The "path" has the src prefixed to it. We need to join our
37 // destination with the path without the src on it.
38 dstPath := filepath.Join(dst, path[len(src):])
39
40 // we don't want to try and copy the same file over itself.
41 if eq, err := sameFile(path, dstPath); eq {
42 return nil
43 } else if err != nil {
44 return err
45 }
46
47 // If we have a directory, make that subdirectory, then continue
48 // the walk.
49 if info.IsDir() {
50 if path == filepath.Join(src, dst) {
51 // dst is in src; don't walk it.
52 return nil
53 }
54
55 if err := os.MkdirAll(dstPath, 0755); err != nil {
56 return err
57 }
58
59 return nil
60 }
61
62 // If the current path is a symlink, recreate the symlink relative to
63 // the dst directory
64 if info.Mode()&os.ModeSymlink == os.ModeSymlink {
65 target, err := os.Readlink(path)
66 if err != nil {
67 return err
68 }
69
70 return os.Symlink(target, dstPath)
71 }
72
73 // If we have a file, copy the contents.
74 srcF, err := os.Open(path)
75 if err != nil {
76 return err
77 }
78 defer srcF.Close()
79
80 dstF, err := os.Create(dstPath)
81 if err != nil {
82 return err
83 }
84 defer dstF.Close()
85
86 if _, err := io.Copy(dstF, srcF); err != nil {
87 return err
88 }
89
90 // Chmod it
91 return os.Chmod(dstPath, info.Mode())
92 }
93
94 return filepath.Walk(src, walkFn)
95}
96
97// sameFile tried to determine if to paths are the same file.
98// If the paths don't match, we lookup the inode on supported systems.
99func sameFile(a, b string) (bool, error) {
100 if a == b {
101 return true, nil
102 }
103
104 aIno, err := inode(a)
105 if err != nil {
106 if os.IsNotExist(err) {
107 return false, nil
108 }
109 return false, err
110 }
111
112 bIno, err := inode(b)
113 if err != nil {
114 if os.IsNotExist(err) {
115 return false, nil
116 }
117 return false, err
118 }
119
120 if aIno > 0 && aIno == bIno {
121 return true, nil
122 }
123
124 return false, nil
125}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go b/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go
new file mode 100644
index 0000000..b9d938d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/doc.go
@@ -0,0 +1,7 @@
1// Package initwd contains various helper functions used by the "terraform init"
2// command to initialize a working directory.
3//
4// These functions may also be used from testing code to simulate the behaviors
5// of "terraform init" against test fixtures, but should not be used elsewhere
6// in the main code.
7package initwd
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go b/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go
new file mode 100644
index 0000000..6b40d08
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/from_module.go
@@ -0,0 +1,363 @@
1package initwd
2
3import (
4 "fmt"
5 "github.com/hashicorp/terraform/internal/earlyconfig"
6 "io/ioutil"
7 "log"
8 "os"
9 "path/filepath"
10 "sort"
11 "strings"
12
13 version "github.com/hashicorp/go-version"
14 "github.com/hashicorp/terraform-config-inspect/tfconfig"
15 "github.com/hashicorp/terraform/internal/modsdir"
16 "github.com/hashicorp/terraform/registry"
17 "github.com/hashicorp/terraform/tfdiags"
18)
19
// initFromModuleRootCallName is the synthetic module call name used to seed
// installation when initializing a working directory via -from-module.
const initFromModuleRootCallName = "root"

// initFromModuleRootKeyPrefix is the manifest-key prefix shared by all
// descendents of the synthetic root call.
const initFromModuleRootKeyPrefix = initFromModuleRootCallName + "."
22
23// DirFromModule populates the given directory (which must exist and be
24// empty) with the contents of the module at the given source address.
25//
26// It does this by installing the given module and all of its descendent
27// modules in a temporary root directory and then copying the installed
28// files into suitable locations. As a consequence, any diagnostics it
29// generates will reveal the location of this temporary directory to the
30// user.
31//
32// This rather roundabout installation approach is taken to ensure that
33// installation proceeds in a manner identical to normal module installation.
34//
35// If the given source address specifies a sub-directory of the given
36// package then only the sub-directory and its descendents will be copied
37// into the given root directory, which will cause any relative module
38// references using ../ from that module to be unresolvable. Error diagnostics
39// are produced in that case, to prompt the user to rewrite the source strings
40// to be absolute references to the original remote module.
41func DirFromModule(rootDir, modulesDir, sourceAddr string, reg *registry.Client, hooks ModuleInstallHooks) tfdiags.Diagnostics {
42 var diags tfdiags.Diagnostics
43
44 // The way this function works is pretty ugly, but we accept it because
45 // -from-module is a less important case than normal module installation
46 // and so it's better to keep this ugly complexity out here rather than
47 // adding even more complexity to the normal module installer.
48
49 // The target directory must exist but be empty.
50 {
51 entries, err := ioutil.ReadDir(rootDir)
52 if err != nil {
53 if os.IsNotExist(err) {
54 diags = diags.Append(tfdiags.Sourceless(
55 tfdiags.Error,
56 "Target directory does not exist",
57 fmt.Sprintf("Cannot initialize non-existent directory %s.", rootDir),
58 ))
59 } else {
60 diags = diags.Append(tfdiags.Sourceless(
61 tfdiags.Error,
62 "Failed to read target directory",
63 fmt.Sprintf("Error reading %s to ensure it is empty: %s.", rootDir, err),
64 ))
65 }
66 return diags
67 }
68 haveEntries := false
69 for _, entry := range entries {
70 if entry.Name() == "." || entry.Name() == ".." || entry.Name() == ".terraform" {
71 continue
72 }
73 haveEntries = true
74 }
75 if haveEntries {
76 diags = diags.Append(tfdiags.Sourceless(
77 tfdiags.Error,
78 "Can't populate non-empty directory",
79 fmt.Sprintf("The target directory %s is not empty, so it cannot be initialized with the -from-module=... option.", rootDir),
80 ))
81 return diags
82 }
83 }
84
85 instDir := filepath.Join(rootDir, ".terraform/init-from-module")
86 inst := NewModuleInstaller(instDir, reg)
87 log.Printf("[DEBUG] installing modules in %s to initialize working directory from %q", instDir, sourceAddr)
88 os.RemoveAll(instDir) // if this fails then we'll fail on MkdirAll below too
89 err := os.MkdirAll(instDir, os.ModePerm)
90 if err != nil {
91 diags = diags.Append(tfdiags.Sourceless(
92 tfdiags.Error,
93 "Failed to create temporary directory",
94 fmt.Sprintf("Failed to create temporary directory %s: %s.", instDir, err),
95 ))
96 return diags
97 }
98
99 instManifest := make(modsdir.Manifest)
100 retManifest := make(modsdir.Manifest)
101
102 fakeFilename := fmt.Sprintf("-from-module=%q", sourceAddr)
103 fakePos := tfconfig.SourcePos{
104 Filename: fakeFilename,
105 Line: 1,
106 }
107
108 // -from-module allows relative paths but it's different than a normal
109 // module address where it'd be resolved relative to the module call
110 // (which is synthetic, here.) To address this, we'll just patch up any
111 // relative paths to be absolute paths before we run, ensuring we'll
112 // get the right result. This also, as an important side-effect, ensures
113 // that the result will be "downloaded" with go-getter (copied from the
114 // source location), rather than just recorded as a relative path.
115 {
116 maybePath := filepath.ToSlash(sourceAddr)
117 if maybePath == "." || strings.HasPrefix(maybePath, "./") || strings.HasPrefix(maybePath, "../") {
118 if wd, err := os.Getwd(); err == nil {
119 sourceAddr = filepath.Join(wd, sourceAddr)
120 log.Printf("[TRACE] -from-module relative path rewritten to absolute path %s", sourceAddr)
121 }
122 }
123 }
124
125 // Now we need to create an artificial root module that will seed our
126 // installation process.
127 fakeRootModule := &tfconfig.Module{
128 ModuleCalls: map[string]*tfconfig.ModuleCall{
129 initFromModuleRootCallName: {
130 Name: initFromModuleRootCallName,
131 Source: sourceAddr,
132 Pos: fakePos,
133 },
134 },
135 }
136
137 // wrapHooks filters hook notifications to only include Download calls
138 // and to trim off the initFromModuleRootCallName prefix. We'll produce
139 // our own Install notifications directly below.
140 wrapHooks := installHooksInitDir{
141 Wrapped: hooks,
142 }
143 getter := reusingGetter{}
144 _, instDiags := inst.installDescendentModules(fakeRootModule, rootDir, instManifest, true, wrapHooks, getter)
145 diags = append(diags, instDiags...)
146 if instDiags.HasErrors() {
147 return diags
148 }
149
150 // If all of that succeeded then we'll now migrate what was installed
151 // into the final directory structure.
152 err = os.MkdirAll(modulesDir, os.ModePerm)
153 if err != nil {
154 diags = diags.Append(tfdiags.Sourceless(
155 tfdiags.Error,
156 "Failed to create local modules directory",
157 fmt.Sprintf("Failed to create modules directory %s: %s.", modulesDir, err),
158 ))
159 return diags
160 }
161
162 recordKeys := make([]string, 0, len(instManifest))
163 for k := range instManifest {
164 recordKeys = append(recordKeys, k)
165 }
166 sort.Strings(recordKeys)
167
168 for _, recordKey := range recordKeys {
169 record := instManifest[recordKey]
170
171 if record.Key == initFromModuleRootCallName {
172 // We've found the module the user requested, which we must
173 // now copy into rootDir so it can be used directly.
174 log.Printf("[TRACE] copying new root module from %s to %s", record.Dir, rootDir)
175 err := copyDir(rootDir, record.Dir)
176 if err != nil {
177 diags = diags.Append(tfdiags.Sourceless(
178 tfdiags.Error,
179 "Failed to copy root module",
180 fmt.Sprintf("Error copying root module %q from %s to %s: %s.", sourceAddr, record.Dir, rootDir, err),
181 ))
182 continue
183 }
184
185 // We'll try to load the newly-copied module here just so we can
186 // sniff for any module calls that ../ out of the root directory
187 // and must thus be rewritten to be absolute addresses again.
188 // For now we can't do this rewriting automatically, but we'll
189 // generate an error to help the user do it manually.
190 mod, _ := earlyconfig.LoadModule(rootDir) // ignore diagnostics since we're just doing value-add here anyway
191 if mod != nil {
192 for _, mc := range mod.ModuleCalls {
193 if pathTraversesUp(mc.Source) {
194 packageAddr, givenSubdir := splitAddrSubdir(sourceAddr)
195 newSubdir := filepath.Join(givenSubdir, mc.Source)
196 if pathTraversesUp(newSubdir) {
197 // This should never happen in any reasonable
198 // configuration since this suggests a path that
199 // traverses up out of the package root. We'll just
200 // ignore this, since we'll fail soon enough anyway
201 // trying to resolve this path when this module is
202 // loaded.
203 continue
204 }
205
206 var newAddr = packageAddr
207 if newSubdir != "" {
208 newAddr = fmt.Sprintf("%s//%s", newAddr, filepath.ToSlash(newSubdir))
209 }
210 diags = diags.Append(tfdiags.Sourceless(
211 tfdiags.Error,
212 "Root module references parent directory",
213 fmt.Sprintf("The requested module %q refers to a module via its parent directory. To use this as a new root module this source string must be rewritten as a remote source address, such as %q.", sourceAddr, newAddr),
214 ))
215 continue
216 }
217 }
218 }
219
220 retManifest[""] = modsdir.Record{
221 Key: "",
222 Dir: rootDir,
223 }
224 continue
225 }
226
227 if !strings.HasPrefix(record.Key, initFromModuleRootKeyPrefix) {
228 // Ignore the *real* root module, whose key is empty, since
229 // we're only interested in the module named "root" and its
230 // descendents.
231 continue
232 }
233
234 newKey := record.Key[len(initFromModuleRootKeyPrefix):]
235 instPath := filepath.Join(modulesDir, newKey)
236 tempPath := filepath.Join(instDir, record.Key)
237
238 // tempPath won't be present for a module that was installed from
239 // a relative path, so in that case we just record the installation
240 // directory and assume it was already copied into place as part
241 // of its parent.
242 if _, err := os.Stat(tempPath); err != nil {
243 if !os.IsNotExist(err) {
244 diags = diags.Append(tfdiags.Sourceless(
245 tfdiags.Error,
246 "Failed to stat temporary module install directory",
247 fmt.Sprintf("Error from stat %s for module %s: %s.", instPath, newKey, err),
248 ))
249 continue
250 }
251
252 var parentKey string
253 if lastDot := strings.LastIndexByte(newKey, '.'); lastDot != -1 {
254 parentKey = newKey[:lastDot]
255 } else {
256 parentKey = "" // parent is the root module
257 }
258
259 parentOld := instManifest[initFromModuleRootKeyPrefix+parentKey]
260 parentNew := retManifest[parentKey]
261
262 // We need to figure out which portion of our directory is the
263 // parent package path and which portion is the subdirectory
264 // under that.
265 baseDirRel, err := filepath.Rel(parentOld.Dir, record.Dir)
266 if err != nil {
267 // Should never happen, because we constructed both directories
268 // from the same base and so they must have a common prefix.
269 panic(err)
270 }
271
272 newDir := filepath.Join(parentNew.Dir, baseDirRel)
273 log.Printf("[TRACE] relative reference for %s rewritten from %s to %s", newKey, record.Dir, newDir)
274 newRecord := record // shallow copy
275 newRecord.Dir = newDir
276 newRecord.Key = newKey
277 retManifest[newKey] = newRecord
278 hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
279 continue
280 }
281
282 err = os.MkdirAll(instPath, os.ModePerm)
283 if err != nil {
284 diags = diags.Append(tfdiags.Sourceless(
285 tfdiags.Error,
286 "Failed to create module install directory",
287 fmt.Sprintf("Error creating directory %s for module %s: %s.", instPath, newKey, err),
288 ))
289 continue
290 }
291
292 // We copy rather than "rename" here because renaming between directories
293 // can be tricky in edge-cases like network filesystems, etc.
294 log.Printf("[TRACE] copying new module %s from %s to %s", newKey, record.Dir, instPath)
295 err := copyDir(instPath, tempPath)
296 if err != nil {
297 diags = diags.Append(tfdiags.Sourceless(
298 tfdiags.Error,
299 "Failed to copy descendent module",
300 fmt.Sprintf("Error copying module %q from %s to %s: %s.", newKey, tempPath, rootDir, err),
301 ))
302 continue
303 }
304
305 subDir, err := filepath.Rel(tempPath, record.Dir)
306 if err != nil {
307 // Should never happen, because we constructed both directories
308 // from the same base and so they must have a common prefix.
309 panic(err)
310 }
311
312 newRecord := record // shallow copy
313 newRecord.Dir = filepath.Join(instPath, subDir)
314 newRecord.Key = newKey
315 retManifest[newKey] = newRecord
316 hooks.Install(newRecord.Key, newRecord.Version, newRecord.Dir)
317 }
318
319 retManifest.WriteSnapshotToDir(modulesDir)
320 if err != nil {
321 diags = diags.Append(tfdiags.Sourceless(
322 tfdiags.Error,
323 "Failed to write module manifest",
324 fmt.Sprintf("Error writing module manifest: %s.", err),
325 ))
326 }
327
328 if !diags.HasErrors() {
329 // Try to clean up our temporary directory, but don't worry if we don't
330 // succeed since it shouldn't hurt anything.
331 os.RemoveAll(instDir)
332 }
333
334 return diags
335}
336
337func pathTraversesUp(path string) bool {
338 return strings.HasPrefix(filepath.ToSlash(path), "../")
339}
340
// installHooksInitDir is an adapter wrapper for an InstallHooks that
// does some fakery to make downloads look like they are happening in their
// final locations, rather than in the temporary loader we use.
//
// It also suppresses "Install" calls entirely, since InitDirFromModule
// does its own installation steps after the initial installation pass
// has completed.
type installHooksInitDir struct {
	// Wrapped receives the filtered Download notifications.
	Wrapped ModuleInstallHooks
	// Embedding the no-op implementation makes all other hook methods
	// silently do nothing.
	ModuleInstallHooksImpl
}
352
353func (h installHooksInitDir) Download(moduleAddr, packageAddr string, version *version.Version) {
354 if !strings.HasPrefix(moduleAddr, initFromModuleRootKeyPrefix) {
355 // We won't announce the root module, since hook implementations
356 // don't expect to see that and the caller will usually have produced
357 // its own user-facing notification about what it's doing anyway.
358 return
359 }
360
361 trimAddr := moduleAddr[len(initFromModuleRootKeyPrefix):]
362 h.Wrapped.Download(trimAddr, packageAddr, version)
363}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
new file mode 100644
index 0000000..50e2572
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
@@ -0,0 +1,210 @@
1package initwd
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "path/filepath"
8 "strings"
9
10 cleanhttp "github.com/hashicorp/go-cleanhttp"
11 getter "github.com/hashicorp/go-getter"
12 "github.com/hashicorp/terraform/registry/regsrc"
13)
14
// We configure our own go-getter detector and getter sets here, because
// the set of sources we support is part of Terraform's documentation and
// so we don't want any new sources introduced in go-getter to sneak in here
// and work even though they aren't documented. This also insulates us from
// any meddling that might be done by other go-getter callers linked into our
// executable.

// goGetterDetectors is the curated set of source-address detectors that
// this package supports for module sources.
var goGetterDetectors = []getter.Detector{
	new(getter.GitHubDetector),
	new(getter.BitBucketDetector),
	new(getter.S3Detector),
	new(getter.FileDetector),
}

// goGetterNoDetectors disables detection entirely, for use once an
// address has already been resolved.
var goGetterNoDetectors = []getter.Detector{}

// goGetterDecompressors maps archive filename suffixes to the
// decompressors used to unpack downloaded module packages.
var goGetterDecompressors = map[string]getter.Decompressor{
	"bz2": new(getter.Bzip2Decompressor),
	"gz":  new(getter.GzipDecompressor),
	"xz":  new(getter.XzDecompressor),
	"zip": new(getter.ZipDecompressor),

	"tar.bz2":  new(getter.TarBzip2Decompressor),
	"tar.tbz2": new(getter.TarBzip2Decompressor),

	"tar.gz": new(getter.TarGzipDecompressor),
	"tgz":    new(getter.TarGzipDecompressor),

	"tar.xz": new(getter.TarXzDecompressor),
	"txz":    new(getter.TarXzDecompressor),
}

// goGetterGetters maps URL schemes to the getters used to fetch them.
var goGetterGetters = map[string]getter.Getter{
	"file":  new(getter.FileGetter),
	"git":   new(getter.GitGetter),
	"hg":    new(getter.HgGetter),
	"s3":    new(getter.S3Getter),
	"http":  getterHTTPGetter,
	"https": getterHTTPGetter,
}

// getterHTTPClient is the shared HTTP client used by the http/https getters.
var getterHTTPClient = cleanhttp.DefaultClient()

// getterHTTPGetter fetches http/https sources, honoring .netrc credentials.
var getterHTTPGetter = &getter.HttpGetter{
	Client: getterHTTPClient,
	Netrc:  true,
}
62
// A reusingGetter is a helper for the module installer that remembers
// the final resolved addresses of all of the sources it has already been
// asked to install, and will copy from a prior installation directory if
// it has the same resolved source address.
//
// The keys in a reusingGetter are resolved and trimmed source addresses
// (with a scheme always present, and without any "subdir" component),
// and the values are the paths where each source was previously installed.
//
// Like any plain Go map, a reusingGetter is not safe for concurrent use.
type reusingGetter map[string]string
72
73// getWithGoGetter retrieves the package referenced in the given address
74// into the installation path and then returns the full path to any subdir
75// indicated in the address.
76//
77// The errors returned by this function are those surfaced by the underlying
78// go-getter library, which have very inconsistent quality as
79// end-user-actionable error messages. At this time we do not have any
80// reasonable way to improve these error messages at this layer because
81// the underlying errors are not separately recognizable.
82func (g reusingGetter) getWithGoGetter(instPath, addr string) (string, error) {
83 packageAddr, subDir := splitAddrSubdir(addr)
84
85 log.Printf("[DEBUG] will download %q to %s", packageAddr, instPath)
86
87 realAddr, err := getter.Detect(packageAddr, instPath, getter.Detectors)
88 if err != nil {
89 return "", err
90 }
91
92 if isMaybeRelativeLocalPath(realAddr) {
93 return "", &MaybeRelativePathErr{addr}
94 }
95
96 var realSubDir string
97 realAddr, realSubDir = splitAddrSubdir(realAddr)
98 if realSubDir != "" {
99 subDir = filepath.Join(realSubDir, subDir)
100 }
101
102 if realAddr != packageAddr {
103 log.Printf("[TRACE] go-getter detectors rewrote %q to %q", packageAddr, realAddr)
104 }
105
106 if prevDir, exists := g[realAddr]; exists {
107 log.Printf("[TRACE] copying previous install %s to %s", prevDir, instPath)
108 err := os.Mkdir(instPath, os.ModePerm)
109 if err != nil {
110 return "", fmt.Errorf("failed to create directory %s: %s", instPath, err)
111 }
112 err = copyDir(instPath, prevDir)
113 if err != nil {
114 return "", fmt.Errorf("failed to copy from %s to %s: %s", prevDir, instPath, err)
115 }
116 } else {
117 log.Printf("[TRACE] fetching %q to %q", realAddr, instPath)
118 client := getter.Client{
119 Src: realAddr,
120 Dst: instPath,
121 Pwd: instPath,
122
123 Mode: getter.ClientModeDir,
124
125 Detectors: goGetterNoDetectors, // we already did detection above
126 Decompressors: goGetterDecompressors,
127 Getters: goGetterGetters,
128 }
129 err = client.Get()
130 if err != nil {
131 return "", err
132 }
133 // Remember where we installed this so we might reuse this directory
134 // on subsequent calls to avoid re-downloading.
135 g[realAddr] = instPath
136 }
137
138 // Our subDir string can contain wildcards until this point, so that
139 // e.g. a subDir of * can expand to one top-level directory in a .tar.gz
140 // archive. Now that we've expanded the archive successfully we must
141 // resolve that into a concrete path.
142 var finalDir string
143 if subDir != "" {
144 finalDir, err = getter.SubdirGlob(instPath, subDir)
145 log.Printf("[TRACE] expanded %q to %q", subDir, finalDir)
146 if err != nil {
147 return "", err
148 }
149 } else {
150 finalDir = instPath
151 }
152
153 // If we got this far then we have apparently succeeded in downloading
154 // the requested object!
155 return filepath.Clean(finalDir), nil
156}
157
158// splitAddrSubdir splits the given address (which is assumed to be a
159// registry address or go-getter-style address) into a package portion
160// and a sub-directory portion.
161//
162// The package portion defines what should be downloaded and then the
163// sub-directory portion, if present, specifies a sub-directory within
164// the downloaded object (an archive, VCS repository, etc) that contains
165// the module's configuration files.
166//
167// The subDir portion will be returned as empty if no subdir separator
168// ("//") is present in the address.
169func splitAddrSubdir(addr string) (packageAddr, subDir string) {
170 return getter.SourceDirSubdir(addr)
171}
172
// localSourcePrefixes lists the leading markers (slash and backslash
// forms) that identify a module source address as a local filesystem path.
var localSourcePrefixes = []string{
	"./",
	"../",
	".\\",
	"..\\",
}

// isLocalSourceAddr reports whether addr is a local filesystem path
// reference rather than a remote or registry source address.
func isLocalSourceAddr(addr string) bool {
	for i := range localSourcePrefixes {
		if strings.HasPrefix(addr, localSourcePrefixes[i]) {
			return true
		}
	}
	return false
}
188
189func isRegistrySourceAddr(addr string) bool {
190 _, err := regsrc.ParseModuleSource(addr)
191 return err == nil
192}
193
// MaybeRelativePathErr indicates a source address that resolved to a
// file:// URL whose path does not exist, suggesting the user may have
// intended a relative local path instead.
type MaybeRelativePathErr struct {
	// Addr is the source address as originally given.
	Addr string
}

// Error implements the error interface.
func (e *MaybeRelativePathErr) Error() string {
	msg := fmt.Sprintf("Terraform cannot determine the module source for %s", e.Addr)
	return msg
}
201
// isMaybeRelativeLocalPath reports whether addr is a file:// URL whose
// path does not exist on disk — a hint that the user probably meant a
// relative local path that was mis-detected as a file URL.
func isMaybeRelativeLocalPath(addr string) bool {
	const filePrefix = "file://"
	if !strings.HasPrefix(addr, filePrefix) {
		return false
	}
	_, err := os.Stat(addr[len(filePrefix):])
	return err != nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go
new file mode 100644
index 0000000..1150b09
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode.go
@@ -0,0 +1,21 @@
1// +build linux darwin openbsd netbsd solaris dragonfly
2
3package initwd
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
// inode returns the filesystem inode number of the file at path on POSIX
// systems, or an error if the file cannot be stat'd or the platform stat
// result is not a *syscall.Stat_t.
func inode(path string) (uint64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("could not determine file inode")
	}
	return st.Ino, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go
new file mode 100644
index 0000000..30532f5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_freebsd.go
@@ -0,0 +1,21 @@
1// +build freebsd
2
3package initwd
4
5import (
6 "fmt"
7 "os"
8 "syscall"
9)
10
// inode returns the filesystem inode number of the file at path on
// FreeBSD (where Stat_t.Ino needs widening to uint64), or an error if the
// file cannot be stat'd or the stat result has an unexpected type.
func inode(path string) (uint64, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, fmt.Errorf("could not determine file inode")
	}
	return uint64(st.Ino), nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go
new file mode 100644
index 0000000..3ed58e4
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/inode_windows.go
@@ -0,0 +1,8 @@
1// +build windows
2
3package initwd
4
// no syscall.Stat_t on windows, return 0 for inodes
//
// A zero inode makes sameFile fall back to plain path-string equality,
// which is the best this helper can do without Windows-specific file-ID
// syscalls.
func inode(path string) (uint64, error) {
	return 0, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go b/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go
new file mode 100644
index 0000000..6f77dcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/load_config.go
@@ -0,0 +1,56 @@
1package initwd
2
3import (
4 "fmt"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/terraform-config-inspect/tfconfig"
8 "github.com/hashicorp/terraform/internal/earlyconfig"
9 "github.com/hashicorp/terraform/internal/modsdir"
10 "github.com/hashicorp/terraform/tfdiags"
11)
12
13// LoadConfig loads a full configuration tree that has previously had all of
14// its dependent modules installed to the given modulesDir using a
15// ModuleInstaller.
16//
17// This uses the early configuration loader and thus only reads top-level
18// metadata from the modules in the configuration. Most callers should use
19// the configs/configload package to fully load a configuration.
20func LoadConfig(rootDir, modulesDir string) (*earlyconfig.Config, tfdiags.Diagnostics) {
21 rootMod, diags := earlyconfig.LoadModule(rootDir)
22 if rootMod == nil {
23 return nil, diags
24 }
25
26 manifest, err := modsdir.ReadManifestSnapshotForDir(modulesDir)
27 if err != nil {
28 diags = diags.Append(tfdiags.Sourceless(
29 tfdiags.Error,
30 "Failed to read module manifest",
31 fmt.Sprintf("Terraform failed to read its manifest of locally-cached modules: %s.", err),
32 ))
33 return nil, diags
34 }
35
36 return earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc(
37 func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
38 var diags tfdiags.Diagnostics
39
40 key := manifest.ModuleKey(req.Path)
41 record, exists := manifest[key]
42 if !exists {
43 diags = diags.Append(tfdiags.Sourceless(
44 tfdiags.Error,
45 "Module not installed",
46 fmt.Sprintf("Module %s is not yet installed. Run \"terraform init\" to install all modules required by this configuration.", req.Path.String()),
47 ))
48 return nil, nil, diags
49 }
50
51 mod, mDiags := earlyconfig.LoadModule(record.Dir)
52 diags = diags.Append(mDiags)
53 return mod, record.Version, diags
54 },
55 ))
56}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go
new file mode 100644
index 0000000..531310a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install.go
@@ -0,0 +1,558 @@
1package initwd
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "path/filepath"
8 "strings"
9
10 version "github.com/hashicorp/go-version"
11 "github.com/hashicorp/terraform-config-inspect/tfconfig"
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/internal/earlyconfig"
14 "github.com/hashicorp/terraform/internal/modsdir"
15 "github.com/hashicorp/terraform/registry"
16 "github.com/hashicorp/terraform/registry/regsrc"
17 "github.com/hashicorp/terraform/tfdiags"
18)
19
// ModuleInstaller installs a configuration's module dependencies into a
// local modules directory, consulting a module registry client for modules
// whose source addresses refer to a Terraform module registry.
type ModuleInstaller struct {
	// modsDir is the local directory that modules are installed into and
	// where the module manifest snapshot is read from and written to.
	modsDir string

	// reg is the registry client used to list module versions and resolve
	// download locations for registry-sourced modules.
	reg *registry.Client
}
24
// NewModuleInstaller constructs a ModuleInstaller that installs modules into
// the given modules directory, using the given registry client to resolve
// registry-sourced modules.
func NewModuleInstaller(modsDir string, reg *registry.Client) *ModuleInstaller {
	return &ModuleInstaller{
		modsDir: modsDir,
		reg:     reg,
	}
}
31
// InstallModules analyses the root module in the given directory and installs
// all of its direct and transitive dependencies into the given modules
// directory, which must already exist.
//
// Since InstallModules makes possibly-time-consuming calls to remote services,
// a hook interface is supported to allow the caller to be notified when
// each module is installed and, for remote modules, when downloading begins.
// InstallModules guarantees that two hook calls will not happen concurrently
// but it does not guarantee any particular ordering of hook calls. This
// mechanism is for UI feedback only and does not give the caller any control
// over the process.
//
// If modules are already installed in the target directory, they will be
// skipped unless their source address or version have changed or unless
// the upgrade flag is set.
//
// InstallModules never deletes any directory, except in the case where it
// needs to replace a directory that is already present with a newly-extracted
// package.
//
// If the returned diagnostics contains errors then the module installation
// may have wholly or partially completed. Modules must be loaded in order
// to find their dependencies, so this function does many of the same checks
// as LoadConfig as a side-effect.
//
// If successful (the returned diagnostics contains no errors) then the
// first return value is the early configuration tree that was constructed by
// the installation process.
func (i *ModuleInstaller) InstallModules(rootDir string, upgrade bool, hooks ModuleInstallHooks) (*earlyconfig.Config, tfdiags.Diagnostics) {
	log.Printf("[TRACE] ModuleInstaller: installing child modules for %s into %s", rootDir, i.modsDir)

	// A nil root module indicates the directory was missing or unreadable;
	// the accompanying diagnostics describe the problem.
	rootMod, diags := earlyconfig.LoadModule(rootDir)
	if rootMod == nil {
		return nil, diags
	}

	// Read the manifest snapshot of previously-installed modules so that
	// still-valid installations can be reused during the walk below.
	manifest, err := modsdir.ReadManifestSnapshotForDir(i.modsDir)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to read modules manifest file",
			fmt.Sprintf("Error reading manifest for %s: %s.", i.modsDir, err),
		))
		return nil, diags
	}

	// A single getter instance is shared across the whole installation walk;
	// presumably this lets remote packages be fetched only once when several
	// modules share a source — confirm against reusingGetter's implementation.
	getter := reusingGetter{}
	cfg, instDiags := i.installDescendentModules(rootMod, rootDir, manifest, upgrade, hooks, getter)
	diags = append(diags, instDiags...)

	return cfg, diags
}
84
// installDescendentModules walks the module tree rooted at rootMod, ensuring
// each child module is installed — from a local path, a module registry, or
// a generic go-getter source — and updating manifest as it goes. The updated
// manifest is written back to disk before returning.
//
// NOTE(review): the walker closure below accumulates into this function's
// outer diags variable and also returns it, and BuildConfig's own
// diagnostics (cDiags) are appended afterwards as well — confirm this cannot
// double-report the walker's diagnostics.
func (i *ModuleInstaller) installDescendentModules(rootMod *tfconfig.Module, rootDir string, manifest modsdir.Manifest, upgrade bool, hooks ModuleInstallHooks, getter reusingGetter) (*earlyconfig.Config, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	if hooks == nil {
		// Use our no-op implementation as a placeholder
		hooks = ModuleInstallHooksImpl{}
	}

	// Create a manifest record for the root module. This will be used if
	// there are any relative-pathed modules in the root.
	manifest[""] = modsdir.Record{
		Key: "",
		Dir: rootDir,
	}

	cfg, cDiags := earlyconfig.BuildConfig(rootMod, earlyconfig.ModuleWalkerFunc(
		func(req *earlyconfig.ModuleRequest) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {

			key := manifest.ModuleKey(req.Path)
			instPath := i.packageInstallPath(req.Path)

			log.Printf("[DEBUG] Module installer: begin %s", key)

			// First we'll check if we need to upgrade/replace an existing
			// installed module, and delete it out of the way if so.
			replace := upgrade
			if !replace {
				record, recorded := manifest[key]
				switch {
				case !recorded:
					log.Printf("[TRACE] ModuleInstaller: %s is not yet installed", key)
					replace = true
				case record.SourceAddr != req.SourceAddr:
					log.Printf("[TRACE] ModuleInstaller: %s source address has changed from %q to %q", key, record.SourceAddr, req.SourceAddr)
					replace = true
				case record.Version != nil && !req.VersionConstraints.Check(record.Version):
					log.Printf("[TRACE] ModuleInstaller: %s version %s no longer compatible with constraints %s", key, record.Version, req.VersionConstraints)
					replace = true
				}
			}

			// If we _are_ planning to replace this module, then we'll remove
			// it now so our installation code below won't conflict with any
			// existing remnants.
			if replace {
				if _, recorded := manifest[key]; recorded {
					log.Printf("[TRACE] ModuleInstaller: discarding previous record of %s prior to reinstall", key)
				}
				delete(manifest, key)
				// Deleting a module invalidates all of its descendent modules too.
				keyPrefix := key + "."
				for subKey := range manifest {
					if strings.HasPrefix(subKey, keyPrefix) {
						if _, recorded := manifest[subKey]; recorded {
							log.Printf("[TRACE] ModuleInstaller: also discarding downstream %s", subKey)
						}
						delete(manifest, subKey)
					}
				}
			}

			record, recorded := manifest[key]
			if !recorded {
				// Clean up any stale cache directory that might be present.
				// If this is a local (relative) source then the dir will
				// not exist, but we'll ignore that.
				log.Printf("[TRACE] ModuleInstaller: cleaning directory %s prior to install of %s", instPath, key)
				err := os.RemoveAll(instPath)
				if err != nil && !os.IsNotExist(err) {
					log.Printf("[TRACE] ModuleInstaller: failed to remove %s: %s", key, err)
					diags = diags.Append(tfdiags.Sourceless(
						tfdiags.Error,
						"Failed to remove local module cache",
						fmt.Sprintf(
							"Terraform tried to remove %s in order to reinstall this module, but encountered an error: %s",
							instPath, err,
						),
					))
					return nil, nil, diags
				}
			} else {
				// If this module is already recorded and its root directory
				// exists then we will just load what's already there and
				// keep our existing record.
				info, err := os.Stat(record.Dir)
				if err == nil && info.IsDir() {
					mod, mDiags := earlyconfig.LoadModule(record.Dir)
					diags = diags.Append(mDiags)

					log.Printf("[TRACE] ModuleInstaller: Module installer: %s %s already installed in %s", key, record.Version, record.Dir)
					return mod, record.Version, diags
				}
			}

			// If we get down here then it's finally time to actually install
			// the module. There are some variants to this process depending
			// on what type of module source address we have.
			switch {

			case isLocalSourceAddr(req.SourceAddr):
				log.Printf("[TRACE] ModuleInstaller: %s has local path %q", key, req.SourceAddr)
				mod, mDiags := i.installLocalModule(req, key, manifest, hooks)
				diags = append(diags, mDiags...)
				// Local modules are never versioned, hence the nil version.
				return mod, nil, diags

			case isRegistrySourceAddr(req.SourceAddr):
				addr, err := regsrc.ParseModuleSource(req.SourceAddr)
				if err != nil {
					// Should never happen because isRegistrySourceAddr already validated
					panic(err)
				}
				log.Printf("[TRACE] ModuleInstaller: %s is a registry module at %s", key, addr)

				mod, v, mDiags := i.installRegistryModule(req, key, instPath, addr, manifest, hooks, getter)
				diags = append(diags, mDiags...)
				return mod, v, diags

			default:
				log.Printf("[TRACE] ModuleInstaller: %s address %q will be handled by go-getter", key, req.SourceAddr)

				mod, mDiags := i.installGoGetterModule(req, key, instPath, manifest, hooks, getter)
				diags = append(diags, mDiags...)
				return mod, nil, diags
			}

		},
	))
	diags = append(diags, cDiags...)

	// Persist the updated manifest even when the walk produced errors, so
	// that any modules that did install successfully are recorded.
	err := manifest.WriteSnapshotToDir(i.modsDir)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to update module manifest",
			fmt.Sprintf("Unable to write the module manifest file: %s", err),
		))
	}

	return cfg, diags
}
225
// installLocalModule resolves a module whose source is a relative local path.
// No files are copied: the module is loaded directly from the directory the
// parent module already provides, and its location is recorded in manifest.
func (i *ModuleInstaller) installLocalModule(req *earlyconfig.ModuleRequest, key string, manifest modsdir.Manifest, hooks ModuleInstallHooks) (*tfconfig.Module, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// The local path is interpreted relative to the parent module's
	// directory, so the parent must already have a manifest record.
	parentKey := manifest.ModuleKey(req.Parent.Path)
	parentRecord, recorded := manifest[parentKey]
	if !recorded {
		// This is indicative of a bug rather than a user-actionable error
		panic(fmt.Errorf("missing manifest record for parent module %s", parentKey))
	}

	// Version constraints are meaningless for a local path: there is no
	// version-selection step, the directory contents are whatever they are.
	if len(req.VersionConstraints) != 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid version constraint",
			fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a relative local path.", req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
	}

	// For local sources we don't actually need to modify the
	// filesystem at all because the parent already wrote
	// the files we need, and so we just load up what's already here.
	newDir := filepath.Join(parentRecord.Dir, req.SourceAddr)

	log.Printf("[TRACE] ModuleInstaller: %s uses directory from parent: %s", key, newDir)
	// it is possible that the local directory is a symlink
	newDir, err := filepath.EvalSymlinks(newDir)
	if err != nil {
		// NOTE(review): execution deliberately continues after this error,
		// so the LoadModule call below will likely also fail and add a
		// second "Unreadable module directory" diagnostic — confirm this
		// double-reporting is intended.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("Unable to evaluate directory symlink: %s", err.Error()),
		))
	}

	mod, mDiags := earlyconfig.LoadModule(newDir)
	if mod == nil {
		// nil indicates missing or unreadable directory, so we'll
		// discard the returned diags and return a more specific
		// error message here.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("The directory %s could not be read for module %q at %s:%d.", newDir, req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
	} else {
		diags = diags.Append(mDiags)
	}

	// Note the local location in our manifest.
	manifest[key] = modsdir.Record{
		Key:        key,
		Dir:        newDir,
		SourceAddr: req.SourceAddr,
	}
	log.Printf("[DEBUG] Module installer: %s installed at %s", key, newDir)
	hooks.Install(key, nil, newDir)

	return mod, diags
}
285
// installRegistryModule installs a module sourced from a Terraform module
// registry: it validates the registry hostname, lists the available versions,
// selects the newest version matching the request's constraints, asks the
// registry for the corresponding download location, fetches the package with
// go-getter, loads the module, and records its location in manifest.
func (i *ModuleInstaller) installRegistryModule(req *earlyconfig.ModuleRequest, key string, instPath string, addr *regsrc.Module, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, *version.Version, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	hostname, err := addr.SvcHost()
	if err != nil {
		// If it looks like the user was trying to use punycode then we'll generate
		// a specialized error for that case. We require the unicode form of
		// hostname so that hostnames are always human-readable in configuration
		// and punycode can't be used to hide a malicious module hostname.
		if strings.HasPrefix(addr.RawHost.Raw, "xn--") {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Invalid module registry hostname",
				fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not an acceptable hostname. Internationalized domain names must be given in unicode form rather than ASCII (\"punycode\") form.", req.Name, req.CallPos.Filename, req.CallPos.Line),
			))
		} else {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Invalid module registry hostname",
				fmt.Sprintf("The hostname portion of the module %q source address (at %s:%d) is not a valid hostname.", req.Name, req.CallPos.Filename, req.CallPos.Line),
			))
		}
		return nil, nil, diags
	}

	reg := i.reg

	log.Printf("[DEBUG] %s listing available versions of %s at %s", key, addr, hostname)
	resp, err := reg.ModuleVersions(addr)
	if err != nil {
		if registry.IsModuleNotFound(err) {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Module not found",
				fmt.Sprintf("Module %q (from %s:%d) cannot be found in the module registry at %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname),
			))
		} else {
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Error accessing remote module registry",
				fmt.Sprintf("Failed to retrieve available versions for module %q (%s:%d) from %s: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, hostname, err),
			))
		}
		return nil, nil, diags
	}

	// The response might contain information about dependencies to allow us
	// to potentially optimize future requests, but we don't currently do that
	// and so for now we'll just take the first item which is guaranteed to
	// be the address we requested.
	if len(resp.Modules) < 1 {
		// Should never happen, but since this is a remote service that may
		// be implemented by third-parties we will handle it gracefully.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid response from remote module registry",
			fmt.Sprintf("The registry at %s returned an invalid response when Terraform requested available versions for module %q (%s:%d).", hostname, req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
		return nil, nil, diags
	}

	modMeta := resp.Modules[0]

	// Scan all advertised versions, tracking both the newest version overall
	// (used only in error messages) and the newest version that satisfies
	// the request's constraints.
	var latestMatch *version.Version
	var latestVersion *version.Version
	for _, mv := range modMeta.Versions {
		v, err := version.NewVersion(mv.Version)
		if err != nil {
			// Should never happen if the registry server is compliant with
			// the protocol, but we'll warn if not to assist someone who
			// might be developing a module registry server.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Warning,
				"Invalid response from remote module registry",
				fmt.Sprintf("The registry at %s returned an invalid version string %q for module %q (%s:%d), which Terraform ignored.", hostname, mv.Version, req.Name, req.CallPos.Filename, req.CallPos.Line),
			))
			continue
		}

		// If we've found a pre-release version then we'll ignore it unless
		// it was exactly requested.
		if v.Prerelease() != "" && req.VersionConstraints.String() != v.String() {
			log.Printf("[TRACE] ModuleInstaller: %s ignoring %s because it is a pre-release and was not requested exactly", key, v)
			continue
		}

		if latestVersion == nil || v.GreaterThan(latestVersion) {
			latestVersion = v
		}

		if req.VersionConstraints.Check(v) {
			if latestMatch == nil || v.GreaterThan(latestMatch) {
				latestMatch = v
			}
		}
	}

	if latestVersion == nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Module has no versions",
			fmt.Sprintf("Module %q (%s:%d) has no versions available on %s.", addr, req.CallPos.Filename, req.CallPos.Line, hostname),
		))
		return nil, nil, diags
	}

	if latestMatch == nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unresolvable module version constraint",
			fmt.Sprintf("There is no available version of module %q (%s:%d) which matches the given version constraint. The newest available version is %s.", addr, req.CallPos.Filename, req.CallPos.Line, latestVersion),
		))
		return nil, nil, diags
	}

	// Report up to the caller that we're about to start downloading.
	packageAddr, _ := splitAddrSubdir(req.SourceAddr)
	hooks.Download(key, packageAddr, latestMatch)

	// If we manage to get down here then we've found a suitable version to
	// install, so we need to ask the registry where we should download it from.
	// The response to this is a go-getter-style address string.
	dlAddr, err := reg.ModuleLocation(addr, latestMatch.String())
	if err != nil {
		log.Printf("[ERROR] %s from %s %s: %s", key, addr, latestMatch, err)
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid response from remote module registry",
			fmt.Sprintf("The remote registry at %s failed to return a download URL for %s %s.", hostname, addr, latestMatch),
		))
		return nil, nil, diags
	}

	log.Printf("[TRACE] ModuleInstaller: %s %s %s is available at %q", key, addr, latestMatch, dlAddr)

	modDir, err := getter.getWithGoGetter(instPath, dlAddr)
	if err != nil {
		// Errors returned by go-getter have very inconsistent quality as
		// end-user error messages, but for now we're accepting that because
		// we have no way to recognize any specific errors to improve them
		// and masking the error entirely would hide valuable diagnostic
		// information from the user.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to download module",
			fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s.", req.Name, req.CallPos.Filename, req.CallPos.Line, dlAddr, err),
		))
		return nil, nil, diags
	}

	log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, dlAddr, modDir)

	if addr.RawSubmodule != "" {
		// Append the user's requested subdirectory to any subdirectory that
		// was implied by any of the nested layers we expanded within go-getter.
		modDir = filepath.Join(modDir, addr.RawSubmodule)
	}

	log.Printf("[TRACE] ModuleInstaller: %s should now be at %s", key, modDir)

	// Finally we are ready to try actually loading the module.
	mod, mDiags := earlyconfig.LoadModule(modDir)
	if mod == nil {
		// nil indicates missing or unreadable directory, so we'll
		// discard the returned diags and return a more specific
		// error message here. For registry modules this actually
		// indicates a bug in the code above, since it's not the
		// user's responsibility to create the directory in this case.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
		))
	} else {
		diags = append(diags, mDiags...)
	}

	// Note the local location in our manifest.
	manifest[key] = modsdir.Record{
		Key:        key,
		Version:    latestMatch,
		Dir:        modDir,
		SourceAddr: req.SourceAddr,
	}
	log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
	hooks.Install(key, latestMatch, modDir)

	return mod, latestMatch, diags
}
475
// installGoGetterModule installs a module from an arbitrary remote source
// address using go-getter, then loads it and records its location in
// manifest. Version constraints are rejected because generic go-getter
// sources carry no version information to select against.
func (i *ModuleInstaller) installGoGetterModule(req *earlyconfig.ModuleRequest, key string, instPath string, manifest modsdir.Manifest, hooks ModuleInstallHooks, getter reusingGetter) (*tfconfig.Module, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Report up to the caller that we're about to start downloading.
	packageAddr, _ := splitAddrSubdir(req.SourceAddr)
	hooks.Download(key, packageAddr, nil)

	if len(req.VersionConstraints) != 0 {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Invalid version constraint",
			fmt.Sprintf("Cannot apply a version constraint to module %q (at %s:%d) because it has a non Registry URL.", req.Name, req.CallPos.Filename, req.CallPos.Line),
		))
		return nil, diags
	}

	modDir, err := getter.getWithGoGetter(instPath, req.SourceAddr)
	if err != nil {
		if _, ok := err.(*MaybeRelativePathErr); ok {
			// The address failed to resolve but looks like it may have been
			// intended as a relative filesystem path, so suggest the "./"
			// form instead of surfacing the raw go-getter error.
			log.Printf(
				"[TRACE] ModuleInstaller: %s looks like a local path but is missing ./ or ../",
				req.SourceAddr,
			)
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Module not found",
				fmt.Sprintf(
					"The module address %q could not be resolved.\n\n"+
						"If you intended this as a path relative to the current "+
						"module, use \"./%s\" instead. The \"./\" prefix "+
						"indicates that the address is a relative filesystem path.",
					req.SourceAddr, req.SourceAddr,
				),
			))
		} else {
			// Errors returned by go-getter have very inconsistent quality as
			// end-user error messages, but for now we're accepting that because
			// we have no way to recognize any specific errors to improve them
			// and masking the error entirely would hide valuable diagnostic
			// information from the user.
			diags = diags.Append(tfdiags.Sourceless(
				tfdiags.Error,
				"Failed to download module",
				fmt.Sprintf("Could not download module %q (%s:%d) source code from %q: %s", req.Name, req.CallPos.Filename, req.CallPos.Line, packageAddr, err),
			))
		}
		return nil, diags

	}

	log.Printf("[TRACE] ModuleInstaller: %s %q was downloaded to %s", key, req.SourceAddr, modDir)

	mod, mDiags := earlyconfig.LoadModule(modDir)
	if mod == nil {
		// nil indicates missing or unreadable directory, so we'll
		// discard the returned diags and return a more specific
		// error message here. For go-getter modules this actually
		// indicates a bug in the code above, since it's not the
		// user's responsibility to create the directory in this case.
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Unreadable module directory",
			fmt.Sprintf("The directory %s could not be read. This is a bug in Terraform and should be reported.", modDir),
		))
	} else {
		diags = append(diags, mDiags...)
	}

	// Note the local location in our manifest. Go-getter modules are
	// un-versioned, so no Version is recorded.
	manifest[key] = modsdir.Record{
		Key:        key,
		Dir:        modDir,
		SourceAddr: req.SourceAddr,
	}
	log.Printf("[DEBUG] Module installer: %s installed at %s", key, modDir)
	hooks.Install(key, nil, modDir)

	return mod, diags
}
555
556func (i *ModuleInstaller) packageInstallPath(modulePath addrs.Module) string {
557 return filepath.Join(i.modsDir, strings.Join(modulePath, "."))
558}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go
new file mode 100644
index 0000000..817a6dc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/module_install_hooks.go
@@ -0,0 +1,36 @@
1package initwd
2
3import (
4 version "github.com/hashicorp/go-version"
5)
6
// ModuleInstallHooks is an interface used to provide notifications about the
// installation process being orchestrated by InstallModules.
//
// This interface may have new methods added in future, so implementers should
// embed ModuleInstallHooksImpl to get no-op implementations of any
// unimplemented methods.
type ModuleInstallHooks interface {
	// Download is called for modules that are retrieved from a remote source
	// before that download begins, to allow a caller to give feedback
	// on progress through a possibly-long sequence of downloads.
	Download(moduleAddr, packageAddr string, version *version.Version)

	// Install is called for each module that is installed, even if it did
	// not need to be downloaded from a remote source.
	Install(moduleAddr string, version *version.Version, localPath string)
}
23
// ModuleInstallHooksImpl is a do-nothing implementation of ModuleInstallHooks
// that can be embedded in another implementation struct to allow only partial
// implementation of the interface.
type ModuleInstallHooksImpl struct {
}

// Download is a no-op placeholder satisfying ModuleInstallHooks.
func (h ModuleInstallHooksImpl) Download(moduleAddr, packageAddr string, version *version.Version) {
}

// Install is a no-op placeholder satisfying ModuleInstallHooks.
func (h ModuleInstallHooksImpl) Install(moduleAddr string, version *version.Version, localPath string) {
}

// Compile-time assertion that ModuleInstallHooksImpl satisfies
// ModuleInstallHooks.
var _ ModuleInstallHooks = ModuleInstallHooksImpl{}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go b/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go
new file mode 100644
index 0000000..8cef80a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/testing.go
@@ -0,0 +1,73 @@
1package initwd
2
3import (
4 "github.com/hashicorp/terraform/registry"
5 "testing"
6
7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/configs/configload"
9 "github.com/hashicorp/terraform/tfdiags"
10)
11
// LoadConfigForTests is a convenience wrapper around configload.NewLoaderForTests,
// ModuleInstaller.InstallModules and configload.Loader.LoadConfig that allows
// a test configuration to be loaded in a single step.
//
// If module installation fails, t.Fatal (or similar) is called to halt
// execution of the test, under the assumption that installation failures are
// not expected. If installation failures _are_ expected then use
// NewLoaderForTests and work with the loader object directly. If module
// installation succeeds but generates warnings, these warnings are discarded.
//
// If installation succeeds but errors are detected during loading then a
// possibly-incomplete config is returned along with error diagnostics. The
// test run is not aborted in this case, so that the caller can make assertions
// against the returned diagnostics.
//
// As with NewLoaderForTests, a cleanup function is returned which must be
// called before the test completes in order to remove the temporary
// modules directory.
func LoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func(), tfdiags.Diagnostics) {
	t.Helper()

	var diags tfdiags.Diagnostics

	// The test loader manages a temporary modules directory; cleanup must be
	// called by the caller (or below, on fatal error) to remove it.
	loader, cleanup := configload.NewLoaderForTests(t)
	inst := NewModuleInstaller(loader.ModulesDir(), registry.NewClient(nil, nil))

	// Install with upgrade=true so any previously-cached modules are
	// refreshed for the test.
	_, moreDiags := inst.InstallModules(rootDir, true, ModuleInstallHooksImpl{})
	diags = diags.Append(moreDiags)
	if diags.HasErrors() {
		cleanup()
		t.Fatal(diags.Err())
		// Unreachable with a real *testing.T (Fatal stops the test
		// goroutine); kept so the function remains well-formed.
		return nil, nil, func() {}, diags
	}

	// Since module installer has modified the module manifest on disk, we need
	// to refresh the cache of it in the loader.
	if err := loader.RefreshModules(); err != nil {
		t.Fatalf("failed to refresh modules after installation: %s", err)
	}

	config, hclDiags := loader.LoadConfig(rootDir)
	diags = diags.Append(hclDiags)
	return config, loader, cleanup, diags
}
56
57// MustLoadConfigForTests is a variant of LoadConfigForTests which calls
58// t.Fatal (or similar) if there are any errors during loading, and thus
59// does not return diagnostics at all.
60//
61// This is useful for concisely writing tests that don't expect errors at
62// all. For tests that expect errors and need to assert against them, use
63// LoadConfigForTests instead.
64func MustLoadConfigForTests(t *testing.T, rootDir string) (*configs.Config, *configload.Loader, func()) {
65 t.Helper()
66
67 config, loader, cleanup, diags := LoadConfigForTests(t, rootDir)
68 if diags.HasErrors() {
69 cleanup()
70 t.Fatal(diags.Err())
71 }
72 return config, loader, cleanup
73}
diff --git a/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go b/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go
new file mode 100644
index 0000000..104840b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/initwd/version_required.go
@@ -0,0 +1,83 @@
1package initwd
2
3import (
4 "fmt"
5
6 version "github.com/hashicorp/go-version"
7 "github.com/hashicorp/terraform/internal/earlyconfig"
8 "github.com/hashicorp/terraform/tfdiags"
9 tfversion "github.com/hashicorp/terraform/version"
10)
11
12// CheckCoreVersionRequirements visits each of the modules in the given
13// early configuration tree and verifies that any given Core version constraints
14// match with the version of Terraform Core that is being used.
15//
16// The returned diagnostics will contain errors if any constraints do not match.
17// The returned diagnostics might also return warnings, which should be
18// displayed to the user.
19func CheckCoreVersionRequirements(earlyConfig *earlyconfig.Config) tfdiags.Diagnostics {
20 if earlyConfig == nil {
21 return nil
22 }
23
24 var diags tfdiags.Diagnostics
25 module := earlyConfig.Module
26
27 var constraints version.Constraints
28 for _, constraintStr := range module.RequiredCore {
29 constraint, err := version.NewConstraint(constraintStr)
30 if err != nil {
31 // Unfortunately the early config parser doesn't preserve a source
32 // location for this, so we're unable to indicate a specific
33 // location where this constraint came from, but we can at least
34 // say which module set it.
35 switch {
36 case len(earlyConfig.Path) == 0:
37 diags = diags.Append(tfdiags.Sourceless(
38 tfdiags.Error,
39 "Invalid provider version constraint",
40 fmt.Sprintf("Invalid version core constraint %q in the root module.", constraintStr),
41 ))
42 default:
43 diags = diags.Append(tfdiags.Sourceless(
44 tfdiags.Error,
45 "Invalid provider version constraint",
46 fmt.Sprintf("Invalid version core constraint %q in %s.", constraintStr, earlyConfig.Path),
47 ))
48 }
49 continue
50 }
51 constraints = append(constraints, constraint...)
52 }
53
54 if !constraints.Check(tfversion.SemVer) {
55 switch {
56 case len(earlyConfig.Path) == 0:
57 diags = diags.Append(tfdiags.Sourceless(
58 tfdiags.Error,
59 "Unsupported Terraform Core version",
60 fmt.Sprintf(
61 "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the root module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
62 tfversion.String(),
63 ),
64 ))
65 default:
66 diags = diags.Append(tfdiags.Sourceless(
67 tfdiags.Error,
68 "Unsupported Terraform Core version",
69 fmt.Sprintf(
70 "Module %s (from %q) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update the module's version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
71 earlyConfig.Path, earlyConfig.SourceAddr, tfversion.String(),
72 ),
73 ))
74 }
75 }
76
77 for _, c := range earlyConfig.Children {
78 childDiags := CheckCoreVersionRequirements(c)
79 diags = diags.Append(childDiags)
80 }
81
82 return diags
83}
diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go
new file mode 100644
index 0000000..0d7d664
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/doc.go
@@ -0,0 +1,3 @@
1// Package modsdir is an internal package containing the model types used to
2// represent the manifest of modules in a local modules cache directory.
3package modsdir
diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go
new file mode 100644
index 0000000..36f6c03
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/manifest.go
@@ -0,0 +1,138 @@
1package modsdir
2
import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"sort"

	version "github.com/hashicorp/go-version"

	"github.com/hashicorp/terraform/addrs"
)
16
// Record represents some metadata about an installed module, as part
// of a ModuleManifest.
type Record struct {
	// Key is a unique identifier for this particular module, based on its
	// position within the static module tree.
	Key string `json:"Key"`

	// SourceAddr is the source address given for this module in configuration.
	// This is used only to detect if the source was changed in configuration
	// since the module was last installed, which means that the installer
	// must re-install it.
	SourceAddr string `json:"Source"`

	// Version is the exact version of the module, which results from parsing
	// VersionStr. nil for un-versioned modules. Excluded from JSON
	// serialization; VersionStr is the on-disk representation.
	Version *version.Version `json:"-"`

	// VersionStr is the version specifier string. This is used only for
	// serialization in snapshots and should not be accessed or updated
	// by any other codepaths; use "Version" instead.
	VersionStr string `json:"Version,omitempty"`

	// Dir is the path to the local directory where the module is installed.
	Dir string `json:"Dir"`
}
42
// Manifest is a map used to keep track of the filesystem locations
// and other metadata about installed modules.
//
// The configuration loader refers to this, while the module installer updates
// it to reflect any changes to the installed modules.
//
// Keys are the strings produced by ModuleKey for each module's tree path.
type Manifest map[string]Record
49
50func (m Manifest) ModuleKey(path addrs.Module) string {
51 return path.String()
52}
53
// manifestSnapshotFile is an internal struct used only to assist in our JSON
// serialization of manifest snapshots. It should not be used for any other
// purpose.
type manifestSnapshotFile struct {
	// Records holds the flattened manifest entries; the map structure is
	// reconstructed from Record.Key when a snapshot is read back in.
	Records []Record `json:"Modules"`
}
60
61func ReadManifestSnapshot(r io.Reader) (Manifest, error) {
62 src, err := ioutil.ReadAll(r)
63 if err != nil {
64 return nil, err
65 }
66
67 if len(src) == 0 {
68 // This should never happen, but we'll tolerate it as if it were
69 // a valid empty JSON object.
70 return make(Manifest), nil
71 }
72
73 var read manifestSnapshotFile
74 err = json.Unmarshal(src, &read)
75
76 new := make(Manifest)
77 for _, record := range read.Records {
78 if record.VersionStr != "" {
79 record.Version, err = version.NewVersion(record.VersionStr)
80 if err != nil {
81 return nil, fmt.Errorf("invalid version %q for %s: %s", record.VersionStr, record.Key, err)
82 }
83 }
84 if _, exists := new[record.Key]; exists {
85 // This should never happen in any valid file, so we'll catch it
86 // and report it to avoid confusing/undefined behavior if the
87 // snapshot file was edited incorrectly outside of Terraform.
88 return nil, fmt.Errorf("snapshot file contains two records for path %s", record.Key)
89 }
90 new[record.Key] = record
91 }
92 return new, nil
93}
94
95func ReadManifestSnapshotForDir(dir string) (Manifest, error) {
96 fn := filepath.Join(dir, ManifestSnapshotFilename)
97 r, err := os.Open(fn)
98 if err != nil {
99 if os.IsNotExist(err) {
100 return make(Manifest), nil // missing file is okay and treated as empty
101 }
102 return nil, err
103 }
104 return ReadManifestSnapshot(r)
105}
106
107func (m Manifest) WriteSnapshot(w io.Writer) error {
108 var write manifestSnapshotFile
109
110 for _, record := range m {
111 // Make sure VersionStr is in sync with Version, since we encourage
112 // callers to manipulate Version and ignore VersionStr.
113 if record.Version != nil {
114 record.VersionStr = record.Version.String()
115 } else {
116 record.VersionStr = ""
117 }
118 write.Records = append(write.Records, record)
119 }
120
121 src, err := json.Marshal(write)
122 if err != nil {
123 return err
124 }
125
126 _, err = w.Write(src)
127 return err
128}
129
130func (m Manifest) WriteSnapshotToDir(dir string) error {
131 fn := filepath.Join(dir, ManifestSnapshotFilename)
132 log.Printf("[TRACE] modsdir: writing modules manifest to %s", fn)
133 w, err := os.Create(fn)
134 if err != nil {
135 return err
136 }
137 return m.WriteSnapshot(w)
138}
diff --git a/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go b/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go
new file mode 100644
index 0000000..9ebb524
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/modsdir/paths.go
@@ -0,0 +1,3 @@
package modsdir

// ManifestSnapshotFilename is the name of the file in which a manifest
// snapshot is stored within a local modules cache directory.
const ManifestSnapshotFilename = "modules.json"
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh
new file mode 100644
index 0000000..de1d693
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/generate.sh
@@ -0,0 +1,16 @@
#!/bin/bash

# We do not run protoc under go:generate because we want to ensure that all
# dependencies of go:generate are "go get"-able for general dev environment
# usability. To compile all protobuf files in this repository, run
# "make protobuf" at the top-level.

set -eu

# Resolve the directory containing this script, following any chain of
# symlinks. Fix: the original loop did not handle a relative readlink
# result, which would then be resolved against the caller's working
# directory rather than the directory containing the symlink itself.
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
  DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
  SOURCE="$(readlink "$SOURCE")"
  if [[ $SOURCE != /* ]]; then
    SOURCE="$DIR/$SOURCE"
  fi
done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"

cd "$DIR"

protoc -I ./ tfplugin5.proto --go_out=plugins=grpc:./
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
new file mode 100644
index 0000000..87a6bec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
@@ -0,0 +1,3455 @@
1// Code generated by protoc-gen-go. DO NOT EDIT.
2// source: tfplugin5.proto
3
4package tfplugin5
5
6import proto "github.com/golang/protobuf/proto"
7import fmt "fmt"
8import math "math"
9
10import (
11 context "golang.org/x/net/context"
12 grpc "google.golang.org/grpc"
13)
14
15// Reference imports to suppress errors if they are not otherwise used.
16var _ = proto.Marshal
17var _ = fmt.Errorf
18var _ = math.Inf
19
20// This is a compile-time assertion to ensure that this generated file
21// is compatible with the proto package it is being compiled against.
22// A compilation error at this line likely means your copy of the
23// proto package needs to be updated.
24const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
25
26type Diagnostic_Severity int32
27
28const (
29 Diagnostic_INVALID Diagnostic_Severity = 0
30 Diagnostic_ERROR Diagnostic_Severity = 1
31 Diagnostic_WARNING Diagnostic_Severity = 2
32)
33
34var Diagnostic_Severity_name = map[int32]string{
35 0: "INVALID",
36 1: "ERROR",
37 2: "WARNING",
38}
39var Diagnostic_Severity_value = map[string]int32{
40 "INVALID": 0,
41 "ERROR": 1,
42 "WARNING": 2,
43}
44
45func (x Diagnostic_Severity) String() string {
46 return proto.EnumName(Diagnostic_Severity_name, int32(x))
47}
48func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) {
49 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1, 0}
50}
51
52type Schema_NestedBlock_NestingMode int32
53
54const (
55 Schema_NestedBlock_INVALID Schema_NestedBlock_NestingMode = 0
56 Schema_NestedBlock_SINGLE Schema_NestedBlock_NestingMode = 1
57 Schema_NestedBlock_LIST Schema_NestedBlock_NestingMode = 2
58 Schema_NestedBlock_SET Schema_NestedBlock_NestingMode = 3
59 Schema_NestedBlock_MAP Schema_NestedBlock_NestingMode = 4
60 Schema_NestedBlock_GROUP Schema_NestedBlock_NestingMode = 5
61)
62
63var Schema_NestedBlock_NestingMode_name = map[int32]string{
64 0: "INVALID",
65 1: "SINGLE",
66 2: "LIST",
67 3: "SET",
68 4: "MAP",
69 5: "GROUP",
70}
71var Schema_NestedBlock_NestingMode_value = map[string]int32{
72 "INVALID": 0,
73 "SINGLE": 1,
74 "LIST": 2,
75 "SET": 3,
76 "MAP": 4,
77 "GROUP": 5,
78}
79
80func (x Schema_NestedBlock_NestingMode) String() string {
81 return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x))
82}
83func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) {
84 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2, 0}
85}
86
87// DynamicValue is an opaque encoding of terraform data, with the field name
88// indicating the encoding scheme used.
89type DynamicValue struct {
90 Msgpack []byte `protobuf:"bytes,1,opt,name=msgpack,proto3" json:"msgpack,omitempty"`
91 Json []byte `protobuf:"bytes,2,opt,name=json,proto3" json:"json,omitempty"`
92 XXX_NoUnkeyedLiteral struct{} `json:"-"`
93 XXX_unrecognized []byte `json:"-"`
94 XXX_sizecache int32 `json:"-"`
95}
96
97func (m *DynamicValue) Reset() { *m = DynamicValue{} }
98func (m *DynamicValue) String() string { return proto.CompactTextString(m) }
99func (*DynamicValue) ProtoMessage() {}
100func (*DynamicValue) Descriptor() ([]byte, []int) {
101 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{0}
102}
103func (m *DynamicValue) XXX_Unmarshal(b []byte) error {
104 return xxx_messageInfo_DynamicValue.Unmarshal(m, b)
105}
106func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
107 return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic)
108}
109func (dst *DynamicValue) XXX_Merge(src proto.Message) {
110 xxx_messageInfo_DynamicValue.Merge(dst, src)
111}
112func (m *DynamicValue) XXX_Size() int {
113 return xxx_messageInfo_DynamicValue.Size(m)
114}
115func (m *DynamicValue) XXX_DiscardUnknown() {
116 xxx_messageInfo_DynamicValue.DiscardUnknown(m)
117}
118
119var xxx_messageInfo_DynamicValue proto.InternalMessageInfo
120
121func (m *DynamicValue) GetMsgpack() []byte {
122 if m != nil {
123 return m.Msgpack
124 }
125 return nil
126}
127
128func (m *DynamicValue) GetJson() []byte {
129 if m != nil {
130 return m.Json
131 }
132 return nil
133}
134
135type Diagnostic struct {
136 Severity Diagnostic_Severity `protobuf:"varint,1,opt,name=severity,proto3,enum=tfplugin5.Diagnostic_Severity" json:"severity,omitempty"`
137 Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
138 Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"`
139 Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"`
140 XXX_NoUnkeyedLiteral struct{} `json:"-"`
141 XXX_unrecognized []byte `json:"-"`
142 XXX_sizecache int32 `json:"-"`
143}
144
145func (m *Diagnostic) Reset() { *m = Diagnostic{} }
146func (m *Diagnostic) String() string { return proto.CompactTextString(m) }
147func (*Diagnostic) ProtoMessage() {}
148func (*Diagnostic) Descriptor() ([]byte, []int) {
149 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1}
150}
151func (m *Diagnostic) XXX_Unmarshal(b []byte) error {
152 return xxx_messageInfo_Diagnostic.Unmarshal(m, b)
153}
154func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
155 return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic)
156}
157func (dst *Diagnostic) XXX_Merge(src proto.Message) {
158 xxx_messageInfo_Diagnostic.Merge(dst, src)
159}
160func (m *Diagnostic) XXX_Size() int {
161 return xxx_messageInfo_Diagnostic.Size(m)
162}
163func (m *Diagnostic) XXX_DiscardUnknown() {
164 xxx_messageInfo_Diagnostic.DiscardUnknown(m)
165}
166
167var xxx_messageInfo_Diagnostic proto.InternalMessageInfo
168
169func (m *Diagnostic) GetSeverity() Diagnostic_Severity {
170 if m != nil {
171 return m.Severity
172 }
173 return Diagnostic_INVALID
174}
175
176func (m *Diagnostic) GetSummary() string {
177 if m != nil {
178 return m.Summary
179 }
180 return ""
181}
182
183func (m *Diagnostic) GetDetail() string {
184 if m != nil {
185 return m.Detail
186 }
187 return ""
188}
189
190func (m *Diagnostic) GetAttribute() *AttributePath {
191 if m != nil {
192 return m.Attribute
193 }
194 return nil
195}
196
197type AttributePath struct {
198 Steps []*AttributePath_Step `protobuf:"bytes,1,rep,name=steps,proto3" json:"steps,omitempty"`
199 XXX_NoUnkeyedLiteral struct{} `json:"-"`
200 XXX_unrecognized []byte `json:"-"`
201 XXX_sizecache int32 `json:"-"`
202}
203
204func (m *AttributePath) Reset() { *m = AttributePath{} }
205func (m *AttributePath) String() string { return proto.CompactTextString(m) }
206func (*AttributePath) ProtoMessage() {}
207func (*AttributePath) Descriptor() ([]byte, []int) {
208 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2}
209}
210func (m *AttributePath) XXX_Unmarshal(b []byte) error {
211 return xxx_messageInfo_AttributePath.Unmarshal(m, b)
212}
213func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
214 return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic)
215}
216func (dst *AttributePath) XXX_Merge(src proto.Message) {
217 xxx_messageInfo_AttributePath.Merge(dst, src)
218}
219func (m *AttributePath) XXX_Size() int {
220 return xxx_messageInfo_AttributePath.Size(m)
221}
222func (m *AttributePath) XXX_DiscardUnknown() {
223 xxx_messageInfo_AttributePath.DiscardUnknown(m)
224}
225
226var xxx_messageInfo_AttributePath proto.InternalMessageInfo
227
228func (m *AttributePath) GetSteps() []*AttributePath_Step {
229 if m != nil {
230 return m.Steps
231 }
232 return nil
233}
234
235type AttributePath_Step struct {
236 // Types that are valid to be assigned to Selector:
237 // *AttributePath_Step_AttributeName
238 // *AttributePath_Step_ElementKeyString
239 // *AttributePath_Step_ElementKeyInt
240 Selector isAttributePath_Step_Selector `protobuf_oneof:"selector"`
241 XXX_NoUnkeyedLiteral struct{} `json:"-"`
242 XXX_unrecognized []byte `json:"-"`
243 XXX_sizecache int32 `json:"-"`
244}
245
246func (m *AttributePath_Step) Reset() { *m = AttributePath_Step{} }
247func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) }
248func (*AttributePath_Step) ProtoMessage() {}
249func (*AttributePath_Step) Descriptor() ([]byte, []int) {
250 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2, 0}
251}
252func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error {
253 return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b)
254}
255func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
256 return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic)
257}
258func (dst *AttributePath_Step) XXX_Merge(src proto.Message) {
259 xxx_messageInfo_AttributePath_Step.Merge(dst, src)
260}
261func (m *AttributePath_Step) XXX_Size() int {
262 return xxx_messageInfo_AttributePath_Step.Size(m)
263}
264func (m *AttributePath_Step) XXX_DiscardUnknown() {
265 xxx_messageInfo_AttributePath_Step.DiscardUnknown(m)
266}
267
268var xxx_messageInfo_AttributePath_Step proto.InternalMessageInfo
269
270type isAttributePath_Step_Selector interface {
271 isAttributePath_Step_Selector()
272}
273
274type AttributePath_Step_AttributeName struct {
275 AttributeName string `protobuf:"bytes,1,opt,name=attribute_name,json=attributeName,proto3,oneof"`
276}
277
278type AttributePath_Step_ElementKeyString struct {
279 ElementKeyString string `protobuf:"bytes,2,opt,name=element_key_string,json=elementKeyString,proto3,oneof"`
280}
281
282type AttributePath_Step_ElementKeyInt struct {
283 ElementKeyInt int64 `protobuf:"varint,3,opt,name=element_key_int,json=elementKeyInt,proto3,oneof"`
284}
285
286func (*AttributePath_Step_AttributeName) isAttributePath_Step_Selector() {}
287
288func (*AttributePath_Step_ElementKeyString) isAttributePath_Step_Selector() {}
289
290func (*AttributePath_Step_ElementKeyInt) isAttributePath_Step_Selector() {}
291
292func (m *AttributePath_Step) GetSelector() isAttributePath_Step_Selector {
293 if m != nil {
294 return m.Selector
295 }
296 return nil
297}
298
299func (m *AttributePath_Step) GetAttributeName() string {
300 if x, ok := m.GetSelector().(*AttributePath_Step_AttributeName); ok {
301 return x.AttributeName
302 }
303 return ""
304}
305
306func (m *AttributePath_Step) GetElementKeyString() string {
307 if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyString); ok {
308 return x.ElementKeyString
309 }
310 return ""
311}
312
313func (m *AttributePath_Step) GetElementKeyInt() int64 {
314 if x, ok := m.GetSelector().(*AttributePath_Step_ElementKeyInt); ok {
315 return x.ElementKeyInt
316 }
317 return 0
318}
319
320// XXX_OneofFuncs is for the internal use of the proto package.
321func (*AttributePath_Step) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
322 return _AttributePath_Step_OneofMarshaler, _AttributePath_Step_OneofUnmarshaler, _AttributePath_Step_OneofSizer, []interface{}{
323 (*AttributePath_Step_AttributeName)(nil),
324 (*AttributePath_Step_ElementKeyString)(nil),
325 (*AttributePath_Step_ElementKeyInt)(nil),
326 }
327}
328
329func _AttributePath_Step_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
330 m := msg.(*AttributePath_Step)
331 // selector
332 switch x := m.Selector.(type) {
333 case *AttributePath_Step_AttributeName:
334 b.EncodeVarint(1<<3 | proto.WireBytes)
335 b.EncodeStringBytes(x.AttributeName)
336 case *AttributePath_Step_ElementKeyString:
337 b.EncodeVarint(2<<3 | proto.WireBytes)
338 b.EncodeStringBytes(x.ElementKeyString)
339 case *AttributePath_Step_ElementKeyInt:
340 b.EncodeVarint(3<<3 | proto.WireVarint)
341 b.EncodeVarint(uint64(x.ElementKeyInt))
342 case nil:
343 default:
344 return fmt.Errorf("AttributePath_Step.Selector has unexpected type %T", x)
345 }
346 return nil
347}
348
349func _AttributePath_Step_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
350 m := msg.(*AttributePath_Step)
351 switch tag {
352 case 1: // selector.attribute_name
353 if wire != proto.WireBytes {
354 return true, proto.ErrInternalBadWireType
355 }
356 x, err := b.DecodeStringBytes()
357 m.Selector = &AttributePath_Step_AttributeName{x}
358 return true, err
359 case 2: // selector.element_key_string
360 if wire != proto.WireBytes {
361 return true, proto.ErrInternalBadWireType
362 }
363 x, err := b.DecodeStringBytes()
364 m.Selector = &AttributePath_Step_ElementKeyString{x}
365 return true, err
366 case 3: // selector.element_key_int
367 if wire != proto.WireVarint {
368 return true, proto.ErrInternalBadWireType
369 }
370 x, err := b.DecodeVarint()
371 m.Selector = &AttributePath_Step_ElementKeyInt{int64(x)}
372 return true, err
373 default:
374 return false, nil
375 }
376}
377
378func _AttributePath_Step_OneofSizer(msg proto.Message) (n int) {
379 m := msg.(*AttributePath_Step)
380 // selector
381 switch x := m.Selector.(type) {
382 case *AttributePath_Step_AttributeName:
383 n += 1 // tag and wire
384 n += proto.SizeVarint(uint64(len(x.AttributeName)))
385 n += len(x.AttributeName)
386 case *AttributePath_Step_ElementKeyString:
387 n += 1 // tag and wire
388 n += proto.SizeVarint(uint64(len(x.ElementKeyString)))
389 n += len(x.ElementKeyString)
390 case *AttributePath_Step_ElementKeyInt:
391 n += 1 // tag and wire
392 n += proto.SizeVarint(uint64(x.ElementKeyInt))
393 case nil:
394 default:
395 panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
396 }
397 return n
398}
399
400type Stop struct {
401 XXX_NoUnkeyedLiteral struct{} `json:"-"`
402 XXX_unrecognized []byte `json:"-"`
403 XXX_sizecache int32 `json:"-"`
404}
405
406func (m *Stop) Reset() { *m = Stop{} }
407func (m *Stop) String() string { return proto.CompactTextString(m) }
408func (*Stop) ProtoMessage() {}
409func (*Stop) Descriptor() ([]byte, []int) {
410 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3}
411}
412func (m *Stop) XXX_Unmarshal(b []byte) error {
413 return xxx_messageInfo_Stop.Unmarshal(m, b)
414}
415func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
416 return xxx_messageInfo_Stop.Marshal(b, m, deterministic)
417}
418func (dst *Stop) XXX_Merge(src proto.Message) {
419 xxx_messageInfo_Stop.Merge(dst, src)
420}
421func (m *Stop) XXX_Size() int {
422 return xxx_messageInfo_Stop.Size(m)
423}
424func (m *Stop) XXX_DiscardUnknown() {
425 xxx_messageInfo_Stop.DiscardUnknown(m)
426}
427
428var xxx_messageInfo_Stop proto.InternalMessageInfo
429
430type Stop_Request struct {
431 XXX_NoUnkeyedLiteral struct{} `json:"-"`
432 XXX_unrecognized []byte `json:"-"`
433 XXX_sizecache int32 `json:"-"`
434}
435
436func (m *Stop_Request) Reset() { *m = Stop_Request{} }
437func (m *Stop_Request) String() string { return proto.CompactTextString(m) }
438func (*Stop_Request) ProtoMessage() {}
439func (*Stop_Request) Descriptor() ([]byte, []int) {
440 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 0}
441}
442func (m *Stop_Request) XXX_Unmarshal(b []byte) error {
443 return xxx_messageInfo_Stop_Request.Unmarshal(m, b)
444}
445func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
446 return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic)
447}
448func (dst *Stop_Request) XXX_Merge(src proto.Message) {
449 xxx_messageInfo_Stop_Request.Merge(dst, src)
450}
451func (m *Stop_Request) XXX_Size() int {
452 return xxx_messageInfo_Stop_Request.Size(m)
453}
454func (m *Stop_Request) XXX_DiscardUnknown() {
455 xxx_messageInfo_Stop_Request.DiscardUnknown(m)
456}
457
458var xxx_messageInfo_Stop_Request proto.InternalMessageInfo
459
460type Stop_Response struct {
461 Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"`
462 XXX_NoUnkeyedLiteral struct{} `json:"-"`
463 XXX_unrecognized []byte `json:"-"`
464 XXX_sizecache int32 `json:"-"`
465}
466
467func (m *Stop_Response) Reset() { *m = Stop_Response{} }
468func (m *Stop_Response) String() string { return proto.CompactTextString(m) }
469func (*Stop_Response) ProtoMessage() {}
470func (*Stop_Response) Descriptor() ([]byte, []int) {
471 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 1}
472}
473func (m *Stop_Response) XXX_Unmarshal(b []byte) error {
474 return xxx_messageInfo_Stop_Response.Unmarshal(m, b)
475}
476func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
477 return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic)
478}
479func (dst *Stop_Response) XXX_Merge(src proto.Message) {
480 xxx_messageInfo_Stop_Response.Merge(dst, src)
481}
482func (m *Stop_Response) XXX_Size() int {
483 return xxx_messageInfo_Stop_Response.Size(m)
484}
485func (m *Stop_Response) XXX_DiscardUnknown() {
486 xxx_messageInfo_Stop_Response.DiscardUnknown(m)
487}
488
489var xxx_messageInfo_Stop_Response proto.InternalMessageInfo
490
491func (m *Stop_Response) GetError() string {
492 if m != nil {
493 return m.Error
494 }
495 return ""
496}
497
498// RawState holds the stored state for a resource to be upgraded by the
499// provider. It can be in one of two formats, the current json encoded format
500// in bytes, or the legacy flatmap format as a map of strings.
501type RawState struct {
502 Json []byte `protobuf:"bytes,1,opt,name=json,proto3" json:"json,omitempty"`
503 Flatmap map[string]string `protobuf:"bytes,2,rep,name=flatmap,proto3" json:"flatmap,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
504 XXX_NoUnkeyedLiteral struct{} `json:"-"`
505 XXX_unrecognized []byte `json:"-"`
506 XXX_sizecache int32 `json:"-"`
507}
508
509func (m *RawState) Reset() { *m = RawState{} }
510func (m *RawState) String() string { return proto.CompactTextString(m) }
511func (*RawState) ProtoMessage() {}
512func (*RawState) Descriptor() ([]byte, []int) {
513 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{4}
514}
515func (m *RawState) XXX_Unmarshal(b []byte) error {
516 return xxx_messageInfo_RawState.Unmarshal(m, b)
517}
518func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
519 return xxx_messageInfo_RawState.Marshal(b, m, deterministic)
520}
521func (dst *RawState) XXX_Merge(src proto.Message) {
522 xxx_messageInfo_RawState.Merge(dst, src)
523}
524func (m *RawState) XXX_Size() int {
525 return xxx_messageInfo_RawState.Size(m)
526}
527func (m *RawState) XXX_DiscardUnknown() {
528 xxx_messageInfo_RawState.DiscardUnknown(m)
529}
530
531var xxx_messageInfo_RawState proto.InternalMessageInfo
532
533func (m *RawState) GetJson() []byte {
534 if m != nil {
535 return m.Json
536 }
537 return nil
538}
539
540func (m *RawState) GetFlatmap() map[string]string {
541 if m != nil {
542 return m.Flatmap
543 }
544 return nil
545}
546
547// Schema is the configuration schema for a Resource, Provider, or Provisioner.
548type Schema struct {
549 // The version of the schema.
550 // Schemas are versioned, so that providers can upgrade a saved resource
551 // state when the schema is changed.
552 Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
553 // Block is the top level configuration block for this schema.
554 Block *Schema_Block `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"`
555 XXX_NoUnkeyedLiteral struct{} `json:"-"`
556 XXX_unrecognized []byte `json:"-"`
557 XXX_sizecache int32 `json:"-"`
558}
559
560func (m *Schema) Reset() { *m = Schema{} }
561func (m *Schema) String() string { return proto.CompactTextString(m) }
562func (*Schema) ProtoMessage() {}
563func (*Schema) Descriptor() ([]byte, []int) {
564 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5}
565}
566func (m *Schema) XXX_Unmarshal(b []byte) error {
567 return xxx_messageInfo_Schema.Unmarshal(m, b)
568}
569func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
570 return xxx_messageInfo_Schema.Marshal(b, m, deterministic)
571}
572func (dst *Schema) XXX_Merge(src proto.Message) {
573 xxx_messageInfo_Schema.Merge(dst, src)
574}
575func (m *Schema) XXX_Size() int {
576 return xxx_messageInfo_Schema.Size(m)
577}
578func (m *Schema) XXX_DiscardUnknown() {
579 xxx_messageInfo_Schema.DiscardUnknown(m)
580}
581
582var xxx_messageInfo_Schema proto.InternalMessageInfo
583
584func (m *Schema) GetVersion() int64 {
585 if m != nil {
586 return m.Version
587 }
588 return 0
589}
590
591func (m *Schema) GetBlock() *Schema_Block {
592 if m != nil {
593 return m.Block
594 }
595 return nil
596}
597
598type Schema_Block struct {
599 Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
600 Attributes []*Schema_Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"`
601 BlockTypes []*Schema_NestedBlock `protobuf:"bytes,3,rep,name=block_types,json=blockTypes,proto3" json:"block_types,omitempty"`
602 XXX_NoUnkeyedLiteral struct{} `json:"-"`
603 XXX_unrecognized []byte `json:"-"`
604 XXX_sizecache int32 `json:"-"`
605}
606
607func (m *Schema_Block) Reset() { *m = Schema_Block{} }
608func (m *Schema_Block) String() string { return proto.CompactTextString(m) }
609func (*Schema_Block) ProtoMessage() {}
610func (*Schema_Block) Descriptor() ([]byte, []int) {
611 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 0}
612}
613func (m *Schema_Block) XXX_Unmarshal(b []byte) error {
614 return xxx_messageInfo_Schema_Block.Unmarshal(m, b)
615}
616func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
617 return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic)
618}
619func (dst *Schema_Block) XXX_Merge(src proto.Message) {
620 xxx_messageInfo_Schema_Block.Merge(dst, src)
621}
622func (m *Schema_Block) XXX_Size() int {
623 return xxx_messageInfo_Schema_Block.Size(m)
624}
625func (m *Schema_Block) XXX_DiscardUnknown() {
626 xxx_messageInfo_Schema_Block.DiscardUnknown(m)
627}
628
629var xxx_messageInfo_Schema_Block proto.InternalMessageInfo
630
631func (m *Schema_Block) GetVersion() int64 {
632 if m != nil {
633 return m.Version
634 }
635 return 0
636}
637
638func (m *Schema_Block) GetAttributes() []*Schema_Attribute {
639 if m != nil {
640 return m.Attributes
641 }
642 return nil
643}
644
645func (m *Schema_Block) GetBlockTypes() []*Schema_NestedBlock {
646 if m != nil {
647 return m.BlockTypes
648 }
649 return nil
650}
651
652type Schema_Attribute struct {
653 Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
654 Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
655 Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
656 Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
657 Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"`
658 Computed bool `protobuf:"varint,6,opt,name=computed,proto3" json:"computed,omitempty"`
659 Sensitive bool `protobuf:"varint,7,opt,name=sensitive,proto3" json:"sensitive,omitempty"`
660 XXX_NoUnkeyedLiteral struct{} `json:"-"`
661 XXX_unrecognized []byte `json:"-"`
662 XXX_sizecache int32 `json:"-"`
663}
664
665func (m *Schema_Attribute) Reset() { *m = Schema_Attribute{} }
666func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) }
667func (*Schema_Attribute) ProtoMessage() {}
668func (*Schema_Attribute) Descriptor() ([]byte, []int) {
669 return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 1}
670}
671func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error {
672 return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b)
673}
674func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
675 return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic)
676}
677func (dst *Schema_Attribute) XXX_Merge(src proto.Message) {
678 xxx_messageInfo_Schema_Attribute.Merge(dst, src)
679}
680func (m *Schema_Attribute) XXX_Size() int {
681 return xxx_messageInfo_Schema_Attribute.Size(m)
682}
683func (m *Schema_Attribute) XXX_DiscardUnknown() {
684 xxx_messageInfo_Schema_Attribute.DiscardUnknown(m)
685}
686
687var xxx_messageInfo_Schema_Attribute proto.InternalMessageInfo
688
689func (m *Schema_Attribute) GetName() string {
690 if m != nil {
691 return m.Name
692 }
693 return ""
694}
695
696func (m *Schema_Attribute) GetType() []byte {
697 if m != nil {
698 return m.Type
699 }
700 return nil
701}
702
703func (m *Schema_Attribute) GetDescription() string {
704 if m != nil {
705 return m.Description
706 }
707 return ""
708}
709
710func (m *Schema_Attribute) GetRequired() bool {
711 if m != nil {
712 return m.Required
713 }
714 return false
715}
716
717func (m *Schema_Attribute) GetOptional() bool {
718 if m != nil {
719 return m.Optional
720 }
721 return false
722}
723
724func (m *Schema_Attribute) GetComputed() bool {
725 if m != nil {
726 return m.Computed
727 }
728 return false
729}
730
731func (m *Schema_Attribute) GetSensitive() bool {
732 if m != nil {
733 return m.Sensitive
734 }
735 return false
736}
737
// Schema_NestedBlock describes a block nested inside a Schema_Block: its
// type name, its own schema (Block), how it nests (Nesting), and the
// min/max item counts. Generated by protoc-gen-go from tfplugin5.proto;
// regenerate rather than editing by hand.
type Schema_NestedBlock struct {
	TypeName             string                         `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Block                *Schema_Block                  `protobuf:"bytes,2,opt,name=block,proto3" json:"block,omitempty"`
	Nesting              Schema_NestedBlock_NestingMode `protobuf:"varint,3,opt,name=nesting,proto3,enum=tfplugin5.Schema_NestedBlock_NestingMode" json:"nesting,omitempty"`
	MinItems             int64                          `protobuf:"varint,4,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
	MaxItems             int64                          `protobuf:"varint,5,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
	XXX_unrecognized     []byte                         `json:"-"`
	XXX_sizecache        int32                          `json:"-"`
}

// Standard generated proto.Message boilerplate.
func (m *Schema_NestedBlock) Reset()         { *m = Schema_NestedBlock{} }
func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) }
func (*Schema_NestedBlock) ProtoMessage()    {}
func (*Schema_NestedBlock) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2}
}

// XXX_* methods route wire (un)marshalling through the shared
// table-driven codec in xxx_messageInfo_Schema_NestedBlock.
func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b)
}
func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic)
}
func (dst *Schema_NestedBlock) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Schema_NestedBlock.Merge(dst, src)
}
func (m *Schema_NestedBlock) XXX_Size() int {
	return xxx_messageInfo_Schema_NestedBlock.Size(m)
}
func (m *Schema_NestedBlock) XXX_DiscardUnknown() {
	xxx_messageInfo_Schema_NestedBlock.DiscardUnknown(m)
}

var xxx_messageInfo_Schema_NestedBlock proto.InternalMessageInfo

// Getters return the field value, or the zero value when the receiver is
// nil, so they are safe to chain through possibly-nil messages.

func (m *Schema_NestedBlock) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

func (m *Schema_NestedBlock) GetBlock() *Schema_Block {
	if m != nil {
		return m.Block
	}
	return nil
}

func (m *Schema_NestedBlock) GetNesting() Schema_NestedBlock_NestingMode {
	if m != nil {
		return m.Nesting
	}
	return Schema_NestedBlock_INVALID
}

func (m *Schema_NestedBlock) GetMinItems() int64 {
	if m != nil {
		return m.MinItems
	}
	return 0
}

func (m *Schema_NestedBlock) GetMaxItems() int64 {
	if m != nil {
		return m.MaxItems
	}
	return 0
}
807
// GetProviderSchema carries no fields of its own (only proto-internal
// bookkeeping); it groups the separately declared GetProviderSchema_Request
// and GetProviderSchema_Response payload types. Generated protoc-gen-go code.
type GetProviderSchema struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec in xxx_messageInfo_GetProviderSchema.
func (m *GetProviderSchema) Reset()         { *m = GetProviderSchema{} }
func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) }
func (*GetProviderSchema) ProtoMessage()    {}
func (*GetProviderSchema) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6}
}
func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b)
}
func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic)
}
func (dst *GetProviderSchema) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProviderSchema.Merge(dst, src)
}
func (m *GetProviderSchema) XXX_Size() int {
	return xxx_messageInfo_GetProviderSchema.Size(m)
}
func (m *GetProviderSchema) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProviderSchema.DiscardUnknown(m)
}

var xxx_messageInfo_GetProviderSchema proto.InternalMessageInfo
837
// GetProviderSchema_Request is the (empty) request payload for fetching a
// provider's schemas; only proto-internal bookkeeping fields are present.
// Generated protoc-gen-go code.
type GetProviderSchema_Request struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *GetProviderSchema_Request) Reset()         { *m = GetProviderSchema_Request{} }
func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) }
func (*GetProviderSchema_Request) ProtoMessage()    {}
func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 0}
}
func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b)
}
func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic)
}
func (dst *GetProviderSchema_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProviderSchema_Request.Merge(dst, src)
}
func (m *GetProviderSchema_Request) XXX_Size() int {
	return xxx_messageInfo_GetProviderSchema_Request.Size(m)
}
func (m *GetProviderSchema_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProviderSchema_Request.DiscardUnknown(m)
}

var xxx_messageInfo_GetProviderSchema_Request proto.InternalMessageInfo
867
// GetProviderSchema_Response returns the provider's own config schema plus
// per-type-name maps of managed-resource and data-source schemas, along
// with any diagnostics produced while building them. Generated
// protoc-gen-go code.
type GetProviderSchema_Response struct {
	Provider             *Schema            `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider,omitempty"`
	ResourceSchemas      map[string]*Schema `protobuf:"bytes,2,rep,name=resource_schemas,json=resourceSchemas,proto3" json:"resource_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	DataSourceSchemas    map[string]*Schema `protobuf:"bytes,3,rep,name=data_source_schemas,json=dataSourceSchemas,proto3" json:"data_source_schemas,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	Diagnostics          []*Diagnostic      `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *GetProviderSchema_Response) Reset()         { *m = GetProviderSchema_Response{} }
func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) }
func (*GetProviderSchema_Response) ProtoMessage()    {}
func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 1}
}
func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b)
}
func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic)
}
func (dst *GetProviderSchema_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProviderSchema_Response.Merge(dst, src)
}
func (m *GetProviderSchema_Response) XXX_Size() int {
	return xxx_messageInfo_GetProviderSchema_Response.Size(m)
}
func (m *GetProviderSchema_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProviderSchema_Response.DiscardUnknown(m)
}

var xxx_messageInfo_GetProviderSchema_Response proto.InternalMessageInfo

// Getters return the field value, or nil when the receiver is nil.

func (m *GetProviderSchema_Response) GetProvider() *Schema {
	if m != nil {
		return m.Provider
	}
	return nil
}

func (m *GetProviderSchema_Response) GetResourceSchemas() map[string]*Schema {
	if m != nil {
		return m.ResourceSchemas
	}
	return nil
}

func (m *GetProviderSchema_Response) GetDataSourceSchemas() map[string]*Schema {
	if m != nil {
		return m.DataSourceSchemas
	}
	return nil
}

func (m *GetProviderSchema_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
929
// PrepareProviderConfig carries no fields of its own; it groups the
// separately declared PrepareProviderConfig_Request and
// PrepareProviderConfig_Response payload types. Generated protoc-gen-go code.
type PrepareProviderConfig struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *PrepareProviderConfig) Reset()         { *m = PrepareProviderConfig{} }
func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) }
func (*PrepareProviderConfig) ProtoMessage()    {}
func (*PrepareProviderConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7}
}
func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b)
}
func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic)
}
func (dst *PrepareProviderConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PrepareProviderConfig.Merge(dst, src)
}
func (m *PrepareProviderConfig) XXX_Size() int {
	return xxx_messageInfo_PrepareProviderConfig.Size(m)
}
func (m *PrepareProviderConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_PrepareProviderConfig.DiscardUnknown(m)
}

var xxx_messageInfo_PrepareProviderConfig proto.InternalMessageInfo
959
// PrepareProviderConfig_Request holds the raw provider configuration to be
// validated/normalized by the provider. Generated protoc-gen-go code.
type PrepareProviderConfig_Request struct {
	Config               *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *PrepareProviderConfig_Request) Reset()         { *m = PrepareProviderConfig_Request{} }
func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) }
func (*PrepareProviderConfig_Request) ProtoMessage()    {}
func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 0}
}
func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b)
}
func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic)
}
func (dst *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PrepareProviderConfig_Request.Merge(dst, src)
}
func (m *PrepareProviderConfig_Request) XXX_Size() int {
	return xxx_messageInfo_PrepareProviderConfig_Request.Size(m)
}
func (m *PrepareProviderConfig_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_PrepareProviderConfig_Request.DiscardUnknown(m)
}

var xxx_messageInfo_PrepareProviderConfig_Request proto.InternalMessageInfo

// GetConfig returns the Config field, or nil when the receiver is nil.
func (m *PrepareProviderConfig_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
997
// PrepareProviderConfig_Response returns the provider-prepared config and
// any diagnostics raised while preparing it. Generated protoc-gen-go code.
type PrepareProviderConfig_Response struct {
	PreparedConfig       *DynamicValue `protobuf:"bytes,1,opt,name=prepared_config,json=preparedConfig,proto3" json:"prepared_config,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *PrepareProviderConfig_Response) Reset()         { *m = PrepareProviderConfig_Response{} }
func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) }
func (*PrepareProviderConfig_Response) ProtoMessage()    {}
func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 1}
}
func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b)
}
func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic)
}
func (dst *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PrepareProviderConfig_Response.Merge(dst, src)
}
func (m *PrepareProviderConfig_Response) XXX_Size() int {
	return xxx_messageInfo_PrepareProviderConfig_Response.Size(m)
}
func (m *PrepareProviderConfig_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_PrepareProviderConfig_Response.DiscardUnknown(m)
}

var xxx_messageInfo_PrepareProviderConfig_Response proto.InternalMessageInfo

// Getters return the field value, or nil when the receiver is nil.

func (m *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue {
	if m != nil {
		return m.PreparedConfig
	}
	return nil
}

func (m *PrepareProviderConfig_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1043
// UpgradeResourceState carries no fields of its own; it groups the
// separately declared UpgradeResourceState_Request and
// UpgradeResourceState_Response payload types. Generated protoc-gen-go code.
type UpgradeResourceState struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *UpgradeResourceState) Reset()         { *m = UpgradeResourceState{} }
func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) }
func (*UpgradeResourceState) ProtoMessage()    {}
func (*UpgradeResourceState) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8}
}
func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b)
}
func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic)
}
func (dst *UpgradeResourceState) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpgradeResourceState.Merge(dst, src)
}
func (m *UpgradeResourceState) XXX_Size() int {
	return xxx_messageInfo_UpgradeResourceState.Size(m)
}
func (m *UpgradeResourceState) XXX_DiscardUnknown() {
	xxx_messageInfo_UpgradeResourceState.DiscardUnknown(m)
}

var xxx_messageInfo_UpgradeResourceState proto.InternalMessageInfo
1073
// UpgradeResourceState_Request asks the provider to upgrade stored state
// for one resource type from an older schema version to the current one.
// Generated protoc-gen-go code.
type UpgradeResourceState_Request struct {
	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	// version is the schema_version number recorded in the state file
	Version int64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// raw_state is the raw states as stored for the resource. Core does
	// not have access to the schema of prior_version, so it's the
	// provider's responsibility to interpret this value using the
	// appropriate older schema. The raw_state will be the json encoded
	// state, or a legacy flat-mapped format.
	RawState             *RawState `protobuf:"bytes,3,opt,name=raw_state,json=rawState,proto3" json:"raw_state,omitempty"`
	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
	XXX_unrecognized     []byte    `json:"-"`
	XXX_sizecache        int32     `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *UpgradeResourceState_Request) Reset()         { *m = UpgradeResourceState_Request{} }
func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) }
func (*UpgradeResourceState_Request) ProtoMessage()    {}
func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 0}
}
func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b)
}
func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic)
}
func (dst *UpgradeResourceState_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpgradeResourceState_Request.Merge(dst, src)
}
func (m *UpgradeResourceState_Request) XXX_Size() int {
	return xxx_messageInfo_UpgradeResourceState_Request.Size(m)
}
func (m *UpgradeResourceState_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_UpgradeResourceState_Request.DiscardUnknown(m)
}

var xxx_messageInfo_UpgradeResourceState_Request proto.InternalMessageInfo

// Getters return the field value, or the zero value when the receiver is nil.

func (m *UpgradeResourceState_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

func (m *UpgradeResourceState_Request) GetVersion() int64 {
	if m != nil {
		return m.Version
	}
	return 0
}

func (m *UpgradeResourceState_Request) GetRawState() *RawState {
	if m != nil {
		return m.RawState
	}
	return nil
}
1133
// UpgradeResourceState_Response returns the upgraded state plus any
// diagnostics from the migration. Generated protoc-gen-go code.
type UpgradeResourceState_Response struct {
	// new_state is a msgpack-encoded data structure that, when interpreted with
	// the _current_ schema for this resource type, is functionally equivalent to
	// that which was given in prior_state_raw.
	UpgradedState *DynamicValue `protobuf:"bytes,1,opt,name=upgraded_state,json=upgradedState,proto3" json:"upgraded_state,omitempty"`
	// diagnostics describes any errors encountered during migration that could not
	// be safely resolved, and warnings about any possibly-risky assumptions made
	// in the upgrade process.
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *UpgradeResourceState_Response) Reset()         { *m = UpgradeResourceState_Response{} }
func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) }
func (*UpgradeResourceState_Response) ProtoMessage()    {}
func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 1}
}
func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b)
}
func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic)
}
func (dst *UpgradeResourceState_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpgradeResourceState_Response.Merge(dst, src)
}
func (m *UpgradeResourceState_Response) XXX_Size() int {
	return xxx_messageInfo_UpgradeResourceState_Response.Size(m)
}
func (m *UpgradeResourceState_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_UpgradeResourceState_Response.DiscardUnknown(m)
}

var xxx_messageInfo_UpgradeResourceState_Response proto.InternalMessageInfo

// Getters return the field value, or nil when the receiver is nil.

func (m *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue {
	if m != nil {
		return m.UpgradedState
	}
	return nil
}

func (m *UpgradeResourceState_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1185
// ValidateResourceTypeConfig carries no fields of its own; it groups the
// separately declared ValidateResourceTypeConfig_Request and
// ValidateResourceTypeConfig_Response payload types. Generated
// protoc-gen-go code.
type ValidateResourceTypeConfig struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *ValidateResourceTypeConfig) Reset()         { *m = ValidateResourceTypeConfig{} }
func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) }
func (*ValidateResourceTypeConfig) ProtoMessage()    {}
func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9}
}
func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b)
}
func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic)
}
func (dst *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateResourceTypeConfig.Merge(dst, src)
}
func (m *ValidateResourceTypeConfig) XXX_Size() int {
	return xxx_messageInfo_ValidateResourceTypeConfig.Size(m)
}
func (m *ValidateResourceTypeConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateResourceTypeConfig.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateResourceTypeConfig proto.InternalMessageInfo
1215
// ValidateResourceTypeConfig_Request names a resource type and supplies the
// config value to validate against its schema. Generated protoc-gen-go code.
type ValidateResourceTypeConfig_Request struct {
	TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *ValidateResourceTypeConfig_Request) Reset()         { *m = ValidateResourceTypeConfig_Request{} }
func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) }
func (*ValidateResourceTypeConfig_Request) ProtoMessage()    {}
func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 0}
}
func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b)
}
func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic)
}
func (dst *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(dst, src)
}
func (m *ValidateResourceTypeConfig_Request) XXX_Size() int {
	return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m)
}
func (m *ValidateResourceTypeConfig_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateResourceTypeConfig_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateResourceTypeConfig_Request proto.InternalMessageInfo

// Getters return the field value, or the zero value when the receiver is nil.

func (m *ValidateResourceTypeConfig_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

func (m *ValidateResourceTypeConfig_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
1261
// ValidateResourceTypeConfig_Response carries only the diagnostics produced
// by validation. Generated protoc-gen-go code.
type ValidateResourceTypeConfig_Response struct {
	Diagnostics          []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *ValidateResourceTypeConfig_Response) Reset()         { *m = ValidateResourceTypeConfig_Response{} }
func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) }
func (*ValidateResourceTypeConfig_Response) ProtoMessage()    {}
func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 1}
}
func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b)
}
func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic)
}
func (dst *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(dst, src)
}
func (m *ValidateResourceTypeConfig_Response) XXX_Size() int {
	return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m)
}
func (m *ValidateResourceTypeConfig_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateResourceTypeConfig_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateResourceTypeConfig_Response proto.InternalMessageInfo

// GetDiagnostics returns the Diagnostics slice, or nil when the receiver is nil.
func (m *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1299
// ValidateDataSourceConfig carries no fields of its own; it groups the
// separately declared ValidateDataSourceConfig_Request and
// ValidateDataSourceConfig_Response payload types. Generated
// protoc-gen-go code.
type ValidateDataSourceConfig struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *ValidateDataSourceConfig) Reset()         { *m = ValidateDataSourceConfig{} }
func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) }
func (*ValidateDataSourceConfig) ProtoMessage()    {}
func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10}
}
func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b)
}
func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic)
}
func (dst *ValidateDataSourceConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateDataSourceConfig.Merge(dst, src)
}
func (m *ValidateDataSourceConfig) XXX_Size() int {
	return xxx_messageInfo_ValidateDataSourceConfig.Size(m)
}
func (m *ValidateDataSourceConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateDataSourceConfig.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateDataSourceConfig proto.InternalMessageInfo
1329
// ValidateDataSourceConfig_Request names a data source type and supplies
// the config value to validate. Generated protoc-gen-go code.
type ValidateDataSourceConfig_Request struct {
	TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *ValidateDataSourceConfig_Request) Reset()         { *m = ValidateDataSourceConfig_Request{} }
func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) }
func (*ValidateDataSourceConfig_Request) ProtoMessage()    {}
func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 0}
}
func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b)
}
func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic)
}
func (dst *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(dst, src)
}
func (m *ValidateDataSourceConfig_Request) XXX_Size() int {
	return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m)
}
func (m *ValidateDataSourceConfig_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateDataSourceConfig_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateDataSourceConfig_Request proto.InternalMessageInfo

// Getters return the field value, or the zero value when the receiver is nil.

func (m *ValidateDataSourceConfig_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

func (m *ValidateDataSourceConfig_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
1375
// ValidateDataSourceConfig_Response carries only the diagnostics produced
// by validation. Generated protoc-gen-go code.
type ValidateDataSourceConfig_Response struct {
	Diagnostics          []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *ValidateDataSourceConfig_Response) Reset()         { *m = ValidateDataSourceConfig_Response{} }
func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) }
func (*ValidateDataSourceConfig_Response) ProtoMessage()    {}
func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 1}
}
func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b)
}
func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic)
}
func (dst *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(dst, src)
}
func (m *ValidateDataSourceConfig_Response) XXX_Size() int {
	return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m)
}
func (m *ValidateDataSourceConfig_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateDataSourceConfig_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateDataSourceConfig_Response proto.InternalMessageInfo

// GetDiagnostics returns the Diagnostics slice, or nil when the receiver is nil.
func (m *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1413
// Configure carries no fields of its own; it groups the separately
// declared Configure_Request and Configure_Response payload types.
// Generated protoc-gen-go code.
type Configure struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *Configure) Reset()         { *m = Configure{} }
func (m *Configure) String() string { return proto.CompactTextString(m) }
func (*Configure) ProtoMessage()    {}
func (*Configure) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11}
}
func (m *Configure) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Configure.Unmarshal(m, b)
}
func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Configure.Marshal(b, m, deterministic)
}
func (dst *Configure) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Configure.Merge(dst, src)
}
func (m *Configure) XXX_Size() int {
	return xxx_messageInfo_Configure.Size(m)
}
func (m *Configure) XXX_DiscardUnknown() {
	xxx_messageInfo_Configure.DiscardUnknown(m)
}

var xxx_messageInfo_Configure proto.InternalMessageInfo
1443
// Configure_Request supplies the calling Terraform version string and the
// provider configuration value. Generated protoc-gen-go code.
type Configure_Request struct {
	TerraformVersion     string        `protobuf:"bytes,1,opt,name=terraform_version,json=terraformVersion,proto3" json:"terraform_version,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *Configure_Request) Reset()         { *m = Configure_Request{} }
func (m *Configure_Request) String() string { return proto.CompactTextString(m) }
func (*Configure_Request) ProtoMessage()    {}
func (*Configure_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 0}
}
func (m *Configure_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Configure_Request.Unmarshal(m, b)
}
func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic)
}
func (dst *Configure_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Configure_Request.Merge(dst, src)
}
func (m *Configure_Request) XXX_Size() int {
	return xxx_messageInfo_Configure_Request.Size(m)
}
func (m *Configure_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_Configure_Request.DiscardUnknown(m)
}

var xxx_messageInfo_Configure_Request proto.InternalMessageInfo

// Getters return the field value, or the zero value when the receiver is nil.

func (m *Configure_Request) GetTerraformVersion() string {
	if m != nil {
		return m.TerraformVersion
	}
	return ""
}

func (m *Configure_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
1489
// Configure_Response carries only the diagnostics produced while applying
// the configuration. Generated protoc-gen-go code.
type Configure_Response struct {
	Diagnostics          []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

// Standard generated proto.Message boilerplate; XXX_* methods delegate to
// the shared table-driven codec.
func (m *Configure_Response) Reset()         { *m = Configure_Response{} }
func (m *Configure_Response) String() string { return proto.CompactTextString(m) }
func (*Configure_Response) ProtoMessage()    {}
func (*Configure_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 1}
}
func (m *Configure_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Configure_Response.Unmarshal(m, b)
}
func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic)
}
func (dst *Configure_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Configure_Response.Merge(dst, src)
}
func (m *Configure_Response) XXX_Size() int {
	return xxx_messageInfo_Configure_Response.Size(m)
}
func (m *Configure_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_Configure_Response.DiscardUnknown(m)
}

var xxx_messageInfo_Configure_Response proto.InternalMessageInfo

// GetDiagnostics returns the Diagnostics slice, or nil when the receiver is nil.
func (m *Configure_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1527
// ReadResource is an empty container message; its actual payloads are the
// nested ReadResource_Request / ReadResource_Response types.
type ReadResource struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ReadResource) Reset()         { *m = ReadResource{} }
func (m *ReadResource) String() string { return proto.CompactTextString(m) }
func (*ReadResource) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ReadResource) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ReadResource) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadResource.Unmarshal(m, b)
}
func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic)
}
func (dst *ReadResource) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadResource.Merge(dst, src)
}
func (m *ReadResource) XXX_Size() int {
	return xxx_messageInfo_ReadResource.Size(m)
}
func (m *ReadResource) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadResource.DiscardUnknown(m)
}

var xxx_messageInfo_ReadResource proto.InternalMessageInfo
1557
// ReadResource_Request asks the provider to refresh one resource instance:
// the resource type name plus its currently stored state.
type ReadResource_Request struct {
	TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	CurrentState         *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ReadResource_Request) Reset()         { *m = ReadResource_Request{} }
func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) }
func (*ReadResource_Request) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ReadResource_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 0}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b)
}
func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic)
}
func (dst *ReadResource_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadResource_Request.Merge(dst, src)
}
func (m *ReadResource_Request) XXX_Size() int {
	return xxx_messageInfo_ReadResource_Request.Size(m)
}
func (m *ReadResource_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadResource_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ReadResource_Request proto.InternalMessageInfo

// GetTypeName returns TypeName; nil receivers yield "".
func (m *ReadResource_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetCurrentState returns CurrentState; nil receivers yield nil.
func (m *ReadResource_Request) GetCurrentState() *DynamicValue {
	if m != nil {
		return m.CurrentState
	}
	return nil
}
1603
// ReadResource_Response carries the refreshed state for a resource instance
// together with any diagnostics produced while reading it.
type ReadResource_Response struct {
	NewState             *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ReadResource_Response) Reset()         { *m = ReadResource_Response{} }
func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) }
func (*ReadResource_Response) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ReadResource_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 1}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b)
}
func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic)
}
func (dst *ReadResource_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadResource_Response.Merge(dst, src)
}
func (m *ReadResource_Response) XXX_Size() int {
	return xxx_messageInfo_ReadResource_Response.Size(m)
}
func (m *ReadResource_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadResource_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ReadResource_Response proto.InternalMessageInfo

// GetNewState returns NewState; nil receivers yield nil.
func (m *ReadResource_Response) GetNewState() *DynamicValue {
	if m != nil {
		return m.NewState
	}
	return nil
}

// GetDiagnostics returns Diagnostics; nil receivers yield nil.
func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
1649
// PlanResourceChange is an empty container message; its actual payloads are
// the nested PlanResourceChange_Request / PlanResourceChange_Response types.
type PlanResourceChange struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *PlanResourceChange) Reset()         { *m = PlanResourceChange{} }
func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) }
func (*PlanResourceChange) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*PlanResourceChange) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13}
}

// The XXX_* methods delegate to the generated message info table.
func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b)
}
func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic)
}
func (dst *PlanResourceChange) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PlanResourceChange.Merge(dst, src)
}
func (m *PlanResourceChange) XXX_Size() int {
	return xxx_messageInfo_PlanResourceChange.Size(m)
}
func (m *PlanResourceChange) XXX_DiscardUnknown() {
	xxx_messageInfo_PlanResourceChange.DiscardUnknown(m)
}

var xxx_messageInfo_PlanResourceChange proto.InternalMessageInfo
1679
// PlanResourceChange_Request asks the provider to plan a change for one
// resource instance: prior state, proposed new state, the raw config, and
// the provider-private blob saved alongside the prior state.
type PlanResourceChange_Request struct {
	TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	PriorState           *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"`
	ProposedNewState     *DynamicValue `protobuf:"bytes,3,opt,name=proposed_new_state,json=proposedNewState,proto3" json:"proposed_new_state,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
	PriorPrivate         []byte        `protobuf:"bytes,5,opt,name=prior_private,json=priorPrivate,proto3" json:"prior_private,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *PlanResourceChange_Request) Reset()         { *m = PlanResourceChange_Request{} }
func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) }
func (*PlanResourceChange_Request) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 0}
}

// The XXX_* methods delegate to the generated message info table.
func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b)
}
func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic)
}
func (dst *PlanResourceChange_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PlanResourceChange_Request.Merge(dst, src)
}
func (m *PlanResourceChange_Request) XXX_Size() int {
	return xxx_messageInfo_PlanResourceChange_Request.Size(m)
}
func (m *PlanResourceChange_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_PlanResourceChange_Request.DiscardUnknown(m)
}

var xxx_messageInfo_PlanResourceChange_Request proto.InternalMessageInfo

// Nil-safe generated getters: each returns the field, or its zero value
// when called on a nil receiver.
func (m *PlanResourceChange_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

func (m *PlanResourceChange_Request) GetPriorState() *DynamicValue {
	if m != nil {
		return m.PriorState
	}
	return nil
}

func (m *PlanResourceChange_Request) GetProposedNewState() *DynamicValue {
	if m != nil {
		return m.ProposedNewState
	}
	return nil
}

func (m *PlanResourceChange_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}

func (m *PlanResourceChange_Request) GetPriorPrivate() []byte {
	if m != nil {
		return m.PriorPrivate
	}
	return nil
}
1749
// PlanResourceChange_Response carries the provider's planned new state, the
// attribute paths whose change forces replacement, an opaque private blob to
// round-trip back at apply time, and any diagnostics.
type PlanResourceChange_Response struct {
	PlannedState    *DynamicValue    `protobuf:"bytes,1,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
	RequiresReplace []*AttributePath `protobuf:"bytes,2,rep,name=requires_replace,json=requiresReplace,proto3" json:"requires_replace,omitempty"`
	PlannedPrivate  []byte           `protobuf:"bytes,3,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
	Diagnostics     []*Diagnostic    `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	// This may be set only by the helper/schema "SDK" in the main Terraform
	// repository, to request that Terraform Core >=0.12 permit additional
	// inconsistencies that can result from the legacy SDK type system
	// and its imprecise mapping to the >=0.12 type system.
	// The change in behavior implied by this flag makes sense only for the
	// specific details of the legacy SDK type system, and are not a general
	// mechanism to avoid proper type handling in providers.
	//
	// ==== DO NOT USE THIS ====
	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
	// ==== DO NOT USE THIS ====
	LegacyTypeSystem     bool     `protobuf:"varint,5,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *PlanResourceChange_Response) Reset()         { *m = PlanResourceChange_Response{} }
func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) }
func (*PlanResourceChange_Response) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 1}
}

// The XXX_* methods delegate to the generated message info table.
func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b)
}
func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic)
}
func (dst *PlanResourceChange_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_PlanResourceChange_Response.Merge(dst, src)
}
func (m *PlanResourceChange_Response) XXX_Size() int {
	return xxx_messageInfo_PlanResourceChange_Response.Size(m)
}
func (m *PlanResourceChange_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_PlanResourceChange_Response.DiscardUnknown(m)
}

var xxx_messageInfo_PlanResourceChange_Response proto.InternalMessageInfo

// Nil-safe generated getters: each returns the field, or its zero value
// when called on a nil receiver.
func (m *PlanResourceChange_Response) GetPlannedState() *DynamicValue {
	if m != nil {
		return m.PlannedState
	}
	return nil
}

func (m *PlanResourceChange_Response) GetRequiresReplace() []*AttributePath {
	if m != nil {
		return m.RequiresReplace
	}
	return nil
}

func (m *PlanResourceChange_Response) GetPlannedPrivate() []byte {
	if m != nil {
		return m.PlannedPrivate
	}
	return nil
}

func (m *PlanResourceChange_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}

func (m *PlanResourceChange_Response) GetLegacyTypeSystem() bool {
	if m != nil {
		return m.LegacyTypeSystem
	}
	return false
}
1830
// ApplyResourceChange is an empty container message; its actual payloads are
// the nested ApplyResourceChange_Request / ApplyResourceChange_Response types.
type ApplyResourceChange struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ApplyResourceChange) Reset()         { *m = ApplyResourceChange{} }
func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) }
func (*ApplyResourceChange) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ApplyResourceChange) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b)
}
func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic)
}
func (dst *ApplyResourceChange) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ApplyResourceChange.Merge(dst, src)
}
func (m *ApplyResourceChange) XXX_Size() int {
	return xxx_messageInfo_ApplyResourceChange.Size(m)
}
func (m *ApplyResourceChange) XXX_DiscardUnknown() {
	xxx_messageInfo_ApplyResourceChange.DiscardUnknown(m)
}

var xxx_messageInfo_ApplyResourceChange proto.InternalMessageInfo
1860
// ApplyResourceChange_Request asks the provider to apply a previously
// planned change: prior state, planned state, the raw config, and the
// private blob produced at plan time.
type ApplyResourceChange_Request struct {
	TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	PriorState           *DynamicValue `protobuf:"bytes,2,opt,name=prior_state,json=priorState,proto3" json:"prior_state,omitempty"`
	PlannedState         *DynamicValue `protobuf:"bytes,3,opt,name=planned_state,json=plannedState,proto3" json:"planned_state,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"`
	PlannedPrivate       []byte        `protobuf:"bytes,5,opt,name=planned_private,json=plannedPrivate,proto3" json:"planned_private,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ApplyResourceChange_Request) Reset()         { *m = ApplyResourceChange_Request{} }
func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) }
func (*ApplyResourceChange_Request) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 0}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b)
}
func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic)
}
func (dst *ApplyResourceChange_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ApplyResourceChange_Request.Merge(dst, src)
}
func (m *ApplyResourceChange_Request) XXX_Size() int {
	return xxx_messageInfo_ApplyResourceChange_Request.Size(m)
}
func (m *ApplyResourceChange_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ApplyResourceChange_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ApplyResourceChange_Request proto.InternalMessageInfo

// Nil-safe generated getters: each returns the field, or its zero value
// when called on a nil receiver.
func (m *ApplyResourceChange_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

func (m *ApplyResourceChange_Request) GetPriorState() *DynamicValue {
	if m != nil {
		return m.PriorState
	}
	return nil
}

func (m *ApplyResourceChange_Request) GetPlannedState() *DynamicValue {
	if m != nil {
		return m.PlannedState
	}
	return nil
}

func (m *ApplyResourceChange_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}

func (m *ApplyResourceChange_Request) GetPlannedPrivate() []byte {
	if m != nil {
		return m.PlannedPrivate
	}
	return nil
}
1930
// ApplyResourceChange_Response carries the resulting state after apply, a
// new provider-private blob, and any diagnostics.
type ApplyResourceChange_Response struct {
	NewState    *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
	Private     []byte        `protobuf:"bytes,2,opt,name=private,proto3" json:"private,omitempty"`
	Diagnostics []*Diagnostic `protobuf:"bytes,3,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	// This may be set only by the helper/schema "SDK" in the main Terraform
	// repository, to request that Terraform Core >=0.12 permit additional
	// inconsistencies that can result from the legacy SDK type system
	// and its imprecise mapping to the >=0.12 type system.
	// The change in behavior implied by this flag makes sense only for the
	// specific details of the legacy SDK type system, and are not a general
	// mechanism to avoid proper type handling in providers.
	//
	// ==== DO NOT USE THIS ====
	// ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
	// ==== DO NOT USE THIS ====
	LegacyTypeSystem     bool     `protobuf:"varint,4,opt,name=legacy_type_system,json=legacyTypeSystem,proto3" json:"legacy_type_system,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ApplyResourceChange_Response) Reset()         { *m = ApplyResourceChange_Response{} }
func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) }
func (*ApplyResourceChange_Response) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 1}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b)
}
func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic)
}
func (dst *ApplyResourceChange_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ApplyResourceChange_Response.Merge(dst, src)
}
func (m *ApplyResourceChange_Response) XXX_Size() int {
	return xxx_messageInfo_ApplyResourceChange_Response.Size(m)
}
func (m *ApplyResourceChange_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ApplyResourceChange_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ApplyResourceChange_Response proto.InternalMessageInfo

// Nil-safe generated getters: each returns the field, or its zero value
// when called on a nil receiver.
func (m *ApplyResourceChange_Response) GetNewState() *DynamicValue {
	if m != nil {
		return m.NewState
	}
	return nil
}

func (m *ApplyResourceChange_Response) GetPrivate() []byte {
	if m != nil {
		return m.Private
	}
	return nil
}

func (m *ApplyResourceChange_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}

func (m *ApplyResourceChange_Response) GetLegacyTypeSystem() bool {
	if m != nil {
		return m.LegacyTypeSystem
	}
	return false
}
2003
// ImportResourceState is an empty container message; its actual payloads are
// the nested Request / ImportedResource / Response types.
type ImportResourceState struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ImportResourceState) Reset()         { *m = ImportResourceState{} }
func (m *ImportResourceState) String() string { return proto.CompactTextString(m) }
func (*ImportResourceState) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ImportResourceState) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ImportResourceState) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ImportResourceState.Unmarshal(m, b)
}
func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic)
}
func (dst *ImportResourceState) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportResourceState.Merge(dst, src)
}
func (m *ImportResourceState) XXX_Size() int {
	return xxx_messageInfo_ImportResourceState.Size(m)
}
func (m *ImportResourceState) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportResourceState.DiscardUnknown(m)
}

var xxx_messageInfo_ImportResourceState proto.InternalMessageInfo
2033
// ImportResourceState_Request asks the provider to import an existing
// object, identified by resource type name and an opaque id string.
type ImportResourceState_Request struct {
	TypeName             string   `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Id                   string   `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ImportResourceState_Request) Reset()         { *m = ImportResourceState_Request{} }
func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) }
func (*ImportResourceState_Request) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ImportResourceState_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 0}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b)
}
func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic)
}
func (dst *ImportResourceState_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportResourceState_Request.Merge(dst, src)
}
func (m *ImportResourceState_Request) XXX_Size() int {
	return xxx_messageInfo_ImportResourceState_Request.Size(m)
}
func (m *ImportResourceState_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportResourceState_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ImportResourceState_Request proto.InternalMessageInfo

// GetTypeName returns TypeName; nil receivers yield "".
func (m *ImportResourceState_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetId returns Id; nil receivers yield "".
func (m *ImportResourceState_Request) GetId() string {
	if m != nil {
		return m.Id
	}
	return ""
}
2079
// ImportResourceState_ImportedResource describes one object produced by an
// import: its resource type, imported state, and provider-private data.
type ImportResourceState_ImportedResource struct {
	TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	State                *DynamicValue `protobuf:"bytes,2,opt,name=state,proto3" json:"state,omitempty"`
	Private              []byte        `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ImportResourceState_ImportedResource) Reset() { *m = ImportResourceState_ImportedResource{} }
func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) }
func (*ImportResourceState_ImportedResource) ProtoMessage() {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 1}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b)
}
func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic)
}
func (dst *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportResourceState_ImportedResource.Merge(dst, src)
}
func (m *ImportResourceState_ImportedResource) XXX_Size() int {
	return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m)
}
func (m *ImportResourceState_ImportedResource) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportResourceState_ImportedResource.DiscardUnknown(m)
}

var xxx_messageInfo_ImportResourceState_ImportedResource proto.InternalMessageInfo

// Nil-safe generated getters: each returns the field, or its zero value
// when called on a nil receiver.
func (m *ImportResourceState_ImportedResource) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

func (m *ImportResourceState_ImportedResource) GetState() *DynamicValue {
	if m != nil {
		return m.State
	}
	return nil
}

func (m *ImportResourceState_ImportedResource) GetPrivate() []byte {
	if m != nil {
		return m.Private
	}
	return nil
}
2133
// ImportResourceState_Response carries the set of objects produced by an
// import operation together with any diagnostics.
type ImportResourceState_Response struct {
	ImportedResources    []*ImportResourceState_ImportedResource `protobuf:"bytes,1,rep,name=imported_resources,json=importedResources,proto3" json:"imported_resources,omitempty"`
	Diagnostics          []*Diagnostic                           `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}                                `json:"-"`
	XXX_unrecognized     []byte                                  `json:"-"`
	XXX_sizecache        int32                                   `json:"-"`
}

func (m *ImportResourceState_Response) Reset()         { *m = ImportResourceState_Response{} }
func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) }
func (*ImportResourceState_Response) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ImportResourceState_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 2}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b)
}
func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic)
}
func (dst *ImportResourceState_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ImportResourceState_Response.Merge(dst, src)
}
func (m *ImportResourceState_Response) XXX_Size() int {
	return xxx_messageInfo_ImportResourceState_Response.Size(m)
}
func (m *ImportResourceState_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ImportResourceState_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ImportResourceState_Response proto.InternalMessageInfo

// GetImportedResources returns ImportedResources; nil receivers yield nil.
func (m *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource {
	if m != nil {
		return m.ImportedResources
	}
	return nil
}

// GetDiagnostics returns Diagnostics; nil receivers yield nil.
func (m *ImportResourceState_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2179
// ReadDataSource is an empty container message; its actual payloads are the
// nested ReadDataSource_Request / ReadDataSource_Response types.
type ReadDataSource struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ReadDataSource) Reset()         { *m = ReadDataSource{} }
func (m *ReadDataSource) String() string { return proto.CompactTextString(m) }
func (*ReadDataSource) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ReadDataSource) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ReadDataSource) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadDataSource.Unmarshal(m, b)
}
func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic)
}
func (dst *ReadDataSource) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadDataSource.Merge(dst, src)
}
func (m *ReadDataSource) XXX_Size() int {
	return xxx_messageInfo_ReadDataSource.Size(m)
}
func (m *ReadDataSource) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadDataSource.DiscardUnknown(m)
}

var xxx_messageInfo_ReadDataSource proto.InternalMessageInfo
2209
// ReadDataSource_Request asks the provider to read one data source: the
// data source type name plus its configuration.
type ReadDataSource_Request struct {
	TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
	Config               *DynamicValue `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ReadDataSource_Request) Reset()         { *m = ReadDataSource_Request{} }
func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) }
func (*ReadDataSource_Request) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ReadDataSource_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 0}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b)
}
func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic)
}
func (dst *ReadDataSource_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadDataSource_Request.Merge(dst, src)
}
func (m *ReadDataSource_Request) XXX_Size() int {
	return xxx_messageInfo_ReadDataSource_Request.Size(m)
}
func (m *ReadDataSource_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadDataSource_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ReadDataSource_Request proto.InternalMessageInfo

// GetTypeName returns TypeName; nil receivers yield "".
func (m *ReadDataSource_Request) GetTypeName() string {
	if m != nil {
		return m.TypeName
	}
	return ""
}

// GetConfig returns Config; nil receivers yield nil.
func (m *ReadDataSource_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
2255
// ReadDataSource_Response carries the state read from a data source
// together with any diagnostics.
type ReadDataSource_Response struct {
	State                *DynamicValue `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ReadDataSource_Response) Reset()         { *m = ReadDataSource_Response{} }
func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) }
func (*ReadDataSource_Response) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and this message's
// index path within it.
func (*ReadDataSource_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 1}
}

// The XXX_* methods delegate to the generated message info table.
func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b)
}
func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic)
}
func (dst *ReadDataSource_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadDataSource_Response.Merge(dst, src)
}
func (m *ReadDataSource_Response) XXX_Size() int {
	return xxx_messageInfo_ReadDataSource_Response.Size(m)
}
func (m *ReadDataSource_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadDataSource_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ReadDataSource_Response proto.InternalMessageInfo

// GetState returns State; nil receivers yield nil.
func (m *ReadDataSource_Response) GetState() *DynamicValue {
	if m != nil {
		return m.State
	}
	return nil
}

// GetDiagnostics returns Diagnostics; nil receivers yield nil.
func (m *ReadDataSource_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2301
// GetProvisionerSchema is a namespace-only message: it has no fields of
// its own and exists to group the Request/Response pair for the
// provisioner GetSchema RPC.
type GetProvisionerSchema struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *GetProvisionerSchema) Reset()         { *m = GetProvisionerSchema{} }
func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) }
func (*GetProvisionerSchema) ProtoMessage()    {}
func (*GetProvisionerSchema) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17}
}
func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b)
}
func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, deterministic)
}
func (dst *GetProvisionerSchema) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProvisionerSchema.Merge(dst, src)
}
func (m *GetProvisionerSchema) XXX_Size() int {
	return xxx_messageInfo_GetProvisionerSchema.Size(m)
}
func (m *GetProvisionerSchema) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProvisionerSchema.DiscardUnknown(m)
}

var xxx_messageInfo_GetProvisionerSchema proto.InternalMessageInfo

// GetProvisionerSchema_Request is the (empty) request for the provisioner
// GetSchema RPC.
type GetProvisionerSchema_Request struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *GetProvisionerSchema_Request) Reset()         { *m = GetProvisionerSchema_Request{} }
func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) }
func (*GetProvisionerSchema_Request) ProtoMessage()    {}
func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 0}
}
func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b)
}
func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic)
}
func (dst *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProvisionerSchema_Request.Merge(dst, src)
}
func (m *GetProvisionerSchema_Request) XXX_Size() int {
	return xxx_messageInfo_GetProvisionerSchema_Request.Size(m)
}
func (m *GetProvisionerSchema_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProvisionerSchema_Request.DiscardUnknown(m)
}

var xxx_messageInfo_GetProvisionerSchema_Request proto.InternalMessageInfo
2361
// GetProvisionerSchema_Response returns the provisioner's configuration
// schema plus any diagnostics raised while producing it.
type GetProvisionerSchema_Response struct {
	Provisioner          *Schema       `protobuf:"bytes,1,opt,name=provisioner,proto3" json:"provisioner,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *GetProvisionerSchema_Response) Reset()         { *m = GetProvisionerSchema_Response{} }
func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) }
func (*GetProvisionerSchema_Response) ProtoMessage()    {}
func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 1}
}
func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b)
}
func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic)
}
func (dst *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GetProvisionerSchema_Response.Merge(dst, src)
}
func (m *GetProvisionerSchema_Response) XXX_Size() int {
	return xxx_messageInfo_GetProvisionerSchema_Response.Size(m)
}
func (m *GetProvisionerSchema_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_GetProvisionerSchema_Response.DiscardUnknown(m)
}

var xxx_messageInfo_GetProvisionerSchema_Response proto.InternalMessageInfo

// GetProvisioner returns the provisioner schema, or nil when the receiver is nil.
func (m *GetProvisionerSchema_Response) GetProvisioner() *Schema {
	if m != nil {
		return m.Provisioner
	}
	return nil
}

// GetDiagnostics returns the diagnostics list, or nil when the receiver is nil.
func (m *GetProvisionerSchema_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2407
// ValidateProvisionerConfig is a namespace-only message grouping the
// Request/Response pair for the provisioner ValidateProvisionerConfig RPC.
type ValidateProvisionerConfig struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ValidateProvisionerConfig) Reset()         { *m = ValidateProvisionerConfig{} }
func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) }
func (*ValidateProvisionerConfig) ProtoMessage()    {}
func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18}
}
func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b)
}
func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic)
}
func (dst *ValidateProvisionerConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateProvisionerConfig.Merge(dst, src)
}
func (m *ValidateProvisionerConfig) XXX_Size() int {
	return xxx_messageInfo_ValidateProvisionerConfig.Size(m)
}
func (m *ValidateProvisionerConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateProvisionerConfig.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateProvisionerConfig proto.InternalMessageInfo

// ValidateProvisionerConfig_Request carries the provisioner configuration
// to be validated.
type ValidateProvisionerConfig_Request struct {
	Config               *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ValidateProvisionerConfig_Request) Reset()         { *m = ValidateProvisionerConfig_Request{} }
func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) }
func (*ValidateProvisionerConfig_Request) ProtoMessage()    {}
func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 0}
}
func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b)
}
func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic)
}
func (dst *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(dst, src)
}
func (m *ValidateProvisionerConfig_Request) XXX_Size() int {
	return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m)
}
func (m *ValidateProvisionerConfig_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateProvisionerConfig_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateProvisionerConfig_Request proto.InternalMessageInfo

// GetConfig returns the configuration under validation, or nil when the
// receiver is nil.
func (m *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}
2475
// ValidateProvisionerConfig_Response reports validation results; an
// empty Diagnostics list means the configuration was accepted.
type ValidateProvisionerConfig_Response struct {
	Diagnostics          []*Diagnostic `protobuf:"bytes,1,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ValidateProvisionerConfig_Response) Reset()         { *m = ValidateProvisionerConfig_Response{} }
func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) }
func (*ValidateProvisionerConfig_Response) ProtoMessage()    {}
func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 1}
}
func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b)
}
func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic)
}
func (dst *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(dst, src)
}
func (m *ValidateProvisionerConfig_Response) XXX_Size() int {
	return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m)
}
func (m *ValidateProvisionerConfig_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ValidateProvisionerConfig_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ValidateProvisionerConfig_Response proto.InternalMessageInfo

// GetDiagnostics returns the diagnostics list, or nil when the receiver is nil.
func (m *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2513
// ProvisionResource is a namespace-only message grouping the
// Request/Response pair for the provisioner ProvisionResource RPC.
type ProvisionResource struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ProvisionResource) Reset()         { *m = ProvisionResource{} }
func (m *ProvisionResource) String() string { return proto.CompactTextString(m) }
func (*ProvisionResource) ProtoMessage()    {}
func (*ProvisionResource) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19}
}
func (m *ProvisionResource) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ProvisionResource.Unmarshal(m, b)
}
func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic)
}
func (dst *ProvisionResource) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProvisionResource.Merge(dst, src)
}
func (m *ProvisionResource) XXX_Size() int {
	return xxx_messageInfo_ProvisionResource.Size(m)
}
func (m *ProvisionResource) XXX_DiscardUnknown() {
	xxx_messageInfo_ProvisionResource.DiscardUnknown(m)
}

var xxx_messageInfo_ProvisionResource proto.InternalMessageInfo

// ProvisionResource_Request carries the provisioner configuration and the
// connection configuration used to reach the target resource.
type ProvisionResource_Request struct {
	Config               *DynamicValue `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
	Connection           *DynamicValue `protobuf:"bytes,2,opt,name=connection,proto3" json:"connection,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ProvisionResource_Request) Reset()         { *m = ProvisionResource_Request{} }
func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) }
func (*ProvisionResource_Request) ProtoMessage()    {}
func (*ProvisionResource_Request) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 0}
}
func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b)
}
func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic)
}
func (dst *ProvisionResource_Request) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProvisionResource_Request.Merge(dst, src)
}
func (m *ProvisionResource_Request) XXX_Size() int {
	return xxx_messageInfo_ProvisionResource_Request.Size(m)
}
func (m *ProvisionResource_Request) XXX_DiscardUnknown() {
	xxx_messageInfo_ProvisionResource_Request.DiscardUnknown(m)
}

var xxx_messageInfo_ProvisionResource_Request proto.InternalMessageInfo

// GetConfig returns the provisioner configuration, or nil when the
// receiver is nil.
func (m *ProvisionResource_Request) GetConfig() *DynamicValue {
	if m != nil {
		return m.Config
	}
	return nil
}

// GetConnection returns the connection configuration, or nil when the
// receiver is nil.
func (m *ProvisionResource_Request) GetConnection() *DynamicValue {
	if m != nil {
		return m.Connection
	}
	return nil
}
2589
// ProvisionResource_Response is one message in the ProvisionResource
// response stream: a chunk of provisioner output text plus any
// diagnostics.
type ProvisionResource_Response struct {
	Output               string        `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"`
	Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
	XXX_unrecognized     []byte        `json:"-"`
	XXX_sizecache        int32         `json:"-"`
}

func (m *ProvisionResource_Response) Reset()         { *m = ProvisionResource_Response{} }
func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) }
func (*ProvisionResource_Response) ProtoMessage()    {}
func (*ProvisionResource_Response) Descriptor() ([]byte, []int) {
	return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 1}
}
func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b)
}
func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic)
}
func (dst *ProvisionResource_Response) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ProvisionResource_Response.Merge(dst, src)
}
func (m *ProvisionResource_Response) XXX_Size() int {
	return xxx_messageInfo_ProvisionResource_Response.Size(m)
}
func (m *ProvisionResource_Response) XXX_DiscardUnknown() {
	xxx_messageInfo_ProvisionResource_Response.DiscardUnknown(m)
}

var xxx_messageInfo_ProvisionResource_Response proto.InternalMessageInfo

// GetOutput returns the output text, or "" when the receiver is nil.
func (m *ProvisionResource_Response) GetOutput() string {
	if m != nil {
		return m.Output
	}
	return ""
}

// GetDiagnostics returns the diagnostics list, or nil when the receiver is nil.
func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic {
	if m != nil {
		return m.Diagnostics
	}
	return nil
}
2635
// init registers every message, map, and enum type defined in this file
// with the global proto registry under the "tfplugin5" package name, so
// the runtime can resolve them by fully-qualified name.
func init() {
	proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue")
	proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic")
	proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath")
	proto.RegisterType((*AttributePath_Step)(nil), "tfplugin5.AttributePath.Step")
	proto.RegisterType((*Stop)(nil), "tfplugin5.Stop")
	proto.RegisterType((*Stop_Request)(nil), "tfplugin5.Stop.Request")
	proto.RegisterType((*Stop_Response)(nil), "tfplugin5.Stop.Response")
	proto.RegisterType((*RawState)(nil), "tfplugin5.RawState")
	proto.RegisterMapType((map[string]string)(nil), "tfplugin5.RawState.FlatmapEntry")
	proto.RegisterType((*Schema)(nil), "tfplugin5.Schema")
	proto.RegisterType((*Schema_Block)(nil), "tfplugin5.Schema.Block")
	proto.RegisterType((*Schema_Attribute)(nil), "tfplugin5.Schema.Attribute")
	proto.RegisterType((*Schema_NestedBlock)(nil), "tfplugin5.Schema.NestedBlock")
	proto.RegisterType((*GetProviderSchema)(nil), "tfplugin5.GetProviderSchema")
	proto.RegisterType((*GetProviderSchema_Request)(nil), "tfplugin5.GetProviderSchema.Request")
	proto.RegisterType((*GetProviderSchema_Response)(nil), "tfplugin5.GetProviderSchema.Response")
	proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry")
	proto.RegisterMapType((map[string]*Schema)(nil), "tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry")
	proto.RegisterType((*PrepareProviderConfig)(nil), "tfplugin5.PrepareProviderConfig")
	proto.RegisterType((*PrepareProviderConfig_Request)(nil), "tfplugin5.PrepareProviderConfig.Request")
	proto.RegisterType((*PrepareProviderConfig_Response)(nil), "tfplugin5.PrepareProviderConfig.Response")
	proto.RegisterType((*UpgradeResourceState)(nil), "tfplugin5.UpgradeResourceState")
	proto.RegisterType((*UpgradeResourceState_Request)(nil), "tfplugin5.UpgradeResourceState.Request")
	proto.RegisterType((*UpgradeResourceState_Response)(nil), "tfplugin5.UpgradeResourceState.Response")
	proto.RegisterType((*ValidateResourceTypeConfig)(nil), "tfplugin5.ValidateResourceTypeConfig")
	proto.RegisterType((*ValidateResourceTypeConfig_Request)(nil), "tfplugin5.ValidateResourceTypeConfig.Request")
	proto.RegisterType((*ValidateResourceTypeConfig_Response)(nil), "tfplugin5.ValidateResourceTypeConfig.Response")
	proto.RegisterType((*ValidateDataSourceConfig)(nil), "tfplugin5.ValidateDataSourceConfig")
	proto.RegisterType((*ValidateDataSourceConfig_Request)(nil), "tfplugin5.ValidateDataSourceConfig.Request")
	proto.RegisterType((*ValidateDataSourceConfig_Response)(nil), "tfplugin5.ValidateDataSourceConfig.Response")
	proto.RegisterType((*Configure)(nil), "tfplugin5.Configure")
	proto.RegisterType((*Configure_Request)(nil), "tfplugin5.Configure.Request")
	proto.RegisterType((*Configure_Response)(nil), "tfplugin5.Configure.Response")
	proto.RegisterType((*ReadResource)(nil), "tfplugin5.ReadResource")
	proto.RegisterType((*ReadResource_Request)(nil), "tfplugin5.ReadResource.Request")
	proto.RegisterType((*ReadResource_Response)(nil), "tfplugin5.ReadResource.Response")
	proto.RegisterType((*PlanResourceChange)(nil), "tfplugin5.PlanResourceChange")
	proto.RegisterType((*PlanResourceChange_Request)(nil), "tfplugin5.PlanResourceChange.Request")
	proto.RegisterType((*PlanResourceChange_Response)(nil), "tfplugin5.PlanResourceChange.Response")
	proto.RegisterType((*ApplyResourceChange)(nil), "tfplugin5.ApplyResourceChange")
	proto.RegisterType((*ApplyResourceChange_Request)(nil), "tfplugin5.ApplyResourceChange.Request")
	proto.RegisterType((*ApplyResourceChange_Response)(nil), "tfplugin5.ApplyResourceChange.Response")
	proto.RegisterType((*ImportResourceState)(nil), "tfplugin5.ImportResourceState")
	proto.RegisterType((*ImportResourceState_Request)(nil), "tfplugin5.ImportResourceState.Request")
	proto.RegisterType((*ImportResourceState_ImportedResource)(nil), "tfplugin5.ImportResourceState.ImportedResource")
	proto.RegisterType((*ImportResourceState_Response)(nil), "tfplugin5.ImportResourceState.Response")
	proto.RegisterType((*ReadDataSource)(nil), "tfplugin5.ReadDataSource")
	proto.RegisterType((*ReadDataSource_Request)(nil), "tfplugin5.ReadDataSource.Request")
	proto.RegisterType((*ReadDataSource_Response)(nil), "tfplugin5.ReadDataSource.Response")
	proto.RegisterType((*GetProvisionerSchema)(nil), "tfplugin5.GetProvisionerSchema")
	proto.RegisterType((*GetProvisionerSchema_Request)(nil), "tfplugin5.GetProvisionerSchema.Request")
	proto.RegisterType((*GetProvisionerSchema_Response)(nil), "tfplugin5.GetProvisionerSchema.Response")
	proto.RegisterType((*ValidateProvisionerConfig)(nil), "tfplugin5.ValidateProvisionerConfig")
	proto.RegisterType((*ValidateProvisionerConfig_Request)(nil), "tfplugin5.ValidateProvisionerConfig.Request")
	proto.RegisterType((*ValidateProvisionerConfig_Response)(nil), "tfplugin5.ValidateProvisionerConfig.Response")
	proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource")
	proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request")
	proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response")
	proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value)
	proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value)
}
2698
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4

// ProviderClient is the client API for Provider service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ProviderClient interface {
	// ////// Information about what a provider supports/expects
	GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error)
	PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error)
	ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error)
	ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error)
	UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error)
	// ////// One-time initialization, called before other functions below
	Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error)
	// ////// Managed Resource Lifecycle
	ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error)
	PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error)
	ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error)
	ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error)
	ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error)
	// ////// Graceful Shutdown
	Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error)
}

// providerClient is the concrete ProviderClient implementation backed by
// a single gRPC client connection.
type providerClient struct {
	cc *grpc.ClientConn
}

// NewProviderClient wraps an established gRPC connection in a ProviderClient.
func NewProviderClient(cc *grpc.ClientConn) ProviderClient {
	return &providerClient{cc}
}
2736
// Each providerClient method below follows the same generated unary-RPC
// pattern: allocate the typed response, invoke the RPC by its full
// "/tfplugin5.Provider/<Method>" name, and return either the decoded
// response or the transport error.

func (c *providerClient) GetSchema(ctx context.Context, in *GetProviderSchema_Request, opts ...grpc.CallOption) (*GetProviderSchema_Response, error) {
	out := new(GetProviderSchema_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/GetSchema", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) PrepareProviderConfig(ctx context.Context, in *PrepareProviderConfig_Request, opts ...grpc.CallOption) (*PrepareProviderConfig_Response, error) {
	out := new(PrepareProviderConfig_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PrepareProviderConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) ValidateResourceTypeConfig(ctx context.Context, in *ValidateResourceTypeConfig_Request, opts ...grpc.CallOption) (*ValidateResourceTypeConfig_Response, error) {
	out := new(ValidateResourceTypeConfig_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateResourceTypeConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) ValidateDataSourceConfig(ctx context.Context, in *ValidateDataSourceConfig_Request, opts ...grpc.CallOption) (*ValidateDataSourceConfig_Response, error) {
	out := new(ValidateDataSourceConfig_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ValidateDataSourceConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) UpgradeResourceState(ctx context.Context, in *UpgradeResourceState_Request, opts ...grpc.CallOption) (*UpgradeResourceState_Response, error) {
	out := new(UpgradeResourceState_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/UpgradeResourceState", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) Configure(ctx context.Context, in *Configure_Request, opts ...grpc.CallOption) (*Configure_Response, error) {
	out := new(Configure_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Configure", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) ReadResource(ctx context.Context, in *ReadResource_Request, opts ...grpc.CallOption) (*ReadResource_Response, error) {
	out := new(ReadResource_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadResource", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) PlanResourceChange(ctx context.Context, in *PlanResourceChange_Request, opts ...grpc.CallOption) (*PlanResourceChange_Response, error) {
	out := new(PlanResourceChange_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/PlanResourceChange", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) {
	out := new(ApplyResourceChange_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ApplyResourceChange", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) {
	out := new(ImportResourceState_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ImportResourceState", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) {
	out := new(ReadDataSource_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/ReadDataSource", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) {
	out := new(Stop_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provider/Stop", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
2844
// ProviderServer is the server API for Provider service.
// A provider plugin implements this interface; each method mirrors the
// corresponding ProviderClient call.
type ProviderServer interface {
	// ////// Information about what a provider supports/expects
	GetSchema(context.Context, *GetProviderSchema_Request) (*GetProviderSchema_Response, error)
	PrepareProviderConfig(context.Context, *PrepareProviderConfig_Request) (*PrepareProviderConfig_Response, error)
	ValidateResourceTypeConfig(context.Context, *ValidateResourceTypeConfig_Request) (*ValidateResourceTypeConfig_Response, error)
	ValidateDataSourceConfig(context.Context, *ValidateDataSourceConfig_Request) (*ValidateDataSourceConfig_Response, error)
	UpgradeResourceState(context.Context, *UpgradeResourceState_Request) (*UpgradeResourceState_Response, error)
	// ////// One-time initialization, called before other functions below
	Configure(context.Context, *Configure_Request) (*Configure_Response, error)
	// ////// Managed Resource Lifecycle
	ReadResource(context.Context, *ReadResource_Request) (*ReadResource_Response, error)
	PlanResourceChange(context.Context, *PlanResourceChange_Request) (*PlanResourceChange_Response, error)
	ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error)
	ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error)
	ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error)
	// ////// Graceful Shutdown
	Stop(context.Context, *Stop_Request) (*Stop_Response, error)
}

// RegisterProviderServer registers a ProviderServer implementation with
// the given gRPC server under the Provider service descriptor.
func RegisterProviderServer(s *grpc.Server, srv ProviderServer) {
	s.RegisterService(&_Provider_serviceDesc, srv)
}
2868
2869func _Provider_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
2870 in := new(GetProviderSchema_Request)
2871 if err := dec(in); err != nil {
2872 return nil, err
2873 }
2874 if interceptor == nil {
2875 return srv.(ProviderServer).GetSchema(ctx, in)
2876 }
2877 info := &grpc.UnaryServerInfo{
2878 Server: srv,
2879 FullMethod: "/tfplugin5.Provider/GetSchema",
2880 }
2881 handler := func(ctx context.Context, req interface{}) (interface{}, error) {
2882 return srv.(ProviderServer).GetSchema(ctx, req.(*GetProviderSchema_Request))
2883 }
2884 return interceptor(ctx, in, info, handler)
2885}
2886
2887func _Provider_PrepareProviderConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
2888 in := new(PrepareProviderConfig_Request)
2889 if err := dec(in); err != nil {
2890 return nil, err
2891 }
2892 if interceptor == nil {
2893 return srv.(ProviderServer).PrepareProviderConfig(ctx, in)
2894 }
2895 info := &grpc.UnaryServerInfo{
2896 Server: srv,
2897 FullMethod: "/tfplugin5.Provider/PrepareProviderConfig",
2898 }
2899 handler := func(ctx context.Context, req interface{}) (interface{}, error) {
2900 return srv.(ProviderServer).PrepareProviderConfig(ctx, req.(*PrepareProviderConfig_Request))
2901 }
2902 return interceptor(ctx, in, info, handler)
2903}
2904
2905func _Provider_ValidateResourceTypeConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
2906 in := new(ValidateResourceTypeConfig_Request)
2907 if err := dec(in); err != nil {
2908 return nil, err
2909 }
2910 if interceptor == nil {
2911 return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, in)
2912 }
2913 info := &grpc.UnaryServerInfo{
2914 Server: srv,
2915 FullMethod: "/tfplugin5.Provider/ValidateResourceTypeConfig",
2916 }
2917 handler := func(ctx context.Context, req interface{}) (interface{}, error) {
2918 return srv.(ProviderServer).ValidateResourceTypeConfig(ctx, req.(*ValidateResourceTypeConfig_Request))
2919 }
2920 return interceptor(ctx, in, info, handler)
2921}
2922
// _Provider_ValidateDataSourceConfig_Handler is the generated server-side
// adapter for the unary tfplugin5.Provider/ValidateDataSourceConfig RPC:
// decode the request, then dispatch to the ProviderServer implementation,
// optionally through the configured unary interceptor.
func _Provider_ValidateDataSourceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ValidateDataSourceConfig_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ValidateDataSourceConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ValidateDataSourceConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ValidateDataSourceConfig(ctx, req.(*ValidateDataSourceConfig_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2940
// _Provider_UpgradeResourceState_Handler is the generated server-side
// adapter for the unary tfplugin5.Provider/UpgradeResourceState RPC:
// decode the request, then dispatch to the ProviderServer implementation,
// optionally through the configured unary interceptor.
func _Provider_UpgradeResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpgradeResourceState_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).UpgradeResourceState(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/UpgradeResourceState",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).UpgradeResourceState(ctx, req.(*UpgradeResourceState_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2958
// _Provider_Configure_Handler is the generated server-side adapter for the
// unary tfplugin5.Provider/Configure RPC: decode the request, then dispatch
// to the ProviderServer implementation, optionally through the configured
// unary interceptor.
func _Provider_Configure_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(Configure_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).Configure(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/Configure",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).Configure(ctx, req.(*Configure_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2976
// _Provider_ReadResource_Handler is the generated server-side adapter for
// the unary tfplugin5.Provider/ReadResource RPC: decode the request, then
// dispatch to the ProviderServer implementation, optionally through the
// configured unary interceptor.
func _Provider_ReadResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ReadResource_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ReadResource(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ReadResource",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ReadResource(ctx, req.(*ReadResource_Request))
	}
	return interceptor(ctx, in, info, handler)
}
2994
// _Provider_PlanResourceChange_Handler is the generated server-side adapter
// for the unary tfplugin5.Provider/PlanResourceChange RPC: decode the
// request, then dispatch to the ProviderServer implementation, optionally
// through the configured unary interceptor.
func _Provider_PlanResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(PlanResourceChange_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).PlanResourceChange(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/PlanResourceChange",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).PlanResourceChange(ctx, req.(*PlanResourceChange_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3012
// _Provider_ApplyResourceChange_Handler is the generated server-side adapter
// for the unary tfplugin5.Provider/ApplyResourceChange RPC: decode the
// request, then dispatch to the ProviderServer implementation, optionally
// through the configured unary interceptor.
func _Provider_ApplyResourceChange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ApplyResourceChange_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ApplyResourceChange(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ApplyResourceChange",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ApplyResourceChange(ctx, req.(*ApplyResourceChange_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3030
// _Provider_ImportResourceState_Handler is the generated server-side adapter
// for the unary tfplugin5.Provider/ImportResourceState RPC: decode the
// request, then dispatch to the ProviderServer implementation, optionally
// through the configured unary interceptor.
func _Provider_ImportResourceState_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ImportResourceState_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ImportResourceState(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ImportResourceState",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ImportResourceState(ctx, req.(*ImportResourceState_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3048
// _Provider_ReadDataSource_Handler is the generated server-side adapter for
// the unary tfplugin5.Provider/ReadDataSource RPC: decode the request, then
// dispatch to the ProviderServer implementation, optionally through the
// configured unary interceptor.
func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ReadDataSource_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).ReadDataSource(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/ReadDataSource",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).ReadDataSource(ctx, req.(*ReadDataSource_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3066
// _Provider_Stop_Handler is the generated server-side adapter for the unary
// tfplugin5.Provider/Stop RPC: decode the request, then dispatch to the
// ProviderServer implementation, optionally through the configured unary
// interceptor.
func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(Stop_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProviderServer).Stop(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provider/Stop",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProviderServer).Stop(ctx, req.(*Stop_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3084
// _Provider_serviceDesc describes the tfplugin5.Provider service for the
// gRPC runtime, mapping each RPC method name to its generated handler.
// Every Provider RPC is unary, so Streams is empty. Registration with a
// *grpc.Server presumably happens via a RegisterProviderServer function
// earlier in this file — confirm there.
var _Provider_serviceDesc = grpc.ServiceDesc{
	ServiceName: "tfplugin5.Provider",
	HandlerType: (*ProviderServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetSchema",
			Handler:    _Provider_GetSchema_Handler,
		},
		{
			MethodName: "PrepareProviderConfig",
			Handler:    _Provider_PrepareProviderConfig_Handler,
		},
		{
			MethodName: "ValidateResourceTypeConfig",
			Handler:    _Provider_ValidateResourceTypeConfig_Handler,
		},
		{
			MethodName: "ValidateDataSourceConfig",
			Handler:    _Provider_ValidateDataSourceConfig_Handler,
		},
		{
			MethodName: "UpgradeResourceState",
			Handler:    _Provider_UpgradeResourceState_Handler,
		},
		{
			MethodName: "Configure",
			Handler:    _Provider_Configure_Handler,
		},
		{
			MethodName: "ReadResource",
			Handler:    _Provider_ReadResource_Handler,
		},
		{
			MethodName: "PlanResourceChange",
			Handler:    _Provider_PlanResourceChange_Handler,
		},
		{
			MethodName: "ApplyResourceChange",
			Handler:    _Provider_ApplyResourceChange_Handler,
		},
		{
			MethodName: "ImportResourceState",
			Handler:    _Provider_ImportResourceState_Handler,
		},
		{
			MethodName: "ReadDataSource",
			Handler:    _Provider_ReadDataSource_Handler,
		},
		{
			MethodName: "Stop",
			Handler:    _Provider_Stop_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "tfplugin5.proto",
}
3141
// ProvisionerClient is the client API for Provisioner service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type ProvisionerClient interface {
	// GetSchema invokes the unary tfplugin5.Provisioner/GetSchema RPC.
	GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error)
	// ValidateProvisionerConfig invokes the unary
	// tfplugin5.Provisioner/ValidateProvisionerConfig RPC.
	ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error)
	// ProvisionResource opens the server-streaming
	// tfplugin5.Provisioner/ProvisionResource RPC; responses are read
	// from the returned stream via Recv.
	ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error)
	// Stop invokes the unary tfplugin5.Provisioner/Stop RPC.
	Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error)
}
3151
// provisionerClient is the generated concrete ProvisionerClient that
// issues its RPCs over a shared *grpc.ClientConn.
type provisionerClient struct {
	cc *grpc.ClientConn
}
3155
// NewProvisionerClient returns a ProvisionerClient that performs all of
// its calls over the given connection.
func NewProvisionerClient(cc *grpc.ClientConn) ProvisionerClient {
	return &provisionerClient{cc}
}
3159
// GetSchema issues the unary tfplugin5.Provisioner/GetSchema RPC over the
// client's connection and returns the decoded response.
func (c *provisionerClient) GetSchema(ctx context.Context, in *GetProvisionerSchema_Request, opts ...grpc.CallOption) (*GetProvisionerSchema_Response, error) {
	out := new(GetProvisionerSchema_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/GetSchema", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
3168
// ValidateProvisionerConfig issues the unary
// tfplugin5.Provisioner/ValidateProvisionerConfig RPC over the client's
// connection and returns the decoded response.
func (c *provisionerClient) ValidateProvisionerConfig(ctx context.Context, in *ValidateProvisionerConfig_Request, opts ...grpc.CallOption) (*ValidateProvisionerConfig_Response, error) {
	out := new(ValidateProvisionerConfig_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/ValidateProvisionerConfig", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
3177
// ProvisionResource opens the server-streaming
// tfplugin5.Provisioner/ProvisionResource RPC: it creates the stream,
// sends the single request, half-closes the send side, and returns the
// receive side for the caller to drain via Recv.
func (c *provisionerClient) ProvisionResource(ctx context.Context, in *ProvisionResource_Request, opts ...grpc.CallOption) (Provisioner_ProvisionResourceClient, error) {
	stream, err := c.cc.NewStream(ctx, &_Provisioner_serviceDesc.Streams[0], "/tfplugin5.Provisioner/ProvisionResource", opts...)
	if err != nil {
		return nil, err
	}
	x := &provisionerProvisionResourceClient{stream}
	// Send the lone request message, then close our send direction so the
	// server knows no further input is coming.
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}
3192
// Provisioner_ProvisionResourceClient is the client-side view of the
// ProvisionResource response stream: Recv yields each response until the
// stream ends.
type Provisioner_ProvisionResourceClient interface {
	Recv() (*ProvisionResource_Response, error)
	grpc.ClientStream
}
3197
// provisionerProvisionResourceClient adapts a raw grpc.ClientStream into
// the typed Provisioner_ProvisionResourceClient interface.
type provisionerProvisionResourceClient struct {
	grpc.ClientStream
}
3201
// Recv blocks until the next ProvisionResource_Response arrives on the
// stream and returns it; the error reports stream termination (including
// io.EOF at normal end of stream, per grpc.ClientStream semantics).
func (x *provisionerProvisionResourceClient) Recv() (*ProvisionResource_Response, error) {
	m := new(ProvisionResource_Response)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
3209
// Stop issues the unary tfplugin5.Provisioner/Stop RPC over the client's
// connection and returns the decoded response.
func (c *provisionerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) {
	out := new(Stop_Response)
	err := c.cc.Invoke(ctx, "/tfplugin5.Provisioner/Stop", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
3218
// ProvisionerServer is the server API for Provisioner service.
// Implementations are registered with a gRPC server via
// RegisterProvisionerServer.
type ProvisionerServer interface {
	GetSchema(context.Context, *GetProvisionerSchema_Request) (*GetProvisionerSchema_Response, error)
	ValidateProvisionerConfig(context.Context, *ValidateProvisionerConfig_Request) (*ValidateProvisionerConfig_Response, error)
	// ProvisionResource handles the server-streaming RPC: responses are
	// written to the provided stream via Send.
	ProvisionResource(*ProvisionResource_Request, Provisioner_ProvisionResourceServer) error
	Stop(context.Context, *Stop_Request) (*Stop_Response, error)
}
3226
// RegisterProvisionerServer registers the ProvisionerServer implementation
// with the gRPC server using the generated service descriptor, so incoming
// tfplugin5.Provisioner calls are dispatched to srv.
func RegisterProvisionerServer(s *grpc.Server, srv ProvisionerServer) {
	s.RegisterService(&_Provisioner_serviceDesc, srv)
}
3230
// _Provisioner_GetSchema_Handler is the generated server-side adapter for
// the unary tfplugin5.Provisioner/GetSchema RPC: decode the request, then
// dispatch to the ProvisionerServer implementation, optionally through the
// configured unary interceptor.
func _Provisioner_GetSchema_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetProvisionerSchema_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProvisionerServer).GetSchema(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provisioner/GetSchema",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProvisionerServer).GetSchema(ctx, req.(*GetProvisionerSchema_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3248
// _Provisioner_ValidateProvisionerConfig_Handler is the generated
// server-side adapter for the unary
// tfplugin5.Provisioner/ValidateProvisionerConfig RPC: decode the request,
// then dispatch to the ProvisionerServer implementation, optionally through
// the configured unary interceptor.
func _Provisioner_ValidateProvisionerConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ValidateProvisionerConfig_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provisioner/ValidateProvisionerConfig",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProvisionerServer).ValidateProvisionerConfig(ctx, req.(*ValidateProvisionerConfig_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3266
// _Provisioner_ProvisionResource_Handler is the generated adapter for the
// server-streaming tfplugin5.Provisioner/ProvisionResource RPC: it reads
// the single request from the stream, then hands the typed send side to
// the ProvisionerServer implementation.
func _Provisioner_ProvisionResource_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(ProvisionResource_Request)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(ProvisionerServer).ProvisionResource(m, &provisionerProvisionResourceServer{stream})
}
3274
// Provisioner_ProvisionResourceServer is the server-side view of the
// ProvisionResource response stream: the implementation emits each
// response with Send.
type Provisioner_ProvisionResourceServer interface {
	Send(*ProvisionResource_Response) error
	grpc.ServerStream
}
3279
// provisionerProvisionResourceServer adapts a raw grpc.ServerStream into
// the typed Provisioner_ProvisionResourceServer interface.
type provisionerProvisionResourceServer struct {
	grpc.ServerStream
}
3283
// Send writes a ProvisionResource_Response onto the underlying server
// stream.
func (x *provisionerProvisionResourceServer) Send(m *ProvisionResource_Response) error {
	return x.ServerStream.SendMsg(m)
}
3287
// _Provisioner_Stop_Handler is the generated server-side adapter for the
// unary tfplugin5.Provisioner/Stop RPC: decode the request, then dispatch
// to the ProvisionerServer implementation, optionally through the
// configured unary interceptor.
func _Provisioner_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(Stop_Request)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ProvisionerServer).Stop(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/tfplugin5.Provisioner/Stop",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ProvisionerServer).Stop(ctx, req.(*Stop_Request))
	}
	return interceptor(ctx, in, info, handler)
}
3305
// _Provisioner_serviceDesc describes the tfplugin5.Provisioner service for
// the gRPC runtime: three unary methods plus the server-streaming
// ProvisionResource RPC. RegisterProvisionerServer hands this descriptor
// to grpc.Server.RegisterService.
var _Provisioner_serviceDesc = grpc.ServiceDesc{
	ServiceName: "tfplugin5.Provisioner",
	HandlerType: (*ProvisionerServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetSchema",
			Handler:    _Provisioner_GetSchema_Handler,
		},
		{
			MethodName: "ValidateProvisionerConfig",
			Handler:    _Provisioner_ValidateProvisionerConfig_Handler,
		},
		{
			MethodName: "Stop",
			Handler:    _Provisioner_Stop_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "ProvisionResource",
			Handler:       _Provisioner_ProvisionResource_Handler,
			ServerStreams: true,
		},
	},
	Metadata: "tfplugin5.proto",
}
3332
// init registers the compiled tfplugin5.proto file descriptor with the
// protobuf runtime at package load time.
func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_tfplugin5_56820f4fb67360c5) }
3334
3335var fileDescriptor_tfplugin5_56820f4fb67360c5 = []byte{
3336 // 1876 bytes of a gzipped FileDescriptorProto
3337 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x23, 0x49,
3338 0x15, 0x9f, 0x76, 0xdb, 0x89, 0xfd, 0x9c, 0x0f, 0xa7, 0x66, 0x76, 0x30, 0xbd, 0xbb, 0x10, 0xcc,
3339 0x47, 0xb2, 0xda, 0x1d, 0xcf, 0x2a, 0x03, 0xbb, 0x4b, 0x18, 0xad, 0xc8, 0x66, 0x42, 0x26, 0x62,
3340 0x26, 0x1b, 0xca, 0xf3, 0x81, 0x84, 0xb4, 0x56, 0x8d, 0xbb, 0xe2, 0x69, 0xc6, 0xee, 0xee, 0xad,
3341 0x2e, 0x67, 0x62, 0x71, 0x44, 0x70, 0xe6, 0xc2, 0x87, 0xc4, 0xc7, 0x85, 0x03, 0xff, 0x00, 0x07,
3342 0xe0, 0xc6, 0x89, 0x7f, 0x80, 0x1b, 0x70, 0x42, 0x70, 0x43, 0x1c, 0xe1, 0x82, 0x84, 0xea, 0xab,
3343 0xbb, 0x6c, 0xb7, 0x93, 0x9e, 0x64, 0x57, 0x88, 0x5b, 0x57, 0xbd, 0x5f, 0xbd, 0xf7, 0xab, 0xf7,
3344 0x5e, 0xbd, 0x57, 0x65, 0xc3, 0x2a, 0x3f, 0x8e, 0x07, 0xa3, 0x7e, 0x10, 0x7e, 0xa9, 0x1d, 0xb3,
3345 0x88, 0x47, 0xa8, 0x96, 0x4e, 0xb4, 0x6e, 0xc3, 0xd2, 0x9d, 0x71, 0x48, 0x86, 0x41, 0xef, 0x11,
3346 0x19, 0x8c, 0x28, 0x6a, 0xc2, 0xe2, 0x30, 0xe9, 0xc7, 0xa4, 0xf7, 0xac, 0xe9, 0xac, 0x3b, 0x9b,
3347 0x4b, 0xd8, 0x0c, 0x11, 0x82, 0xf2, 0xb7, 0x93, 0x28, 0x6c, 0x96, 0xe4, 0xb4, 0xfc, 0x6e, 0xfd,
3348 0xd5, 0x01, 0xb8, 0x13, 0x90, 0x7e, 0x18, 0x25, 0x3c, 0xe8, 0xa1, 0x6d, 0xa8, 0x26, 0xf4, 0x84,
3349 0xb2, 0x80, 0x8f, 0xe5, 0xea, 0x95, 0xad, 0x4f, 0xb5, 0x33, 0xdb, 0x19, 0xb0, 0xdd, 0xd1, 0x28,
3350 0x9c, 0xe2, 0x85, 0xe1, 0x64, 0x34, 0x1c, 0x12, 0x36, 0x96, 0x16, 0x6a, 0xd8, 0x0c, 0xd1, 0x75,
3351 0x58, 0xf0, 0x29, 0x27, 0xc1, 0xa0, 0xe9, 0x4a, 0x81, 0x1e, 0xa1, 0xb7, 0xa0, 0x46, 0x38, 0x67,
3352 0xc1, 0x93, 0x11, 0xa7, 0xcd, 0xf2, 0xba, 0xb3, 0x59, 0xdf, 0x6a, 0x5a, 0xe6, 0x76, 0x8c, 0xec,
3353 0x88, 0xf0, 0xa7, 0x38, 0x83, 0xb6, 0x6e, 0x42, 0xd5, 0xd8, 0x47, 0x75, 0x58, 0x3c, 0x38, 0x7c,
3354 0xb4, 0x73, 0xef, 0xe0, 0x4e, 0xe3, 0x0a, 0xaa, 0x41, 0x65, 0x0f, 0xe3, 0xf7, 0x71, 0xc3, 0x11,
3355 0xf3, 0x8f, 0x77, 0xf0, 0xe1, 0xc1, 0xe1, 0x7e, 0xa3, 0xd4, 0xfa, 0xb3, 0x03, 0xcb, 0x13, 0xda,
3356 0xd0, 0x2d, 0xa8, 0x24, 0x9c, 0xc6, 0x49, 0xd3, 0x59, 0x77, 0x37, 0xeb, 0x5b, 0xaf, 0xce, 0x33,
3357 0xdb, 0xee, 0x70, 0x1a, 0x63, 0x85, 0xf5, 0x7e, 0xe8, 0x40, 0x59, 0x8c, 0xd1, 0x06, 0xac, 0xa4,
3358 0x6c, 0xba, 0x21, 0x19, 0x52, 0xe9, 0xac, 0xda, 0xdd, 0x2b, 0x78, 0x39, 0x9d, 0x3f, 0x24, 0x43,
3359 0x8a, 0xda, 0x80, 0xe8, 0x80, 0x0e, 0x69, 0xc8, 0xbb, 0xcf, 0xe8, 0xb8, 0x9b, 0x70, 0x16, 0x84,
3360 0x7d, 0xe5, 0x9e, 0xbb, 0x57, 0x70, 0x43, 0xcb, 0xbe, 0x4e, 0xc7, 0x1d, 0x29, 0x41, 0x9b, 0xb0,
3361 0x6a, 0xe3, 0x83, 0x90, 0x4b, 0x97, 0xb9, 0x42, 0x73, 0x06, 0x3e, 0x08, 0xf9, 0x7b, 0x20, 0x22,
3362 0x35, 0xa0, 0x3d, 0x1e, 0xb1, 0xd6, 0x2d, 0x41, 0x2b, 0x8a, 0xbd, 0x1a, 0x2c, 0x62, 0xfa, 0xe1,
3363 0x88, 0x26, 0xdc, 0x5b, 0x87, 0x2a, 0xa6, 0x49, 0x1c, 0x85, 0x09, 0x45, 0xd7, 0xa0, 0xb2, 0xc7,
3364 0x58, 0xc4, 0x14, 0x49, 0xac, 0x06, 0xad, 0x1f, 0x39, 0x50, 0xc5, 0xe4, 0x79, 0x87, 0x13, 0x4e,
3365 0xd3, 0xd4, 0x70, 0xb2, 0xd4, 0x40, 0xdb, 0xb0, 0x78, 0x3c, 0x20, 0x7c, 0x48, 0xe2, 0x66, 0x49,
3366 0x3a, 0x69, 0xdd, 0x72, 0x92, 0x59, 0xd9, 0xfe, 0x9a, 0x82, 0xec, 0x85, 0x9c, 0x8d, 0xb1, 0x59,
3367 0xe0, 0x6d, 0xc3, 0x92, 0x2d, 0x40, 0x0d, 0x70, 0x9f, 0xd1, 0xb1, 0x26, 0x20, 0x3e, 0x05, 0xa9,
3368 0x13, 0x91, 0xaf, 0x3a, 0x57, 0xd4, 0x60, 0xbb, 0xf4, 0x8e, 0xd3, 0xfa, 0x7b, 0x05, 0x16, 0x3a,
3369 0xbd, 0xa7, 0x74, 0x48, 0x44, 0x4a, 0x9d, 0x50, 0x96, 0x04, 0x9a, 0x99, 0x8b, 0xcd, 0x10, 0xdd,
3370 0x80, 0xca, 0x93, 0x41, 0xd4, 0x7b, 0x26, 0x97, 0xd7, 0xb7, 0x3e, 0x61, 0x51, 0x53, 0x6b, 0xdb,
3371 0xef, 0x09, 0x31, 0x56, 0x28, 0xef, 0x17, 0x0e, 0x54, 0xe4, 0xc4, 0x19, 0x2a, 0xbf, 0x02, 0x90,
3372 0x06, 0x2f, 0xd1, 0x5b, 0x7e, 0x79, 0x56, 0x6f, 0x9a, 0x1e, 0xd8, 0x82, 0xa3, 0x77, 0xa1, 0x2e,
3373 0x2d, 0x75, 0xf9, 0x38, 0xa6, 0x49, 0xd3, 0x9d, 0xc9, 0x2a, 0xbd, 0xfa, 0x90, 0x26, 0x9c, 0xfa,
3374 0x8a, 0x1b, 0xc8, 0x15, 0x0f, 0xc4, 0x02, 0xef, 0x0f, 0x0e, 0xd4, 0x52, 0xcd, 0x22, 0x1c, 0x59,
3375 0x56, 0x61, 0xf9, 0x2d, 0xe6, 0x84, 0x6e, 0x73, 0x7a, 0xc5, 0x37, 0x5a, 0x87, 0xba, 0x4f, 0x93,
3376 0x1e, 0x0b, 0x62, 0x2e, 0x36, 0xa4, 0x4e, 0x97, 0x3d, 0x85, 0x3c, 0xa8, 0x32, 0xfa, 0xe1, 0x28,
3377 0x60, 0xd4, 0x97, 0x27, 0xac, 0x8a, 0xd3, 0xb1, 0x90, 0x45, 0x12, 0x45, 0x06, 0xcd, 0x8a, 0x92,
3378 0x99, 0xb1, 0x90, 0xf5, 0xa2, 0x61, 0x3c, 0xe2, 0xd4, 0x6f, 0x2e, 0x28, 0x99, 0x19, 0xa3, 0x57,
3379 0xa0, 0x96, 0xd0, 0x30, 0x09, 0x78, 0x70, 0x42, 0x9b, 0x8b, 0x52, 0x98, 0x4d, 0x78, 0xbf, 0x2a,
3380 0x41, 0xdd, 0xda, 0x25, 0x7a, 0x19, 0x6a, 0x82, 0xab, 0x75, 0x4c, 0x70, 0x55, 0x4c, 0xc8, 0xf3,
3381 0xf1, 0x62, 0x61, 0x44, 0xbb, 0xb0, 0x18, 0xd2, 0x84, 0x8b, 0x33, 0xe4, 0xca, 0xea, 0xf4, 0xda,
3382 0x99, 0x1e, 0x96, 0xdf, 0x41, 0xd8, 0xbf, 0x1f, 0xf9, 0x14, 0x9b, 0x95, 0x82, 0xd0, 0x30, 0x08,
3383 0xbb, 0x01, 0xa7, 0xc3, 0x44, 0xfa, 0xc4, 0xc5, 0xd5, 0x61, 0x10, 0x1e, 0x88, 0xb1, 0x14, 0x92,
3384 0x53, 0x2d, 0xac, 0x68, 0x21, 0x39, 0x95, 0xc2, 0xd6, 0x7d, 0xb5, 0x33, 0xad, 0x71, 0xb2, 0xf4,
3385 0x00, 0x2c, 0x74, 0x0e, 0x0e, 0xf7, 0xef, 0xed, 0x35, 0x1c, 0x54, 0x85, 0xf2, 0xbd, 0x83, 0xce,
3386 0x83, 0x46, 0x09, 0x2d, 0x82, 0xdb, 0xd9, 0x7b, 0xd0, 0x70, 0xc5, 0xc7, 0xfd, 0x9d, 0xa3, 0x46,
3387 0x59, 0x94, 0xa8, 0x7d, 0xfc, 0xfe, 0xc3, 0xa3, 0x46, 0xa5, 0xf5, 0x93, 0x32, 0xac, 0xed, 0x53,
3388 0x7e, 0xc4, 0xa2, 0x93, 0xc0, 0xa7, 0x4c, 0xf1, 0xb7, 0x0f, 0xf1, 0xbf, 0x5c, 0xeb, 0x14, 0xdf,
3389 0x80, 0x6a, 0xac, 0x91, 0xd2, 0x8d, 0xf5, 0xad, 0xb5, 0x99, 0xcd, 0xe3, 0x14, 0x82, 0x28, 0x34,
3390 0x18, 0x4d, 0xa2, 0x11, 0xeb, 0xd1, 0x6e, 0x22, 0x85, 0x26, 0xa7, 0xb7, 0xad, 0x65, 0x33, 0xe6,
3391 0xdb, 0xc6, 0x9e, 0xf8, 0x90, 0xab, 0xd5, 0x7c, 0xa2, 0x0e, 0xf8, 0x2a, 0x9b, 0x9c, 0x45, 0x03,
3392 0xb8, 0xea, 0x13, 0x4e, 0xba, 0x53, 0x96, 0x54, 0xfe, 0xdf, 0x2e, 0x66, 0xe9, 0x0e, 0xe1, 0xa4,
3393 0x33, 0x6b, 0x6b, 0xcd, 0x9f, 0x9e, 0x47, 0x6f, 0x43, 0xdd, 0x4f, 0x7b, 0x90, 0x08, 0x9e, 0xb0,
3394 0xf2, 0x52, 0x6e, 0x87, 0xc2, 0x36, 0xd2, 0x7b, 0x08, 0xd7, 0xf2, 0xf6, 0x93, 0x53, 0x97, 0x36,
3395 0xec, 0xba, 0x94, 0xeb, 0xe3, 0xac, 0x54, 0x79, 0x8f, 0xe1, 0x7a, 0x3e, 0xf9, 0x4b, 0x2a, 0x6e,
3396 0xfd, 0xc9, 0x81, 0x97, 0x8e, 0x18, 0x8d, 0x09, 0xa3, 0xc6, 0x6b, 0xbb, 0x51, 0x78, 0x1c, 0xf4,
3397 0xbd, 0xed, 0x34, 0x3d, 0xd0, 0x4d, 0x58, 0xe8, 0xc9, 0x49, 0x9d, 0x0f, 0xf6, 0xe9, 0xb1, 0xaf,
3398 0x04, 0x58, 0xc3, 0xbc, 0xef, 0x39, 0x56, 0x3e, 0x7d, 0x15, 0x56, 0x63, 0x65, 0xc1, 0xef, 0x16,
3399 0x53, 0xb3, 0x62, 0xf0, 0x8a, 0xca, 0x74, 0x34, 0x4a, 0x45, 0xa3, 0xd1, 0xfa, 0x41, 0x09, 0xae,
3400 0x3d, 0x8c, 0xfb, 0x8c, 0xf8, 0x34, 0x8d, 0x8a, 0x68, 0x26, 0x1e, 0xcb, 0x36, 0x77, 0x66, 0xd9,
3401 0xb0, 0x8a, 0x78, 0x69, 0xb2, 0x88, 0xbf, 0x09, 0x35, 0x46, 0x9e, 0x77, 0x13, 0xa1, 0x4e, 0xd6,
3402 0x88, 0xfa, 0xd6, 0xd5, 0x9c, 0xb6, 0x85, 0xab, 0x4c, 0x7f, 0x79, 0xdf, 0xb5, 0x9d, 0xf2, 0x2e,
3403 0xac, 0x8c, 0x14, 0x31, 0x5f, 0xeb, 0x38, 0xc7, 0x27, 0xcb, 0x06, 0xae, 0xfa, 0xe8, 0x85, 0x5d,
3404 0xf2, 0x3b, 0x07, 0xbc, 0x47, 0x64, 0x10, 0xf8, 0x82, 0x9c, 0xf6, 0x89, 0xe8, 0x0c, 0x3a, 0xea,
3405 0x8f, 0x0b, 0x3a, 0x26, 0x4b, 0x89, 0x52, 0xb1, 0x94, 0xd8, 0xb5, 0x36, 0x3f, 0x45, 0xde, 0x29,
3406 0x4c, 0xfe, 0x37, 0x0e, 0x34, 0x0d, 0xf9, 0xec, 0x3c, 0xfc, 0x5f, 0x50, 0xff, 0xad, 0x03, 0x35,
3407 0x45, 0x74, 0xc4, 0xa8, 0xd7, 0xcf, 0xb8, 0xbe, 0x0e, 0x6b, 0x9c, 0x32, 0x46, 0x8e, 0x23, 0x36,
3408 0xec, 0xda, 0x37, 0x86, 0x1a, 0x6e, 0xa4, 0x82, 0x47, 0x3a, 0xeb, 0xfe, 0x37, 0xdc, 0xff, 0xe9,
3409 0xc0, 0x12, 0xa6, 0xc4, 0x37, 0xf9, 0xe2, 0xf9, 0x05, 0x5d, 0x7d, 0x1b, 0x96, 0x7b, 0x23, 0xc6,
3410 0xc4, 0x2d, 0x53, 0x25, 0xf9, 0x39, 0xac, 0x97, 0x34, 0x5a, 0x1d, 0x98, 0xb1, 0xc5, 0xfd, 0x8b,
3411 0x50, 0x0b, 0xe9, 0xf3, 0x62, 0x47, 0xa5, 0x1a, 0xd2, 0xe7, 0x97, 0x3c, 0x25, 0xbf, 0x2e, 0x03,
3412 0x3a, 0x1a, 0x90, 0xd0, 0xec, 0x78, 0xf7, 0x29, 0x09, 0xfb, 0xd4, 0xfb, 0x8f, 0x53, 0x70, 0xe3,
3413 0xef, 0x40, 0x3d, 0x66, 0x41, 0xc4, 0x8a, 0x6d, 0x1b, 0x24, 0x56, 0x51, 0xde, 0x03, 0x14, 0xb3,
3414 0x28, 0x8e, 0x12, 0xea, 0x77, 0xb3, 0x1d, 0xbb, 0x67, 0x2b, 0x68, 0x98, 0x25, 0x87, 0x66, 0xe7,
3415 0x59, 0xa2, 0x94, 0x0b, 0x25, 0x0a, 0xfa, 0x2c, 0x2c, 0x2b, 0xc6, 0x31, 0x0b, 0x4e, 0x84, 0xc9,
3416 0x8a, 0xbc, 0xfe, 0x2d, 0xc9, 0xc9, 0x23, 0x35, 0xe7, 0xfd, 0xbc, 0x64, 0x85, 0xe4, 0x36, 0x2c,
3417 0xc7, 0x03, 0x12, 0x86, 0x45, 0x2b, 0xd8, 0x92, 0x46, 0x2b, 0x82, 0xbb, 0xe2, 0xda, 0x20, 0xef,
3418 0x87, 0x49, 0x97, 0xd1, 0x78, 0x40, 0x7a, 0x54, 0xc7, 0x67, 0xfe, 0xcb, 0x6c, 0xd5, 0xac, 0xc0,
3419 0x6a, 0x01, 0xda, 0x80, 0x55, 0x43, 0xc1, 0xd0, 0x76, 0x25, 0xed, 0x15, 0x3d, 0xad, 0x89, 0x5f,
3420 0xb8, 0x9f, 0xa3, 0x37, 0x00, 0x0d, 0x68, 0x9f, 0xf4, 0xc6, 0xf2, 0xbe, 0xdd, 0x4d, 0xc6, 0x09,
3421 0xa7, 0x43, 0x7d, 0x89, 0x6d, 0x28, 0x89, 0xa8, 0x9e, 0x1d, 0x39, 0xdf, 0xfa, 0xa3, 0x0b, 0x57,
3422 0x77, 0xe2, 0x78, 0x30, 0x9e, 0xca, 0x9b, 0x7f, 0x7f, 0xfc, 0x79, 0x33, 0x13, 0x0d, 0xf7, 0x45,
3423 0xa2, 0xf1, 0xc2, 0xe9, 0x92, 0xe3, 0xf9, 0x4a, 0x9e, 0xe7, 0xbd, 0xdf, 0x3b, 0x97, 0x3e, 0xc5,
3424 0x4d, 0x58, 0x34, 0x36, 0xd4, 0x9b, 0xc4, 0x0c, 0xa7, 0xc3, 0xea, 0x5e, 0x32, 0xac, 0xe5, 0x39,
3425 0x61, 0xfd, 0x47, 0x09, 0xae, 0x1e, 0x0c, 0xe3, 0x88, 0xf1, 0xc9, 0x5b, 0xc4, 0x5b, 0x05, 0xa3,
3426 0xba, 0x02, 0xa5, 0xc0, 0xd7, 0xef, 0xcf, 0x52, 0xe0, 0x7b, 0xa7, 0xd0, 0x50, 0xea, 0x68, 0x5a,
3427 0x52, 0xcf, 0x7d, 0xbd, 0x14, 0x4a, 0x08, 0x85, 0xb2, 0x1d, 0xe6, 0x4e, 0x38, 0xcc, 0xfb, 0xa5,
3428 0x1d, 0x8d, 0x0f, 0x00, 0x05, 0x9a, 0x46, 0xd7, 0x5c, 0xb7, 0x4d, 0x5b, 0xb8, 0x69, 0x99, 0xc8,
3429 0xd9, 0x7a, 0x7b, 0x9a, 0x3f, 0x5e, 0x0b, 0xa6, 0x66, 0x92, 0x8b, 0x57, 0xdf, 0xbf, 0x38, 0xb0,
3430 0x22, 0xfa, 0x4d, 0xd6, 0xe2, 0x3f, 0xbe, 0xe6, 0xce, 0x26, 0x5e, 0x3e, 0x95, 0x42, 0xa9, 0xa9,
3431 0xdd, 0x7c, 0xe1, 0xfd, 0xfd, 0xd4, 0x81, 0x6b, 0xe6, 0x99, 0x22, 0xda, 0x7a, 0xde, 0x93, 0xec,
3432 0xd4, 0xe2, 0x75, 0x4b, 0x54, 0x85, 0x14, 0x3b, 0xff, 0x51, 0x66, 0xa3, 0x2e, 0xce, 0xee, 0x67,
3433 0x0e, 0x7c, 0xd2, 0x5c, 0xb2, 0x2c, 0x8a, 0x1f, 0xc1, 0xb3, 0xe0, 0x23, 0xb9, 0x8c, 0xfc, 0xcd,
3434 0x81, 0xb5, 0x94, 0x56, 0x7a, 0x23, 0x49, 0x2e, 0x4e, 0x0b, 0xbd, 0x0d, 0xd0, 0x8b, 0xc2, 0x90,
3435 0xf6, 0xb8, 0xb9, 0xe7, 0x9f, 0x55, 0x73, 0x33, 0xa8, 0xf7, 0x2d, 0x6b, 0x3f, 0xd7, 0x61, 0x21,
3436 0x1a, 0xf1, 0x78, 0xc4, 0x75, 0x4a, 0xea, 0xd1, 0x85, 0xc3, 0xb0, 0xf5, 0xe3, 0x1a, 0x54, 0xcd,
3437 0x93, 0x0c, 0x7d, 0x13, 0x6a, 0xfb, 0x94, 0xeb, 0x1f, 0xab, 0x3e, 0x77, 0xce, 0x6b, 0x57, 0x25,
3438 0xd0, 0xe7, 0x0b, 0xbd, 0x89, 0xd1, 0x60, 0xce, 0xfb, 0x0f, 0x6d, 0x5a, 0xeb, 0x73, 0x11, 0xa9,
3439 0xa5, 0xd7, 0x0a, 0x20, 0xb5, 0xb5, 0xef, 0x9c, 0xf5, 0xf8, 0x40, 0x37, 0x2c, 0x45, 0xf3, 0x61,
3440 0xa9, 0xdd, 0x76, 0x51, 0xb8, 0x36, 0x3e, 0x9a, 0xff, 0x78, 0x40, 0xaf, 0xe7, 0xe8, 0x9a, 0x06,
3441 0xa5, 0x86, 0xdf, 0x28, 0x06, 0xd6, 0x66, 0x83, 0xfc, 0x37, 0x28, 0xda, 0xb0, 0xb4, 0xe4, 0x01,
3442 0x52, 0x73, 0x9b, 0xe7, 0x03, 0xb5, 0xa9, 0xbb, 0xd6, 0x1b, 0x03, 0xbd, 0x62, 0x2d, 0x4b, 0x67,
3443 0x53, 0xa5, 0xaf, 0xce, 0x91, 0x6a, 0x4d, 0xdf, 0x98, 0xbc, 0xf1, 0xa3, 0x4f, 0xdb, 0x6f, 0x5b,
3444 0x4b, 0x90, 0xea, 0x5b, 0x9f, 0x0f, 0xd0, 0x2a, 0x7b, 0x79, 0x57, 0x6a, 0x64, 0xa7, 0xe9, 0xac,
3445 0x38, 0x55, 0xff, 0x85, 0xf3, 0x60, 0xda, 0xc8, 0x71, 0xee, 0x05, 0x0c, 0xd9, 0xcb, 0x73, 0xe4,
3446 0xa9, 0x99, 0x8d, 0x73, 0x71, 0x99, 0x9d, 0x9c, 0xb6, 0x38, 0x61, 0x27, 0xaf, 0x6d, 0xe6, 0xd9,
3447 0xc9, 0xc7, 0x69, 0x3b, 0x8f, 0xa7, 0x3b, 0x21, 0xfa, 0xcc, 0x94, 0xa3, 0x33, 0x51, 0xaa, 0xbd,
3448 0x75, 0x16, 0x44, 0x2b, 0xfe, 0xb2, 0xfa, 0x29, 0x1f, 0x4d, 0xfc, 0x12, 0xca, 0xa3, 0x38, 0x55,
3449 0xd2, 0x9c, 0x15, 0xa8, 0xa5, 0x5b, 0xdf, 0x77, 0xa1, 0x6e, 0x35, 0x06, 0xf4, 0x81, 0x5d, 0x9c,
3450 0x36, 0x72, 0xca, 0x8e, 0xdd, 0xe3, 0x72, 0xb3, 0x7a, 0x0e, 0x50, 0x53, 0x3d, 0x3d, 0xa3, 0x1f,
3451 0xa1, 0xbc, 0xb3, 0x38, 0x83, 0x4a, 0x8d, 0xde, 0x28, 0x88, 0xd6, 0x96, 0x9f, 0xe4, 0xb4, 0x9a,
3452 0x89, 0xf2, 0x3b, 0x23, 0xcd, 0x2d, 0xbf, 0x79, 0x28, 0x65, 0xe1, 0x4d, 0xe7, 0x12, 0x81, 0x78,
3453 0xb2, 0x20, 0xff, 0xa3, 0xbb, 0xf5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x16, 0x0b, 0x32,
3454 0xb6, 0x1b, 0x00, 0x00,
3455}
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto
new file mode 100644
index 0000000..370faf7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto
@@ -0,0 +1,351 @@
1// Terraform Plugin RPC protocol version 5.0
2//
3// This file defines version 5.0 of the RPC protocol. To implement a plugin
4// against this protocol, copy this definition into your own codebase and
5// use protoc to generate stubs for your target language.
6//
7// This file will be updated in-place in the source Terraform repository for
8// any minor versions of protocol 5, but later minor versions will always be
9// backwards compatible. Breaking changes, if any are required, will come
10// in a subsequent major version with its own separate proto definition.
11//
12// Note that only the proto files included in a release tag of Terraform are
13// official protocol releases. Proto files taken from other commits may include
14// incomplete changes or features that did not make it into a final release.
15// In all reasonable cases, plugin developers should take the proto file from
16// the tag of the most recent release of Terraform, and not from the master
17// branch or any other development branch.
18//
19syntax = "proto3";
20
21package tfplugin5;
22
23// DynamicValue is an opaque encoding of terraform data, with the field name
24// indicating the encoding scheme used.
25message DynamicValue {
26 bytes msgpack = 1;
27 bytes json = 2;
28}
29
30message Diagnostic {
31 enum Severity {
32 INVALID = 0;
33 ERROR = 1;
34 WARNING = 2;
35 }
36 Severity severity = 1;
37 string summary = 2;
38 string detail = 3;
39 AttributePath attribute = 4;
40}
41
42message AttributePath {
43 message Step {
44 oneof selector {
45 // Set "attribute_name" to represent looking up an attribute
46 // in the current object value.
47 string attribute_name = 1;
48 // Set "element_key_*" to represent looking up an element in
49 // an indexable collection type.
50 string element_key_string = 2;
51 int64 element_key_int = 3;
52 }
53 }
54 repeated Step steps = 1;
55}
56
57message Stop {
58 message Request {
59 }
60 message Response {
61 string Error = 1;
62 }
63}
64
65// RawState holds the stored state for a resource to be upgraded by the
66// provider. It can be in one of two formats, the current json encoded format
67// in bytes, or the legacy flatmap format as a map of strings.
68message RawState {
69 bytes json = 1;
70 map<string, string> flatmap = 2;
71}
72
73// Schema is the configuration schema for a Resource, Provider, or Provisioner.
74message Schema {
75 message Block {
76 int64 version = 1;
77 repeated Attribute attributes = 2;
78 repeated NestedBlock block_types = 3;
79 }
80
81 message Attribute {
82 string name = 1;
83 bytes type = 2;
84 string description = 3;
85 bool required = 4;
86 bool optional = 5;
87 bool computed = 6;
88 bool sensitive = 7;
89 }
90
91 message NestedBlock {
92 enum NestingMode {
93 INVALID = 0;
94 SINGLE = 1;
95 LIST = 2;
96 SET = 3;
97 MAP = 4;
98 GROUP = 5;
99 }
100
101 string type_name = 1;
102 Block block = 2;
103 NestingMode nesting = 3;
104 int64 min_items = 4;
105 int64 max_items = 5;
106 }
107
108 // The version of the schema.
109 // Schemas are versioned, so that providers can upgrade a saved resource
110 // state when the schema is changed.
111 int64 version = 1;
112
113 // Block is the top level configuration block for this schema.
114 Block block = 2;
115}
116
117service Provider {
118 //////// Information about what a provider supports/expects
119 rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
120 rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response);
121 rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response);
122 rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response);
123 rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
124
125 //////// One-time initialization, called before other functions below
126 rpc Configure(Configure.Request) returns (Configure.Response);
127
128 //////// Managed Resource Lifecycle
129 rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
130 rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
131 rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
132 rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
133
134 rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
135
136 //////// Graceful Shutdown
137 rpc Stop(Stop.Request) returns (Stop.Response);
138}
139
140message GetProviderSchema {
141 message Request {
142 }
143 message Response {
144 Schema provider = 1;
145 map<string, Schema> resource_schemas = 2;
146 map<string, Schema> data_source_schemas = 3;
147 repeated Diagnostic diagnostics = 4;
148 }
149}
150
151message PrepareProviderConfig {
152 message Request {
153 DynamicValue config = 1;
154 }
155 message Response {
156 DynamicValue prepared_config = 1;
157 repeated Diagnostic diagnostics = 2;
158 }
159}
160
161message UpgradeResourceState {
162 message Request {
163 string type_name = 1;
164
165 // version is the schema_version number recorded in the state file
166 int64 version = 2;
167
168 // raw_state is the raw states as stored for the resource. Core does
169 // not have access to the schema of prior_version, so it's the
170 // provider's responsibility to interpret this value using the
171 // appropriate older schema. The raw_state will be the json encoded
172 // state, or a legacy flat-mapped format.
173 RawState raw_state = 3;
174 }
175 message Response {
176 // new_state is a msgpack-encoded data structure that, when interpreted with
177 // the _current_ schema for this resource type, is functionally equivalent to
178 // that which was given in prior_state_raw.
179 DynamicValue upgraded_state = 1;
180
181 // diagnostics describes any errors encountered during migration that could not
182 // be safely resolved, and warnings about any possibly-risky assumptions made
183 // in the upgrade process.
184 repeated Diagnostic diagnostics = 2;
185 }
186}
187
188message ValidateResourceTypeConfig {
189 message Request {
190 string type_name = 1;
191 DynamicValue config = 2;
192 }
193 message Response {
194 repeated Diagnostic diagnostics = 1;
195 }
196}
197
198message ValidateDataSourceConfig {
199 message Request {
200 string type_name = 1;
201 DynamicValue config = 2;
202 }
203 message Response {
204 repeated Diagnostic diagnostics = 1;
205 }
206}
207
208message Configure {
209 message Request {
210 string terraform_version = 1;
211 DynamicValue config = 2;
212 }
213 message Response {
214 repeated Diagnostic diagnostics = 1;
215 }
216}
217
218message ReadResource {
219 message Request {
220 string type_name = 1;
221 DynamicValue current_state = 2;
222 }
223 message Response {
224 DynamicValue new_state = 1;
225 repeated Diagnostic diagnostics = 2;
226 }
227}
228
229message PlanResourceChange {
230 message Request {
231 string type_name = 1;
232 DynamicValue prior_state = 2;
233 DynamicValue proposed_new_state = 3;
234 DynamicValue config = 4;
235 bytes prior_private = 5;
236 }
237
238 message Response {
239 DynamicValue planned_state = 1;
240 repeated AttributePath requires_replace = 2;
241 bytes planned_private = 3;
242 repeated Diagnostic diagnostics = 4;
243
244
245 // This may be set only by the helper/schema "SDK" in the main Terraform
246 // repository, to request that Terraform Core >=0.12 permit additional
247 // inconsistencies that can result from the legacy SDK type system
248 // and its imprecise mapping to the >=0.12 type system.
249 // The change in behavior implied by this flag makes sense only for the
250 // specific details of the legacy SDK type system, and are not a general
251 // mechanism to avoid proper type handling in providers.
252 //
253 // ==== DO NOT USE THIS ====
254 // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
255 // ==== DO NOT USE THIS ====
256 bool legacy_type_system = 5;
257 }
258}
259
260message ApplyResourceChange {
261 message Request {
262 string type_name = 1;
263 DynamicValue prior_state = 2;
264 DynamicValue planned_state = 3;
265 DynamicValue config = 4;
266 bytes planned_private = 5;
267 }
268 message Response {
269 DynamicValue new_state = 1;
270 bytes private = 2;
271 repeated Diagnostic diagnostics = 3;
272
273 // This may be set only by the helper/schema "SDK" in the main Terraform
274 // repository, to request that Terraform Core >=0.12 permit additional
275 // inconsistencies that can result from the legacy SDK type system
276 // and its imprecise mapping to the >=0.12 type system.
277 // The change in behavior implied by this flag makes sense only for the
278 // specific details of the legacy SDK type system, and are not a general
279 // mechanism to avoid proper type handling in providers.
280 //
281 // ==== DO NOT USE THIS ====
282 // ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
283 // ==== DO NOT USE THIS ====
284 bool legacy_type_system = 4;
285 }
286}
287
288message ImportResourceState {
289 message Request {
290 string type_name = 1;
291 string id = 2;
292 }
293
294 message ImportedResource {
295 string type_name = 1;
296 DynamicValue state = 2;
297 bytes private = 3;
298 }
299
300 message Response {
301 repeated ImportedResource imported_resources = 1;
302 repeated Diagnostic diagnostics = 2;
303 }
304}
305
306message ReadDataSource {
307 message Request {
308 string type_name = 1;
309 DynamicValue config = 2;
310 }
311 message Response {
312 DynamicValue state = 1;
313 repeated Diagnostic diagnostics = 2;
314 }
315}
316
317service Provisioner {
318 rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response);
319 rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response);
320 rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response);
321 rpc Stop(Stop.Request) returns (Stop.Response);
322}
323
324message GetProvisionerSchema {
325 message Request {
326 }
327 message Response {
328 Schema provisioner = 1;
329 repeated Diagnostic diagnostics = 2;
330 }
331}
332
333message ValidateProvisionerConfig {
334 message Request {
335 DynamicValue config = 1;
336 }
337 message Response {
338 repeated Diagnostic diagnostics = 1;
339 }
340}
341
342message ProvisionResource {
343 message Request {
344 DynamicValue config = 1;
345 DynamicValue connection = 2;
346 }
347 message Response {
348 string output = 1;
349 repeated Diagnostic diagnostics = 2;
350 }
351}
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go
new file mode 100644
index 0000000..8f89909
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/doc.go
@@ -0,0 +1,5 @@
1// Package blocktoattr includes some helper functions that can perform
2// preprocessing on a HCL body where a configschema.Block schema is available
3// in order to allow list and set attributes defined in the schema to be
4// optionally written by the user as block syntax.
5package blocktoattr
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go
new file mode 100644
index 0000000..d8c2e77
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/fixup.go
@@ -0,0 +1,187 @@
1package blocktoattr
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcldec"
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/zclconf/go-cty/cty"
8)
9
10// FixUpBlockAttrs takes a raw HCL body and adds some additional normalization
11// functionality to allow attributes that are specified as having list or set
12// type in the schema to be written with HCL block syntax as multiple nested
13// blocks with the attribute name as the block type.
14//
15// This partially restores some of the block/attribute confusion from HCL 1
16// so that existing patterns that depended on that confusion can continue to
17// be used in the short term while we settle on a longer-term strategy.
18//
19// Most of the fixup work is actually done when the returned body is
20// subsequently decoded, so while FixUpBlockAttrs always succeeds, the eventual
21// decode of the body might not, if the content of the body is so ambiguous
22// that there's no safe way to map it to the schema.
23func FixUpBlockAttrs(body hcl.Body, schema *configschema.Block) hcl.Body {
24 // The schema should never be nil, but in practice it seems to be sometimes
25 // in the presence of poorly-configured test mocks, so we'll be robust
26 // by synthesizing an empty one.
27 if schema == nil {
28 schema = &configschema.Block{}
29 }
30
31 return &fixupBody{
32 original: body,
33 schema: schema,
34 names: ambiguousNames(schema),
35 }
36}
37
38type fixupBody struct {
39 original hcl.Body
40 schema *configschema.Block
41 names map[string]struct{}
42}
43
44// Content decodes content from the body. The given schema must be the lower-level
45// representation of the same schema that was previously passed to FixUpBlockAttrs,
46// or else the result is undefined.
47func (b *fixupBody) Content(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Diagnostics) {
48 schema = b.effectiveSchema(schema)
49 content, diags := b.original.Content(schema)
50 return b.fixupContent(content), diags
51}
52
53func (b *fixupBody) PartialContent(schema *hcl.BodySchema) (*hcl.BodyContent, hcl.Body, hcl.Diagnostics) {
54 schema = b.effectiveSchema(schema)
55 content, remain, diags := b.original.PartialContent(schema)
56 remain = &fixupBody{
57 original: remain,
58 schema: b.schema,
59 names: b.names,
60 }
61 return b.fixupContent(content), remain, diags
62}
63
64func (b *fixupBody) JustAttributes() (hcl.Attributes, hcl.Diagnostics) {
65 // FixUpBlockAttrs is not intended to be used in situations where we'd use
66 // JustAttributes, so we just pass this through verbatim to complete our
67 // implementation of hcl.Body.
68 return b.original.JustAttributes()
69}
70
71func (b *fixupBody) MissingItemRange() hcl.Range {
72 return b.original.MissingItemRange()
73}
74
75// effectiveSchema produces a derived *hcl.BodySchema by sniffing the body's
76// content to determine whether the author has used attribute or block syntax
77// for each of the ambigious attributes where both are permitted.
78//
79// The resulting schema will always contain all of the same names that are
80// in the given schema, but some attribute schemas may instead be replaced by
81// block header schemas.
82func (b *fixupBody) effectiveSchema(given *hcl.BodySchema) *hcl.BodySchema {
83 return effectiveSchema(given, b.original, b.names, true)
84}
85
86func (b *fixupBody) fixupContent(content *hcl.BodyContent) *hcl.BodyContent {
87 var ret hcl.BodyContent
88 ret.Attributes = make(hcl.Attributes)
89 for name, attr := range content.Attributes {
90 ret.Attributes[name] = attr
91 }
92 blockAttrVals := make(map[string][]*hcl.Block)
93 for _, block := range content.Blocks {
94 if _, exists := b.names[block.Type]; exists {
95 // If we get here then we've found a block type whose instances need
96 // to be re-interpreted as a list-of-objects attribute. We'll gather
97 // those up and fix them up below.
98 blockAttrVals[block.Type] = append(blockAttrVals[block.Type], block)
99 continue
100 }
101
102 // We need to now re-wrap our inner body so it will be subject to the
103 // same attribute-as-block fixup when recursively decoded.
104 retBlock := *block // shallow copy
105 if blockS, ok := b.schema.BlockTypes[block.Type]; ok {
106 // Would be weird if not ok, but we'll allow it for robustness; body just won't be fixed up, then
107 retBlock.Body = FixUpBlockAttrs(retBlock.Body, &blockS.Block)
108 }
109
110 ret.Blocks = append(ret.Blocks, &retBlock)
111 }
112 // No we'll install synthetic attributes for each of our fixups. We can't
113 // do this exactly because HCL's information model expects an attribute
114 // to be a single decl but we have multiple separate blocks. We'll
115 // approximate things, then, by using only our first block for the source
116 // location information. (We are guaranteed at least one by the above logic.)
117 for name, blocks := range blockAttrVals {
118 ret.Attributes[name] = &hcl.Attribute{
119 Name: name,
120 Expr: &fixupBlocksExpr{
121 blocks: blocks,
122 ety: b.schema.Attributes[name].Type.ElementType(),
123 },
124
125 Range: blocks[0].DefRange,
126 NameRange: blocks[0].TypeRange,
127 }
128 }
129 return &ret
130}
131
132type fixupBlocksExpr struct {
133 blocks hcl.Blocks
134 ety cty.Type
135}
136
137func (e *fixupBlocksExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
138 // In order to produce a suitable value for our expression we need to
139 // now decode the whole descendent block structure under each of our block
140 // bodies.
141 //
142 // That requires us to do something rather strange: we must construct a
143 // synthetic block type schema derived from the element type of the
144 // attribute, thus inverting our usual direction of lowering a schema
145 // into an implied type. Because a type is less detailed than a schema,
146 // the result is imprecise and in particular will just consider all
147 // the attributes to be optional and let the provider eventually decide
148 // whether to return errors if they turn out to be null when required.
149 schema := SchemaForCtyElementType(e.ety) // this schema's ImpliedType will match e.ety
150 spec := schema.DecoderSpec()
151
152 vals := make([]cty.Value, len(e.blocks))
153 var diags hcl.Diagnostics
154 for i, block := range e.blocks {
155 body := FixUpBlockAttrs(block.Body, schema)
156 val, blockDiags := hcldec.Decode(body, spec, ctx)
157 diags = append(diags, blockDiags...)
158 if val == cty.NilVal {
159 val = cty.UnknownVal(e.ety)
160 }
161 vals[i] = val
162 }
163 if len(vals) == 0 {
164 return cty.ListValEmpty(e.ety), diags
165 }
166 return cty.ListVal(vals), diags
167}
168
169func (e *fixupBlocksExpr) Variables() []hcl.Traversal {
170 var ret []hcl.Traversal
171 schema := SchemaForCtyElementType(e.ety)
172 spec := schema.DecoderSpec()
173 for _, block := range e.blocks {
174 ret = append(ret, hcldec.Variables(block.Body, spec)...)
175 }
176 return ret
177}
178
179func (e *fixupBlocksExpr) Range() hcl.Range {
180 // This is not really an appropriate range for the expression but it's
181 // the best we can do from here.
182 return e.blocks[0].DefRange
183}
184
185func (e *fixupBlocksExpr) StartRange() hcl.Range {
186 return e.blocks[0].DefRange
187}
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go
new file mode 100644
index 0000000..2f2463a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go
@@ -0,0 +1,145 @@
1package blocktoattr
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/terraform/configs/configschema"
6 "github.com/zclconf/go-cty/cty"
7)
8
9func ambiguousNames(schema *configschema.Block) map[string]struct{} {
10 if schema == nil {
11 return nil
12 }
13 ambiguousNames := make(map[string]struct{})
14 for name, attrS := range schema.Attributes {
15 aty := attrS.Type
16 if (aty.IsListType() || aty.IsSetType()) && aty.ElementType().IsObjectType() {
17 ambiguousNames[name] = struct{}{}
18 }
19 }
20 return ambiguousNames
21}
22
23func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[string]struct{}, dynamicExpanded bool) *hcl.BodySchema {
24 ret := &hcl.BodySchema{}
25
26 appearsAsBlock := make(map[string]struct{})
27 {
28 // We'll construct some throwaway schemas here just to probe for
29 // whether each of our ambiguous names seems to be being used as
30 // an attribute or a block. We need to check both because in JSON
31 // syntax we rely on the schema to decide between attribute or block
32 // interpretation and so JSON will always answer yes to both of
33 // these questions and we want to prefer the attribute interpretation
34 // in that case.
35 var probeSchema hcl.BodySchema
36
37 for name := range ambiguousNames {
38 probeSchema = hcl.BodySchema{
39 Attributes: []hcl.AttributeSchema{
40 {
41 Name: name,
42 },
43 },
44 }
45 content, _, _ := body.PartialContent(&probeSchema)
46 if _, exists := content.Attributes[name]; exists {
47 // Can decode as an attribute, so we'll go with that.
48 continue
49 }
50 probeSchema = hcl.BodySchema{
51 Blocks: []hcl.BlockHeaderSchema{
52 {
53 Type: name,
54 },
55 },
56 }
57 content, _, _ = body.PartialContent(&probeSchema)
58 if len(content.Blocks) > 0 {
59 // No attribute present and at least one block present, so
60 // we'll need to rewrite this one as a block for a successful
61 // result.
62 appearsAsBlock[name] = struct{}{}
63 }
64 }
65 if !dynamicExpanded {
66 // If we're deciding for a context where dynamic blocks haven't
67 // been expanded yet then we need to probe for those too.
68 probeSchema = hcl.BodySchema{
69 Blocks: []hcl.BlockHeaderSchema{
70 {
71 Type: "dynamic",
72 LabelNames: []string{"type"},
73 },
74 },
75 }
76 content, _, _ := body.PartialContent(&probeSchema)
77 for _, block := range content.Blocks {
78 if _, exists := ambiguousNames[block.Labels[0]]; exists {
79 appearsAsBlock[block.Labels[0]] = struct{}{}
80 }
81 }
82 }
83 }
84
85 for _, attrS := range given.Attributes {
86 if _, exists := appearsAsBlock[attrS.Name]; exists {
87 ret.Blocks = append(ret.Blocks, hcl.BlockHeaderSchema{
88 Type: attrS.Name,
89 })
90 } else {
91 ret.Attributes = append(ret.Attributes, attrS)
92 }
93 }
94
95 // Anything that is specified as a block type in the input schema remains
96 // that way by just passing through verbatim.
97 ret.Blocks = append(ret.Blocks, given.Blocks...)
98
99 return ret
100}
101
102// SchemaForCtyElementType converts a cty object type into an
103// approximately-equivalent configschema.Block representing the element of
104// a list or set. If the given type is not an object type then this
105// function will panic.
106func SchemaForCtyElementType(ty cty.Type) *configschema.Block {
107 atys := ty.AttributeTypes()
108 ret := &configschema.Block{
109 Attributes: make(map[string]*configschema.Attribute, len(atys)),
110 }
111 for name, aty := range atys {
112 ret.Attributes[name] = &configschema.Attribute{
113 Type: aty,
114 Optional: true,
115 }
116 }
117 return ret
118}
119
120// SchemaForCtyContainerType converts a cty list-of-object or set-of-object type
121// into an approximately-equivalent configschema.NestedBlock. If the given type
122// is not of the expected kind then this function will panic.
123func SchemaForCtyContainerType(ty cty.Type) *configschema.NestedBlock {
124 var nesting configschema.NestingMode
125 switch {
126 case ty.IsListType():
127 nesting = configschema.NestingList
128 case ty.IsSetType():
129 nesting = configschema.NestingSet
130 default:
131 panic("unsuitable type")
132 }
133 nested := SchemaForCtyElementType(ty.ElementType())
134 return &configschema.NestedBlock{
135 Nesting: nesting,
136 Block: *nested,
137 }
138}
139
140// TypeCanBeBlocks returns true if the given type is a list-of-object or
141// set-of-object type, and would thus be subject to the blocktoattr fixup
142// if used as an attribute type.
143func TypeCanBeBlocks(ty cty.Type) bool {
144 return (ty.IsListType() || ty.IsSetType()) && ty.ElementType().IsObjectType()
145}
diff --git a/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go
new file mode 100644
index 0000000..e123b8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go
@@ -0,0 +1,43 @@
1package blocktoattr
2
3import (
4 "github.com/hashicorp/hcl2/ext/dynblock"
5 "github.com/hashicorp/hcl2/hcl"
6 "github.com/hashicorp/hcl2/hcldec"
7 "github.com/hashicorp/terraform/configs/configschema"
8)
9
10// ExpandedVariables finds all of the global variables referenced in the
11// given body with the given schema while taking into account the possibilities
12// both of "dynamic" blocks being expanded and the possibility of certain
13// attributes being written instead as nested blocks as allowed by the
14// FixUpBlockAttrs function.
15//
16// This function exists to allow variables to be analyzed prior to dynamic
17// block expansion while also dealing with the fact that dynamic block expansion
18// might in turn produce nested blocks that are subject to FixUpBlockAttrs.
19//
20// This is intended as a drop-in replacement for dynblock.VariablesHCLDec,
21// which is itself a drop-in replacement for hcldec.Variables.
22func ExpandedVariables(body hcl.Body, schema *configschema.Block) []hcl.Traversal {
23 rootNode := dynblock.WalkVariables(body)
24 return walkVariables(rootNode, body, schema)
25}
26
27func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *configschema.Block) []hcl.Traversal {
28 givenRawSchema := hcldec.ImpliedSchema(schema.DecoderSpec())
29 ambiguousNames := ambiguousNames(schema)
30 effectiveRawSchema := effectiveSchema(givenRawSchema, body, ambiguousNames, false)
31 vars, children := node.Visit(effectiveRawSchema)
32
33 for _, child := range children {
34 if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists {
35 vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...)
36 } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists {
37 synthSchema := SchemaForCtyElementType(attrS.Type.ElementType())
38 vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...)
39 }
40 }
41
42 return vars
43}
diff --git a/vendor/github.com/hashicorp/terraform/lang/data.go b/vendor/github.com/hashicorp/terraform/lang/data.go
new file mode 100644
index 0000000..80313d6
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/data.go
@@ -0,0 +1,33 @@
1package lang
2
3import (
4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/tfdiags"
6 "github.com/zclconf/go-cty/cty"
7)
8
9// Data is an interface whose implementations can provide cty.Value
10// representations of objects identified by referenceable addresses from
11// the addrs package.
12//
13// This interface will grow each time a new type of reference is added, and so
14// implementations outside of the Terraform codebases are not advised.
15//
16// Each method returns a suitable value and optionally some diagnostics. If the
17// returned diagnostics contains errors then the type of the returned value is
18// used to construct an unknown value of the same type which is then used in
19// place of the requested object so that type checking can still proceed. In
20// cases where it's not possible to even determine a suitable result type,
21// cty.DynamicVal is returned along with errors describing the problem.
22type Data interface {
23 StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics
24
25 GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
26 GetResourceInstance(addrs.ResourceInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
27 GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
28 GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
29 GetModuleInstanceOutput(addrs.ModuleCallOutput, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
30 GetPathAttr(addrs.PathAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
31 GetTerraformAttr(addrs.TerraformAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
32 GetInputVariable(addrs.InputVariable, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
33}
diff --git a/vendor/github.com/hashicorp/terraform/lang/doc.go b/vendor/github.com/hashicorp/terraform/lang/doc.go
new file mode 100644
index 0000000..af5c5ca
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/doc.go
@@ -0,0 +1,5 @@
1// Package lang deals with the runtime aspects of Terraform's configuration
2// language, with concerns such as expression evaluation. It is closely related
3// to sibling package "configs", which is responsible for configuration
4// parsing and static validation.
5package lang
diff --git a/vendor/github.com/hashicorp/terraform/lang/eval.go b/vendor/github.com/hashicorp/terraform/lang/eval.go
new file mode 100644
index 0000000..a3fb363
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/eval.go
@@ -0,0 +1,477 @@
1package lang
2
3import (
4 "fmt"
5 "log"
6 "strconv"
7
8 "github.com/hashicorp/hcl2/ext/dynblock"
9 "github.com/hashicorp/hcl2/hcl"
10 "github.com/hashicorp/hcl2/hcldec"
11 "github.com/hashicorp/terraform/addrs"
12 "github.com/hashicorp/terraform/configs/configschema"
13 "github.com/hashicorp/terraform/lang/blocktoattr"
14 "github.com/hashicorp/terraform/tfdiags"
15 "github.com/zclconf/go-cty/cty"
16 "github.com/zclconf/go-cty/cty/convert"
17)
18
19// ExpandBlock expands any "dynamic" blocks present in the given body. The
20// result is a body with those blocks expanded, ready to be evaluated with
21// EvalBlock.
22//
23// If the returned diagnostics contains errors then the result may be
24// incomplete or invalid.
25func (s *Scope) ExpandBlock(body hcl.Body, schema *configschema.Block) (hcl.Body, tfdiags.Diagnostics) {
26 spec := schema.DecoderSpec()
27
28 traversals := dynblock.ExpandVariablesHCLDec(body, spec)
29 refs, diags := References(traversals)
30
31 ctx, ctxDiags := s.EvalContext(refs)
32 diags = diags.Append(ctxDiags)
33
34 return dynblock.Expand(body, ctx), diags
35}
36
37// EvalBlock evaluates the given body using the given block schema and returns
38// a cty object value representing its contents. The type of the result conforms
39// to the implied type of the given schema.
40//
41// This function does not automatically expand "dynamic" blocks within the
42// body. If that is desired, first call the ExpandBlock method to obtain
43// an expanded body to pass to this method.
44//
45// If the returned diagnostics contains errors then the result may be
46// incomplete or invalid.
47func (s *Scope) EvalBlock(body hcl.Body, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) {
48 spec := schema.DecoderSpec()
49
50 refs, diags := ReferencesInBlock(body, schema)
51
52 ctx, ctxDiags := s.EvalContext(refs)
53 diags = diags.Append(ctxDiags)
54 if diags.HasErrors() {
55 // We'll stop early if we found problems in the references, because
56 // it's likely evaluation will produce redundant copies of the same errors.
57 return cty.UnknownVal(schema.ImpliedType()), diags
58 }
59
60 // HACK: In order to remain compatible with some assumptions made in
61 // Terraform v0.11 and earlier about the approximate equivalence of
62 // attribute vs. block syntax, we do a just-in-time fixup here to allow
63 // any attribute in the schema that has a list-of-objects or set-of-objects
64 // kind to potentially be populated instead by one or more nested blocks
65 // whose type is the attribute name.
66 body = blocktoattr.FixUpBlockAttrs(body, schema)
67
68 val, evalDiags := hcldec.Decode(body, spec, ctx)
69 diags = diags.Append(evalDiags)
70
71 return val, diags
72}
73
74// EvalExpr evaluates a single expression in the receiving context and returns
75// the resulting value. The value will be converted to the given type before
76// it is returned if possible, or else an error diagnostic will be produced
77// describing the conversion error.
78//
79// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion
80// and just obtain the returned value directly.
81//
82// If the returned diagnostics contains errors then the result may be
83// incomplete, but will always be of the requested type.
84func (s *Scope) EvalExpr(expr hcl.Expression, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) {
85 refs, diags := ReferencesInExpr(expr)
86
87 ctx, ctxDiags := s.EvalContext(refs)
88 diags = diags.Append(ctxDiags)
89 if diags.HasErrors() {
90 // We'll stop early if we found problems in the references, because
91 // it's likely evaluation will produce redundant copies of the same errors.
92 return cty.UnknownVal(wantType), diags
93 }
94
95 val, evalDiags := expr.Value(ctx)
96 diags = diags.Append(evalDiags)
97
98 if wantType != cty.DynamicPseudoType {
99 var convErr error
100 val, convErr = convert.Convert(val, wantType)
101 if convErr != nil {
102 val = cty.UnknownVal(wantType)
103 diags = diags.Append(&hcl.Diagnostic{
104 Severity: hcl.DiagError,
105 Summary: "Incorrect value type",
106 Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)),
107 Subject: expr.Range().Ptr(),
108 })
109 }
110 }
111
112 return val, diags
113}
114
115// EvalReference evaluates the given reference in the receiving scope and
116// returns the resulting value. The value will be converted to the given type before
117// it is returned if possible, or else an error diagnostic will be produced
118// describing the conversion error.
119//
120// Pass an expected type of cty.DynamicPseudoType to skip automatic conversion
121// and just obtain the returned value directly.
122//
123// If the returned diagnostics contains errors then the result may be
124// incomplete, but will always be of the requested type.
125func (s *Scope) EvalReference(ref *addrs.Reference, wantType cty.Type) (cty.Value, tfdiags.Diagnostics) {
126 var diags tfdiags.Diagnostics
127
128 // We cheat a bit here and just build an EvalContext for our requested
129 // reference with the "self" address overridden, and then pull the "self"
130 // result out of it to return.
131 ctx, ctxDiags := s.evalContext([]*addrs.Reference{ref}, ref.Subject)
132 diags = diags.Append(ctxDiags)
133 val := ctx.Variables["self"]
134 if val == cty.NilVal {
135 val = cty.DynamicVal
136 }
137
138 var convErr error
139 val, convErr = convert.Convert(val, wantType)
140 if convErr != nil {
141 val = cty.UnknownVal(wantType)
142 diags = diags.Append(&hcl.Diagnostic{
143 Severity: hcl.DiagError,
144 Summary: "Incorrect value type",
145 Detail: fmt.Sprintf("Invalid expression value: %s.", tfdiags.FormatError(convErr)),
146 Subject: ref.SourceRange.ToHCL().Ptr(),
147 })
148 }
149
150 return val, diags
151}
152
153// EvalContext constructs a HCL expression evaluation context whose variable
154// scope contains sufficient values to satisfy the given set of references.
155//
156// Most callers should prefer to use the evaluation helper methods that
157// this type offers, but this is here for less common situations where the
158// caller will handle the evaluation calls itself.
159func (s *Scope) EvalContext(refs []*addrs.Reference) (*hcl.EvalContext, tfdiags.Diagnostics) {
160 return s.evalContext(refs, s.SelfAddr)
161}
162
163func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceable) (*hcl.EvalContext, tfdiags.Diagnostics) {
164 if s == nil {
165 panic("attempt to construct EvalContext for nil Scope")
166 }
167
168 var diags tfdiags.Diagnostics
169 vals := make(map[string]cty.Value)
170 funcs := s.Functions()
171 ctx := &hcl.EvalContext{
172 Variables: vals,
173 Functions: funcs,
174 }
175
176 if len(refs) == 0 {
177 // Easy path for common case where there are no references at all.
178 return ctx, diags
179 }
180
181 // First we'll do static validation of the references. This catches things
182 // early that might otherwise not get caught due to unknown values being
183 // present in the scope during planning.
184 if staticDiags := s.Data.StaticValidateReferences(refs, selfAddr); staticDiags.HasErrors() {
185 diags = diags.Append(staticDiags)
186 return ctx, diags
187 }
188
189 // The reference set we are given has not been de-duped, and so there can
190 // be redundant requests in it for two reasons:
191 // - The same item is referenced multiple times
192 // - Both an item and that item's container are separately referenced.
193 // We will still visit every reference here and ask our data source for
194 // it, since that allows us to gather a full set of any errors and
195 // warnings, but once we've gathered all the data we'll then skip anything
196 // that's redundant in the process of populating our values map.
197 dataResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{}
198 managedResources := map[string]map[string]map[addrs.InstanceKey]cty.Value{}
199 wholeModules := map[string]map[addrs.InstanceKey]cty.Value{}
200 moduleOutputs := map[string]map[addrs.InstanceKey]map[string]cty.Value{}
201 inputVariables := map[string]cty.Value{}
202 localValues := map[string]cty.Value{}
203 pathAttrs := map[string]cty.Value{}
204 terraformAttrs := map[string]cty.Value{}
205 countAttrs := map[string]cty.Value{}
206 var self cty.Value
207
208 for _, ref := range refs {
209 rng := ref.SourceRange
210 isSelf := false
211
212 rawSubj := ref.Subject
213 if rawSubj == addrs.Self {
214 if selfAddr == nil {
215 diags = diags.Append(&hcl.Diagnostic{
216 Severity: hcl.DiagError,
217 Summary: `Invalid "self" reference`,
218 // This detail message mentions some current practice that
219 // this codepath doesn't really "know about". If the "self"
220 // object starts being supported in more contexts later then
221 // we'll need to adjust this message.
222 Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`,
223 Subject: ref.SourceRange.ToHCL().Ptr(),
224 })
225 continue
226 }
227
228 // Treat "self" as an alias for the configured self address.
229 rawSubj = selfAddr
230 isSelf = true
231
232 if rawSubj == addrs.Self {
233 // Programming error: the self address cannot alias itself.
234 panic("scope SelfAddr attempting to alias itself")
235 }
236 }
237
238 // This type switch must cover all of the "Referenceable" implementations
239 // in package addrs.
240 switch subj := rawSubj.(type) {
241
242 case addrs.ResourceInstance:
243 var into map[string]map[string]map[addrs.InstanceKey]cty.Value
244 switch subj.Resource.Mode {
245 case addrs.ManagedResourceMode:
246 into = managedResources
247 case addrs.DataResourceMode:
248 into = dataResources
249 default:
250 panic(fmt.Errorf("unsupported ResourceMode %s", subj.Resource.Mode))
251 }
252
253 val, valDiags := normalizeRefValue(s.Data.GetResourceInstance(subj, rng))
254 diags = diags.Append(valDiags)
255
256 r := subj.Resource
257 if into[r.Type] == nil {
258 into[r.Type] = make(map[string]map[addrs.InstanceKey]cty.Value)
259 }
260 if into[r.Type][r.Name] == nil {
261 into[r.Type][r.Name] = make(map[addrs.InstanceKey]cty.Value)
262 }
263 into[r.Type][r.Name][subj.Key] = val
264 if isSelf {
265 self = val
266 }
267
268 case addrs.ModuleCallInstance:
269 val, valDiags := normalizeRefValue(s.Data.GetModuleInstance(subj, rng))
270 diags = diags.Append(valDiags)
271
272 if wholeModules[subj.Call.Name] == nil {
273 wholeModules[subj.Call.Name] = make(map[addrs.InstanceKey]cty.Value)
274 }
275 wholeModules[subj.Call.Name][subj.Key] = val
276 if isSelf {
277 self = val
278 }
279
280 case addrs.ModuleCallOutput:
281 val, valDiags := normalizeRefValue(s.Data.GetModuleInstanceOutput(subj, rng))
282 diags = diags.Append(valDiags)
283
284 callName := subj.Call.Call.Name
285 callKey := subj.Call.Key
286 if moduleOutputs[callName] == nil {
287 moduleOutputs[callName] = make(map[addrs.InstanceKey]map[string]cty.Value)
288 }
289 if moduleOutputs[callName][callKey] == nil {
290 moduleOutputs[callName][callKey] = make(map[string]cty.Value)
291 }
292 moduleOutputs[callName][callKey][subj.Name] = val
293 if isSelf {
294 self = val
295 }
296
297 case addrs.InputVariable:
298 val, valDiags := normalizeRefValue(s.Data.GetInputVariable(subj, rng))
299 diags = diags.Append(valDiags)
300 inputVariables[subj.Name] = val
301 if isSelf {
302 self = val
303 }
304
305 case addrs.LocalValue:
306 val, valDiags := normalizeRefValue(s.Data.GetLocalValue(subj, rng))
307 diags = diags.Append(valDiags)
308 localValues[subj.Name] = val
309 if isSelf {
310 self = val
311 }
312
313 case addrs.PathAttr:
314 val, valDiags := normalizeRefValue(s.Data.GetPathAttr(subj, rng))
315 diags = diags.Append(valDiags)
316 pathAttrs[subj.Name] = val
317 if isSelf {
318 self = val
319 }
320
321 case addrs.TerraformAttr:
322 val, valDiags := normalizeRefValue(s.Data.GetTerraformAttr(subj, rng))
323 diags = diags.Append(valDiags)
324 terraformAttrs[subj.Name] = val
325 if isSelf {
326 self = val
327 }
328
329 case addrs.CountAttr:
330 val, valDiags := normalizeRefValue(s.Data.GetCountAttr(subj, rng))
331 diags = diags.Append(valDiags)
332 countAttrs[subj.Name] = val
333 if isSelf {
334 self = val
335 }
336
337 default:
338 // Should never happen
339 panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj))
340 }
341 }
342
343 for k, v := range buildResourceObjects(managedResources) {
344 vals[k] = v
345 }
346 vals["data"] = cty.ObjectVal(buildResourceObjects(dataResources))
347 vals["module"] = cty.ObjectVal(buildModuleObjects(wholeModules, moduleOutputs))
348 vals["var"] = cty.ObjectVal(inputVariables)
349 vals["local"] = cty.ObjectVal(localValues)
350 vals["path"] = cty.ObjectVal(pathAttrs)
351 vals["terraform"] = cty.ObjectVal(terraformAttrs)
352 vals["count"] = cty.ObjectVal(countAttrs)
353 if self != cty.NilVal {
354 vals["self"] = self
355 }
356
357 return ctx, diags
358}
359
360func buildResourceObjects(resources map[string]map[string]map[addrs.InstanceKey]cty.Value) map[string]cty.Value {
361 vals := make(map[string]cty.Value)
362 for typeName, names := range resources {
363 nameVals := make(map[string]cty.Value)
364 for name, keys := range names {
365 nameVals[name] = buildInstanceObjects(keys)
366 }
367 vals[typeName] = cty.ObjectVal(nameVals)
368 }
369 return vals
370}
371
372func buildModuleObjects(wholeModules map[string]map[addrs.InstanceKey]cty.Value, moduleOutputs map[string]map[addrs.InstanceKey]map[string]cty.Value) map[string]cty.Value {
373 vals := make(map[string]cty.Value)
374
375 for name, keys := range wholeModules {
376 vals[name] = buildInstanceObjects(keys)
377 }
378
379 for name, keys := range moduleOutputs {
380 if _, exists := wholeModules[name]; exists {
381 // If we also have a whole module value for this name then we'll
382 // skip this since the individual outputs are embedded in that result.
383 continue
384 }
385
386 // The shape of this collection isn't compatible with buildInstanceObjects,
387 // but rather than replicating most of the buildInstanceObjects logic
388 // here we'll instead first transform the structure to be what that
389 // function expects and then use it. This is a little wasteful, but
390 // we do not expect this these maps to be large and so the extra work
391 // here should not hurt too much.
392 flattened := make(map[addrs.InstanceKey]cty.Value, len(keys))
393 for k, vals := range keys {
394 flattened[k] = cty.ObjectVal(vals)
395 }
396 vals[name] = buildInstanceObjects(flattened)
397 }
398
399 return vals
400}
401
402func buildInstanceObjects(keys map[addrs.InstanceKey]cty.Value) cty.Value {
403 if val, exists := keys[addrs.NoKey]; exists {
404 // If present, a "no key" value supersedes all other values,
405 // since they should be embedded inside it.
406 return val
407 }
408
409 // If we only have individual values then we need to construct
410 // either a list or a map, depending on what sort of keys we
411 // have.
412 haveInt := false
413 haveString := false
414 maxInt := 0
415
416 for k := range keys {
417 switch tk := k.(type) {
418 case addrs.IntKey:
419 haveInt = true
420 if int(tk) > maxInt {
421 maxInt = int(tk)
422 }
423 case addrs.StringKey:
424 haveString = true
425 }
426 }
427
428 // We should either have ints or strings and not both, but
429 // if we have both then we'll prefer strings and let the
430 // language interpreter try to convert the int keys into
431 // strings in a map.
432 switch {
433 case haveString:
434 vals := make(map[string]cty.Value)
435 for k, v := range keys {
436 switch tk := k.(type) {
437 case addrs.StringKey:
438 vals[string(tk)] = v
439 case addrs.IntKey:
440 sk := strconv.Itoa(int(tk))
441 vals[sk] = v
442 }
443 }
444 return cty.ObjectVal(vals)
445 case haveInt:
446 // We'll make a tuple that is long enough for our maximum
447 // index value. It doesn't matter if we end up shorter than
448 // the number of instances because if length(...) were
449 // being evaluated we would've got a NoKey reference and
450 // thus not ended up in this codepath at all.
451 vals := make([]cty.Value, maxInt+1)
452 for i := range vals {
453 if v, exists := keys[addrs.IntKey(i)]; exists {
454 vals[i] = v
455 } else {
456 // Just a placeholder, since nothing will access this anyway
457 vals[i] = cty.DynamicVal
458 }
459 }
460 return cty.TupleVal(vals)
461 default:
462 // Should never happen because there are no other key types.
463 log.Printf("[ERROR] strange makeInstanceObjects call with no supported key types")
464 return cty.EmptyObjectVal
465 }
466}
467
468func normalizeRefValue(val cty.Value, diags tfdiags.Diagnostics) (cty.Value, tfdiags.Diagnostics) {
469 if diags.HasErrors() {
470 // If there are errors then we will force an unknown result so that
471 // we can still evaluate and catch type errors but we'll avoid
472 // producing redundant re-statements of the same errors we've already
473 // dealt with here.
474 return cty.UnknownVal(val.Type()), diags
475 }
476 return val, diags
477}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go
new file mode 100644
index 0000000..6ce8aa9
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/cidr.go
@@ -0,0 +1,129 @@
1package funcs
2
3import (
4 "fmt"
5 "net"
6
7 "github.com/apparentlymart/go-cidr/cidr"
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/function"
10 "github.com/zclconf/go-cty/cty/gocty"
11)
12
13// CidrHostFunc contructs a function that calculates a full host IP address
14// within a given IP network address prefix.
15var CidrHostFunc = function.New(&function.Spec{
16 Params: []function.Parameter{
17 {
18 Name: "prefix",
19 Type: cty.String,
20 },
21 {
22 Name: "hostnum",
23 Type: cty.Number,
24 },
25 },
26 Type: function.StaticReturnType(cty.String),
27 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
28 var hostNum int
29 if err := gocty.FromCtyValue(args[1], &hostNum); err != nil {
30 return cty.UnknownVal(cty.String), err
31 }
32 _, network, err := net.ParseCIDR(args[0].AsString())
33 if err != nil {
34 return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
35 }
36
37 ip, err := cidr.Host(network, hostNum)
38 if err != nil {
39 return cty.UnknownVal(cty.String), err
40 }
41
42 return cty.StringVal(ip.String()), nil
43 },
44})
45
46// CidrNetmaskFunc contructs a function that converts an IPv4 address prefix given
47// in CIDR notation into a subnet mask address.
48var CidrNetmaskFunc = function.New(&function.Spec{
49 Params: []function.Parameter{
50 {
51 Name: "prefix",
52 Type: cty.String,
53 },
54 },
55 Type: function.StaticReturnType(cty.String),
56 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
57 _, network, err := net.ParseCIDR(args[0].AsString())
58 if err != nil {
59 return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
60 }
61
62 return cty.StringVal(net.IP(network.Mask).String()), nil
63 },
64})
65
66// CidrSubnetFunc contructs a function that calculates a subnet address within
67// a given IP network address prefix.
68var CidrSubnetFunc = function.New(&function.Spec{
69 Params: []function.Parameter{
70 {
71 Name: "prefix",
72 Type: cty.String,
73 },
74 {
75 Name: "newbits",
76 Type: cty.Number,
77 },
78 {
79 Name: "netnum",
80 Type: cty.Number,
81 },
82 },
83 Type: function.StaticReturnType(cty.String),
84 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
85 var newbits int
86 if err := gocty.FromCtyValue(args[1], &newbits); err != nil {
87 return cty.UnknownVal(cty.String), err
88 }
89 var netnum int
90 if err := gocty.FromCtyValue(args[2], &netnum); err != nil {
91 return cty.UnknownVal(cty.String), err
92 }
93
94 _, network, err := net.ParseCIDR(args[0].AsString())
95 if err != nil {
96 return cty.UnknownVal(cty.String), fmt.Errorf("invalid CIDR expression: %s", err)
97 }
98
99 // For portability with 32-bit systems where the subnet number
100 // will be a 32-bit int, we only allow extension of 32 bits in
101 // one call even if we're running on a 64-bit machine.
102 // (Of course, this is significant only for IPv6.)
103 if newbits > 32 {
104 return cty.UnknownVal(cty.String), fmt.Errorf("may not extend prefix by more than 32 bits")
105 }
106
107 newNetwork, err := cidr.Subnet(network, newbits, netnum)
108 if err != nil {
109 return cty.UnknownVal(cty.String), err
110 }
111
112 return cty.StringVal(newNetwork.String()), nil
113 },
114})
115
116// CidrHost calculates a full host IP address within a given IP network address prefix.
117func CidrHost(prefix, hostnum cty.Value) (cty.Value, error) {
118 return CidrHostFunc.Call([]cty.Value{prefix, hostnum})
119}
120
121// CidrNetmask converts an IPv4 address prefix given in CIDR notation into a subnet mask address.
122func CidrNetmask(prefix cty.Value) (cty.Value, error) {
123 return CidrNetmaskFunc.Call([]cty.Value{prefix})
124}
125
126// CidrSubnet calculates a subnet address within a given IP network address prefix.
127func CidrSubnet(prefix, newbits, netnum cty.Value) (cty.Value, error) {
128 return CidrSubnetFunc.Call([]cty.Value{prefix, newbits, netnum})
129}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go
new file mode 100644
index 0000000..71b7a84
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/collection.go
@@ -0,0 +1,1511 @@
1package funcs
2
3import (
4 "errors"
5 "fmt"
6 "sort"
7
8 "github.com/zclconf/go-cty/cty"
9 "github.com/zclconf/go-cty/cty/convert"
10 "github.com/zclconf/go-cty/cty/function"
11 "github.com/zclconf/go-cty/cty/function/stdlib"
12 "github.com/zclconf/go-cty/cty/gocty"
13)
14
15var ElementFunc = function.New(&function.Spec{
16 Params: []function.Parameter{
17 {
18 Name: "list",
19 Type: cty.DynamicPseudoType,
20 },
21 {
22 Name: "index",
23 Type: cty.Number,
24 },
25 },
26 Type: func(args []cty.Value) (cty.Type, error) {
27 list := args[0]
28 listTy := list.Type()
29 switch {
30 case listTy.IsListType():
31 return listTy.ElementType(), nil
32 case listTy.IsTupleType():
33 if !args[1].IsKnown() {
34 // If the index isn't known yet then we can't predict the
35 // result type since each tuple element can have its own type.
36 return cty.DynamicPseudoType, nil
37 }
38
39 etys := listTy.TupleElementTypes()
40 var index int
41 err := gocty.FromCtyValue(args[1], &index)
42 if err != nil {
43 // e.g. fractional number where whole number is required
44 return cty.DynamicPseudoType, fmt.Errorf("invalid index: %s", err)
45 }
46 if len(etys) == 0 {
47 return cty.DynamicPseudoType, errors.New("cannot use element function with an empty list")
48 }
49 index = index % len(etys)
50 return etys[index], nil
51 default:
52 return cty.DynamicPseudoType, fmt.Errorf("cannot read elements from %s", listTy.FriendlyName())
53 }
54 },
55 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
56 var index int
57 err := gocty.FromCtyValue(args[1], &index)
58 if err != nil {
59 // can't happen because we checked this in the Type function above
60 return cty.DynamicVal, fmt.Errorf("invalid index: %s", err)
61 }
62
63 if !args[0].IsKnown() {
64 return cty.UnknownVal(retType), nil
65 }
66
67 l := args[0].LengthInt()
68 if l == 0 {
69 return cty.DynamicVal, errors.New("cannot use element function with an empty list")
70 }
71 index = index % l
72
73 // We did all the necessary type checks in the type function above,
74 // so this is guaranteed not to fail.
75 return args[0].Index(cty.NumberIntVal(int64(index))), nil
76 },
77})
78
79var LengthFunc = function.New(&function.Spec{
80 Params: []function.Parameter{
81 {
82 Name: "value",
83 Type: cty.DynamicPseudoType,
84 AllowDynamicType: true,
85 AllowUnknown: true,
86 },
87 },
88 Type: func(args []cty.Value) (cty.Type, error) {
89 collTy := args[0].Type()
90 switch {
91 case collTy == cty.String || collTy.IsTupleType() || collTy.IsObjectType() || collTy.IsListType() || collTy.IsMapType() || collTy.IsSetType() || collTy == cty.DynamicPseudoType:
92 return cty.Number, nil
93 default:
94 return cty.Number, errors.New("argument must be a string, a collection type, or a structural type")
95 }
96 },
97 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
98 coll := args[0]
99 collTy := args[0].Type()
100 switch {
101 case collTy == cty.DynamicPseudoType:
102 return cty.UnknownVal(cty.Number), nil
103 case collTy.IsTupleType():
104 l := len(collTy.TupleElementTypes())
105 return cty.NumberIntVal(int64(l)), nil
106 case collTy.IsObjectType():
107 l := len(collTy.AttributeTypes())
108 return cty.NumberIntVal(int64(l)), nil
109 case collTy == cty.String:
110 // We'll delegate to the cty stdlib strlen function here, because
111 // it deals with all of the complexities of tokenizing unicode
112 // grapheme clusters.
113 return stdlib.Strlen(coll)
114 case collTy.IsListType() || collTy.IsSetType() || collTy.IsMapType():
115 return coll.Length(), nil
116 default:
117 // Should never happen, because of the checks in our Type func above
118 return cty.UnknownVal(cty.Number), errors.New("impossible value type for length(...)")
119 }
120 },
121})
122
123// CoalesceFunc constructs a function that takes any number of arguments and
124// returns the first one that isn't empty. This function was copied from go-cty
125// stdlib and modified so that it returns the first *non-empty* non-null element
126// from a sequence, instead of merely the first non-null.
127var CoalesceFunc = function.New(&function.Spec{
128 Params: []function.Parameter{},
129 VarParam: &function.Parameter{
130 Name: "vals",
131 Type: cty.DynamicPseudoType,
132 AllowUnknown: true,
133 AllowDynamicType: true,
134 AllowNull: true,
135 },
136 Type: func(args []cty.Value) (ret cty.Type, err error) {
137 argTypes := make([]cty.Type, len(args))
138 for i, val := range args {
139 argTypes[i] = val.Type()
140 }
141 retType, _ := convert.UnifyUnsafe(argTypes)
142 if retType == cty.NilType {
143 return cty.NilType, errors.New("all arguments must have the same type")
144 }
145 return retType, nil
146 },
147 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
148 for _, argVal := range args {
149 // We already know this will succeed because of the checks in our Type func above
150 argVal, _ = convert.Convert(argVal, retType)
151 if !argVal.IsKnown() {
152 return cty.UnknownVal(retType), nil
153 }
154 if argVal.IsNull() {
155 continue
156 }
157 if retType == cty.String && argVal.RawEquals(cty.StringVal("")) {
158 continue
159 }
160
161 return argVal, nil
162 }
163 return cty.NilVal, errors.New("no non-null, non-empty-string arguments")
164 },
165})
166
167// CoalesceListFunc constructs a function that takes any number of list arguments
168// and returns the first one that isn't empty.
169var CoalesceListFunc = function.New(&function.Spec{
170 Params: []function.Parameter{},
171 VarParam: &function.Parameter{
172 Name: "vals",
173 Type: cty.DynamicPseudoType,
174 AllowUnknown: true,
175 AllowDynamicType: true,
176 AllowNull: true,
177 },
178 Type: func(args []cty.Value) (ret cty.Type, err error) {
179 if len(args) == 0 {
180 return cty.NilType, errors.New("at least one argument is required")
181 }
182
183 argTypes := make([]cty.Type, len(args))
184
185 for i, arg := range args {
186 // if any argument is unknown, we can't be certain know which type we will return
187 if !arg.IsKnown() {
188 return cty.DynamicPseudoType, nil
189 }
190 ty := arg.Type()
191
192 if !ty.IsListType() && !ty.IsTupleType() {
193 return cty.NilType, errors.New("coalescelist arguments must be lists or tuples")
194 }
195
196 argTypes[i] = arg.Type()
197 }
198
199 last := argTypes[0]
200 // If there are mixed types, we have to return a dynamic type.
201 for _, next := range argTypes[1:] {
202 if !next.Equals(last) {
203 return cty.DynamicPseudoType, nil
204 }
205 }
206
207 return last, nil
208 },
209 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
210 for _, arg := range args {
211 if !arg.IsKnown() {
212 // If we run into an unknown list at some point, we can't
213 // predict the final result yet. (If there's a known, non-empty
214 // arg before this then we won't get here.)
215 return cty.UnknownVal(retType), nil
216 }
217
218 if arg.LengthInt() > 0 {
219 return arg, nil
220 }
221 }
222
223 return cty.NilVal, errors.New("no non-null arguments")
224 },
225})
226
227// CompactFunc constructs a function that takes a list of strings and returns a new list
228// with any empty string elements removed.
229var CompactFunc = function.New(&function.Spec{
230 Params: []function.Parameter{
231 {
232 Name: "list",
233 Type: cty.List(cty.String),
234 },
235 },
236 Type: function.StaticReturnType(cty.List(cty.String)),
237 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
238 listVal := args[0]
239 if !listVal.IsWhollyKnown() {
240 // If some of the element values aren't known yet then we
241 // can't yet return a compacted list
242 return cty.UnknownVal(retType), nil
243 }
244
245 var outputList []cty.Value
246
247 for it := listVal.ElementIterator(); it.Next(); {
248 _, v := it.Element()
249 if v.AsString() == "" {
250 continue
251 }
252 outputList = append(outputList, v)
253 }
254
255 if len(outputList) == 0 {
256 return cty.ListValEmpty(cty.String), nil
257 }
258
259 return cty.ListVal(outputList), nil
260 },
261})
262
263// ContainsFunc constructs a function that determines whether a given list or
264// set contains a given single value as one of its elements.
265var ContainsFunc = function.New(&function.Spec{
266 Params: []function.Parameter{
267 {
268 Name: "list",
269 Type: cty.DynamicPseudoType,
270 },
271 {
272 Name: "value",
273 Type: cty.DynamicPseudoType,
274 },
275 },
276 Type: function.StaticReturnType(cty.Bool),
277 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
278 arg := args[0]
279 ty := arg.Type()
280
281 if !ty.IsListType() && !ty.IsTupleType() && !ty.IsSetType() {
282 return cty.NilVal, errors.New("argument must be list, tuple, or set")
283 }
284
285 _, err = Index(cty.TupleVal(arg.AsValueSlice()), args[1])
286 if err != nil {
287 return cty.False, nil
288 }
289
290 return cty.True, nil
291 },
292})
293
294// IndexFunc constructs a function that finds the element index for a given value in a list.
295var IndexFunc = function.New(&function.Spec{
296 Params: []function.Parameter{
297 {
298 Name: "list",
299 Type: cty.DynamicPseudoType,
300 },
301 {
302 Name: "value",
303 Type: cty.DynamicPseudoType,
304 },
305 },
306 Type: function.StaticReturnType(cty.Number),
307 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
308 if !(args[0].Type().IsListType() || args[0].Type().IsTupleType()) {
309 return cty.NilVal, errors.New("argument must be a list or tuple")
310 }
311
312 if !args[0].IsKnown() {
313 return cty.UnknownVal(cty.Number), nil
314 }
315
316 if args[0].LengthInt() == 0 { // Easy path
317 return cty.NilVal, errors.New("cannot search an empty list")
318 }
319
320 for it := args[0].ElementIterator(); it.Next(); {
321 i, v := it.Element()
322 eq, err := stdlib.Equal(v, args[1])
323 if err != nil {
324 return cty.NilVal, err
325 }
326 if !eq.IsKnown() {
327 return cty.UnknownVal(cty.Number), nil
328 }
329 if eq.True() {
330 return i, nil
331 }
332 }
333 return cty.NilVal, errors.New("item not found")
334
335 },
336})
337
338// DistinctFunc constructs a function that takes a list and returns a new list
339// with any duplicate elements removed.
340var DistinctFunc = function.New(&function.Spec{
341 Params: []function.Parameter{
342 {
343 Name: "list",
344 Type: cty.List(cty.DynamicPseudoType),
345 },
346 },
347 Type: func(args []cty.Value) (cty.Type, error) {
348 return args[0].Type(), nil
349 },
350 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
351 listVal := args[0]
352
353 if !listVal.IsWhollyKnown() {
354 return cty.UnknownVal(retType), nil
355 }
356 var list []cty.Value
357
358 for it := listVal.ElementIterator(); it.Next(); {
359 _, v := it.Element()
360 list, err = appendIfMissing(list, v)
361 if err != nil {
362 return cty.NilVal, err
363 }
364 }
365
366 return cty.ListVal(list), nil
367 },
368})
369
370// ChunklistFunc constructs a function that splits a single list into fixed-size chunks,
371// returning a list of lists.
372var ChunklistFunc = function.New(&function.Spec{
373 Params: []function.Parameter{
374 {
375 Name: "list",
376 Type: cty.List(cty.DynamicPseudoType),
377 },
378 {
379 Name: "size",
380 Type: cty.Number,
381 },
382 },
383 Type: func(args []cty.Value) (cty.Type, error) {
384 return cty.List(args[0].Type()), nil
385 },
386 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
387 listVal := args[0]
388 if !listVal.IsKnown() {
389 return cty.UnknownVal(retType), nil
390 }
391
392 var size int
393 err = gocty.FromCtyValue(args[1], &size)
394 if err != nil {
395 return cty.NilVal, fmt.Errorf("invalid index: %s", err)
396 }
397
398 if size < 0 {
399 return cty.NilVal, errors.New("the size argument must be positive")
400 }
401
402 output := make([]cty.Value, 0)
403
404 // if size is 0, returns a list made of the initial list
405 if size == 0 {
406 output = append(output, listVal)
407 return cty.ListVal(output), nil
408 }
409
410 chunk := make([]cty.Value, 0)
411
412 l := args[0].LengthInt()
413 i := 0
414
415 for it := listVal.ElementIterator(); it.Next(); {
416 _, v := it.Element()
417 chunk = append(chunk, v)
418
419 // Chunk when index isn't 0, or when reaching the values's length
420 if (i+1)%size == 0 || (i+1) == l {
421 output = append(output, cty.ListVal(chunk))
422 chunk = make([]cty.Value, 0)
423 }
424 i++
425 }
426
427 return cty.ListVal(output), nil
428 },
429})
430
// FlattenFunc constructs a function that takes a list and replaces any elements
// that are lists with a flattened sequence of the list contents.
var FlattenFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.DynamicPseudoType,
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		// Until the input is wholly known we cannot tell how many elements
		// the flattened result will have, so the type is also unknown.
		if !args[0].IsWhollyKnown() {
			return cty.DynamicPseudoType, nil
		}

		argTy := args[0].Type()
		if !argTy.IsListType() && !argTy.IsSetType() && !argTy.IsTupleType() {
			return cty.NilType, errors.New("can only flatten lists, sets and tuples")
		}

		// Flatten eagerly here so we can compute the exact result type:
		// a tuple with one element type per flattened value.
		retVal, known := flattener(args[0])
		if !known {
			return cty.DynamicPseudoType, nil
		}

		tys := make([]cty.Type, len(retVal))
		for i, ty := range retVal {
			tys[i] = ty.Type()
		}
		return cty.Tuple(tys), nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		inputList := args[0]
		// An empty input flattens to an empty tuple regardless of nesting.
		if inputList.LengthInt() == 0 {
			return cty.EmptyTupleVal, nil
		}

		// flattener reports !known when a nested collection is unknown,
		// in which case the whole result must be unknown.
		out, known := flattener(inputList)
		if !known {
			return cty.UnknownVal(retType), nil
		}

		return cty.TupleVal(out), nil
	},
})
475
476// Flatten until it's not a cty.List, and return whether the value is known.
477// We can flatten lists with unknown values, as long as they are not
478// lists themselves.
479func flattener(flattenList cty.Value) ([]cty.Value, bool) {
480 out := make([]cty.Value, 0)
481 for it := flattenList.ElementIterator(); it.Next(); {
482 _, val := it.Element()
483 if val.Type().IsListType() || val.Type().IsSetType() || val.Type().IsTupleType() {
484 if !val.IsKnown() {
485 return out, false
486 }
487
488 res, known := flattener(val)
489 if !known {
490 return res, known
491 }
492 out = append(out, res...)
493 } else {
494 out = append(out, val)
495 }
496 }
497 return out, true
498}
499
// KeysFunc constructs a function that takes a map and returns a sorted list of the map keys.
var KeysFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name:         "inputMap",
			Type:         cty.DynamicPseudoType,
			AllowUnknown: true,
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		ty := args[0].Type()
		switch {
		case ty.IsMapType():
			return cty.List(cty.String), nil
		case ty.IsObjectType():
			atys := ty.AttributeTypes()
			if len(atys) == 0 {
				return cty.EmptyTuple, nil
			}
			// All of our result elements will be strings, and atys just
			// decides how many there are.
			etys := make([]cty.Type, len(atys))
			for i := range etys {
				etys[i] = cty.String
			}
			return cty.Tuple(etys), nil
		default:
			return cty.DynamicPseudoType, function.NewArgErrorf(0, "must have map or object type")
		}
	},
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		m := args[0]
		var keys []cty.Value

		switch {
		case m.Type().IsObjectType():
			// In this case we allow unknown values so we must work only with
			// the attribute _types_, not with the value itself. The attribute
			// names are part of the type, so they are known even when the
			// value is not.
			var names []string
			for name := range m.Type().AttributeTypes() {
				names = append(names, name)
			}
			sort.Strings(names) // same ordering guaranteed by cty's ElementIterator
			if len(names) == 0 {
				return cty.EmptyTupleVal, nil
			}
			keys = make([]cty.Value, len(names))
			for i, name := range names {
				keys[i] = cty.StringVal(name)
			}
			return cty.TupleVal(keys), nil
		default:
			// Map values don't carry their keys in the type, so an unknown
			// map yields an unknown key list.
			if !m.IsKnown() {
				return cty.UnknownVal(retType), nil
			}

			// cty guarantees that ElementIterator will iterate in lexicographical
			// order by key.
			for it := args[0].ElementIterator(); it.Next(); {
				k, _ := it.Element()
				keys = append(keys, k)
			}
			// cty.ListVal panics on an empty slice, so use ListValEmpty here.
			if len(keys) == 0 {
				return cty.ListValEmpty(cty.String), nil
			}
			return cty.ListVal(keys), nil
		}
	},
})
569
570// ListFunc constructs a function that takes an arbitrary number of arguments
571// and returns a list containing those values in the same order.
572//
573// This function is deprecated in Terraform v0.12
574var ListFunc = function.New(&function.Spec{
575 Params: []function.Parameter{},
576 VarParam: &function.Parameter{
577 Name: "vals",
578 Type: cty.DynamicPseudoType,
579 AllowUnknown: true,
580 AllowDynamicType: true,
581 AllowNull: true,
582 },
583 Type: func(args []cty.Value) (ret cty.Type, err error) {
584 if len(args) == 0 {
585 return cty.NilType, errors.New("at least one argument is required")
586 }
587
588 argTypes := make([]cty.Type, len(args))
589
590 for i, arg := range args {
591 argTypes[i] = arg.Type()
592 }
593
594 retType, _ := convert.UnifyUnsafe(argTypes)
595 if retType == cty.NilType {
596 return cty.NilType, errors.New("all arguments must have the same type")
597 }
598
599 return cty.List(retType), nil
600 },
601 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
602 newList := make([]cty.Value, 0, len(args))
603
604 for _, arg := range args {
605 // We already know this will succeed because of the checks in our Type func above
606 arg, _ = convert.Convert(arg, retType.ElementType())
607 newList = append(newList, arg)
608 }
609
610 return cty.ListVal(newList), nil
611 },
612})
613
// LookupFunc constructs a function that performs dynamic lookups of map types.
var LookupFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "inputMap",
			Type: cty.DynamicPseudoType,
		},
		{
			Name: "key",
			Type: cty.String,
		},
	},
	// Optional third argument: a default value returned when the key
	// is not present in the map/object.
	VarParam: &function.Parameter{
		Name:             "default",
		Type:             cty.DynamicPseudoType,
		AllowUnknown:     true,
		AllowDynamicType: true,
		AllowNull:        true,
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		if len(args) < 1 || len(args) > 3 {
			return cty.NilType, fmt.Errorf("lookup() takes two or three arguments, got %d", len(args))
		}

		ty := args[0].Type()

		switch {
		case ty.IsObjectType():
			// For objects the result type depends on which attribute is
			// selected, so the key must be known to predict it.
			if !args[1].IsKnown() {
				return cty.DynamicPseudoType, nil
			}

			key := args[1].AsString()
			if ty.HasAttribute(key) {
				return args[0].GetAttr(key).Type(), nil
			} else if len(args) == 3 {
				// if the key isn't found but a default is provided,
				// return the default type
				return args[2].Type(), nil
			}
			return cty.DynamicPseudoType, function.NewArgErrorf(0, "the given object has no attribute %q", key)
		case ty.IsMapType():
			// Maps are homogeneous, so the element type is the result type.
			return ty.ElementType(), nil
		default:
			return cty.NilType, function.NewArgErrorf(0, "lookup() requires a map as the first argument")
		}
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		var defaultVal cty.Value
		defaultValueSet := false

		if len(args) == 3 {
			defaultVal = args[2]
			defaultValueSet = true
		}

		mapVar := args[0]
		lookupKey := args[1].AsString()

		if !mapVar.IsWhollyKnown() {
			return cty.UnknownVal(retType), nil
		}

		if mapVar.Type().IsObjectType() {
			if mapVar.Type().HasAttribute(lookupKey) {
				return mapVar.GetAttr(lookupKey), nil
			}
		} else if mapVar.HasIndex(cty.StringVal(lookupKey)) == cty.True {
			v := mapVar.Index(cty.StringVal(lookupKey))
			if ty := v.Type(); !ty.Equals(cty.NilType) {
				switch {
				case ty.Equals(cty.String):
					return cty.StringVal(v.AsString()), nil
				case ty.Equals(cty.Number):
					return cty.NumberVal(v.AsBigFloat()), nil
				default:
					// NOTE(review): only string and number elements are
					// accepted here; presumably legacy behavior carried
					// forward from pre-0.12 Terraform — confirm upstream.
					return cty.NilVal, errors.New("lookup() can only be used with flat lists")
				}
			}
		}

		// Key not found: fall back to the default when given, converting
		// it to the declared return type.
		if defaultValueSet {
			defaultVal, err = convert.Convert(defaultVal, retType)
			if err != nil {
				return cty.NilVal, err
			}
			return defaultVal, nil
		}

		return cty.UnknownVal(cty.DynamicPseudoType), fmt.Errorf(
			"lookup failed to find '%s'", lookupKey)
	},
})
707
708// MapFunc constructs a function that takes an even number of arguments and
709// returns a map whose elements are constructed from consecutive pairs of arguments.
710//
711// This function is deprecated in Terraform v0.12
712var MapFunc = function.New(&function.Spec{
713 Params: []function.Parameter{},
714 VarParam: &function.Parameter{
715 Name: "vals",
716 Type: cty.DynamicPseudoType,
717 AllowUnknown: true,
718 AllowDynamicType: true,
719 AllowNull: true,
720 },
721 Type: func(args []cty.Value) (ret cty.Type, err error) {
722 if len(args) < 2 || len(args)%2 != 0 {
723 return cty.NilType, fmt.Errorf("map requires an even number of two or more arguments, got %d", len(args))
724 }
725
726 argTypes := make([]cty.Type, len(args)/2)
727 index := 0
728
729 for i := 0; i < len(args); i += 2 {
730 argTypes[index] = args[i+1].Type()
731 index++
732 }
733
734 valType, _ := convert.UnifyUnsafe(argTypes)
735 if valType == cty.NilType {
736 return cty.NilType, errors.New("all arguments must have the same type")
737 }
738
739 return cty.Map(valType), nil
740 },
741 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
742 for _, arg := range args {
743 if !arg.IsWhollyKnown() {
744 return cty.UnknownVal(retType), nil
745 }
746 }
747
748 outputMap := make(map[string]cty.Value)
749
750 for i := 0; i < len(args); i += 2 {
751
752 key := args[i].AsString()
753
754 err := gocty.FromCtyValue(args[i], &key)
755 if err != nil {
756 return cty.NilVal, err
757 }
758
759 val := args[i+1]
760
761 var variable cty.Value
762 err = gocty.FromCtyValue(val, &variable)
763 if err != nil {
764 return cty.NilVal, err
765 }
766
767 // We already know this will succeed because of the checks in our Type func above
768 variable, _ = convert.Convert(variable, retType.ElementType())
769
770 // Check for duplicate keys
771 if _, ok := outputMap[key]; ok {
772 return cty.NilVal, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key)
773 }
774 outputMap[key] = variable
775 }
776
777 return cty.MapVal(outputMap), nil
778 },
779})
780
// MatchkeysFunc constructs a function that constructs a new list by taking a
// subset of elements from one list whose indexes match the corresponding
// indexes of values in another list.
var MatchkeysFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "values",
			Type: cty.List(cty.DynamicPseudoType),
		},
		{
			Name: "keys",
			Type: cty.List(cty.DynamicPseudoType),
		},
		{
			Name: "searchset",
			Type: cty.List(cty.DynamicPseudoType),
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		// keys and searchset must be comparable, so they must share a type;
		// the result has the same type as values.
		if !args[1].Type().Equals(args[2].Type()) {
			return cty.NilType, errors.New("lists must be of the same type")
		}

		return args[0].Type(), nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		if !args[0].IsKnown() {
			return cty.UnknownVal(cty.List(retType.ElementType())), nil
		}

		// values and keys are matched positionally, so their lengths must agree.
		if args[0].LengthInt() != args[1].LengthInt() {
			return cty.ListValEmpty(retType.ElementType()), errors.New("length of keys and values should be equal")
		}

		output := make([]cty.Value, 0)

		values := args[0]
		keys := args[1]
		searchset := args[2]

		// if searchset is empty, return an empty list.
		if searchset.LengthInt() == 0 {
			return cty.ListValEmpty(retType.ElementType()), nil
		}

		if !values.IsWhollyKnown() || !keys.IsWhollyKnown() {
			return cty.UnknownVal(retType), nil
		}

		// i tracks the position in keys so the matching element can be
		// pulled from values by index.
		i := 0
		for it := keys.ElementIterator(); it.Next(); {
			_, key := it.Element()
			for iter := searchset.ElementIterator(); iter.Next(); {
				_, search := iter.Element()
				eq, err := stdlib.Equal(key, search)
				if err != nil {
					return cty.NilVal, err
				}
				if !eq.IsKnown() {
					return cty.ListValEmpty(retType.ElementType()), nil
				}
				if eq.True() {
					// Found a match: take the value at the same index,
					// and stop scanning searchset for this key.
					v := values.Index(cty.NumberIntVal(int64(i)))
					output = append(output, v)
					break
				}
			}
			i++
		}

		// if we haven't matched any key, then output is an empty list.
		if len(output) == 0 {
			return cty.ListValEmpty(retType.ElementType()), nil
		}
		return cty.ListVal(output), nil
	},
})
858
859// MergeFunc constructs a function that takes an arbitrary number of maps and
860// returns a single map that contains a merged set of elements from all of the maps.
861//
862// If more than one given map defines the same key then the one that is later in
863// the argument sequence takes precedence.
864var MergeFunc = function.New(&function.Spec{
865 Params: []function.Parameter{},
866 VarParam: &function.Parameter{
867 Name: "maps",
868 Type: cty.DynamicPseudoType,
869 AllowDynamicType: true,
870 AllowNull: true,
871 },
872 Type: function.StaticReturnType(cty.DynamicPseudoType),
873 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
874 outputMap := make(map[string]cty.Value)
875
876 for _, arg := range args {
877 if !arg.IsWhollyKnown() {
878 return cty.UnknownVal(retType), nil
879 }
880 if !arg.Type().IsObjectType() && !arg.Type().IsMapType() {
881 return cty.NilVal, fmt.Errorf("arguments must be maps or objects, got %#v", arg.Type().FriendlyName())
882 }
883 for it := arg.ElementIterator(); it.Next(); {
884 k, v := it.Element()
885 outputMap[k.AsString()] = v
886 }
887 }
888 return cty.ObjectVal(outputMap), nil
889 },
890})
891
892// ReverseFunc takes a sequence and produces a new sequence of the same length
893// with all of the same elements as the given sequence but in reverse order.
894var ReverseFunc = function.New(&function.Spec{
895 Params: []function.Parameter{
896 {
897 Name: "list",
898 Type: cty.DynamicPseudoType,
899 },
900 },
901 Type: func(args []cty.Value) (cty.Type, error) {
902 argTy := args[0].Type()
903 switch {
904 case argTy.IsTupleType():
905 argTys := argTy.TupleElementTypes()
906 retTys := make([]cty.Type, len(argTys))
907 for i, ty := range argTys {
908 retTys[len(retTys)-i-1] = ty
909 }
910 return cty.Tuple(retTys), nil
911 case argTy.IsListType(), argTy.IsSetType(): // We accept sets here to mimic the usual behavior of auto-converting to list
912 return cty.List(argTy.ElementType()), nil
913 default:
914 return cty.NilType, function.NewArgErrorf(0, "can only reverse list or tuple values, not %s", argTy.FriendlyName())
915 }
916 },
917 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
918 in := args[0].AsValueSlice()
919 outVals := make([]cty.Value, len(in))
920 for i, v := range in {
921 outVals[len(outVals)-i-1] = v
922 }
923 switch {
924 case retType.IsTupleType():
925 return cty.TupleVal(outVals), nil
926 default:
927 if len(outVals) == 0 {
928 return cty.ListValEmpty(retType.ElementType()), nil
929 }
930 return cty.ListVal(outVals), nil
931 }
932 },
933})
934
// SetProductFunc calculates the cartesian product of two or more sets or
// sequences. If the arguments are all lists then the result is a list of tuples,
// preserving the ordering of all of the input lists. Otherwise the result is a
// set of tuples.
var SetProductFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	VarParam: &function.Parameter{
		Name: "sets",
		Type: cty.DynamicPseudoType,
	},
	Type: func(args []cty.Value) (retType cty.Type, err error) {
		if len(args) < 2 {
			return cty.NilType, errors.New("at least two arguments are required")
		}

		// listCount counts how many args are ordered (list/tuple); only if
		// all are ordered does the result preserve ordering as a list.
		listCount := 0
		elemTys := make([]cty.Type, len(args))
		for i, arg := range args {
			aty := arg.Type()
			switch {
			case aty.IsSetType():
				elemTys[i] = aty.ElementType()
			case aty.IsListType():
				elemTys[i] = aty.ElementType()
				listCount++
			case aty.IsTupleType():
				// We can accept a tuple type only if there's some common type
				// that all of its elements can be converted to.
				allEtys := aty.TupleElementTypes()
				if len(allEtys) == 0 {
					elemTys[i] = cty.DynamicPseudoType
					listCount++
					break
				}
				ety, _ := convert.UnifyUnsafe(allEtys)
				if ety == cty.NilType {
					return cty.NilType, function.NewArgErrorf(i, "all elements must be of the same type")
				}
				elemTys[i] = ety
				listCount++
			default:
				return cty.NilType, function.NewArgErrorf(i, "a set or a list is required")
			}
		}

		if listCount == len(args) {
			return cty.List(cty.Tuple(elemTys)), nil
		}
		return cty.Set(cty.Tuple(elemTys)), nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		ety := retType.ElementType()

		// The number of product rows is the product of all input lengths.
		total := 1
		for _, arg := range args {
			// Because of our type checking function, we are guaranteed that
			// all of the arguments are known, non-null values of types that
			// support LengthInt.
			total *= arg.LengthInt()
		}

		if total == 0 {
			// If any of the arguments was an empty collection then our result
			// is also an empty collection, which we'll short-circuit here.
			if retType.IsListType() {
				return cty.ListValEmpty(ety), nil
			}
			return cty.SetValEmpty(ety), nil
		}

		subEtys := ety.TupleElementTypes()
		product := make([][]cty.Value, total)

		// b is a single contiguous backing array; each product row is a
		// len(args)-wide window into it, carved out below.
		b := make([]cty.Value, total*len(args))
		// n is an odometer: n[j] is the current element index within args[j].
		n := make([]int, len(args))
		s := 0
		argVals := make([][]cty.Value, len(args))
		for i, arg := range args {
			argVals[i] = arg.AsValueSlice()
		}

		for i := range product {
			// Carve the next row out of the backing array.
			e := s + len(args)
			pi := b[s:e]
			product[i] = pi
			s = e

			// Fill the row from the odometer's current position.
			// (Note: the loop variable n here deliberately shadows the
			// odometer slice within the loop body.)
			for j, n := range n {
				val := argVals[j][n]
				ty := subEtys[j]
				if !val.Type().Equals(ty) {
					var err error
					val, err = convert.Convert(val, ty)
					if err != nil {
						// Should never happen since we checked this in our
						// type-checking function.
						return cty.NilVal, fmt.Errorf("failed to convert argVals[%d][%d] to %s; this is a bug in Terraform", j, n, ty.FriendlyName())
					}
				}
				pi[j] = val
			}

			// Advance the odometer: increment from the rightmost position,
			// rolling over (and carrying left) when a digit reaches the end.
			for j := len(n) - 1; j >= 0; j-- {
				n[j]++
				if n[j] < len(argVals[j]) {
					break
				}
				n[j] = 0
			}
		}

		productVals := make([]cty.Value, total)
		for i, vals := range product {
			productVals[i] = cty.TupleVal(vals)
		}

		if retType.IsListType() {
			return cty.ListVal(productVals), nil
		}
		return cty.SetVal(productVals), nil
	},
})
1057
// SliceFunc constructs a function that extracts some consecutive elements
// from within a list.
var SliceFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "list",
			Type: cty.DynamicPseudoType,
		},
		{
			Name: "start_index",
			Type: cty.Number,
		},
		{
			Name: "end_index",
			Type: cty.Number,
		},
	},
	Type: func(args []cty.Value) (cty.Type, error) {
		arg := args[0]
		argTy := arg.Type()

		if argTy.IsSetType() {
			return cty.NilType, function.NewArgErrorf(0, "cannot slice a set, because its elements do not have indices; use the tolist function to force conversion to list if the ordering of the result is not important")
		}
		if !argTy.IsListType() && !argTy.IsTupleType() {
			return cty.NilType, function.NewArgErrorf(0, "must be a list or tuple value")
		}

		// Validate the indices eagerly so errors surface at type-check time.
		startIndex, endIndex, idxsKnown, err := sliceIndexes(args)
		if err != nil {
			return cty.NilType, err
		}

		// A slice of a list is always the same list type.
		if argTy.IsListType() {
			return argTy, nil
		}

		if !idxsKnown {
			// If we don't know our start/end indices then we can't predict
			// the result type if we're planning to return a tuple.
			return cty.DynamicPseudoType, nil
		}
		// For a tuple the result type is the corresponding sub-range of
		// the element types.
		return cty.Tuple(argTy.TupleElementTypes()[startIndex:endIndex]), nil
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		inputList := args[0]

		// Dynamic return type means the indices were unknown at check time.
		if retType == cty.DynamicPseudoType {
			return cty.DynamicVal, nil
		}

		// we ignore idxsKnown return value here because the indices are always
		// known here, or else the call would've short-circuited.
		startIndex, endIndex, _, err := sliceIndexes(args)
		if err != nil {
			return cty.NilVal, err
		}

		// Empty result: cty.ListVal/TupleVal can't take an empty slice.
		if endIndex-startIndex == 0 {
			if retType.IsTupleType() {
				return cty.EmptyTupleVal, nil
			}
			return cty.ListValEmpty(retType.ElementType()), nil
		}

		outputList := inputList.AsValueSlice()[startIndex:endIndex]

		if retType.IsTupleType() {
			return cty.TupleVal(outputList), nil
		}

		return cty.ListVal(outputList), nil
	},
})
1132
1133func sliceIndexes(args []cty.Value) (int, int, bool, error) {
1134 var startIndex, endIndex, length int
1135 var startKnown, endKnown, lengthKnown bool
1136
1137 if args[0].Type().IsTupleType() || args[0].IsKnown() { // if it's a tuple then we always know the length by the type, but lists must be known
1138 length = args[0].LengthInt()
1139 lengthKnown = true
1140 }
1141
1142 if args[1].IsKnown() {
1143 if err := gocty.FromCtyValue(args[1], &startIndex); err != nil {
1144 return 0, 0, false, function.NewArgErrorf(1, "invalid start index: %s", err)
1145 }
1146 if startIndex < 0 {
1147 return 0, 0, false, function.NewArgErrorf(1, "start index must not be less than zero")
1148 }
1149 if lengthKnown && startIndex > length {
1150 return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than the length of the list")
1151 }
1152 startKnown = true
1153 }
1154 if args[2].IsKnown() {
1155 if err := gocty.FromCtyValue(args[2], &endIndex); err != nil {
1156 return 0, 0, false, function.NewArgErrorf(2, "invalid end index: %s", err)
1157 }
1158 if endIndex < 0 {
1159 return 0, 0, false, function.NewArgErrorf(2, "end index must not be less than zero")
1160 }
1161 if lengthKnown && endIndex > length {
1162 return 0, 0, false, function.NewArgErrorf(2, "end index must not be greater than the length of the list")
1163 }
1164 endKnown = true
1165 }
1166 if startKnown && endKnown {
1167 if startIndex > endIndex {
1168 return 0, 0, false, function.NewArgErrorf(1, "start index must not be greater than end index")
1169 }
1170 }
1171 return startIndex, endIndex, startKnown && endKnown, nil
1172}
1173
// TransposeFunc constructs a function that takes a map of lists of strings and
// swaps the keys and values to produce a new map of lists of strings.
1177var TransposeFunc = function.New(&function.Spec{
1178 Params: []function.Parameter{
1179 {
1180 Name: "values",
1181 Type: cty.Map(cty.List(cty.String)),
1182 },
1183 },
1184 Type: function.StaticReturnType(cty.Map(cty.List(cty.String))),
1185 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
1186 inputMap := args[0]
1187 if !inputMap.IsWhollyKnown() {
1188 return cty.UnknownVal(retType), nil
1189 }
1190
1191 outputMap := make(map[string]cty.Value)
1192 tmpMap := make(map[string][]string)
1193
1194 for it := inputMap.ElementIterator(); it.Next(); {
1195 inKey, inVal := it.Element()
1196 for iter := inVal.ElementIterator(); iter.Next(); {
1197 _, val := iter.Element()
1198 if !val.Type().Equals(cty.String) {
1199 return cty.MapValEmpty(cty.List(cty.String)), errors.New("input must be a map of lists of strings")
1200 }
1201
1202 outKey := val.AsString()
1203 if _, ok := tmpMap[outKey]; !ok {
1204 tmpMap[outKey] = make([]string, 0)
1205 }
1206 outVal := tmpMap[outKey]
1207 outVal = append(outVal, inKey.AsString())
1208 sort.Strings(outVal)
1209 tmpMap[outKey] = outVal
1210 }
1211 }
1212
1213 for outKey, outVal := range tmpMap {
1214 values := make([]cty.Value, 0)
1215 for _, v := range outVal {
1216 values = append(values, cty.StringVal(v))
1217 }
1218 outputMap[outKey] = cty.ListVal(values)
1219 }
1220
1221 return cty.MapVal(outputMap), nil
1222 },
1223})
1224
// ValuesFunc constructs a function that returns a list of the map values,
// in the order of the sorted keys.
var ValuesFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "values",
			Type: cty.DynamicPseudoType,
		},
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		ty := args[0].Type()
		if ty.IsMapType() {
			return cty.List(ty.ElementType()), nil
		} else if ty.IsObjectType() {
			// The result is a tuple type with all of the same types as our
			// object type's attributes, sorted in lexicographical order by the
			// keys. (This matches the sort order guaranteed by ElementIterator
			// on a cty object value.)
			atys := ty.AttributeTypes()
			if len(atys) == 0 {
				return cty.EmptyTuple, nil
			}
			attrNames := make([]string, 0, len(atys))
			for name := range atys {
				attrNames = append(attrNames, name)
			}
			sort.Strings(attrNames)

			tys := make([]cty.Type, len(attrNames))
			for i, name := range attrNames {
				tys[i] = atys[name]
			}
			return cty.Tuple(tys), nil
		}
		return cty.NilType, errors.New("values() requires a map as the first argument")
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		mapVar := args[0]

		// We can just iterate the map/object value here because cty guarantees
		// that these types always iterate in key lexicographical order.
		var values []cty.Value
		for it := mapVar.ElementIterator(); it.Next(); {
			_, val := it.Element()
			values = append(values, val)
		}

		if retType.IsTupleType() {
			return cty.TupleVal(values), nil
		}
		// cty.ListVal panics on an empty slice, so use ListValEmpty here.
		if len(values) == 0 {
			return cty.ListValEmpty(retType.ElementType()), nil
		}
		return cty.ListVal(values), nil
	},
})
1281
// ZipmapFunc constructs a function that constructs a map from a list of keys
// and a corresponding list of values.
var ZipmapFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "keys",
			Type: cty.List(cty.String),
		},
		{
			Name: "values",
			Type: cty.DynamicPseudoType,
		},
	},
	Type: func(args []cty.Value) (ret cty.Type, err error) {
		keys := args[0]
		values := args[1]
		valuesTy := values.Type()

		switch {
		case valuesTy.IsListType():
			// Homogeneous values produce a homogeneous map.
			return cty.Map(values.Type().ElementType()), nil
		case valuesTy.IsTupleType():
			if !keys.IsWhollyKnown() {
				// Since zipmap with a tuple produces an object, we need to know
				// all of the key names before we can predict our result type.
				return cty.DynamicPseudoType, nil
			}

			keysRaw := keys.AsValueSlice()
			valueTypesRaw := valuesTy.TupleElementTypes()
			if len(keysRaw) != len(valueTypesRaw) {
				return cty.NilType, fmt.Errorf("number of keys (%d) does not match number of values (%d)", len(keysRaw), len(valueTypesRaw))
			}
			// Build an object type pairing each key name with the tuple
			// element type at the same position.
			atys := make(map[string]cty.Type, len(valueTypesRaw))
			for i, keyVal := range keysRaw {
				if keyVal.IsNull() {
					return cty.NilType, fmt.Errorf("keys list has null value at index %d", i)
				}
				key := keyVal.AsString()
				atys[key] = valueTypesRaw[i]
			}
			return cty.Object(atys), nil

		default:
			return cty.NilType, errors.New("values argument must be a list or tuple value")
		}
	},
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		keys := args[0]
		values := args[1]

		if !keys.IsWhollyKnown() {
			// Unknown map keys and object attributes are not supported, so
			// our entire result must be unknown in this case.
			return cty.UnknownVal(retType), nil
		}

		// both keys and values are guaranteed to be shallowly-known here,
		// because our declared params above don't allow unknown or null values.
		if keys.LengthInt() != values.LengthInt() {
			return cty.NilVal, fmt.Errorf("number of keys (%d) does not match number of values (%d)", keys.LengthInt(), values.LengthInt())
		}

		output := make(map[string]cty.Value)

		// Pair keys and values positionally; i tracks the current index
		// into values.
		i := 0
		for it := keys.ElementIterator(); it.Next(); {
			_, v := it.Element()
			val := values.Index(cty.NumberIntVal(int64(i)))
			output[v.AsString()] = val
			i++
		}

		switch {
		case retType.IsMapType():
			// cty.MapVal panics on an empty map, so use MapValEmpty here.
			if len(output) == 0 {
				return cty.MapValEmpty(retType.ElementType()), nil
			}
			return cty.MapVal(output), nil
		case retType.IsObjectType():
			return cty.ObjectVal(output), nil
		default:
			// Should never happen because the type-check function should've
			// caught any other case.
			return cty.NilVal, fmt.Errorf("internally selected incorrect result type %s (this is a bug)", retType.FriendlyName())
		}
	},
})
1370
1371// helper function to add an element to a list, if it does not already exist
1372func appendIfMissing(slice []cty.Value, element cty.Value) ([]cty.Value, error) {
1373 for _, ele := range slice {
1374 eq, err := stdlib.Equal(ele, element)
1375 if err != nil {
1376 return slice, err
1377 }
1378 if eq.True() {
1379 return slice, nil
1380 }
1381 }
1382 return append(slice, element), nil
1383}
1384
// Element returns a single element from a given list at the given index. If
// index is greater than the length of the list then it is wrapped modulo
// the list length.
//
// It is a convenience wrapper around ElementFunc.Call.
func Element(list, index cty.Value) (cty.Value, error) {
	return ElementFunc.Call([]cty.Value{list, index})
}
1391
// Length returns the number of elements in the given collection or number of
// Unicode characters in the given string.
//
// It is a convenience wrapper around LengthFunc.Call.
func Length(collection cty.Value) (cty.Value, error) {
	return LengthFunc.Call([]cty.Value{collection})
}
1397
// Coalesce takes any number of arguments and returns the first one that isn't empty.
//
// It is a convenience wrapper around CoalesceFunc.Call.
func Coalesce(args ...cty.Value) (cty.Value, error) {
	return CoalesceFunc.Call(args)
}
1402
// CoalesceList takes any number of list arguments and returns the first one that isn't empty.
//
// It is a convenience wrapper around CoalesceListFunc.Call.
func CoalesceList(args ...cty.Value) (cty.Value, error) {
	return CoalesceListFunc.Call(args)
}
1407
// Compact takes a list of strings and returns a new list
// with any empty string elements removed.
//
// It is a convenience wrapper around CompactFunc.Call.
func Compact(list cty.Value) (cty.Value, error) {
	return CompactFunc.Call([]cty.Value{list})
}
1413
1414// Contains determines whether a given list contains a given single value
1415// as one of its elements.
1416func Contains(list, value cty.Value) (cty.Value, error) {
1417 return ContainsFunc.Call([]cty.Value{list, value})
1418}
1419
1420// Index finds the element index for a given value in a list.
1421func Index(list, value cty.Value) (cty.Value, error) {
1422 return IndexFunc.Call([]cty.Value{list, value})
1423}
1424
1425// Distinct takes a list and returns a new list with any duplicate elements removed.
1426func Distinct(list cty.Value) (cty.Value, error) {
1427 return DistinctFunc.Call([]cty.Value{list})
1428}
1429
1430// Chunklist splits a single list into fixed-size chunks, returning a list of lists.
1431func Chunklist(list, size cty.Value) (cty.Value, error) {
1432 return ChunklistFunc.Call([]cty.Value{list, size})
1433}
1434
1435// Flatten takes a list and replaces any elements that are lists with a flattened
1436// sequence of the list contents.
1437func Flatten(list cty.Value) (cty.Value, error) {
1438 return FlattenFunc.Call([]cty.Value{list})
1439}
1440
1441// Keys takes a map and returns a sorted list of the map keys.
1442func Keys(inputMap cty.Value) (cty.Value, error) {
1443 return KeysFunc.Call([]cty.Value{inputMap})
1444}
1445
// List takes any number of list arguments and returns a list containing those
// values in the same order. It is a thin convenience wrapper around ListFunc.
func List(args ...cty.Value) (cty.Value, error) {
	return ListFunc.Call(args)
}
1451
// Lookup performs a dynamic lookup into a map.
// There are two required arguments, map and key, plus an optional default,
// which is a value to return if no key is found in map.
// It is a thin convenience wrapper around LookupFunc.
func Lookup(args ...cty.Value) (cty.Value, error) {
	return LookupFunc.Call(args)
}
1458
// Map takes an even number of arguments and returns a map whose elements are constructed
// from consecutive pairs of arguments (key1, value1, key2, value2, ...).
// It is a thin convenience wrapper around MapFunc.
func Map(args ...cty.Value) (cty.Value, error) {
	return MapFunc.Call(args)
}
1464
1465// Matchkeys constructs a new list by taking a subset of elements from one list
1466// whose indexes match the corresponding indexes of values in another list.
1467func Matchkeys(values, keys, searchset cty.Value) (cty.Value, error) {
1468 return MatchkeysFunc.Call([]cty.Value{values, keys, searchset})
1469}
1470
// Merge takes an arbitrary number of maps and returns a single map that contains
// a merged set of elements from all of the maps.
//
// If more than one given map defines the same key then the one that is later in
// the argument sequence takes precedence.
// It is a thin convenience wrapper around MergeFunc.
func Merge(maps ...cty.Value) (cty.Value, error) {
	return MergeFunc.Call(maps)
}
1479
1480// Reverse takes a sequence and produces a new sequence of the same length
1481// with all of the same elements as the given sequence but in reverse order.
1482func Reverse(list cty.Value) (cty.Value, error) {
1483 return ReverseFunc.Call([]cty.Value{list})
1484}
1485
// SetProduct computes the cartesian product of sets or sequences.
// It is a thin convenience wrapper around SetProductFunc.
func SetProduct(sets ...cty.Value) (cty.Value, error) {
	return SetProductFunc.Call(sets)
}
1490
1491// Slice extracts some consecutive elements from within a list.
1492func Slice(list, start, end cty.Value) (cty.Value, error) {
1493 return SliceFunc.Call([]cty.Value{list, start, end})
1494}
1495
1496// Transpose takes a map of lists of strings and swaps the keys and values to
1497// produce a new map of lists of strings.
1498func Transpose(values cty.Value) (cty.Value, error) {
1499 return TransposeFunc.Call([]cty.Value{values})
1500}
1501
1502// Values returns a list of the map values, in the order of the sorted keys.
1503// This function only works on flat maps.
1504func Values(values cty.Value) (cty.Value, error) {
1505 return ValuesFunc.Call([]cty.Value{values})
1506}
1507
1508// Zipmap constructs a map from a list of keys and a corresponding list of values.
1509func Zipmap(keys, values cty.Value) (cty.Value, error) {
1510 return ZipmapFunc.Call([]cty.Value{keys, values})
1511}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go
new file mode 100644
index 0000000..83f8597
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/conversion.go
@@ -0,0 +1,87 @@
1package funcs
2
3import (
4 "strconv"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
8 "github.com/zclconf/go-cty/cty/function"
9)
10
// MakeToFunc constructs a "to..." function, like "tostring", which converts
// its argument to a specific type or type kind.
//
// The given type wantTy can be any type constraint that cty's "convert" package
// would accept. In particular, this means that you can pass
// cty.List(cty.DynamicPseudoType) to mean "list of any single type", which
// will then cause cty to attempt to unify all of the element types when given
// a tuple.
func MakeToFunc(wantTy cty.Type) function.Function {
	return function.New(&function.Spec{
		Params: []function.Parameter{
			{
				Name: "v",
				// We use DynamicPseudoType rather than wantTy here so that
				// all values will pass through the function API verbatim and
				// we can handle the conversion logic within the Type and
				// Impl functions. This allows us to customize the error
				// messages to be more appropriate for an explicit type
				// conversion, whereas the cty function system produces
				// messages aimed at _implicit_ type conversions.
				Type:      cty.DynamicPseudoType,
				AllowNull: true,
			},
		},
		Type: func(args []cty.Value) (cty.Type, error) {
			gotTy := args[0].Type()
			if gotTy.Equals(wantTy) {
				// Exact match: no conversion needed at all.
				return wantTy, nil
			}
			conv := convert.GetConversionUnsafe(args[0].Type(), wantTy)
			if conv == nil {
				// We'll use some specialized errors for some trickier cases,
				// but most we can handle in a simple way.
				switch {
				case gotTy.IsTupleType() && wantTy.IsTupleType():
					return cty.NilType, function.NewArgErrorf(0, "incompatible tuple type for conversion: %s", convert.MismatchMessage(gotTy, wantTy))
				case gotTy.IsObjectType() && wantTy.IsObjectType():
					return cty.NilType, function.NewArgErrorf(0, "incompatible object type for conversion: %s", convert.MismatchMessage(gotTy, wantTy))
				default:
					return cty.NilType, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint())
				}
			}
			// If a conversion is available then everything is fine.
			return wantTy, nil
		},
		Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
			// We didn't set "AllowUnknown" on our argument, so it is guaranteed
			// to be known here but may still be null.
			ret, err := convert.Convert(args[0], retType)
			if err != nil {
				// Because we used GetConversionUnsafe above, conversion can
				// still potentially fail in here. For example, if the user
				// asks to convert the string "a" to bool then we'll
				// optimistically permit it during type checking but fail here
				// once we note that the value isn't either "true" or "false".
				gotTy := args[0].Type()
				switch {
				case gotTy == cty.String && wantTy == cty.Bool:
					what := "string"
					if !args[0].IsNull() {
						what = strconv.Quote(args[0].AsString())
					}
					return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to bool; only the strings "true" or "false" are allowed`, what)
				case gotTy == cty.String && wantTy == cty.Number:
					what := "string"
					if !args[0].IsNull() {
						what = strconv.Quote(args[0].AsString())
					}
					return cty.NilVal, function.NewArgErrorf(0, `cannot convert %s to number; given string must be a decimal representation of a number`, what)
				default:
					return cty.NilVal, function.NewArgErrorf(0, "cannot convert %s to %s", gotTy.FriendlyName(), wantTy.FriendlyNameForConstraint())
				}
			}
			return ret, nil
		},
	})
}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
new file mode 100644
index 0000000..5cb4bc5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
@@ -0,0 +1,285 @@
1package funcs
2
3import (
4 "crypto/md5"
5 "crypto/rsa"
6 "crypto/sha1"
7 "crypto/sha256"
8 "crypto/sha512"
9 "crypto/x509"
10 "encoding/base64"
11 "encoding/hex"
12 "encoding/pem"
13 "fmt"
14 "hash"
15
16 uuid "github.com/hashicorp/go-uuid"
17 "github.com/zclconf/go-cty/cty"
18 "github.com/zclconf/go-cty/cty/function"
19 "github.com/zclconf/go-cty/cty/gocty"
20 "golang.org/x/crypto/bcrypt"
21)
22
// UUIDFunc constructs a function that generates a fresh UUID string via
// hashicorp/go-uuid on every call. It is impure: each call returns a
// different result.
var UUIDFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	Type:   function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		result, err := uuid.GenerateUUID()
		if err != nil {
			return cty.UnknownVal(cty.String), err
		}
		return cty.StringVal(result), nil
	},
})
34
// Base64Sha256Func constructs a function that computes the SHA256 hash of a given string
// and encodes the raw hash bytes with standard Base64.
var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString)
38
// MakeFileBase64Sha256Func constructs a function that is like Base64Sha256Func but reads the
// contents of a file (resolved relative to baseDir) rather than hashing a given literal string.
func MakeFileBase64Sha256Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha256.New, base64.StdEncoding.EncodeToString)
}
44
// Base64Sha512Func constructs a function that computes the SHA512 hash of a given string
// and encodes the raw hash bytes with standard Base64.
var Base64Sha512Func = makeStringHashFunction(sha512.New, base64.StdEncoding.EncodeToString)
48
// MakeFileBase64Sha512Func constructs a function that is like Base64Sha512Func but reads the
// contents of a file (resolved relative to baseDir) rather than hashing a given literal string.
func MakeFileBase64Sha512Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha512.New, base64.StdEncoding.EncodeToString)
}
54
55// BcryptFunc constructs a function that computes a hash of the given string using the Blowfish cipher.
56var BcryptFunc = function.New(&function.Spec{
57 Params: []function.Parameter{
58 {
59 Name: "str",
60 Type: cty.String,
61 },
62 },
63 VarParam: &function.Parameter{
64 Name: "cost",
65 Type: cty.Number,
66 },
67 Type: function.StaticReturnType(cty.String),
68 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
69 defaultCost := 10
70
71 if len(args) > 1 {
72 var val int
73 if err := gocty.FromCtyValue(args[1], &val); err != nil {
74 return cty.UnknownVal(cty.String), err
75 }
76 defaultCost = val
77 }
78
79 if len(args) > 2 {
80 return cty.UnknownVal(cty.String), fmt.Errorf("bcrypt() takes no more than two arguments")
81 }
82
83 input := args[0].AsString()
84 out, err := bcrypt.GenerateFromPassword([]byte(input), defaultCost)
85 if err != nil {
86 return cty.UnknownVal(cty.String), fmt.Errorf("error occured generating password %s", err.Error())
87 }
88
89 return cty.StringVal(string(out)), nil
90 },
91})
92
// Md5Func constructs a function that computes the MD5 hash of a given string and encodes it with hexadecimal digits.
// NOTE(review): MD5 is not collision-resistant; intended for checksums, not security.
var Md5Func = makeStringHashFunction(md5.New, hex.EncodeToString)
95
// MakeFileMd5Func constructs a function that is like Md5Func but reads the
// contents of a file (resolved relative to baseDir) rather than hashing a given literal string.
func MakeFileMd5Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, md5.New, hex.EncodeToString)
}
101
// RsaDecryptFunc constructs a function that decrypts an RSA-encrypted
// ciphertext. The ciphertext must be Base64-encoded PKCS#1 v1.5 data and
// the private key must be a PEM-encoded, unencrypted PKCS#1 RSA key.
var RsaDecryptFunc = function.New(&function.Spec{
	Params: []function.Parameter{
		{
			Name: "ciphertext",
			Type: cty.String,
		},
		{
			Name: "privatekey",
			Type: cty.String,
		},
	},
	Type: function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
		s := args[0].AsString()
		key := args[1].AsString()

		// The ciphertext is expected to be Base64 text, not raw bytes.
		b, err := base64.StdEncoding.DecodeString(s)
		if err != nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode input %q: cipher text must be base64-encoded", s)
		}

		// Only the first PEM block is considered; any trailing data is ignored.
		block, _ := pem.Decode([]byte(key))
		if block == nil {
			return cty.UnknownVal(cty.String), fmt.Errorf("failed to parse key: no key found")
		}
		if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
			return cty.UnknownVal(cty.String), fmt.Errorf(
				"failed to parse key: password protected keys are not supported. Please decrypt the key prior to use",
			)
		}

		x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
		if err != nil {
			return cty.UnknownVal(cty.String), err
		}

		// A nil random source is acceptable for PKCS#1 v1.5 decryption.
		out, err := rsa.DecryptPKCS1v15(nil, x509Key, b)
		if err != nil {
			return cty.UnknownVal(cty.String), err
		}

		return cty.StringVal(string(out)), nil
	},
})
147
// Sha1Func constructs a function that computes the SHA1 hash of a given string
// and encodes it with hexadecimal digits.
var Sha1Func = makeStringHashFunction(sha1.New, hex.EncodeToString)
151
// MakeFileSha1Func constructs a function that is like Sha1Func but reads the
// contents of a file (resolved relative to baseDir) rather than hashing a given literal string.
func MakeFileSha1Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha1.New, hex.EncodeToString)
}
157
// Sha256Func constructs a function that computes the SHA256 hash of a given string
// and encodes it with hexadecimal digits.
var Sha256Func = makeStringHashFunction(sha256.New, hex.EncodeToString)
161
// MakeFileSha256Func constructs a function that is like Sha256Func but reads the
// contents of a file (resolved relative to baseDir) rather than hashing a given literal string.
func MakeFileSha256Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha256.New, hex.EncodeToString)
}
167
// Sha512Func constructs a function that computes the SHA512 hash of a given string
// and encodes it with hexadecimal digits.
var Sha512Func = makeStringHashFunction(sha512.New, hex.EncodeToString)
171
// MakeFileSha512Func constructs a function that is like Sha512Func but reads the
// contents of a file (resolved relative to baseDir) rather than hashing a given literal string.
func MakeFileSha512Func(baseDir string) function.Function {
	return makeFileHashFunction(baseDir, sha512.New, hex.EncodeToString)
}
177
178func makeStringHashFunction(hf func() hash.Hash, enc func([]byte) string) function.Function {
179 return function.New(&function.Spec{
180 Params: []function.Parameter{
181 {
182 Name: "str",
183 Type: cty.String,
184 },
185 },
186 Type: function.StaticReturnType(cty.String),
187 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
188 s := args[0].AsString()
189 h := hf()
190 h.Write([]byte(s))
191 rv := enc(h.Sum(nil))
192 return cty.StringVal(rv), nil
193 },
194 })
195}
196
197func makeFileHashFunction(baseDir string, hf func() hash.Hash, enc func([]byte) string) function.Function {
198 return function.New(&function.Spec{
199 Params: []function.Parameter{
200 {
201 Name: "path",
202 Type: cty.String,
203 },
204 },
205 Type: function.StaticReturnType(cty.String),
206 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
207 path := args[0].AsString()
208 src, err := readFileBytes(baseDir, path)
209 if err != nil {
210 return cty.UnknownVal(cty.String), err
211 }
212
213 h := hf()
214 h.Write(src)
215 rv := enc(h.Sum(nil))
216 return cty.StringVal(rv), nil
217 },
218 })
219}
220
221// UUID generates and returns a Type-4 UUID in the standard hexadecimal string
222// format.
223//
224// This is not a pure function: it will generate a different result for each
225// call. It must therefore be registered as an impure function in the function
226// table in the "lang" package.
227func UUID() (cty.Value, error) {
228 return UUIDFunc.Call(nil)
229}
230
231// Base64Sha256 computes the SHA256 hash of a given string and encodes it with
232// Base64.
233//
234// The given string is first encoded as UTF-8 and then the SHA256 algorithm is applied
235// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
236// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
237func Base64Sha256(str cty.Value) (cty.Value, error) {
238 return Base64Sha256Func.Call([]cty.Value{str})
239}
240
// Base64Sha512 computes the SHA512 hash of a given string and encodes it with
// Base64.
//
// The given string is first encoded as UTF-8 and then the SHA512 algorithm is applied
// as defined in RFC 4634. The raw hash is then encoded with Base64 before returning.
// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
func Base64Sha512(str cty.Value) (cty.Value, error) {
	return Base64Sha512Func.Call([]cty.Value{str})
}
250
251// Bcrypt computes a hash of the given string using the Blowfish cipher,
252// returning a string in the Modular Crypt Format
253// usually expected in the shadow password file on many Unix systems.
254func Bcrypt(str cty.Value, cost ...cty.Value) (cty.Value, error) {
255 args := make([]cty.Value, len(cost)+1)
256 args[0] = str
257 copy(args[1:], cost)
258 return BcryptFunc.Call(args)
259}
260
261// Md5 computes the MD5 hash of a given string and encodes it with hexadecimal digits.
262func Md5(str cty.Value) (cty.Value, error) {
263 return Md5Func.Call([]cty.Value{str})
264}
265
266// RsaDecrypt decrypts an RSA-encrypted ciphertext, returning the corresponding
267// cleartext.
268func RsaDecrypt(ciphertext, privatekey cty.Value) (cty.Value, error) {
269 return RsaDecryptFunc.Call([]cty.Value{ciphertext, privatekey})
270}
271
272// Sha1 computes the SHA1 hash of a given string and encodes it with hexadecimal digits.
273func Sha1(str cty.Value) (cty.Value, error) {
274 return Sha1Func.Call([]cty.Value{str})
275}
276
277// Sha256 computes the SHA256 hash of a given string and encodes it with hexadecimal digits.
278func Sha256(str cty.Value) (cty.Value, error) {
279 return Sha256Func.Call([]cty.Value{str})
280}
281
282// Sha512 computes the SHA512 hash of a given string and encodes it with hexadecimal digits.
283func Sha512(str cty.Value) (cty.Value, error) {
284 return Sha512Func.Call([]cty.Value{str})
285}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
new file mode 100644
index 0000000..5dae198
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/datetime.go
@@ -0,0 +1,70 @@
1package funcs
2
3import (
4 "time"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/function"
8)
9
// TimestampFunc constructs a function that returns a string representation of
// the current date and time, in UTC and RFC 3339 format. It is impure: each
// call observes the current clock.
var TimestampFunc = function.New(&function.Spec{
	Params: []function.Parameter{},
	Type:   function.StaticReturnType(cty.String),
	Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
		return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
	},
})
18
19// TimeAddFunc constructs a function that adds a duration to a timestamp, returning a new timestamp.
20var TimeAddFunc = function.New(&function.Spec{
21 Params: []function.Parameter{
22 {
23 Name: "timestamp",
24 Type: cty.String,
25 },
26 {
27 Name: "duration",
28 Type: cty.String,
29 },
30 },
31 Type: function.StaticReturnType(cty.String),
32 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
33 ts, err := time.Parse(time.RFC3339, args[0].AsString())
34 if err != nil {
35 return cty.UnknownVal(cty.String), err
36 }
37 duration, err := time.ParseDuration(args[1].AsString())
38 if err != nil {
39 return cty.UnknownVal(cty.String), err
40 }
41
42 return cty.StringVal(ts.Add(duration).Format(time.RFC3339)), nil
43 },
44})
45
46// Timestamp returns a string representation of the current date and time.
47//
48// In the Terraform language, timestamps are conventionally represented as
49// strings using RFC 3339 "Date and Time format" syntax, and so timestamp
50// returns a string in this format.
51func Timestamp() (cty.Value, error) {
52 return TimestampFunc.Call([]cty.Value{})
53}
54
// TimeAdd adds a duration to a timestamp, returning a new timestamp.
//
// In the Terraform language, timestamps are conventionally represented as
// strings using RFC 3339 "Date and Time format" syntax. Timeadd requires
// the timestamp argument to be a string conforming to this syntax.
//
// `duration` is a string representation of a time difference, consisting of
// sequences of number and unit pairs, like `"1.5h"` or `"1h30m"`. The accepted
// units are `"ns"`, `"us"` (or `"µs"`), `"ms"`, `"s"`, `"m"`, and `"h"`. The first
// number may be negative to indicate a negative duration, like `"-2h5m"`.
//
// The result is a string, also in RFC 3339 format, representing the result
// of adding the given duration to the given timestamp.
func TimeAdd(timestamp cty.Value, duration cty.Value) (cty.Value, error) {
	return TimeAddFunc.Call([]cty.Value{timestamp, duration})
}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go
new file mode 100644
index 0000000..af93f08
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/encoding.go
@@ -0,0 +1,140 @@
1package funcs
2
3import (
4 "bytes"
5 "compress/gzip"
6 "encoding/base64"
7 "fmt"
8 "log"
9 "net/url"
10 "unicode/utf8"
11
12 "github.com/zclconf/go-cty/cty"
13 "github.com/zclconf/go-cty/cty/function"
14)
15
16// Base64DecodeFunc constructs a function that decodes a string containing a base64 sequence.
17var Base64DecodeFunc = function.New(&function.Spec{
18 Params: []function.Parameter{
19 {
20 Name: "str",
21 Type: cty.String,
22 },
23 },
24 Type: function.StaticReturnType(cty.String),
25 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
26 s := args[0].AsString()
27 sDec, err := base64.StdEncoding.DecodeString(s)
28 if err != nil {
29 return cty.UnknownVal(cty.String), fmt.Errorf("failed to decode base64 data '%s'", s)
30 }
31 if !utf8.Valid([]byte(sDec)) {
32 log.Printf("[DEBUG] the result of decoding the the provided string is not valid UTF-8: %s", sDec)
33 return cty.UnknownVal(cty.String), fmt.Errorf("the result of decoding the the provided string is not valid UTF-8")
34 }
35 return cty.StringVal(string(sDec)), nil
36 },
37})
38
39// Base64EncodeFunc constructs a function that encodes a string to a base64 sequence.
40var Base64EncodeFunc = function.New(&function.Spec{
41 Params: []function.Parameter{
42 {
43 Name: "str",
44 Type: cty.String,
45 },
46 },
47 Type: function.StaticReturnType(cty.String),
48 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
49 return cty.StringVal(base64.StdEncoding.EncodeToString([]byte(args[0].AsString()))), nil
50 },
51})
52
53// Base64GzipFunc constructs a function that compresses a string with gzip and then encodes the result in
54// Base64 encoding.
55var Base64GzipFunc = function.New(&function.Spec{
56 Params: []function.Parameter{
57 {
58 Name: "str",
59 Type: cty.String,
60 },
61 },
62 Type: function.StaticReturnType(cty.String),
63 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
64 s := args[0].AsString()
65
66 var b bytes.Buffer
67 gz := gzip.NewWriter(&b)
68 if _, err := gz.Write([]byte(s)); err != nil {
69 return cty.UnknownVal(cty.String), fmt.Errorf("failed to write gzip raw data: '%s'", s)
70 }
71 if err := gz.Flush(); err != nil {
72 return cty.UnknownVal(cty.String), fmt.Errorf("failed to flush gzip writer: '%s'", s)
73 }
74 if err := gz.Close(); err != nil {
75 return cty.UnknownVal(cty.String), fmt.Errorf("failed to close gzip writer: '%s'", s)
76 }
77 return cty.StringVal(base64.StdEncoding.EncodeToString(b.Bytes())), nil
78 },
79})
80
81// URLEncodeFunc constructs a function that applies URL encoding to a given string.
82var URLEncodeFunc = function.New(&function.Spec{
83 Params: []function.Parameter{
84 {
85 Name: "str",
86 Type: cty.String,
87 },
88 },
89 Type: function.StaticReturnType(cty.String),
90 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
91 return cty.StringVal(url.QueryEscape(args[0].AsString())), nil
92 },
93})
94
95// Base64Decode decodes a string containing a base64 sequence.
96//
97// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
98//
99// Strings in the Terraform language are sequences of unicode characters rather
100// than bytes, so this function will also interpret the resulting bytes as
101// UTF-8. If the bytes after Base64 decoding are _not_ valid UTF-8, this function
102// produces an error.
103func Base64Decode(str cty.Value) (cty.Value, error) {
104 return Base64DecodeFunc.Call([]cty.Value{str})
105}
106
107// Base64Encode applies Base64 encoding to a string.
108//
109// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
110//
111// Strings in the Terraform language are sequences of unicode characters rather
112// than bytes, so this function will first encode the characters from the string
113// as UTF-8, and then apply Base64 encoding to the result.
114func Base64Encode(str cty.Value) (cty.Value, error) {
115 return Base64EncodeFunc.Call([]cty.Value{str})
116}
117
118// Base64Gzip compresses a string with gzip and then encodes the result in
119// Base64 encoding.
120//
121// Terraform uses the "standard" Base64 alphabet as defined in RFC 4648 section 4.
122//
123// Strings in the Terraform language are sequences of unicode characters rather
124// than bytes, so this function will first encode the characters from the string
125// as UTF-8, then apply gzip compression, and then finally apply Base64 encoding.
126func Base64Gzip(str cty.Value) (cty.Value, error) {
127 return Base64GzipFunc.Call([]cty.Value{str})
128}
129
130// URLEncode applies URL encoding to a given string.
131//
132// This function identifies characters in the given string that would have a
133// special meaning when included as a query string argument in a URL and
134// escapes them using RFC 3986 "percent encoding".
135//
136// If the given string contains non-ASCII characters, these are first encoded as
137// UTF-8 and then percent encoding is applied separately to each UTF-8 byte.
138func URLEncode(str cty.Value) (cty.Value, error) {
139 return URLEncodeFunc.Call([]cty.Value{str})
140}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
new file mode 100644
index 0000000..7dfc905
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
@@ -0,0 +1,345 @@
1package funcs
2
3import (
4 "encoding/base64"
5 "fmt"
6 "io/ioutil"
7 "os"
8 "path/filepath"
9 "unicode/utf8"
10
11 "github.com/hashicorp/hcl2/hcl"
12 "github.com/hashicorp/hcl2/hcl/hclsyntax"
13 homedir "github.com/mitchellh/go-homedir"
14 "github.com/zclconf/go-cty/cty"
15 "github.com/zclconf/go-cty/cty/function"
16)
17
18// MakeFileFunc constructs a function that takes a file path and returns the
19// contents of that file, either directly as a string (where valid UTF-8 is
20// required) or as a string containing base64 bytes.
21func MakeFileFunc(baseDir string, encBase64 bool) function.Function {
22 return function.New(&function.Spec{
23 Params: []function.Parameter{
24 {
25 Name: "path",
26 Type: cty.String,
27 },
28 },
29 Type: function.StaticReturnType(cty.String),
30 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
31 path := args[0].AsString()
32 src, err := readFileBytes(baseDir, path)
33 if err != nil {
34 return cty.UnknownVal(cty.String), err
35 }
36
37 switch {
38 case encBase64:
39 enc := base64.StdEncoding.EncodeToString(src)
40 return cty.StringVal(enc), nil
41 default:
42 if !utf8.Valid(src) {
43 return cty.UnknownVal(cty.String), fmt.Errorf("contents of %s are not valid UTF-8; use the filebase64 function to obtain the Base64 encoded contents or the other file functions (e.g. filemd5, filesha256) to obtain file hashing results instead", path)
44 }
45 return cty.StringVal(string(src)), nil
46 }
47 },
48 })
49}
50
51// MakeTemplateFileFunc constructs a function that takes a file path and
52// an arbitrary object of named values and attempts to render the referenced
53// file as a template using HCL template syntax.
54//
55// The template itself may recursively call other functions so a callback
56// must be provided to get access to those functions. The template cannot,
57// however, access any variables defined in the scope: it is restricted only to
58// those variables provided in the second function argument, to ensure that all
59// dependencies on other graph nodes can be seen before executing this function.
60//
61// As a special exception, a referenced template file may not recursively call
62// the templatefile function, since that would risk the same file being
63// included into itself indefinitely.
64func MakeTemplateFileFunc(baseDir string, funcsCb func() map[string]function.Function) function.Function {
65
66 params := []function.Parameter{
67 {
68 Name: "path",
69 Type: cty.String,
70 },
71 {
72 Name: "vars",
73 Type: cty.DynamicPseudoType,
74 },
75 }
76
77 loadTmpl := func(fn string) (hcl.Expression, error) {
78 // We re-use File here to ensure the same filename interpretation
79 // as it does, along with its other safety checks.
80 tmplVal, err := File(baseDir, cty.StringVal(fn))
81 if err != nil {
82 return nil, err
83 }
84
85 expr, diags := hclsyntax.ParseTemplate([]byte(tmplVal.AsString()), fn, hcl.Pos{Line: 1, Column: 1})
86 if diags.HasErrors() {
87 return nil, diags
88 }
89
90 return expr, nil
91 }
92
93 renderTmpl := func(expr hcl.Expression, varsVal cty.Value) (cty.Value, error) {
94 if varsTy := varsVal.Type(); !(varsTy.IsMapType() || varsTy.IsObjectType()) {
95 return cty.DynamicVal, function.NewArgErrorf(1, "invalid vars value: must be a map") // or an object, but we don't strongly distinguish these most of the time
96 }
97
98 ctx := &hcl.EvalContext{
99 Variables: varsVal.AsValueMap(),
100 }
101
102 // We'll pre-check references in the template here so we can give a
103 // more specialized error message than HCL would by default, so it's
104 // clearer that this problem is coming from a templatefile call.
105 for _, traversal := range expr.Variables() {
106 root := traversal.RootName()
107 if _, ok := ctx.Variables[root]; !ok {
108 return cty.DynamicVal, function.NewArgErrorf(1, "vars map does not contain key %q, referenced at %s", root, traversal[0].SourceRange())
109 }
110 }
111
112 givenFuncs := funcsCb() // this callback indirection is to avoid chicken/egg problems
113 funcs := make(map[string]function.Function, len(givenFuncs))
114 for name, fn := range givenFuncs {
115 if name == "templatefile" {
116 // We stub this one out to prevent recursive calls.
117 funcs[name] = function.New(&function.Spec{
118 Params: params,
119 Type: func(args []cty.Value) (cty.Type, error) {
120 return cty.NilType, fmt.Errorf("cannot recursively call templatefile from inside templatefile call")
121 },
122 })
123 continue
124 }
125 funcs[name] = fn
126 }
127 ctx.Functions = funcs
128
129 val, diags := expr.Value(ctx)
130 if diags.HasErrors() {
131 return cty.DynamicVal, diags
132 }
133 return val, nil
134 }
135
136 return function.New(&function.Spec{
137 Params: params,
138 Type: func(args []cty.Value) (cty.Type, error) {
139 if !(args[0].IsKnown() && args[1].IsKnown()) {
140 return cty.DynamicPseudoType, nil
141 }
142
143 // We'll render our template now to see what result type it produces.
144 // A template consisting only of a single interpolation an potentially
145 // return any type.
146 expr, err := loadTmpl(args[0].AsString())
147 if err != nil {
148 return cty.DynamicPseudoType, err
149 }
150
151 // This is safe even if args[1] contains unknowns because the HCL
152 // template renderer itself knows how to short-circuit those.
153 val, err := renderTmpl(expr, args[1])
154 return val.Type(), err
155 },
156 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
157 expr, err := loadTmpl(args[0].AsString())
158 if err != nil {
159 return cty.DynamicVal, err
160 }
161 return renderTmpl(expr, args[1])
162 },
163 })
164
165}
166
167// MakeFileExistsFunc constructs a function that takes a path
168// and determines whether a file exists at that path
169func MakeFileExistsFunc(baseDir string) function.Function {
170 return function.New(&function.Spec{
171 Params: []function.Parameter{
172 {
173 Name: "path",
174 Type: cty.String,
175 },
176 },
177 Type: function.StaticReturnType(cty.Bool),
178 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
179 path := args[0].AsString()
180 path, err := homedir.Expand(path)
181 if err != nil {
182 return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to expand ~: %s", err)
183 }
184
185 if !filepath.IsAbs(path) {
186 path = filepath.Join(baseDir, path)
187 }
188
189 // Ensure that the path is canonical for the host OS
190 path = filepath.Clean(path)
191
192 fi, err := os.Stat(path)
193 if err != nil {
194 if os.IsNotExist(err) {
195 return cty.False, nil
196 }
197 return cty.UnknownVal(cty.Bool), fmt.Errorf("failed to stat %s", path)
198 }
199
200 if fi.Mode().IsRegular() {
201 return cty.True, nil
202 }
203
204 return cty.False, fmt.Errorf("%s is not a regular file, but %q",
205 path, fi.Mode().String())
206 },
207 })
208}
209
210// BasenameFunc constructs a function that takes a string containing a filesystem path
211// and removes all except the last portion from it.
212var BasenameFunc = function.New(&function.Spec{
213 Params: []function.Parameter{
214 {
215 Name: "path",
216 Type: cty.String,
217 },
218 },
219 Type: function.StaticReturnType(cty.String),
220 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
221 return cty.StringVal(filepath.Base(args[0].AsString())), nil
222 },
223})
224
225// DirnameFunc constructs a function that takes a string containing a filesystem path
226// and removes the last portion from it.
227var DirnameFunc = function.New(&function.Spec{
228 Params: []function.Parameter{
229 {
230 Name: "path",
231 Type: cty.String,
232 },
233 },
234 Type: function.StaticReturnType(cty.String),
235 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
236 return cty.StringVal(filepath.Dir(args[0].AsString())), nil
237 },
238})
239
240// PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
241var PathExpandFunc = function.New(&function.Spec{
242 Params: []function.Parameter{
243 {
244 Name: "path",
245 Type: cty.String,
246 },
247 },
248 Type: function.StaticReturnType(cty.String),
249 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
250
251 homePath, err := homedir.Expand(args[0].AsString())
252 return cty.StringVal(homePath), err
253 },
254})
255
256func readFileBytes(baseDir, path string) ([]byte, error) {
257 path, err := homedir.Expand(path)
258 if err != nil {
259 return nil, fmt.Errorf("failed to expand ~: %s", err)
260 }
261
262 if !filepath.IsAbs(path) {
263 path = filepath.Join(baseDir, path)
264 }
265
266 // Ensure that the path is canonical for the host OS
267 path = filepath.Clean(path)
268
269 src, err := ioutil.ReadFile(path)
270 if err != nil {
271 // ReadFile does not return Terraform-user-friendly error
272 // messages, so we'll provide our own.
273 if os.IsNotExist(err) {
274 return nil, fmt.Errorf("no file exists at %s", path)
275 }
276 return nil, fmt.Errorf("failed to read %s", path)
277 }
278
279 return src, nil
280}
281
282// File reads the contents of the file at the given path.
283//
284// The file must contain valid UTF-8 bytes, or this function will return an error.
285//
286// The underlying function implementation works relative to a particular base
287// directory, so this wrapper takes a base directory string and uses it to
288// construct the underlying function before calling it.
289func File(baseDir string, path cty.Value) (cty.Value, error) {
290 fn := MakeFileFunc(baseDir, false)
291 return fn.Call([]cty.Value{path})
292}
293
294// FileExists determines whether a file exists at the given path.
295//
296// The underlying function implementation works relative to a particular base
297// directory, so this wrapper takes a base directory string and uses it to
298// construct the underlying function before calling it.
299func FileExists(baseDir string, path cty.Value) (cty.Value, error) {
300 fn := MakeFileExistsFunc(baseDir)
301 return fn.Call([]cty.Value{path})
302}
303
304// FileBase64 reads the contents of the file at the given path.
305//
306// The bytes from the file are encoded as base64 before returning.
307//
308// The underlying function implementation works relative to a particular base
309// directory, so this wrapper takes a base directory string and uses it to
310// construct the underlying function before calling it.
311func FileBase64(baseDir string, path cty.Value) (cty.Value, error) {
312 fn := MakeFileFunc(baseDir, true)
313 return fn.Call([]cty.Value{path})
314}
315
316// Basename takes a string containing a filesystem path and removes all except the last portion from it.
317//
318// The underlying function implementation works only with the path string and does not access the filesystem itself.
319// It is therefore unable to take into account filesystem features such as symlinks.
320//
321// If the path is empty then the result is ".", representing the current working directory.
322func Basename(path cty.Value) (cty.Value, error) {
323 return BasenameFunc.Call([]cty.Value{path})
324}
325
326// Dirname takes a string containing a filesystem path and removes the last portion from it.
327//
328// The underlying function implementation works only with the path string and does not access the filesystem itself.
329// It is therefore unable to take into account filesystem features such as symlinks.
330//
331// If the path is empty then the result is ".", representing the current working directory.
332func Dirname(path cty.Value) (cty.Value, error) {
333 return DirnameFunc.Call([]cty.Value{path})
334}
335
336// Pathexpand takes a string that might begin with a `~` segment, and if so it replaces that segment with
337// the current user's home directory path.
338//
339// The underlying function implementation works only with the path string and does not access the filesystem itself.
340// It is therefore unable to take into account filesystem features such as symlinks.
341//
342// If the leading segment in the path is not `~` then the given path is returned unmodified.
343func Pathexpand(path cty.Value) (cty.Value, error) {
344 return PathExpandFunc.Call([]cty.Value{path})
345}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/number.go b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
new file mode 100644
index 0000000..15cfe71
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/number.go
@@ -0,0 +1,155 @@
1package funcs
2
3import (
4 "math"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/function"
8 "github.com/zclconf/go-cty/cty/gocty"
9)
10
11// CeilFunc contructs a function that returns the closest whole number greater
12// than or equal to the given value.
13var CeilFunc = function.New(&function.Spec{
14 Params: []function.Parameter{
15 {
16 Name: "num",
17 Type: cty.Number,
18 },
19 },
20 Type: function.StaticReturnType(cty.Number),
21 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
22 var val float64
23 if err := gocty.FromCtyValue(args[0], &val); err != nil {
24 return cty.UnknownVal(cty.String), err
25 }
26 return cty.NumberIntVal(int64(math.Ceil(val))), nil
27 },
28})
29
30// FloorFunc contructs a function that returns the closest whole number lesser
31// than or equal to the given value.
32var FloorFunc = function.New(&function.Spec{
33 Params: []function.Parameter{
34 {
35 Name: "num",
36 Type: cty.Number,
37 },
38 },
39 Type: function.StaticReturnType(cty.Number),
40 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
41 var val float64
42 if err := gocty.FromCtyValue(args[0], &val); err != nil {
43 return cty.UnknownVal(cty.String), err
44 }
45 return cty.NumberIntVal(int64(math.Floor(val))), nil
46 },
47})
48
49// LogFunc contructs a function that returns the logarithm of a given number in a given base.
50var LogFunc = function.New(&function.Spec{
51 Params: []function.Parameter{
52 {
53 Name: "num",
54 Type: cty.Number,
55 },
56 {
57 Name: "base",
58 Type: cty.Number,
59 },
60 },
61 Type: function.StaticReturnType(cty.Number),
62 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
63 var num float64
64 if err := gocty.FromCtyValue(args[0], &num); err != nil {
65 return cty.UnknownVal(cty.String), err
66 }
67
68 var base float64
69 if err := gocty.FromCtyValue(args[1], &base); err != nil {
70 return cty.UnknownVal(cty.String), err
71 }
72
73 return cty.NumberFloatVal(math.Log(num) / math.Log(base)), nil
74 },
75})
76
77// PowFunc contructs a function that returns the logarithm of a given number in a given base.
78var PowFunc = function.New(&function.Spec{
79 Params: []function.Parameter{
80 {
81 Name: "num",
82 Type: cty.Number,
83 },
84 {
85 Name: "power",
86 Type: cty.Number,
87 },
88 },
89 Type: function.StaticReturnType(cty.Number),
90 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
91 var num float64
92 if err := gocty.FromCtyValue(args[0], &num); err != nil {
93 return cty.UnknownVal(cty.String), err
94 }
95
96 var power float64
97 if err := gocty.FromCtyValue(args[1], &power); err != nil {
98 return cty.UnknownVal(cty.String), err
99 }
100
101 return cty.NumberFloatVal(math.Pow(num, power)), nil
102 },
103})
104
105// SignumFunc contructs a function that returns the closest whole number greater
106// than or equal to the given value.
107var SignumFunc = function.New(&function.Spec{
108 Params: []function.Parameter{
109 {
110 Name: "num",
111 Type: cty.Number,
112 },
113 },
114 Type: function.StaticReturnType(cty.Number),
115 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
116 var num int
117 if err := gocty.FromCtyValue(args[0], &num); err != nil {
118 return cty.UnknownVal(cty.String), err
119 }
120 switch {
121 case num < 0:
122 return cty.NumberIntVal(-1), nil
123 case num > 0:
124 return cty.NumberIntVal(+1), nil
125 default:
126 return cty.NumberIntVal(0), nil
127 }
128 },
129})
130
131// Ceil returns the closest whole number greater than or equal to the given value.
132func Ceil(num cty.Value) (cty.Value, error) {
133 return CeilFunc.Call([]cty.Value{num})
134}
135
136// Floor returns the closest whole number lesser than or equal to the given value.
137func Floor(num cty.Value) (cty.Value, error) {
138 return FloorFunc.Call([]cty.Value{num})
139}
140
141// Log returns returns the logarithm of a given number in a given base.
142func Log(num, base cty.Value) (cty.Value, error) {
143 return LogFunc.Call([]cty.Value{num, base})
144}
145
146// Pow returns the logarithm of a given number in a given base.
147func Pow(num, power cty.Value) (cty.Value, error) {
148 return PowFunc.Call([]cty.Value{num, power})
149}
150
151// Signum determines the sign of a number, returning a number between -1 and
152// 1 to represent the sign.
153func Signum(num cty.Value) (cty.Value, error) {
154 return SignumFunc.Call([]cty.Value{num})
155}
diff --git a/vendor/github.com/hashicorp/terraform/lang/funcs/string.go b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
new file mode 100644
index 0000000..c9ddf19
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/funcs/string.go
@@ -0,0 +1,280 @@
1package funcs
2
3import (
4 "fmt"
5 "regexp"
6 "sort"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty"
10 "github.com/zclconf/go-cty/cty/function"
11 "github.com/zclconf/go-cty/cty/gocty"
12)
13
14var JoinFunc = function.New(&function.Spec{
15 Params: []function.Parameter{
16 {
17 Name: "separator",
18 Type: cty.String,
19 },
20 },
21 VarParam: &function.Parameter{
22 Name: "lists",
23 Type: cty.List(cty.String),
24 },
25 Type: function.StaticReturnType(cty.String),
26 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
27 sep := args[0].AsString()
28 listVals := args[1:]
29 if len(listVals) < 1 {
30 return cty.UnknownVal(cty.String), fmt.Errorf("at least one list is required")
31 }
32
33 l := 0
34 for _, list := range listVals {
35 if !list.IsWhollyKnown() {
36 return cty.UnknownVal(cty.String), nil
37 }
38 l += list.LengthInt()
39 }
40
41 items := make([]string, 0, l)
42 for ai, list := range listVals {
43 ei := 0
44 for it := list.ElementIterator(); it.Next(); {
45 _, val := it.Element()
46 if val.IsNull() {
47 if len(listVals) > 1 {
48 return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d of list %d is null; cannot concatenate null values", ei, ai+1)
49 }
50 return cty.UnknownVal(cty.String), function.NewArgErrorf(ai+1, "element %d is null; cannot concatenate null values", ei)
51 }
52 items = append(items, val.AsString())
53 ei++
54 }
55 }
56
57 return cty.StringVal(strings.Join(items, sep)), nil
58 },
59})
60
61var SortFunc = function.New(&function.Spec{
62 Params: []function.Parameter{
63 {
64 Name: "list",
65 Type: cty.List(cty.String),
66 },
67 },
68 Type: function.StaticReturnType(cty.List(cty.String)),
69 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
70 listVal := args[0]
71
72 if !listVal.IsWhollyKnown() {
73 // If some of the element values aren't known yet then we
74 // can't yet preduct the order of the result.
75 return cty.UnknownVal(retType), nil
76 }
77 if listVal.LengthInt() == 0 { // Easy path
78 return listVal, nil
79 }
80
81 list := make([]string, 0, listVal.LengthInt())
82 for it := listVal.ElementIterator(); it.Next(); {
83 iv, v := it.Element()
84 if v.IsNull() {
85 return cty.UnknownVal(retType), fmt.Errorf("given list element %s is null; a null string cannot be sorted", iv.AsBigFloat().String())
86 }
87 list = append(list, v.AsString())
88 }
89
90 sort.Strings(list)
91 retVals := make([]cty.Value, len(list))
92 for i, s := range list {
93 retVals[i] = cty.StringVal(s)
94 }
95 return cty.ListVal(retVals), nil
96 },
97})
98
99var SplitFunc = function.New(&function.Spec{
100 Params: []function.Parameter{
101 {
102 Name: "separator",
103 Type: cty.String,
104 },
105 {
106 Name: "str",
107 Type: cty.String,
108 },
109 },
110 Type: function.StaticReturnType(cty.List(cty.String)),
111 Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
112 sep := args[0].AsString()
113 str := args[1].AsString()
114 elems := strings.Split(str, sep)
115 elemVals := make([]cty.Value, len(elems))
116 for i, s := range elems {
117 elemVals[i] = cty.StringVal(s)
118 }
119 if len(elemVals) == 0 {
120 return cty.ListValEmpty(cty.String), nil
121 }
122 return cty.ListVal(elemVals), nil
123 },
124})
125
126// ChompFunc constructions a function that removes newline characters at the end of a string.
127var ChompFunc = function.New(&function.Spec{
128 Params: []function.Parameter{
129 {
130 Name: "str",
131 Type: cty.String,
132 },
133 },
134 Type: function.StaticReturnType(cty.String),
135 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
136 newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
137 return cty.StringVal(newlines.ReplaceAllString(args[0].AsString(), "")), nil
138 },
139})
140
141// IndentFunc constructions a function that adds a given number of spaces to the
142// beginnings of all but the first line in a given multi-line string.
143var IndentFunc = function.New(&function.Spec{
144 Params: []function.Parameter{
145 {
146 Name: "spaces",
147 Type: cty.Number,
148 },
149 {
150 Name: "str",
151 Type: cty.String,
152 },
153 },
154 Type: function.StaticReturnType(cty.String),
155 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
156 var spaces int
157 if err := gocty.FromCtyValue(args[0], &spaces); err != nil {
158 return cty.UnknownVal(cty.String), err
159 }
160 data := args[1].AsString()
161 pad := strings.Repeat(" ", spaces)
162 return cty.StringVal(strings.Replace(data, "\n", "\n"+pad, -1)), nil
163 },
164})
165
166// ReplaceFunc constructions a function that searches a given string for another
167// given substring, and replaces each occurence with a given replacement string.
168var ReplaceFunc = function.New(&function.Spec{
169 Params: []function.Parameter{
170 {
171 Name: "str",
172 Type: cty.String,
173 },
174 {
175 Name: "substr",
176 Type: cty.String,
177 },
178 {
179 Name: "replace",
180 Type: cty.String,
181 },
182 },
183 Type: function.StaticReturnType(cty.String),
184 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
185 str := args[0].AsString()
186 substr := args[1].AsString()
187 replace := args[2].AsString()
188
189 // We search/replace using a regexp if the string is surrounded
190 // in forward slashes.
191 if len(substr) > 1 && substr[0] == '/' && substr[len(substr)-1] == '/' {
192 re, err := regexp.Compile(substr[1 : len(substr)-1])
193 if err != nil {
194 return cty.UnknownVal(cty.String), err
195 }
196
197 return cty.StringVal(re.ReplaceAllString(str, replace)), nil
198 }
199
200 return cty.StringVal(strings.Replace(str, substr, replace, -1)), nil
201 },
202})
203
204// TitleFunc constructions a function that converts the first letter of each word
205// in the given string to uppercase.
206var TitleFunc = function.New(&function.Spec{
207 Params: []function.Parameter{
208 {
209 Name: "str",
210 Type: cty.String,
211 },
212 },
213 Type: function.StaticReturnType(cty.String),
214 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
215 return cty.StringVal(strings.Title(args[0].AsString())), nil
216 },
217})
218
219// TrimSpaceFunc constructions a function that removes any space characters from
220// the start and end of the given string.
221var TrimSpaceFunc = function.New(&function.Spec{
222 Params: []function.Parameter{
223 {
224 Name: "str",
225 Type: cty.String,
226 },
227 },
228 Type: function.StaticReturnType(cty.String),
229 Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
230 return cty.StringVal(strings.TrimSpace(args[0].AsString())), nil
231 },
232})
233
234// Join concatenates together the string elements of one or more lists with a
235// given separator.
236func Join(sep cty.Value, lists ...cty.Value) (cty.Value, error) {
237 args := make([]cty.Value, len(lists)+1)
238 args[0] = sep
239 copy(args[1:], lists)
240 return JoinFunc.Call(args)
241}
242
243// Sort re-orders the elements of a given list of strings so that they are
244// in ascending lexicographical order.
245func Sort(list cty.Value) (cty.Value, error) {
246 return SortFunc.Call([]cty.Value{list})
247}
248
249// Split divides a given string by a given separator, returning a list of
250// strings containing the characters between the separator sequences.
251func Split(sep, str cty.Value) (cty.Value, error) {
252 return SplitFunc.Call([]cty.Value{sep, str})
253}
254
255// Chomp removes newline characters at the end of a string.
256func Chomp(str cty.Value) (cty.Value, error) {
257 return ChompFunc.Call([]cty.Value{str})
258}
259
260// Indent adds a given number of spaces to the beginnings of all but the first
261// line in a given multi-line string.
262func Indent(spaces, str cty.Value) (cty.Value, error) {
263 return IndentFunc.Call([]cty.Value{spaces, str})
264}
265
266// Replace searches a given string for another given substring,
267// and replaces all occurences with a given replacement string.
268func Replace(str, substr, replace cty.Value) (cty.Value, error) {
269 return ReplaceFunc.Call([]cty.Value{str, substr, replace})
270}
271
272// Title converts the first letter of each word in the given string to uppercase.
273func Title(str cty.Value) (cty.Value, error) {
274 return TitleFunc.Call([]cty.Value{str})
275}
276
277// TrimSpace removes any space characters from the start and end of the given string.
278func TrimSpace(str cty.Value) (cty.Value, error) {
279 return TrimSpaceFunc.Call([]cty.Value{str})
280}
diff --git a/vendor/github.com/hashicorp/terraform/lang/functions.go b/vendor/github.com/hashicorp/terraform/lang/functions.go
new file mode 100644
index 0000000..2c7b548
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/functions.go
@@ -0,0 +1,147 @@
1package lang
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/function"
8 "github.com/zclconf/go-cty/cty/function/stdlib"
9
10 "github.com/hashicorp/terraform/lang/funcs"
11)
12
13var impureFunctions = []string{
14 "bcrypt",
15 "timestamp",
16 "uuid",
17}
18
19// Functions returns the set of functions that should be used to when evaluating
20// expressions in the receiving scope.
21func (s *Scope) Functions() map[string]function.Function {
22 s.funcsLock.Lock()
23 if s.funcs == nil {
24 // Some of our functions are just directly the cty stdlib functions.
25 // Others are implemented in the subdirectory "funcs" here in this
26 // repository. New functions should generally start out their lives
27 // in the "funcs" directory and potentially graduate to cty stdlib
28 // later if the functionality seems to be something domain-agnostic
29 // that would be useful to all applications using cty functions.
30
31 s.funcs = map[string]function.Function{
32 "abs": stdlib.AbsoluteFunc,
33 "basename": funcs.BasenameFunc,
34 "base64decode": funcs.Base64DecodeFunc,
35 "base64encode": funcs.Base64EncodeFunc,
36 "base64gzip": funcs.Base64GzipFunc,
37 "base64sha256": funcs.Base64Sha256Func,
38 "base64sha512": funcs.Base64Sha512Func,
39 "bcrypt": funcs.BcryptFunc,
40 "ceil": funcs.CeilFunc,
41 "chomp": funcs.ChompFunc,
42 "cidrhost": funcs.CidrHostFunc,
43 "cidrnetmask": funcs.CidrNetmaskFunc,
44 "cidrsubnet": funcs.CidrSubnetFunc,
45 "coalesce": funcs.CoalesceFunc,
46 "coalescelist": funcs.CoalesceListFunc,
47 "compact": funcs.CompactFunc,
48 "concat": stdlib.ConcatFunc,
49 "contains": funcs.ContainsFunc,
50 "csvdecode": stdlib.CSVDecodeFunc,
51 "dirname": funcs.DirnameFunc,
52 "distinct": funcs.DistinctFunc,
53 "element": funcs.ElementFunc,
54 "chunklist": funcs.ChunklistFunc,
55 "file": funcs.MakeFileFunc(s.BaseDir, false),
56 "fileexists": funcs.MakeFileExistsFunc(s.BaseDir),
57 "filebase64": funcs.MakeFileFunc(s.BaseDir, true),
58 "filebase64sha256": funcs.MakeFileBase64Sha256Func(s.BaseDir),
59 "filebase64sha512": funcs.MakeFileBase64Sha512Func(s.BaseDir),
60 "filemd5": funcs.MakeFileMd5Func(s.BaseDir),
61 "filesha1": funcs.MakeFileSha1Func(s.BaseDir),
62 "filesha256": funcs.MakeFileSha256Func(s.BaseDir),
63 "filesha512": funcs.MakeFileSha512Func(s.BaseDir),
64 "flatten": funcs.FlattenFunc,
65 "floor": funcs.FloorFunc,
66 "format": stdlib.FormatFunc,
67 "formatdate": stdlib.FormatDateFunc,
68 "formatlist": stdlib.FormatListFunc,
69 "indent": funcs.IndentFunc,
70 "index": funcs.IndexFunc,
71 "join": funcs.JoinFunc,
72 "jsondecode": stdlib.JSONDecodeFunc,
73 "jsonencode": stdlib.JSONEncodeFunc,
74 "keys": funcs.KeysFunc,
75 "length": funcs.LengthFunc,
76 "list": funcs.ListFunc,
77 "log": funcs.LogFunc,
78 "lookup": funcs.LookupFunc,
79 "lower": stdlib.LowerFunc,
80 "map": funcs.MapFunc,
81 "matchkeys": funcs.MatchkeysFunc,
82 "max": stdlib.MaxFunc,
83 "md5": funcs.Md5Func,
84 "merge": funcs.MergeFunc,
85 "min": stdlib.MinFunc,
86 "pathexpand": funcs.PathExpandFunc,
87 "pow": funcs.PowFunc,
88 "replace": funcs.ReplaceFunc,
89 "reverse": funcs.ReverseFunc,
90 "rsadecrypt": funcs.RsaDecryptFunc,
91 "setintersection": stdlib.SetIntersectionFunc,
92 "setproduct": funcs.SetProductFunc,
93 "setunion": stdlib.SetUnionFunc,
94 "sha1": funcs.Sha1Func,
95 "sha256": funcs.Sha256Func,
96 "sha512": funcs.Sha512Func,
97 "signum": funcs.SignumFunc,
98 "slice": funcs.SliceFunc,
99 "sort": funcs.SortFunc,
100 "split": funcs.SplitFunc,
101 "strrev": stdlib.ReverseFunc,
102 "substr": stdlib.SubstrFunc,
103 "timestamp": funcs.TimestampFunc,
104 "timeadd": funcs.TimeAddFunc,
105 "title": funcs.TitleFunc,
106 "tostring": funcs.MakeToFunc(cty.String),
107 "tonumber": funcs.MakeToFunc(cty.Number),
108 "tobool": funcs.MakeToFunc(cty.Bool),
109 "toset": funcs.MakeToFunc(cty.Set(cty.DynamicPseudoType)),
110 "tolist": funcs.MakeToFunc(cty.List(cty.DynamicPseudoType)),
111 "tomap": funcs.MakeToFunc(cty.Map(cty.DynamicPseudoType)),
112 "transpose": funcs.TransposeFunc,
113 "trimspace": funcs.TrimSpaceFunc,
114 "upper": stdlib.UpperFunc,
115 "urlencode": funcs.URLEncodeFunc,
116 "uuid": funcs.UUIDFunc,
117 "values": funcs.ValuesFunc,
118 "zipmap": funcs.ZipmapFunc,
119 }
120
121 s.funcs["templatefile"] = funcs.MakeTemplateFileFunc(s.BaseDir, func() map[string]function.Function {
122 // The templatefile function prevents recursive calls to itself
123 // by copying this map and overwriting the "templatefile" entry.
124 return s.funcs
125 })
126
127 if s.PureOnly {
128 // Force our few impure functions to return unknown so that we
129 // can defer evaluating them until a later pass.
130 for _, name := range impureFunctions {
131 s.funcs[name] = function.Unpredictable(s.funcs[name])
132 }
133 }
134 }
135 s.funcsLock.Unlock()
136
137 return s.funcs
138}
139
140var unimplFunc = function.New(&function.Spec{
141 Type: func([]cty.Value) (cty.Type, error) {
142 return cty.DynamicPseudoType, fmt.Errorf("function not yet implemented")
143 },
144 Impl: func([]cty.Value, cty.Type) (cty.Value, error) {
145 return cty.DynamicVal, fmt.Errorf("function not yet implemented")
146 },
147})
diff --git a/vendor/github.com/hashicorp/terraform/lang/references.go b/vendor/github.com/hashicorp/terraform/lang/references.go
new file mode 100644
index 0000000..d688477
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/references.go
@@ -0,0 +1,81 @@
1package lang
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/lang/blocktoattr"
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
11// References finds all of the references in the given set of traversals,
12// returning diagnostics if any of the traversals cannot be interpreted as a
13// reference.
14//
15// This function does not do any de-duplication of references, since references
16// have source location information embedded in them and so any invalid
17// references that are duplicated should have errors reported for each
18// occurence.
19//
20// If the returned diagnostics contains errors then the result may be
21// incomplete or invalid. Otherwise, the returned slice has one reference per
22// given traversal, though it is not guaranteed that the references will
23// appear in the same order as the given traversals.
24func References(traversals []hcl.Traversal) ([]*addrs.Reference, tfdiags.Diagnostics) {
25 if len(traversals) == 0 {
26 return nil, nil
27 }
28
29 var diags tfdiags.Diagnostics
30 refs := make([]*addrs.Reference, 0, len(traversals))
31
32 for _, traversal := range traversals {
33 ref, refDiags := addrs.ParseRef(traversal)
34 diags = diags.Append(refDiags)
35 if ref == nil {
36 continue
37 }
38 refs = append(refs, ref)
39 }
40
41 return refs, diags
42}
43
44// ReferencesInBlock is a helper wrapper around References that first searches
45// the given body for traversals, before converting those traversals to
46// references.
47//
48// A block schema must be provided so that this function can determine where in
49// the body variables are expected.
50func ReferencesInBlock(body hcl.Body, schema *configschema.Block) ([]*addrs.Reference, tfdiags.Diagnostics) {
51 if body == nil {
52 return nil, nil
53 }
54
55 // We use blocktoattr.ExpandedVariables instead of hcldec.Variables or
56 // dynblock.VariablesHCLDec here because when we evaluate a block we'll
57 // first apply the dynamic block extension and _then_ the blocktoattr
58 // transform, and so blocktoattr.ExpandedVariables takes into account
59 // both of those transforms when it analyzes the body to ensure we find
60 // all of the references as if they'd already moved into their final
61 // locations, even though we can't expand dynamic blocks yet until we
62 // already know which variables are required.
63 //
64 // The set of cases we want to detect here is covered by the tests for
65 // the plan graph builder in the main 'terraform' package, since it's
66 // in a better position to test this due to having mock providers etc
67 // available.
68 traversals := blocktoattr.ExpandedVariables(body, schema)
69 return References(traversals)
70}
71
72// ReferencesInExpr is a helper wrapper around References that first searches
73// the given expression for traversals, before converting those traversals
74// to references.
75func ReferencesInExpr(expr hcl.Expression) ([]*addrs.Reference, tfdiags.Diagnostics) {
76 if expr == nil {
77 return nil, nil
78 }
79 traversals := expr.Variables()
80 return References(traversals)
81}
diff --git a/vendor/github.com/hashicorp/terraform/lang/scope.go b/vendor/github.com/hashicorp/terraform/lang/scope.go
new file mode 100644
index 0000000..98fca6b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/lang/scope.go
@@ -0,0 +1,34 @@
1package lang
2
3import (
4 "sync"
5
6 "github.com/zclconf/go-cty/cty/function"
7
8 "github.com/hashicorp/terraform/addrs"
9)
10
// Scope is the main type in this package, allowing dynamic evaluation of
// blocks and expressions based on some contextual information that informs
// which variables and functions will be available.
type Scope struct {
	// Data is used to resolve references in expressions.
	Data Data

	// SelfAddr is the address that the "self" object should be an alias of,
	// or nil if the "self" object should not be available at all.
	SelfAddr addrs.Referenceable

	// BaseDir is the base directory used by any interpolation functions that
	// accept filesystem paths as arguments.
	BaseDir string

	// PureOnly can be set to true to request that any non-pure functions
	// produce unknown value results rather than actually executing. This is
	// important during a plan phase to avoid generating results that could
	// then differ during apply.
	PureOnly bool

	// funcs caches the lazily-built function table, and funcsLock guards its
	// construction so that concurrent callers observe a single consistent map.
	funcs     map[string]function.Function
	funcsLock sync.Mutex
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/action.go b/vendor/github.com/hashicorp/terraform/plans/action.go
new file mode 100644
index 0000000..c3e6a32
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/action.go
@@ -0,0 +1,22 @@
1package plans
2
// Action describes a kind of planned change, represented as the rune
// conventionally used to symbolize it (e.g. '+' for create, '-' for delete).
// The zero value, NoOp, represents no change at all.
type Action rune

const (
	NoOp             Action = 0
	Create           Action = '+'
	Read             Action = '←'
	Update           Action = '~'
	DeleteThenCreate Action = '∓'
	CreateThenDelete Action = '±'
	Delete           Action = '-'
)

//go:generate stringer -type Action

// IsReplace returns true if the action is one of the two actions that
// represents replacing an existing object with a new object:
// DeleteThenCreate or CreateThenDelete.
func (a Action) IsReplace() bool {
	switch a {
	case DeleteThenCreate, CreateThenDelete:
		return true
	default:
		return false
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/action_string.go b/vendor/github.com/hashicorp/terraform/plans/action_string.go
new file mode 100644
index 0000000..be43ab1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/action_string.go
@@ -0,0 +1,49 @@
1// Code generated by "stringer -type Action"; DO NOT EDIT.
2
3package plans
4
5import "strconv"
6
// NOTE(review): this file is generated by the stringer tool; any edits here
// (including these comments) will be lost on regeneration. The function
// below exists only to break the build if the Action constant values drift
// from the values this file was generated against.
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[NoOp-0]
	_ = x[Create-43]
	_ = x[Read-8592]
	_ = x[Update-126]
	_ = x[DeleteThenCreate-8723]
	_ = x[CreateThenDelete-177]
	_ = x[Delete-45]
}

// Names for each Action constant; the numeric suffixes correspond to the
// switch cases in String below.
const (
	_Action_name_0 = "NoOp"
	_Action_name_1 = "Create"
	_Action_name_2 = "Delete"
	_Action_name_3 = "Update"
	_Action_name_4 = "CreateThenDelete"
	_Action_name_5 = "Read"
	_Action_name_6 = "DeleteThenCreate"
)

// String returns the name of the Action constant, or "Action(<n>)" for a
// value that does not match any known constant.
func (i Action) String() string {
	switch {
	case i == 0:
		return _Action_name_0
	case i == 43:
		return _Action_name_1
	case i == 45:
		return _Action_name_2
	case i == 126:
		return _Action_name_3
	case i == 177:
		return _Action_name_4
	case i == 8592:
		return _Action_name_5
	case i == 8723:
		return _Action_name_6
	default:
		return "Action(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes.go b/vendor/github.com/hashicorp/terraform/plans/changes.go
new file mode 100644
index 0000000..d7e0dcd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes.go
@@ -0,0 +1,308 @@
1package plans
2
3import (
4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/states"
6 "github.com/zclconf/go-cty/cty"
7)
8
// Changes describes various actions that Terraform will attempt to take if
// the corresponding plan is applied.
//
// A Changes object can be rendered into a visual diff (by the caller, using
// code in another package) for display to the user.
type Changes struct {
	// Resources tracks planned changes to resource instance objects.
	Resources []*ResourceInstanceChangeSrc

	// Outputs tracks planned changes to output values.
	//
	// Note that although an in-memory plan contains planned changes for
	// outputs throughout the configuration, a plan serialized
	// to disk retains only the root outputs because they are
	// externally-visible, while other outputs are implementation details and
	// can be easily re-calculated during the apply phase. Therefore only root
	// module outputs will survive a round-trip through a plan file.
	Outputs []*OutputChangeSrc
}
28
29// NewChanges returns a valid Changes object that describes no changes.
30func NewChanges() *Changes {
31 return &Changes{}
32}
33
34func (c *Changes) Empty() bool {
35 for _, res := range c.Resources {
36 if res.Action != NoOp {
37 return false
38 }
39 }
40 return true
41}
42
43// ResourceInstance returns the planned change for the current object of the
44// resource instance of the given address, if any. Returns nil if no change is
45// planned.
46func (c *Changes) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstanceChangeSrc {
47 addrStr := addr.String()
48 for _, rc := range c.Resources {
49 if rc.Addr.String() == addrStr && rc.DeposedKey == states.NotDeposed {
50 return rc
51 }
52 }
53
54 return nil
55}
56
57// ResourceInstanceDeposed returns the plan change of a deposed object of
58// the resource instance of the given address, if any. Returns nil if no change
59// is planned.
60func (c *Changes) ResourceInstanceDeposed(addr addrs.AbsResourceInstance, key states.DeposedKey) *ResourceInstanceChangeSrc {
61 addrStr := addr.String()
62 for _, rc := range c.Resources {
63 if rc.Addr.String() == addrStr && rc.DeposedKey == key {
64 return rc
65 }
66 }
67
68 return nil
69}
70
71// OutputValue returns the planned change for the output value with the
72// given address, if any. Returns nil if no change is planned.
73func (c *Changes) OutputValue(addr addrs.AbsOutputValue) *OutputChangeSrc {
74 addrStr := addr.String()
75 for _, oc := range c.Outputs {
76 if oc.Addr.String() == addrStr {
77 return oc
78 }
79 }
80
81 return nil
82}
83
84// SyncWrapper returns a wrapper object around the receiver that can be used
85// to make certain changes to the receiver in a concurrency-safe way, as long
86// as all callers share the same wrapper object.
87func (c *Changes) SyncWrapper() *ChangesSync {
88 return &ChangesSync{
89 changes: c,
90 }
91}
92
// ResourceInstanceChange describes a change to a particular resource instance
// object.
type ResourceInstanceChange struct {
	// Addr is the absolute address of the resource instance that the change
	// will apply to.
	Addr addrs.AbsResourceInstance

	// DeposedKey is the identifier for a deposed object associated with the
	// given instance, or states.NotDeposed if this change applies to the
	// current object.
	//
	// A Replace change for a resource with create_before_destroy set will
	// create a new DeposedKey temporarily during replacement. In that case,
	// DeposedKey in the plan is always states.NotDeposed, representing that
	// the current object is being replaced with the deposed.
	DeposedKey states.DeposedKey

	// ProviderAddr is the address of the provider configuration that was
	// used to plan this change, and thus the configuration that must also be
	// used to apply it.
	ProviderAddr addrs.AbsProviderConfig

	// Change is an embedded description of the change itself (action plus
	// before/after values).
	Change

	// RequiredReplace is a set of paths that caused the change action to be
	// Replace rather than Update. Always nil if the change action is not
	// Replace.
	//
	// This is retained only for UI-plan-rendering purposes and so it does not
	// currently survive a round-trip through a saved plan file.
	RequiredReplace cty.PathSet

	// Private allows a provider to stash any extra data that is opaque to
	// Terraform that relates to this change. Terraform will save this
	// byte-for-byte and return it to the provider in the apply call.
	Private []byte
}
131
132// Encode produces a variant of the reciever that has its change values
133// serialized so it can be written to a plan file. Pass the implied type of the
134// corresponding resource type schema for correct operation.
135func (rc *ResourceInstanceChange) Encode(ty cty.Type) (*ResourceInstanceChangeSrc, error) {
136 cs, err := rc.Change.Encode(ty)
137 if err != nil {
138 return nil, err
139 }
140 return &ResourceInstanceChangeSrc{
141 Addr: rc.Addr,
142 DeposedKey: rc.DeposedKey,
143 ProviderAddr: rc.ProviderAddr,
144 ChangeSrc: *cs,
145 RequiredReplace: rc.RequiredReplace,
146 Private: rc.Private,
147 }, err
148}
149
150// Simplify will, where possible, produce a change with a simpler action than
151// the receiever given a flag indicating whether the caller is dealing with
152// a normal apply or a destroy. This flag deals with the fact that Terraform
153// Core uses a specialized graph node type for destroying; only that
154// specialized node should set "destroying" to true.
155//
156// The following table shows the simplification behavior:
157//
158// Action Destroying? New Action
159// --------+-------------+-----------
160// Create true NoOp
161// Delete false NoOp
162// Replace true Delete
163// Replace false Create
164//
165// For any combination not in the above table, the Simplify just returns the
166// receiver as-is.
167func (rc *ResourceInstanceChange) Simplify(destroying bool) *ResourceInstanceChange {
168 if destroying {
169 switch rc.Action {
170 case Delete:
171 // We'll fall out and just return rc verbatim, then.
172 case CreateThenDelete, DeleteThenCreate:
173 return &ResourceInstanceChange{
174 Addr: rc.Addr,
175 DeposedKey: rc.DeposedKey,
176 Private: rc.Private,
177 ProviderAddr: rc.ProviderAddr,
178 Change: Change{
179 Action: Delete,
180 Before: rc.Before,
181 After: cty.NullVal(rc.Before.Type()),
182 },
183 }
184 default:
185 return &ResourceInstanceChange{
186 Addr: rc.Addr,
187 DeposedKey: rc.DeposedKey,
188 Private: rc.Private,
189 ProviderAddr: rc.ProviderAddr,
190 Change: Change{
191 Action: NoOp,
192 Before: rc.Before,
193 After: rc.Before,
194 },
195 }
196 }
197 } else {
198 switch rc.Action {
199 case Delete:
200 return &ResourceInstanceChange{
201 Addr: rc.Addr,
202 DeposedKey: rc.DeposedKey,
203 Private: rc.Private,
204 ProviderAddr: rc.ProviderAddr,
205 Change: Change{
206 Action: NoOp,
207 Before: rc.Before,
208 After: rc.Before,
209 },
210 }
211 case CreateThenDelete, DeleteThenCreate:
212 return &ResourceInstanceChange{
213 Addr: rc.Addr,
214 DeposedKey: rc.DeposedKey,
215 Private: rc.Private,
216 ProviderAddr: rc.ProviderAddr,
217 Change: Change{
218 Action: Create,
219 Before: cty.NullVal(rc.After.Type()),
220 After: rc.After,
221 },
222 }
223 }
224 }
225
226 // If we fall out here then our change is already simple enough.
227 return rc
228}
229
// OutputChange describes a change to an output value.
type OutputChange struct {
	// Addr is the absolute address of the output value that the change
	// will apply to.
	Addr addrs.AbsOutputValue

	// Change is an embedded description of the change.
	//
	// For output value changes, the type constraint for the DynamicValue
	// instances is always cty.DynamicPseudoType.
	Change

	// Sensitive, if true, indicates that either the old or new value in the
	// change is sensitive and so a rendered version of the plan in the UI
	// should elide the actual values while still indicating the action of the
	// change.
	Sensitive bool
}
248
249// Encode produces a variant of the reciever that has its change values
250// serialized so it can be written to a plan file.
251func (oc *OutputChange) Encode() (*OutputChangeSrc, error) {
252 cs, err := oc.Change.Encode(cty.DynamicPseudoType)
253 if err != nil {
254 return nil, err
255 }
256 return &OutputChangeSrc{
257 Addr: oc.Addr,
258 ChangeSrc: *cs,
259 Sensitive: oc.Sensitive,
260 }, err
261}
262
// Change describes a single change with a given action.
type Change struct {
	// Action defines what kind of change is being made.
	Action Action

	// Interpretation of Before and After depend on Action:
	//
	//     NoOp     Before and After are the same, unchanged value
	//     Create   Before is nil, and After is the expected value after create.
	//     Read     Before is any prior value (nil if no prior), and After is the
	//              value that was or will be read.
	//     Update   Before is the value prior to update, and After is the expected
	//              value after update.
	//     Replace  As with Update.
	//     Delete   Before is the value prior to delete, and After is always nil.
	//
	// Unknown values may appear anywhere within the Before and After values,
	// either as the values themselves or as nested elements within known
	// collections/structures.
	Before, After cty.Value
}
284
285// Encode produces a variant of the reciever that has its change values
286// serialized so it can be written to a plan file. Pass the type constraint
287// that the values are expected to conform to; to properly decode the values
288// later an identical type constraint must be provided at that time.
289//
290// Where a Change is embedded in some other struct, it's generally better
291// to call the corresponding Encode method of that struct rather than working
292// directly with its embedded Change.
293func (c *Change) Encode(ty cty.Type) (*ChangeSrc, error) {
294 beforeDV, err := NewDynamicValue(c.Before, ty)
295 if err != nil {
296 return nil, err
297 }
298 afterDV, err := NewDynamicValue(c.After, ty)
299 if err != nil {
300 return nil, err
301 }
302
303 return &ChangeSrc{
304 Action: c.Action,
305 Before: beforeDV,
306 After: afterDV,
307 }, nil
308}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_src.go b/vendor/github.com/hashicorp/terraform/plans/changes_src.go
new file mode 100644
index 0000000..90153ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes_src.go
@@ -0,0 +1,190 @@
1package plans
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/states"
8 "github.com/zclconf/go-cty/cty"
9)
10
// ResourceInstanceChangeSrc is a not-yet-decoded ResourceInstanceChange.
// Pass the associated resource type's schema type to method Decode to
// obtain a ResourceInstanceChange.
type ResourceInstanceChangeSrc struct {
	// Addr is the absolute address of the resource instance that the change
	// will apply to.
	Addr addrs.AbsResourceInstance

	// DeposedKey is the identifier for a deposed object associated with the
	// given instance, or states.NotDeposed if this change applies to the
	// current object.
	//
	// A Replace change for a resource with create_before_destroy set will
	// create a new DeposedKey temporarily during replacement. In that case,
	// DeposedKey in the plan is always states.NotDeposed, representing that
	// the current object is being replaced with the deposed.
	DeposedKey states.DeposedKey

	// ProviderAddr is the address of the provider configuration that was
	// used to plan this change, and thus the configuration that must also be
	// used to apply it.
	ProviderAddr addrs.AbsProviderConfig

	// ChangeSrc is an embedded description of the not-yet-decoded change.
	ChangeSrc

	// RequiredReplace is a set of paths that caused the change action to be
	// Replace rather than Update. Always nil if the change action is not
	// Replace.
	//
	// This is retained only for UI-plan-rendering purposes and so it does not
	// currently survive a round-trip through a saved plan file.
	RequiredReplace cty.PathSet

	// Private allows a provider to stash any extra data that is opaque to
	// Terraform that relates to this change. Terraform will save this
	// byte-for-byte and return it to the provider in the apply call.
	Private []byte
}
50
51// Decode unmarshals the raw representation of the instance object being
52// changed. Pass the implied type of the corresponding resource type schema
53// for correct operation.
54func (rcs *ResourceInstanceChangeSrc) Decode(ty cty.Type) (*ResourceInstanceChange, error) {
55 change, err := rcs.ChangeSrc.Decode(ty)
56 if err != nil {
57 return nil, err
58 }
59 return &ResourceInstanceChange{
60 Addr: rcs.Addr,
61 DeposedKey: rcs.DeposedKey,
62 ProviderAddr: rcs.ProviderAddr,
63 Change: *change,
64 RequiredReplace: rcs.RequiredReplace,
65 Private: rcs.Private,
66 }, nil
67}
68
69// DeepCopy creates a copy of the receiver where any pointers to nested mutable
70// values are also copied, thus ensuring that future mutations of the receiver
71// will not affect the copy.
72//
73// Some types used within a resource change are immutable by convention even
74// though the Go language allows them to be mutated, such as the types from
75// the addrs package. These are _not_ copied by this method, under the
76// assumption that callers will behave themselves.
77func (rcs *ResourceInstanceChangeSrc) DeepCopy() *ResourceInstanceChangeSrc {
78 if rcs == nil {
79 return nil
80 }
81 ret := *rcs
82
83 ret.RequiredReplace = cty.NewPathSet(ret.RequiredReplace.List()...)
84
85 if len(ret.Private) != 0 {
86 private := make([]byte, len(ret.Private))
87 copy(private, ret.Private)
88 ret.Private = private
89 }
90
91 ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()
92 ret.ChangeSrc.After = ret.ChangeSrc.After.Copy()
93
94 return &ret
95}
96
// OutputChangeSrc describes a change to an output value, with the change
// values not yet decoded (see OutputChange for the decoded form).
type OutputChangeSrc struct {
	// Addr is the absolute address of the output value that the change
	// will apply to.
	Addr addrs.AbsOutputValue

	// ChangeSrc is an embedded description of the not-yet-decoded change.
	//
	// For output value changes, the type constraint for the DynamicValue
	// instances is always cty.DynamicPseudoType.
	ChangeSrc

	// Sensitive, if true, indicates that either the old or new value in the
	// change is sensitive and so a rendered version of the plan in the UI
	// should elide the actual values while still indicating the action of the
	// change.
	Sensitive bool
}
115
116// Decode unmarshals the raw representation of the output value being
117// changed.
118func (ocs *OutputChangeSrc) Decode() (*OutputChange, error) {
119 change, err := ocs.ChangeSrc.Decode(cty.DynamicPseudoType)
120 if err != nil {
121 return nil, err
122 }
123 return &OutputChange{
124 Addr: ocs.Addr,
125 Change: *change,
126 Sensitive: ocs.Sensitive,
127 }, nil
128}
129
130// DeepCopy creates a copy of the receiver where any pointers to nested mutable
131// values are also copied, thus ensuring that future mutations of the receiver
132// will not affect the copy.
133//
134// Some types used within a resource change are immutable by convention even
135// though the Go language allows them to be mutated, such as the types from
136// the addrs package. These are _not_ copied by this method, under the
137// assumption that callers will behave themselves.
138func (ocs *OutputChangeSrc) DeepCopy() *OutputChangeSrc {
139 if ocs == nil {
140 return nil
141 }
142 ret := *ocs
143
144 ret.ChangeSrc.Before = ret.ChangeSrc.Before.Copy()
145 ret.ChangeSrc.After = ret.ChangeSrc.After.Copy()
146
147 return &ret
148}
149
// ChangeSrc is a not-yet-decoded Change.
type ChangeSrc struct {
	// Action defines what kind of change is being made.
	Action Action

	// Before and After correspond to the fields of the same name in Change,
	// but have not yet been decoded from the serialized value used for
	// storage.
	Before, After DynamicValue
}
160
161// Decode unmarshals the raw representations of the before and after values
162// to produce a Change object. Pass the type constraint that the result must
163// conform to.
164//
165// Where a ChangeSrc is embedded in some other struct, it's generally better
166// to call the corresponding Decode method of that struct rather than working
167// directly with its embedded Change.
168func (cs *ChangeSrc) Decode(ty cty.Type) (*Change, error) {
169 var err error
170 before := cty.NullVal(ty)
171 after := cty.NullVal(ty)
172
173 if len(cs.Before) > 0 {
174 before, err = cs.Before.Decode(ty)
175 if err != nil {
176 return nil, fmt.Errorf("error decoding 'before' value: %s", err)
177 }
178 }
179 if len(cs.After) > 0 {
180 after, err = cs.After.Decode(ty)
181 if err != nil {
182 return nil, fmt.Errorf("error decoding 'after' value: %s", err)
183 }
184 }
185 return &Change{
186 Action: cs.Action,
187 Before: before,
188 After: after,
189 }, nil
190}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_state.go b/vendor/github.com/hashicorp/terraform/plans/changes_state.go
new file mode 100644
index 0000000..543e6c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes_state.go
@@ -0,0 +1,15 @@
1package plans
2
3import (
4 "github.com/hashicorp/terraform/states"
5)
6
// PlannedState merges the set of changes described by the receiver into the
// given prior state to produce the planned result state.
//
// The result is an approximation of the state as it would exist after
// applying these changes, omitting any values that cannot be determined until
// the changes are actually applied.
//
// NOTE: this is currently an unimplemented stub and will panic if called.
func (c *Changes) PlannedState(prior *states.State) (*states.State, error) {
	panic("Changes.PlannedState not yet implemented")
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/changes_sync.go b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go
new file mode 100644
index 0000000..6b4ff98
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/changes_sync.go
@@ -0,0 +1,144 @@
1package plans
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/states"
9)
10
// ChangesSync is a wrapper around a Changes that provides a concurrency-safe
// interface to insert new changes and retrieve copies of existing changes.
//
// Each ChangesSync is independent of all others, so all concurrent writers
// to a particular Changes must share a single ChangesSync. Behavior is
// undefined if any other caller makes changes to the underlying Changes
// object or its nested objects concurrently with any of the methods of a
// particular ChangesSync.
type ChangesSync struct {
	// lock guards all access to changes through this wrapper's methods.
	lock sync.Mutex
	// changes is the shared Changes object being synchronized.
	changes *Changes
}
23
24// AppendResourceInstanceChange records the given resource instance change in
25// the set of planned resource changes.
26//
27// The caller must ensure that there are no concurrent writes to the given
28// change while this method is running, but it is safe to resume mutating
29// it after this method returns without affecting the saved change.
30func (cs *ChangesSync) AppendResourceInstanceChange(changeSrc *ResourceInstanceChangeSrc) {
31 if cs == nil {
32 panic("AppendResourceInstanceChange on nil ChangesSync")
33 }
34 cs.lock.Lock()
35 defer cs.lock.Unlock()
36
37 s := changeSrc.DeepCopy()
38 cs.changes.Resources = append(cs.changes.Resources, s)
39}
40
41// GetResourceInstanceChange searches the set of resource instance changes for
42// one matching the given address and generation, returning it if it exists.
43//
44// If no such change exists, nil is returned.
45//
46// The returned object is a deep copy of the change recorded in the plan, so
47// callers may mutate it although it's generally better (less confusing) to
48// treat planned changes as immutable after they've been initially constructed.
49func (cs *ChangesSync) GetResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) *ResourceInstanceChangeSrc {
50 if cs == nil {
51 panic("GetResourceInstanceChange on nil ChangesSync")
52 }
53 cs.lock.Lock()
54 defer cs.lock.Unlock()
55
56 if gen == states.CurrentGen {
57 return cs.changes.ResourceInstance(addr).DeepCopy()
58 }
59 if dk, ok := gen.(states.DeposedKey); ok {
60 return cs.changes.ResourceInstanceDeposed(addr, dk).DeepCopy()
61 }
62 panic(fmt.Sprintf("unsupported generation value %#v", gen))
63}
64
65// RemoveResourceInstanceChange searches the set of resource instance changes
66// for one matching the given address and generation, and removes it from the
67// set if it exists.
68func (cs *ChangesSync) RemoveResourceInstanceChange(addr addrs.AbsResourceInstance, gen states.Generation) {
69 if cs == nil {
70 panic("RemoveResourceInstanceChange on nil ChangesSync")
71 }
72 cs.lock.Lock()
73 defer cs.lock.Unlock()
74
75 dk := states.NotDeposed
76 if realDK, ok := gen.(states.DeposedKey); ok {
77 dk = realDK
78 }
79
80 addrStr := addr.String()
81 for i, r := range cs.changes.Resources {
82 if r.Addr.String() != addrStr || r.DeposedKey != dk {
83 continue
84 }
85 copy(cs.changes.Resources[i:], cs.changes.Resources[i+1:])
86 cs.changes.Resources = cs.changes.Resources[:len(cs.changes.Resources)-1]
87 return
88 }
89}
90
91// AppendOutputChange records the given output value change in the set of
92// planned value changes.
93//
94// The caller must ensure that there are no concurrent writes to the given
95// change while this method is running, but it is safe to resume mutating
96// it after this method returns without affecting the saved change.
97func (cs *ChangesSync) AppendOutputChange(changeSrc *OutputChangeSrc) {
98 if cs == nil {
99 panic("AppendOutputChange on nil ChangesSync")
100 }
101 cs.lock.Lock()
102 defer cs.lock.Unlock()
103
104 s := changeSrc.DeepCopy()
105 cs.changes.Outputs = append(cs.changes.Outputs, s)
106}
107
108// GetOutputChange searches the set of output value changes for one matching
109// the given address, returning it if it exists.
110//
111// If no such change exists, nil is returned.
112//
113// The returned object is a deep copy of the change recorded in the plan, so
114// callers may mutate it although it's generally better (less confusing) to
115// treat planned changes as immutable after they've been initially constructed.
116func (cs *ChangesSync) GetOutputChange(addr addrs.AbsOutputValue) *OutputChangeSrc {
117 if cs == nil {
118 panic("GetOutputChange on nil ChangesSync")
119 }
120 cs.lock.Lock()
121 defer cs.lock.Unlock()
122
123 return cs.changes.OutputValue(addr)
124}
125
126// RemoveOutputChange searches the set of output value changes for one matching
127// the given address, and removes it from the set if it exists.
128func (cs *ChangesSync) RemoveOutputChange(addr addrs.AbsOutputValue) {
129 if cs == nil {
130 panic("RemoveOutputChange on nil ChangesSync")
131 }
132 cs.lock.Lock()
133 defer cs.lock.Unlock()
134
135 addrStr := addr.String()
136 for i, o := range cs.changes.Outputs {
137 if o.Addr.String() != addrStr {
138 continue
139 }
140 copy(cs.changes.Outputs[i:], cs.changes.Outputs[i+1:])
141 cs.changes.Outputs = cs.changes.Outputs[:len(cs.changes.Outputs)-1]
142 return
143 }
144}
diff --git a/vendor/github.com/hashicorp/terraform/plans/doc.go b/vendor/github.com/hashicorp/terraform/plans/doc.go
new file mode 100644
index 0000000..01ca389
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/doc.go
@@ -0,0 +1,5 @@
1// Package plans contains the types that are used to represent Terraform plans.
2//
3// A plan describes a set of changes that Terraform will make to update remote
4// objects to match with changes to the configuration.
5package plans
diff --git a/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go
new file mode 100644
index 0000000..51fbb24
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/dynamic_value.go
@@ -0,0 +1,96 @@
1package plans
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 ctymsgpack "github.com/zclconf/go-cty/cty/msgpack"
6)
7
// DynamicValue is the representation in the plan of a value whose type cannot
// be determined at compile time, such as because it comes from a schema
// defined in a plugin.
//
// This type is used as an indirection so that the overall plan structure can
// be decoded without schema available, and then the dynamic values accessed
// at a later time once the appropriate schema has been determined.
//
// Internally, DynamicValue is a serialized version of a cty.Value created
// against a particular type constraint. Callers should not access directly
// the serialized form, whose format may change in future. Values of this
// type must always be created by calling NewDynamicValue.
//
// The zero value of DynamicValue is nil, and represents the absence of a
// value within the Go type system. This is distinct from a cty.NullVal
// result, which represents the absence of a value within the cty type system.
type DynamicValue []byte
25
// NewDynamicValue creates a DynamicValue by serializing the given value
// against the given type constraint. The value must conform to the type
// constraint, or the result is undefined.
//
// If the value to be encoded has no predefined schema (for example, for
// module output values and input variables), set the type constraint to
// cty.DynamicPseudoType in order to save type information as part of the
// value, and then also pass cty.DynamicPseudoType to method Decode to recover
// the original value.
//
// cty.NilVal can be used to represent the absence of a value, but callers
// must be careful to distinguish values that are absent at the Go layer
// (cty.NilVal) vs. values that are absent at the cty layer (cty.NullVal
// results).
func NewDynamicValue(val cty.Value, ty cty.Type) (DynamicValue, error) {
	// If we're given cty.NilVal (the zero value of cty.Value, which is
	// distinct from a typed null value created by cty.NullVal) then we'll
	// assume the caller is trying to represent the _absence_ of a value,
	// and so we'll return a nil DynamicValue.
	if val == cty.NilVal {
		return DynamicValue(nil), nil
	}

	// Currently our internal encoding is msgpack, via ctymsgpack.
	buf, err := ctymsgpack.Marshal(val, ty)
	if err != nil {
		return nil, err
	}

	return DynamicValue(buf), nil
}
57
// Decode retrieves the effective value from the receiver by interpreting the
// serialized form against the given type constraint. For correct results,
// the type constraint must match (or be consistent with) the one that was
// used to create the receiver.
//
// A nil DynamicValue decodes to cty.NilVal, which is not a valid value and
// instead represents the absence of a value.
func (v DynamicValue) Decode(ty cty.Type) (cty.Value, error) {
	if v == nil {
		return cty.NilVal, nil
	}

	return ctymsgpack.Unmarshal([]byte(v), ty)
}
72
73// ImpliedType returns the type implied by the serialized structure of the
74// receiving value.
75//
76// This will not necessarily be exactly the type that was given when the
77// value was encoded, and in particular must not be used for values that
78// were encoded with their static type given as cty.DynamicPseudoType.
79// It is however safe to use this method for values that were encoded using
80// their runtime type as the conforming type, with the result being
81// semantically equivalent but with all lists and sets represented as tuples,
82// and maps as objects, due to ambiguities of the serialization.
83func (v DynamicValue) ImpliedType() (cty.Type, error) {
84 return ctymsgpack.ImpliedType([]byte(v))
85}
86
87// Copy produces a copy of the receiver with a distinct backing array.
88func (v DynamicValue) Copy() DynamicValue {
89 if v == nil {
90 return nil
91 }
92
93 ret := make(DynamicValue, len(v))
94 copy(ret, v)
95 return ret
96}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go
new file mode 100644
index 0000000..18a7e99
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/all_null.go
@@ -0,0 +1,18 @@
1package objchange
2
3import (
4 "github.com/hashicorp/terraform/configs/configschema"
5 "github.com/zclconf/go-cty/cty"
6)
7
// AllAttributesNull constructs a non-null cty.Value of the object type implied
// by the given schema that has all of its leaf attributes set to null and all
// of its nested block collections set to zero-length.
//
// This simulates what would result from decoding an empty configuration block
// with the given schema, except that it does not produce errors.
func AllAttributesNull(schema *configschema.Block) cty.Value {
	// "All attributes null" happens to be the definition of EmptyValue for
	// a Block, so we can just delegate to that.
	return schema.EmptyValue()
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
new file mode 100644
index 0000000..8b7ef43
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
@@ -0,0 +1,437 @@
1package objchange
2
3import (
4 "fmt"
5 "strconv"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/convert"
9
10 "github.com/hashicorp/terraform/configs/configschema"
11)
12
// AssertObjectCompatible checks whether the given "actual" value is a valid
// completion of the possibly-partially-unknown "planned" value.
//
// This means that any known leaf value in "planned" must be equal to the
// corresponding value in "actual", and various other similar constraints.
//
// Any inconsistencies are reported by returning a non-zero number of errors.
// These errors are usually (but not necessarily) cty.PathError values
// referring to a particular nested value within the "actual" value.
//
// The two values must have types that conform to the given schema's implied
// type, or this function will panic.
func AssertObjectCompatible(schema *configschema.Block, planned, actual cty.Value) []error {
	// Start the recursive walk with a nil path, representing the root of
	// the object.
	return assertObjectCompatible(schema, planned, actual, nil)
}
28
// assertObjectCompatible is the recursive implementation of
// AssertObjectCompatible, with "path" tracking the location of the values
// currently being compared so that returned errors can point at the
// offending nested value.
func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Value, path cty.Path) []error {
	var errs []error
	if planned.IsNull() && !actual.IsNull() {
		errs = append(errs, path.NewErrorf("was absent, but now present"))
		return errs
	}
	if actual.IsNull() && !planned.IsNull() {
		errs = append(errs, path.NewErrorf("was present, but now absent"))
		return errs
	}
	if planned.IsNull() {
		// No further checks possible if both values are null
		return errs
	}

	for name, attrS := range schema.Attributes {
		plannedV := planned.GetAttr(name)
		actualV := actual.GetAttr(name)

		path := append(path, cty.GetAttrStep{Name: name})
		moreErrs := assertValueCompatible(plannedV, actualV, path)
		if attrS.Sensitive {
			if len(moreErrs) > 0 {
				// Use a vague placeholder message instead, to avoid disclosing
				// sensitive information.
				errs = append(errs, path.NewErrorf("inconsistent values for sensitive attribute"))
			}
		} else {
			errs = append(errs, moreErrs...)
		}
	}
	for name, blockS := range schema.BlockTypes {
		plannedV := planned.GetAttr(name)
		actualV := actual.GetAttr(name)

		// As a special case, if there were any blocks whose leaf attributes
		// are all unknown then we assume (possibly incorrectly) that the
		// HCL dynamic block extension is in use with an unknown for_each
		// argument, and so we will do looser validation here that allows
		// for those blocks to have expanded into a different number of blocks
		// if the for_each value is now known.
		maybeUnknownBlocks := couldHaveUnknownBlockPlaceholder(plannedV, blockS, false)

		path := append(path, cty.GetAttrStep{Name: name})
		switch blockS.Nesting {
		case configschema.NestingSingle, configschema.NestingGroup:
			// If an unknown block placeholder was present then the placeholder
			// may have expanded out into zero blocks, which is okay.
			if maybeUnknownBlocks && actualV.IsNull() {
				continue
			}
			moreErrs := assertObjectCompatible(&blockS.Block, plannedV, actualV, path)
			errs = append(errs, moreErrs...)
		case configschema.NestingList:
			// A NestingList might either be a list or a tuple, depending on
			// whether there are dynamically-typed attributes inside. However,
			// both support a similar-enough API that we can treat them the
			// same for our purposes here.
			if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
				continue
			}

			if maybeUnknownBlocks {
				// When unknown blocks are present the final blocks may be
				// at different indices than the planned blocks, so unfortunately
				// we can't do our usual checks in this case without generating
				// false negatives.
				continue
			}

			plannedL := plannedV.LengthInt()
			actualL := actualV.LengthInt()
			if plannedL != actualL {
				errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
				continue
			}
			for it := plannedV.ElementIterator(); it.Next(); {
				idx, plannedEV := it.Element()
				if !actualV.HasIndex(idx).True() {
					continue
				}
				actualEV := actualV.Index(idx)
				moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
				errs = append(errs, moreErrs...)
			}
		case configschema.NestingMap:
			// A NestingMap might either be a map or an object, depending on
			// whether there are dynamically-typed attributes inside, but
			// that's decided statically and so both values will have the same
			// kind.
			if plannedV.Type().IsObjectType() {
				plannedAtys := plannedV.Type().AttributeTypes()
				actualAtys := actualV.Type().AttributeTypes()
				for k := range plannedAtys {
					if _, ok := actualAtys[k]; !ok {
						errs = append(errs, path.NewErrorf("block key %q has vanished", k))
						continue
					}

					plannedEV := plannedV.GetAttr(k)
					actualEV := actualV.GetAttr(k)
					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.GetAttrStep{Name: k}))
					errs = append(errs, moreErrs...)
				}
				if !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
					for k := range actualAtys {
						if _, ok := plannedAtys[k]; !ok {
							errs = append(errs, path.NewErrorf("new block key %q has appeared", k))
							continue
						}
					}
				}
			} else {
				if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
					continue
				}
				plannedL := plannedV.LengthInt()
				actualL := actualV.LengthInt()
				if plannedL != actualL && !maybeUnknownBlocks { // new blocks may appear if unknown blocks were present in the plan
					errs = append(errs, path.NewErrorf("block count changed from %d to %d", plannedL, actualL))
					continue
				}
				for it := plannedV.ElementIterator(); it.Next(); {
					idx, plannedEV := it.Element()
					if !actualV.HasIndex(idx).True() {
						continue
					}
					actualEV := actualV.Index(idx)
					moreErrs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: idx}))
					errs = append(errs, moreErrs...)
				}
			}
		case configschema.NestingSet:
			if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
				continue
			}

			setErrs := assertSetValuesCompatible(plannedV, actualV, path, func(plannedEV, actualEV cty.Value) bool {
				errs := assertObjectCompatible(&blockS.Block, plannedEV, actualEV, append(path, cty.IndexStep{Key: actualEV}))
				return len(errs) == 0
			})
			errs = append(errs, setErrs...)

			// There can be fewer elements in a set after its elements are all
			// known (values that turn out to be equal will coalesce) but the
			// number of elements must never get larger.
			plannedL := plannedV.LengthInt()
			actualL := actualV.LengthInt()
			if plannedL < actualL {
				errs = append(errs, path.NewErrorf("block set length changed from %d to %d", plannedL, actualL))
			}
		default:
			panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
		}
	}
	return errs
}
186
// assertValueCompatible checks that the "actual" value is a valid completion
// of the "planned" value for a single attribute, recursing into collection
// and structural types as needed. "path" locates the value being compared,
// for use in any returned errors.
func assertValueCompatible(planned, actual cty.Value, path cty.Path) []error {
	// NOTE: We don't normally use the GoString rendering of cty.Value in
	// user-facing error messages as a rule, but we make an exception
	// for this function because we expect the user to pass this message on
	// verbatim to the provider development team and so more detail is better.

	var errs []error
	if planned.Type() == cty.DynamicPseudoType {
		// Anything goes, then
		return errs
	}
	if problems := planned.Type().TestConformance(actual.Type()); len(problems) > 0 {
		errs = append(errs, path.NewErrorf("wrong final value type: %s", convert.MismatchMessage(actual.Type(), planned.Type())))
		// If the types don't match then we can't do any other comparisons,
		// so we bail early.
		return errs
	}

	if !planned.IsKnown() {
		// We didn't know what we were going to end up with during plan, so
		// anything goes during apply.
		return errs
	}

	if actual.IsNull() {
		if planned.IsNull() {
			return nil
		}
		errs = append(errs, path.NewErrorf("was %#v, but now null", planned))
		return errs
	}
	if planned.IsNull() {
		errs = append(errs, path.NewErrorf("was null, but now %#v", actual))
		return errs
	}

	ty := planned.Type()
	switch {

	case !actual.IsKnown():
		errs = append(errs, path.NewErrorf("was known, but now unknown"))

	case ty.IsPrimitiveType():
		if !actual.Equals(planned).True() {
			errs = append(errs, path.NewErrorf("was %#v, but now %#v", planned, actual))
		}

	case ty.IsListType() || ty.IsMapType() || ty.IsTupleType():
		for it := planned.ElementIterator(); it.Next(); {
			k, plannedV := it.Element()
			if !actual.HasIndex(k).True() {
				errs = append(errs, path.NewErrorf("element %s has vanished", indexStrForErrors(k)))
				continue
			}

			actualV := actual.Index(k)
			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: k}))
			errs = append(errs, moreErrs...)
		}

		for it := actual.ElementIterator(); it.Next(); {
			k, _ := it.Element()
			if !planned.HasIndex(k).True() {
				errs = append(errs, path.NewErrorf("new element %s has appeared", indexStrForErrors(k)))
			}
		}

	case ty.IsObjectType():
		atys := ty.AttributeTypes()
		for name := range atys {
			// Because we already tested that the two values have the same type,
			// we can assume that the same attributes are present in both and
			// focus just on testing their values.
			plannedV := planned.GetAttr(name)
			actualV := actual.GetAttr(name)
			moreErrs := assertValueCompatible(plannedV, actualV, append(path, cty.GetAttrStep{Name: name}))
			errs = append(errs, moreErrs...)
		}

	case ty.IsSetType():
		// We can't really do anything useful for sets here because changing
		// an unknown element to known changes the identity of the element, and
		// so we can't correlate them properly. However, we will at least check
		// to ensure that the number of elements is consistent, along with
		// the general type-match checks we ran earlier in this function.
		if planned.IsKnown() && !planned.IsNull() && !actual.IsNull() {

			setErrs := assertSetValuesCompatible(planned, actual, path, func(plannedV, actualV cty.Value) bool {
				errs := assertValueCompatible(plannedV, actualV, append(path, cty.IndexStep{Key: actualV}))
				return len(errs) == 0
			})
			errs = append(errs, setErrs...)

			// There can be fewer elements in a set after its elements are all
			// known (values that turn out to be equal will coalesce) but the
			// number of elements must never get larger.

			plannedL := planned.LengthInt()
			actualL := actual.LengthInt()
			if plannedL < actualL {
				errs = append(errs, path.NewErrorf("length changed from %d to %d", plannedL, actualL))
			}
		}
	}

	return errs
}
294
295func indexStrForErrors(v cty.Value) string {
296 switch v.Type() {
297 case cty.Number:
298 return v.AsBigFloat().Text('f', -1)
299 case cty.String:
300 return strconv.Quote(v.AsString())
301 default:
302 // Should be impossible, since no other index types are allowed!
303 return fmt.Sprintf("%#v", v)
304 }
305}
306
// couldHaveUnknownBlockPlaceholder is a heuristic that recognizes how the
// HCL dynamic block extension behaves when it's asked to expand a block whose
// for_each argument is unknown. In such cases, it generates a single placeholder
// block with all leaf attribute values unknown, and once the for_each
// expression becomes known the placeholder may be replaced with any number
// of blocks, so object compatibility checks would need to be more liberal.
//
// Set "nested" if testing a block that is nested inside a candidate block
// placeholder; this changes the interpretation of there being no blocks of
// a type to allow for there being zero nested blocks.
//
// NOTE(review): as a heuristic this can produce false positives; callers use
// the result only to relax checks, never to report errors directly.
func couldHaveUnknownBlockPlaceholder(v cty.Value, blockS *configschema.NestedBlock, nested bool) bool {
	switch blockS.Nesting {
	case configschema.NestingSingle, configschema.NestingGroup:
		if nested && v.IsNull() {
			return true // for nested blocks, a single block being unset doesn't disqualify from being an unknown block placeholder
		}
		return couldBeUnknownBlockPlaceholderElement(v, &blockS.Block)
	default:
		// These situations should be impossible for correct providers, but
		// we permit the legacy SDK to produce some incorrect outcomes
		// for compatibility with its existing logic, and so we must be
		// tolerant here.
		if !v.IsKnown() {
			return true
		}
		if v.IsNull() {
			return false // treated as if the list were empty, so we would see zero iterations below
		}

		// For all other nesting modes, our value should be something iterable.
		for it := v.ElementIterator(); it.Next(); {
			_, ev := it.Element()
			if couldBeUnknownBlockPlaceholderElement(ev, &blockS.Block) {
				return true
			}
		}

		// Our default changes depending on whether we're testing the candidate
		// block itself or something nested inside of it: zero blocks of a type
		// can never contain a dynamic block placeholder, but a dynamic block
		// placeholder might contain zero blocks of one of its own nested block
		// types, if none were set in the config at all.
		return nested
	}
}
352
// couldBeUnknownBlockPlaceholderElement checks whether a single object value
// has the shape of a dynamic-block expansion placeholder: every leaf
// attribute is unknown or null, and every nested block type could itself
// be (or contain) such a placeholder.
func couldBeUnknownBlockPlaceholderElement(v cty.Value, schema *configschema.Block) bool {
	if v.IsNull() {
		return false // null value can never be a placeholder element
	}
	if !v.IsKnown() {
		return true // this should never happen for well-behaved providers, but can happen with the legacy SDK opt-outs
	}
	for name := range schema.Attributes {
		av := v.GetAttr(name)

		// Unknown block placeholders contain only unknown or null attribute
		// values, depending on whether or not a particular attribute was set
		// explicitly inside the content block. Note that this is imprecise:
		// non-placeholders can also match this, so this function can generate
		// false positives.
		if av.IsKnown() && !av.IsNull() {
			return false
		}
	}
	for name, blockS := range schema.BlockTypes {
		if !couldHaveUnknownBlockPlaceholder(v.GetAttr(name), blockS, true) {
			return false
		}
	}
	return true
}
379
// assertSetValuesCompatible checks that each of the elements in a can
// be correlated with at least one equivalent element in b and vice-versa,
// using the given correlation function.
//
// This allows the number of elements in the sets to change as long as all
// elements in both sets can be correlated, making this function safe to use
// with sets that may contain unknown values as long as the unknown case is
// addressed in some reasonable way in the callback function.
//
// The callback always receives values from set a as its first argument and
// values from set b in its second argument, so it is safe to use with
// non-commutative functions.
//
// As with assertValueCompatible, we assume that the target audience of error
// messages here is a provider developer (via a bug report from a user) and so
// we intentionally violate our usual rule of keeping cty implementation
// details out of error messages.
func assertSetValuesCompatible(planned, actual cty.Value, path cty.Path, f func(aVal, bVal cty.Value) bool) []error {
	a := planned
	b := actual

	// Our methodology here is a little tricky, to deal with the fact that
	// it's impossible to directly correlate two non-equal set elements because
	// they don't have identities separate from their values.
	// The approach is to count the number of equivalent elements each element
	// of a has in b and vice-versa, and then return true only if each element
	// in both sets has at least one equivalent.
	// (Note: this is O(len(a)*len(b)) calls to f, which is acceptable only
	// because block sets are expected to be small.)
	as := a.AsValueSlice()
	bs := b.AsValueSlice()
	aeqs := make([]bool, len(as))
	beqs := make([]bool, len(bs))
	for ai, av := range as {
		for bi, bv := range bs {
			if f(av, bv) {
				aeqs[ai] = true
				beqs[bi] = true
			}
		}
	}

	var errs []error
	for i, eq := range aeqs {
		if !eq {
			errs = append(errs, path.NewErrorf("planned set element %#v does not correlate with any element in actual", as[i]))
		}
	}
	if len(errs) > 0 {
		// Exit early since otherwise we're likely to generate duplicate
		// error messages from the other perspective in the subsequent loop.
		return errs
	}
	for i, eq := range beqs {
		if !eq {
			errs = append(errs, path.NewErrorf("actual set element %#v does not correlate with any element in plan", bs[i]))
		}
	}
	return errs
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
new file mode 100644
index 0000000..2c18a01
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/doc.go
@@ -0,0 +1,4 @@
1// Package objchange deals with the business logic of taking a prior state
2// value and a config value and producing a proposed new merged value, along
3// with other related rules in this domain.
4package objchange
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
new file mode 100644
index 0000000..cbfefdd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/lcs.go
@@ -0,0 +1,104 @@
1package objchange
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
// LongestCommonSubsequence finds a sequence of values that are common to both
// x and y, with the same relative ordering as in both collections. This result
// is useful as a first step towards computing a diff showing added/removed
// elements in a sequence.
//
// The approach used here is a "naive" one, assuming that both xs and ys will
// generally be small in most reasonable Terraform configurations. For larger
// lists the time/space usage may be sub-optimal (O(len(xs)*len(ys)) in both).
//
// A pair of lists may have multiple longest common subsequences. In that
// case, the one selected by this function is undefined.
func LongestCommonSubsequence(xs, ys []cty.Value) []cty.Value {
	if len(xs) == 0 || len(ys) == 0 {
		return make([]cty.Value, 0)
	}

	// c is the standard LCS dynamic-programming matrix, stored row-major
	// with row length w: c[(w*y)+x] is the LCS length considering xs[:x+1]
	// and ys[:y+1].
	c := make([]int, len(xs)*len(ys))
	eqs := make([]bool, len(xs)*len(ys))
	w := len(xs)

	for y := 0; y < len(ys); y++ {
		for x := 0; x < len(xs); x++ {
			eqV := xs[x].Equals(ys[y])
			eq := false
			if eqV.IsKnown() && eqV.True() {
				eq = true
				eqs[(w*y)+x] = true // equality tests can be expensive, so cache it
			}
			if eq {
				// Sequence gets one longer than for the cell at top left,
				// since we'd append a new item to the sequence here.
				if x == 0 || y == 0 {
					c[(w*y)+x] = 1
				} else {
					c[(w*y)+x] = c[(w*(y-1))+(x-1)] + 1
				}
			} else {
				// We follow the longest of the sequence above and the sequence
				// to the left of us in the matrix.
				l := 0
				u := 0
				if x > 0 {
					l = c[(w*y)+(x-1)]
				}
				if y > 0 {
					u = c[(w*(y-1))+x]
				}
				if l > u {
					c[(w*y)+x] = l
				} else {
					c[(w*y)+x] = u
				}
			}
		}
	}

	// The bottom right cell tells us how long our longest sequence will be
	seq := make([]cty.Value, c[len(c)-1])

	// Now we will walk back from the bottom right cell, finding again all
	// of the equal pairs to construct our sequence.
	x := len(xs) - 1
	y := len(ys) - 1
	i := len(seq) - 1

	for x > -1 && y > -1 {
		if eqs[(w*y)+x] {
			// Add the value to our result list and then walk diagonally
			// up and to the left.
			seq[i] = xs[x]
			x--
			y--
			i--
		} else {
			// Take the path with the greatest sequence length in the matrix.
			l := 0
			u := 0
			if x > 0 {
				l = c[(w*y)+(x-1)]
			}
			if y > 0 {
				u = c[(w*(y-1))+x]
			}
			if l > u {
				x--
			} else {
				y--
			}
		}
	}

	if i > -1 {
		// should never happen if the matrix was constructed properly
		panic("not enough elements in sequence")
	}

	return seq
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go b/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go
new file mode 100644
index 0000000..c23f44d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/normalize_obj.go
@@ -0,0 +1,132 @@
1package objchange
2
3import (
4 "github.com/hashicorp/terraform/configs/configschema"
5 "github.com/zclconf/go-cty/cty"
6)
7
// NormalizeObjectFromLegacySDK takes an object that may have been generated
// by the legacy Terraform SDK (i.e. returned from a provider with the
// LegacyTypeSystem opt-out set) and does its best to normalize it for the
// assumptions we would normally enforce if the provider had not opted out.
//
// In particular, this function guarantees that a value representing a nested
// block will never itself be unknown or null, instead representing that as
// a non-null value that may contain null/unknown values.
//
// The input value must still conform to the implied type of the given schema,
// or else this function may produce garbage results or panic. This is usually
// okay because type consistency is enforced when deserializing the value
// returned from the provider over the RPC wire protocol anyway.
func NormalizeObjectFromLegacySDK(val cty.Value, schema *configschema.Block) cty.Value {
	if val == cty.NilVal || val.IsNull() {
		// This should never happen in reasonable use, but we'll allow it
		// and normalize to a null of the expected type rather than panicking
		// below.
		return cty.NullVal(schema.ImpliedType())
	}

	vals := make(map[string]cty.Value)
	for name := range schema.Attributes {
		// No normalization for attributes, since them being type-conformant
		// is all that we require.
		vals[name] = val.GetAttr(name)
	}
	for name, blockS := range schema.BlockTypes {
		lv := val.GetAttr(name)

		// Legacy SDK never generates dynamically-typed attributes and so our
		// normalization code doesn't deal with them, but we need to make sure
		// we still pass them through properly so that we don't interfere with
		// objects generated by other SDKs.
		if ty := blockS.Block.ImpliedType(); ty.HasDynamicTypes() {
			vals[name] = lv
			continue
		}

		switch blockS.Nesting {
		case configschema.NestingSingle, configschema.NestingGroup:
			if lv.IsKnown() {
				if lv.IsNull() && blockS.Nesting == configschema.NestingGroup {
					vals[name] = blockS.EmptyValue()
				} else {
					vals[name] = NormalizeObjectFromLegacySDK(lv, &blockS.Block)
				}
			} else {
				// Blocks themselves can't be unknown, so substitute a known
				// block whose leaf attributes are all unknown.
				vals[name] = unknownBlockStub(&blockS.Block)
			}
		case configschema.NestingList:
			switch {
			case !lv.IsKnown():
				vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
			case lv.IsNull() || lv.LengthInt() == 0:
				vals[name] = cty.ListValEmpty(blockS.Block.ImpliedType())
			default:
				subVals := make([]cty.Value, 0, lv.LengthInt())
				for it := lv.ElementIterator(); it.Next(); {
					_, subVal := it.Element()
					subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block))
				}
				vals[name] = cty.ListVal(subVals)
			}
		case configschema.NestingSet:
			switch {
			case !lv.IsKnown():
				vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
			case lv.IsNull() || lv.LengthInt() == 0:
				vals[name] = cty.SetValEmpty(blockS.Block.ImpliedType())
			default:
				subVals := make([]cty.Value, 0, lv.LengthInt())
				for it := lv.ElementIterator(); it.Next(); {
					_, subVal := it.Element()
					subVals = append(subVals, NormalizeObjectFromLegacySDK(subVal, &blockS.Block))
				}
				vals[name] = cty.SetVal(subVals)
			}
		default:
			// The legacy SDK doesn't support NestingMap, so we just assume
			// maps are always okay. (If not, we would've detected and returned
			// an error to the user before we got here.)
			vals[name] = lv
		}
	}
	return cty.ObjectVal(vals)
}
95
// unknownBlockStub constructs an object value that approximates an unknown
// block by producing a known block object with all of its leaf attribute
// values set to unknown.
//
// Blocks themselves cannot be unknown, so if the legacy SDK tries to return
// such a thing, we'll use this result instead. This convention mimics how
// the dynamic block feature deals with being asked to iterate over an unknown
// value, because our value-checking functions already accept this convention
// as a special case.
func unknownBlockStub(schema *configschema.Block) cty.Value {
	vals := make(map[string]cty.Value)
	for name, attrS := range schema.Attributes {
		vals[name] = cty.UnknownVal(attrS.Type)
	}
	for name, blockS := range schema.BlockTypes {
		switch blockS.Nesting {
		case configschema.NestingSingle, configschema.NestingGroup:
			vals[name] = unknownBlockStub(&blockS.Block)
		case configschema.NestingList:
			// In principle we may be expected to produce a tuple value here,
			// if there are any dynamically-typed attributes in our nested block,
			// but the legacy SDK doesn't support that, so we just assume it'll
			// never be necessary to normalize those. (Incorrect usage in any
			// other SDK would be caught and returned as an error before we
			// get here.)
			vals[name] = cty.ListVal([]cty.Value{unknownBlockStub(&blockS.Block)})
		case configschema.NestingSet:
			vals[name] = cty.SetVal([]cty.Value{unknownBlockStub(&blockS.Block)})
		case configschema.NestingMap:
			// A nesting map can never be unknown since we then wouldn't know
			// what the keys are. (Legacy SDK doesn't support NestingMap anyway,
			// so this should never arise.)
			vals[name] = cty.MapValEmpty(blockS.Block.ImpliedType())
		}
	}
	return cty.ObjectVal(vals)
}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
new file mode 100644
index 0000000..5a8af14
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/objchange.go
@@ -0,0 +1,390 @@
1package objchange
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/configs/configschema"
9)
10
11// ProposedNewObject constructs a proposed new object value by combining the
12// computed attribute values from "prior" with the configured attribute values
13// from "config".
14//
 15// Both values must conform to the given schema's implied type, or this function
16// will panic.
17//
18// The prior value must be wholly known, but the config value may be unknown
19// or have nested unknown values.
20//
21// The merging of the two objects includes the attributes of any nested blocks,
22// which will be correlated in a manner appropriate for their nesting mode.
23// Note in particular that the correlation for blocks backed by sets is a
24// heuristic based on matching non-computed attribute values and so it may
25// produce strange results with more "extreme" cases, such as a nested set
26// block where _all_ attributes are computed.
27func ProposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
28 // If the config and prior are both null, return early here before
 29 // populating the prior block. This prevents non-null blocks from appearing
 30 // in the proposed state value.
31 if config.IsNull() && prior.IsNull() {
32 return prior
33 }
34
35 if prior.IsNull() {
36 // In this case, we will construct a synthetic prior value that is
37 // similar to the result of decoding an empty configuration block,
38 // which simplifies our handling of the top-level attributes/blocks
39 // below by giving us one non-null level of object to pull values from.
40 prior = AllAttributesNull(schema)
41 }
42 return proposedNewObject(schema, prior, config)
43}
44
45// PlannedDataResourceObject is similar to ProposedNewObject but tailored for
46// planning data resources in particular. Specifically, it replaces the values
47// of any Computed attributes not set in the configuration with an unknown
48// value, which serves as a placeholder for a value to be filled in by the
49// provider when the data resource is finally read.
50//
51// Data resources are different because the planning of them is handled
52// entirely within Terraform Core and not subject to customization by the
53// provider. This function is, in effect, producing an equivalent result to
54// passing the ProposedNewObject result into a provider's PlanResourceChange
55// function, assuming a fixed implementation of PlanResourceChange that just
56// fills in unknown values as needed.
57func PlannedDataResourceObject(schema *configschema.Block, config cty.Value) cty.Value {
58 // Our trick here is to run the ProposedNewObject logic with an
59 // entirely-unknown prior value. Because of cty's unknown short-circuit
60 // behavior, any operation on prior returns another unknown, and so
61 // unknown values propagate into all of the parts of the resulting value
62 // that would normally be filled in by preserving the prior state.
63 prior := cty.UnknownVal(schema.ImpliedType())
64 return proposedNewObject(schema, prior, config)
65}
66
67func proposedNewObject(schema *configschema.Block, prior, config cty.Value) cty.Value {
68 if config.IsNull() || !config.IsKnown() {
69 // This is a weird situation, but we'll allow it anyway to free
70 // callers from needing to specifically check for these cases.
71 return prior
72 }
73 if (!prior.Type().IsObjectType()) || (!config.Type().IsObjectType()) {
74 panic("ProposedNewObject only supports object-typed values")
75 }
76
77 // From this point onwards, we can assume that both values are non-null
78 // object types, and that the config value itself is known (though it
79 // may contain nested values that are unknown.)
80
81 newAttrs := map[string]cty.Value{}
82 for name, attr := range schema.Attributes {
83 priorV := prior.GetAttr(name)
84 configV := config.GetAttr(name)
85 var newV cty.Value
86 switch {
87 case attr.Computed && attr.Optional:
88 // This is the trickiest scenario: we want to keep the prior value
89 // if the config isn't overriding it. Note that due to some
90 // ambiguity here, setting an optional+computed attribute from
91 // config and then later switching the config to null in a
92 // subsequent change causes the initial config value to be "sticky"
93 // unless the provider specifically overrides it during its own
94 // plan customization step.
95 if configV.IsNull() {
96 newV = priorV
97 } else {
98 newV = configV
99 }
100 case attr.Computed:
101 // configV will always be null in this case, by definition.
102 // priorV may also be null, but that's okay.
103 newV = priorV
104 default:
105 // For non-computed attributes, we always take the config value,
106 // even if it is null. If it's _required_ then null values
107 // should've been caught during an earlier validation step, and
108 // so we don't really care about that here.
109 newV = configV
110 }
111 newAttrs[name] = newV
112 }
113
114 // Merging nested blocks is a little more complex, since we need to
115 // correlate blocks between both objects and then recursively propose
116 // a new object for each. The correlation logic depends on the nesting
117 // mode for each block type.
118 for name, blockType := range schema.BlockTypes {
119 priorV := prior.GetAttr(name)
120 configV := config.GetAttr(name)
121 var newV cty.Value
122 switch blockType.Nesting {
123
124 case configschema.NestingSingle, configschema.NestingGroup:
125 newV = ProposedNewObject(&blockType.Block, priorV, configV)
126
127 case configschema.NestingList:
128 // Nested blocks are correlated by index.
129 configVLen := 0
130 if configV.IsKnown() && !configV.IsNull() {
131 configVLen = configV.LengthInt()
132 }
133 if configVLen > 0 {
134 newVals := make([]cty.Value, 0, configVLen)
135 for it := configV.ElementIterator(); it.Next(); {
136 idx, configEV := it.Element()
137 if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) {
138 // If there is no corresponding prior element then
139 // we just take the config value as-is.
140 newVals = append(newVals, configEV)
141 continue
142 }
143 priorEV := priorV.Index(idx)
144
145 newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
146 newVals = append(newVals, newEV)
147 }
148 // Despite the name, a NestingList might also be a tuple, if
149 // its nested schema contains dynamically-typed attributes.
150 if configV.Type().IsTupleType() {
151 newV = cty.TupleVal(newVals)
152 } else {
153 newV = cty.ListVal(newVals)
154 }
155 } else {
156 // Despite the name, a NestingList might also be a tuple, if
157 // its nested schema contains dynamically-typed attributes.
158 if configV.Type().IsTupleType() {
159 newV = cty.EmptyTupleVal
160 } else {
161 newV = cty.ListValEmpty(blockType.ImpliedType())
162 }
163 }
164
165 case configschema.NestingMap:
166 // Despite the name, a NestingMap may produce either a map or
167 // object value, depending on whether the nested schema contains
168 // dynamically-typed attributes.
169 if configV.Type().IsObjectType() {
170 // Nested blocks are correlated by key.
171 configVLen := 0
172 if configV.IsKnown() && !configV.IsNull() {
173 configVLen = configV.LengthInt()
174 }
175 if configVLen > 0 {
176 newVals := make(map[string]cty.Value, configVLen)
177 atys := configV.Type().AttributeTypes()
178 for name := range atys {
179 configEV := configV.GetAttr(name)
180 if !priorV.IsKnown() || priorV.IsNull() || !priorV.Type().HasAttribute(name) {
181 // If there is no corresponding prior element then
182 // we just take the config value as-is.
183 newVals[name] = configEV
184 continue
185 }
186 priorEV := priorV.GetAttr(name)
187
188 newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
189 newVals[name] = newEV
190 }
191 // Although we call the nesting mode "map", we actually use
192 // object values so that elements might have different types
193 // in case of dynamically-typed attributes.
194 newV = cty.ObjectVal(newVals)
195 } else {
196 newV = cty.EmptyObjectVal
197 }
198 } else {
199 configVLen := 0
200 if configV.IsKnown() && !configV.IsNull() {
201 configVLen = configV.LengthInt()
202 }
203 if configVLen > 0 {
204 newVals := make(map[string]cty.Value, configVLen)
205 for it := configV.ElementIterator(); it.Next(); {
206 idx, configEV := it.Element()
207 k := idx.AsString()
208 if priorV.IsKnown() && (priorV.IsNull() || !priorV.HasIndex(idx).True()) {
209 // If there is no corresponding prior element then
210 // we just take the config value as-is.
211 newVals[k] = configEV
212 continue
213 }
214 priorEV := priorV.Index(idx)
215
216 newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
217 newVals[k] = newEV
218 }
219 newV = cty.MapVal(newVals)
220 } else {
221 newV = cty.MapValEmpty(blockType.ImpliedType())
222 }
223 }
224
225 case configschema.NestingSet:
226 if !configV.Type().IsSetType() {
227 panic("configschema.NestingSet value is not a set as expected")
228 }
229
230 // Nested blocks are correlated by comparing the element values
231 // after eliminating all of the computed attributes. In practice,
232 // this means that any config change produces an entirely new
233 // nested object, and we only propagate prior computed values
234 // if the non-computed attribute values are identical.
235 var cmpVals [][2]cty.Value
236 if priorV.IsKnown() && !priorV.IsNull() {
237 cmpVals = setElementCompareValues(&blockType.Block, priorV, false)
238 }
239 configVLen := 0
240 if configV.IsKnown() && !configV.IsNull() {
241 configVLen = configV.LengthInt()
242 }
243 if configVLen > 0 {
244 used := make([]bool, len(cmpVals)) // track used elements in case multiple have the same compare value
245 newVals := make([]cty.Value, 0, configVLen)
246 for it := configV.ElementIterator(); it.Next(); {
247 _, configEV := it.Element()
248 var priorEV cty.Value
249 for i, cmp := range cmpVals {
250 if used[i] {
251 continue
252 }
253 if cmp[1].RawEquals(configEV) {
254 priorEV = cmp[0]
255 used[i] = true // we can't use this value on a future iteration
256 break
257 }
258 }
259 if priorEV == cty.NilVal {
260 priorEV = cty.NullVal(blockType.ImpliedType())
261 }
262
263 newEV := ProposedNewObject(&blockType.Block, priorEV, configEV)
264 newVals = append(newVals, newEV)
265 }
266 newV = cty.SetVal(newVals)
267 } else {
268 newV = cty.SetValEmpty(blockType.Block.ImpliedType())
269 }
270
271 default:
272 // Should never happen, since the above cases are comprehensive.
273 panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
274 }
275
276 newAttrs[name] = newV
277 }
278
279 return cty.ObjectVal(newAttrs)
280}
281
282// setElementCompareValues takes a known, non-null value of a cty.Set type and
283// returns a table -- constructed of two-element arrays -- that maps original
284// set element values to corresponding values that have all of the computed
285// values removed, making them suitable for comparison with values obtained
286// from configuration. The element type of the set must conform to the implied
287// type of the given schema, or this function will panic.
288//
289// In the resulting slice, the zeroth element of each array is the original
290// value and the one-indexed element is the corresponding "compare value".
291//
292// This is intended to help correlate prior elements with configured elements
293// in ProposedNewObject. The result is a heuristic rather than an exact science,
294// since e.g. two separate elements may reduce to the same value through this
295// process. The caller must therefore be ready to deal with duplicates.
296func setElementCompareValues(schema *configschema.Block, set cty.Value, isConfig bool) [][2]cty.Value {
297 ret := make([][2]cty.Value, 0, set.LengthInt())
298 for it := set.ElementIterator(); it.Next(); {
299 _, ev := it.Element()
300 ret = append(ret, [2]cty.Value{ev, setElementCompareValue(schema, ev, isConfig)})
301 }
302 return ret
303}
304
305// setElementCompareValue creates a new value that has all of the same
306// non-computed attribute values as the one given but has all computed
307// attribute values forced to null.
308//
309// If isConfig is true then non-null Optional+Computed attribute values will
310// be preserved. Otherwise, they will also be set to null.
311//
312// The input value must conform to the schema's implied type, and the return
313// value is guaranteed to conform to it.
314func setElementCompareValue(schema *configschema.Block, v cty.Value, isConfig bool) cty.Value {
315 if v.IsNull() || !v.IsKnown() {
316 return v
317 }
318
319 attrs := map[string]cty.Value{}
320 for name, attr := range schema.Attributes {
321 switch {
322 case attr.Computed && attr.Optional:
323 if isConfig {
324 attrs[name] = v.GetAttr(name)
325 } else {
326 attrs[name] = cty.NullVal(attr.Type)
327 }
328 case attr.Computed:
329 attrs[name] = cty.NullVal(attr.Type)
330 default:
331 attrs[name] = v.GetAttr(name)
332 }
333 }
334
335 for name, blockType := range schema.BlockTypes {
336 switch blockType.Nesting {
337
338 case configschema.NestingSingle, configschema.NestingGroup:
339 attrs[name] = setElementCompareValue(&blockType.Block, v.GetAttr(name), isConfig)
340
341 case configschema.NestingList, configschema.NestingSet:
342 cv := v.GetAttr(name)
343 if cv.IsNull() || !cv.IsKnown() {
344 attrs[name] = cv
345 continue
346 }
347 if l := cv.LengthInt(); l > 0 {
348 elems := make([]cty.Value, 0, l)
349 for it := cv.ElementIterator(); it.Next(); {
350 _, ev := it.Element()
351 elems = append(elems, setElementCompareValue(&blockType.Block, ev, isConfig))
352 }
353 if blockType.Nesting == configschema.NestingSet {
354 // SetValEmpty would panic if given elements that are not
355 // all of the same type, but that's guaranteed not to
356 // happen here because our input value was _already_ a
357 // set and we've not changed the types of any elements here.
358 attrs[name] = cty.SetVal(elems)
359 } else {
360 attrs[name] = cty.TupleVal(elems)
361 }
362 } else {
363 if blockType.Nesting == configschema.NestingSet {
364 attrs[name] = cty.SetValEmpty(blockType.Block.ImpliedType())
365 } else {
366 attrs[name] = cty.EmptyTupleVal
367 }
368 }
369
370 case configschema.NestingMap:
371 cv := v.GetAttr(name)
372 if cv.IsNull() || !cv.IsKnown() {
373 attrs[name] = cv
374 continue
375 }
376 elems := make(map[string]cty.Value)
377 for it := cv.ElementIterator(); it.Next(); {
378 kv, ev := it.Element()
379 elems[kv.AsString()] = setElementCompareValue(&blockType.Block, ev, isConfig)
380 }
381 attrs[name] = cty.ObjectVal(elems)
382
383 default:
384 // Should never happen, since the above cases are comprehensive.
385 panic(fmt.Sprintf("unsupported block nesting mode %s", blockType.Nesting))
386 }
387 }
388
389 return cty.ObjectVal(attrs)
390}
diff --git a/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
new file mode 100644
index 0000000..69acb89
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/objchange/plan_valid.go
@@ -0,0 +1,267 @@
1package objchange
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/configs/configschema"
9)
10
 11// AssertPlanValid checks whether a planned new state returned by a
12// provider's PlanResourceChange method is suitable to achieve a change
13// from priorState to config. It returns a slice with nonzero length if
14// any problems are detected. Because problems here indicate bugs in the
15// provider that generated the plannedState, they are written with provider
16// developers as an audience, rather than end-users.
17//
18// All of the given values must have the same type and must conform to the
19// implied type of the given schema, or this function may panic or produce
20// garbage results.
21//
22// During planning, a provider may only make changes to attributes that are
23// null (unset) in the configuration and are marked as "computed" in the
24// resource type schema, in order to insert any default values the provider
25// may know about. If the default value cannot be determined until apply time,
26// the provider can return an unknown value. Providers are forbidden from
27// planning a change that disagrees with any non-null argument in the
28// configuration.
29//
30// As a special exception, providers _are_ allowed to provide attribute values
31// conflicting with configuration if and only if the planned value exactly
32// matches the corresponding attribute value in the prior state. The provider
33// can use this to signal that the new value is functionally equivalent to
34// the old and thus no change is required.
35func AssertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value) []error {
36 return assertPlanValid(schema, priorState, config, plannedState, nil)
37}
38
39func assertPlanValid(schema *configschema.Block, priorState, config, plannedState cty.Value, path cty.Path) []error {
40 var errs []error
41 if plannedState.IsNull() && !config.IsNull() {
42 errs = append(errs, path.NewErrorf("planned for absense but config wants existence"))
43 return errs
44 }
45 if config.IsNull() && !plannedState.IsNull() {
46 errs = append(errs, path.NewErrorf("planned for existence but config wants absense"))
47 return errs
48 }
49 if plannedState.IsNull() {
50 // No further checks possible if the planned value is null
51 return errs
52 }
53
54 impTy := schema.ImpliedType()
55
56 for name, attrS := range schema.Attributes {
57 plannedV := plannedState.GetAttr(name)
58 configV := config.GetAttr(name)
59 priorV := cty.NullVal(attrS.Type)
60 if !priorState.IsNull() {
61 priorV = priorState.GetAttr(name)
62 }
63
64 path := append(path, cty.GetAttrStep{Name: name})
65 moreErrs := assertPlannedValueValid(attrS, priorV, configV, plannedV, path)
66 errs = append(errs, moreErrs...)
67 }
68 for name, blockS := range schema.BlockTypes {
69 path := append(path, cty.GetAttrStep{Name: name})
70 plannedV := plannedState.GetAttr(name)
71 configV := config.GetAttr(name)
72 priorV := cty.NullVal(impTy.AttributeType(name))
73 if !priorState.IsNull() {
74 priorV = priorState.GetAttr(name)
75 }
76 if plannedV.RawEquals(configV) {
77 // Easy path: nothing has changed at all
78 continue
79 }
80 if !plannedV.IsKnown() {
81 errs = append(errs, path.NewErrorf("attribute representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
82 continue
83 }
84
85 switch blockS.Nesting {
86 case configschema.NestingSingle, configschema.NestingGroup:
87 moreErrs := assertPlanValid(&blockS.Block, priorV, configV, plannedV, path)
88 errs = append(errs, moreErrs...)
89 case configschema.NestingList:
90 // A NestingList might either be a list or a tuple, depending on
91 // whether there are dynamically-typed attributes inside. However,
92 // both support a similar-enough API that we can treat them the
93 // same for our purposes here.
94 if plannedV.IsNull() {
95 errs = append(errs, path.NewErrorf("attribute representing a list of nested blocks must be empty to indicate no blocks, not null"))
96 continue
97 }
98
99 plannedL := plannedV.LengthInt()
100 configL := configV.LengthInt()
101 if plannedL != configL {
102 errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL))
103 continue
104 }
105 for it := plannedV.ElementIterator(); it.Next(); {
106 idx, plannedEV := it.Element()
107 path := append(path, cty.IndexStep{Key: idx})
108 if !plannedEV.IsKnown() {
109 errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
110 continue
111 }
112 if !configV.HasIndex(idx).True() {
113 continue // should never happen since we checked the lengths above
114 }
115 configEV := configV.Index(idx)
116 priorEV := cty.NullVal(blockS.ImpliedType())
117 if !priorV.IsNull() && priorV.HasIndex(idx).True() {
118 priorEV = priorV.Index(idx)
119 }
120
121 moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
122 errs = append(errs, moreErrs...)
123 }
124 case configschema.NestingMap:
125 if plannedV.IsNull() {
126 errs = append(errs, path.NewErrorf("attribute representing a map of nested blocks must be empty to indicate no blocks, not null"))
127 continue
128 }
129
130 // A NestingMap might either be a map or an object, depending on
131 // whether there are dynamically-typed attributes inside, but
132 // that's decided statically and so all values will have the same
133 // kind.
134 if plannedV.Type().IsObjectType() {
135 plannedAtys := plannedV.Type().AttributeTypes()
136 configAtys := configV.Type().AttributeTypes()
137 for k := range plannedAtys {
138 if _, ok := configAtys[k]; !ok {
139 errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k))
140 continue
141 }
142 path := append(path, cty.GetAttrStep{Name: k})
143
144 plannedEV := plannedV.GetAttr(k)
145 if !plannedEV.IsKnown() {
146 errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
147 continue
148 }
149 configEV := configV.GetAttr(k)
150 priorEV := cty.NullVal(blockS.ImpliedType())
151 if !priorV.IsNull() && priorV.Type().HasAttribute(k) {
152 priorEV = priorV.GetAttr(k)
153 }
154 moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
155 errs = append(errs, moreErrs...)
156 }
157 for k := range configAtys {
158 if _, ok := plannedAtys[k]; !ok {
159 errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", k))
160 continue
161 }
162 }
163 } else {
164 plannedL := plannedV.LengthInt()
165 configL := configV.LengthInt()
166 if plannedL != configL {
167 errs = append(errs, path.NewErrorf("block count in plan (%d) disagrees with count in config (%d)", plannedL, configL))
168 continue
169 }
170 for it := plannedV.ElementIterator(); it.Next(); {
171 idx, plannedEV := it.Element()
172 path := append(path, cty.IndexStep{Key: idx})
173 if !plannedEV.IsKnown() {
174 errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
175 continue
176 }
177 k := idx.AsString()
178 if !configV.HasIndex(idx).True() {
179 errs = append(errs, path.NewErrorf("block key %q from plan is not present in config", k))
180 continue
181 }
182 configEV := configV.Index(idx)
183 priorEV := cty.NullVal(blockS.ImpliedType())
184 if !priorV.IsNull() && priorV.HasIndex(idx).True() {
185 priorEV = priorV.Index(idx)
186 }
187 moreErrs := assertPlanValid(&blockS.Block, priorEV, configEV, plannedEV, path)
188 errs = append(errs, moreErrs...)
189 }
190 for it := configV.ElementIterator(); it.Next(); {
191 idx, _ := it.Element()
192 if !plannedV.HasIndex(idx).True() {
193 errs = append(errs, path.NewErrorf("block key %q from config is not present in plan", idx.AsString()))
194 continue
195 }
196 }
197 }
198 case configschema.NestingSet:
199 if plannedV.IsNull() {
200 errs = append(errs, path.NewErrorf("attribute representing a set of nested blocks must be empty to indicate no blocks, not null"))
201 continue
202 }
203
204 // Because set elements have no identifier with which to correlate
205 // them, we can't robustly validate the plan for a nested block
206 // backed by a set, and so unfortunately we need to just trust the
207 // provider to do the right thing. :(
208 //
209 // (In principle we could correlate elements by matching the
210 // subset of attributes explicitly set in config, except for the
211 // special diff suppression rule which allows for there to be a
212 // planned value that is constructed by mixing part of a prior
213 // value with part of a config value, creating an entirely new
 214 // element that is not present in either prior or config.)
215 for it := plannedV.ElementIterator(); it.Next(); {
216 idx, plannedEV := it.Element()
217 path := append(path, cty.IndexStep{Key: idx})
218 if !plannedEV.IsKnown() {
219 errs = append(errs, path.NewErrorf("element representing nested block must not be unknown itself; set nested attribute values to unknown instead"))
220 continue
221 }
222 }
223
224 default:
225 panic(fmt.Sprintf("unsupported nesting mode %s", blockS.Nesting))
226 }
227 }
228
229 return errs
230}
231
232func assertPlannedValueValid(attrS *configschema.Attribute, priorV, configV, plannedV cty.Value, path cty.Path) []error {
233 var errs []error
234 if plannedV.RawEquals(configV) {
235 // This is the easy path: provider didn't change anything at all.
236 return errs
237 }
238 if plannedV.RawEquals(priorV) && !priorV.IsNull() {
239 // Also pretty easy: there is a prior value and the provider has
240 // returned it unchanged. This indicates that configV and plannedV
241 // are functionally equivalent and so the provider wishes to disregard
242 // the configuration value in favor of the prior.
243 return errs
244 }
245 if attrS.Computed && configV.IsNull() {
246 // The provider is allowed to change the value of any computed
247 // attribute that isn't explicitly set in the config.
248 return errs
249 }
250
251 // If none of the above conditions match, the provider has made an invalid
252 // change to this attribute.
253 if priorV.IsNull() {
254 if attrS.Sensitive {
255 errs = append(errs, path.NewErrorf("sensitive planned value does not match config value"))
256 } else {
257 errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v", plannedV, configV))
258 }
259 return errs
260 }
261 if attrS.Sensitive {
262 errs = append(errs, path.NewErrorf("sensitive planned value does not match config value nor prior value"))
263 } else {
264 errs = append(errs, path.NewErrorf("planned value %#v does not match config value %#v nor prior value %#v", plannedV, configV, priorV))
265 }
266 return errs
267}
diff --git a/vendor/github.com/hashicorp/terraform/plans/plan.go b/vendor/github.com/hashicorp/terraform/plans/plan.go
new file mode 100644
index 0000000..5a3e454
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plans/plan.go
@@ -0,0 +1,92 @@
1package plans
2
3import (
4 "sort"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/configs/configschema"
8 "github.com/zclconf/go-cty/cty"
9)
10
11// Plan is the top-level type representing a planned set of changes.
12//
13// A plan is a summary of the set of changes required to move from a current
14// state to a goal state derived from configuration. The described changes
15// are not applied directly, but contain an approximation of the final
16// result that will be completed during apply by resolving any values that
17// cannot be predicted.
18//
19// A plan must always be accompanied by the state and configuration it was
20// built from, since the plan does not itself include all of the information
21// required to make the changes indicated.
22type Plan struct {
23 VariableValues map[string]DynamicValue
24 Changes *Changes
25 TargetAddrs []addrs.Targetable
26 ProviderSHA256s map[string][]byte
27 Backend Backend
28}
29
30// Backend represents the backend-related configuration and other data as it
31// existed when a plan was created.
32type Backend struct {
33 // Type is the type of backend that the plan will apply against.
34 Type string
35
36 // Config is the configuration of the backend, whose schema is decided by
37 // the backend Type.
38 Config DynamicValue
39
40 // Workspace is the name of the workspace that was active when the plan
41 // was created. It is illegal to apply a plan created for one workspace
42 // to the state of another workspace.
43 // (This constraint is already enforced by the statefile lineage mechanism,
44 // but storing this explicitly allows us to return a better error message
45 // in the situation where the user has the wrong workspace selected.)
46 Workspace string
47}
48
49func NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) {
50 dv, err := NewDynamicValue(config, configSchema.ImpliedType())
51 if err != nil {
52 return nil, err
53 }
54
55 return &Backend{
56 Type: typeName,
57 Config: dv,
58 Workspace: workspaceName,
59 }, nil
60}
61
62// ProviderAddrs returns a list of all of the provider configuration addresses
63// referenced throughout the receiving plan.
64//
65// The result is de-duplicated so that each distinct address appears only once.
66func (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig {
67 if p == nil || p.Changes == nil {
68 return nil
69 }
70
71 m := map[string]addrs.AbsProviderConfig{}
72 for _, rc := range p.Changes.Resources {
73 m[rc.ProviderAddr.String()] = rc.ProviderAddr
74 }
75 if len(m) == 0 {
76 return nil
77 }
78
79 // This is mainly just so we'll get stable results for testing purposes.
80 keys := make([]string, 0, len(m))
81 for k := range m {
82 keys = append(keys, k)
83 }
84 sort.Strings(keys)
85
86 ret := make([]addrs.AbsProviderConfig, len(keys))
87 for i, key := range keys {
88 ret[i] = m[key]
89 }
90
91 return ret
92}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/client.go b/vendor/github.com/hashicorp/terraform/plugin/client.go
index 7e2f4fe..0eab538 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/client.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/client.go
@@ -19,11 +19,13 @@ func ClientConfig(m discovery.PluginMeta) *plugin.ClientConfig {
19 }) 19 })
20 20
21 return &plugin.ClientConfig{ 21 return &plugin.ClientConfig{
22 Cmd: exec.Command(m.Path), 22 Cmd: exec.Command(m.Path),
23 HandshakeConfig: Handshake, 23 HandshakeConfig: Handshake,
24 Managed: true, 24 VersionedPlugins: VersionedPlugins,
25 Plugins: PluginMap, 25 Managed: true,
26 Logger: logger, 26 Logger: logger,
27 AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
28 AutoMTLS: true,
27 } 29 }
28} 30}
29 31
diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go b/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go
new file mode 100644
index 0000000..51cb2fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/convert/diagnostics.go
@@ -0,0 +1,132 @@
1package convert
2
3import (
4 proto "github.com/hashicorp/terraform/internal/tfplugin5"
5 "github.com/hashicorp/terraform/tfdiags"
6 "github.com/zclconf/go-cty/cty"
7)
8
9// WarnsAndErrorsToProto converts the warnings and errors return by the legacy
10// provider to protobuf diagnostics.
11func WarnsAndErrsToProto(warns []string, errs []error) (diags []*proto.Diagnostic) {
12 for _, w := range warns {
13 diags = AppendProtoDiag(diags, w)
14 }
15
16 for _, e := range errs {
17 diags = AppendProtoDiag(diags, e)
18 }
19
20 return diags
21}
22
23// AppendProtoDiag appends a new diagnostic from a warning string or an error.
24// This panics if d is not a string or error.
25func AppendProtoDiag(diags []*proto.Diagnostic, d interface{}) []*proto.Diagnostic {
26 switch d := d.(type) {
27 case cty.PathError:
28 ap := PathToAttributePath(d.Path)
29 diags = append(diags, &proto.Diagnostic{
30 Severity: proto.Diagnostic_ERROR,
31 Summary: d.Error(),
32 Attribute: ap,
33 })
34 case error:
35 diags = append(diags, &proto.Diagnostic{
36 Severity: proto.Diagnostic_ERROR,
37 Summary: d.Error(),
38 })
39 case string:
40 diags = append(diags, &proto.Diagnostic{
41 Severity: proto.Diagnostic_WARNING,
42 Summary: d,
43 })
44 case *proto.Diagnostic:
45 diags = append(diags, d)
46 case []*proto.Diagnostic:
47 diags = append(diags, d...)
48 }
49 return diags
50}
51
52// ProtoToDiagnostics converts a list of proto.Diagnostics to a tf.Diagnostics.
53func ProtoToDiagnostics(ds []*proto.Diagnostic) tfdiags.Diagnostics {
54 var diags tfdiags.Diagnostics
55 for _, d := range ds {
56 var severity tfdiags.Severity
57
58 switch d.Severity {
59 case proto.Diagnostic_ERROR:
60 severity = tfdiags.Error
61 case proto.Diagnostic_WARNING:
62 severity = tfdiags.Warning
63 }
64
65 var newDiag tfdiags.Diagnostic
66
67 // if there's an attribute path, we need to create a AttributeValue diagnostic
68 if d.Attribute != nil {
69 path := AttributePathToPath(d.Attribute)
70 newDiag = tfdiags.AttributeValue(severity, d.Summary, d.Detail, path)
71 } else {
72 newDiag = tfdiags.WholeContainingBody(severity, d.Summary, d.Detail)
73 }
74
75 diags = diags.Append(newDiag)
76 }
77
78 return diags
79}
80
81// AttributePathToPath takes the proto encoded path and converts it to a cty.Path
82func AttributePathToPath(ap *proto.AttributePath) cty.Path {
83 var p cty.Path
84 for _, step := range ap.Steps {
85 switch selector := step.Selector.(type) {
86 case *proto.AttributePath_Step_AttributeName:
87 p = p.GetAttr(selector.AttributeName)
88 case *proto.AttributePath_Step_ElementKeyString:
89 p = p.Index(cty.StringVal(selector.ElementKeyString))
90 case *proto.AttributePath_Step_ElementKeyInt:
91 p = p.Index(cty.NumberIntVal(selector.ElementKeyInt))
92 }
93 }
94 return p
95}
96
97// AttributePathToPath takes a cty.Path and converts it to a proto-encoded path.
98func PathToAttributePath(p cty.Path) *proto.AttributePath {
99 ap := &proto.AttributePath{}
100 for _, step := range p {
101 switch selector := step.(type) {
102 case cty.GetAttrStep:
103 ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
104 Selector: &proto.AttributePath_Step_AttributeName{
105 AttributeName: selector.Name,
106 },
107 })
108 case cty.IndexStep:
109 key := selector.Key
110 switch key.Type() {
111 case cty.String:
112 ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
113 Selector: &proto.AttributePath_Step_ElementKeyString{
114 ElementKeyString: key.AsString(),
115 },
116 })
117 case cty.Number:
118 v, _ := key.AsBigFloat().Int64()
119 ap.Steps = append(ap.Steps, &proto.AttributePath_Step{
120 Selector: &proto.AttributePath_Step_ElementKeyInt{
121 ElementKeyInt: v,
122 },
123 })
124 default:
125 // We'll bail early if we encounter anything else, and just
126 // return the valid prefix.
127 return ap
128 }
129 }
130 }
131 return ap
132}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go b/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go
new file mode 100644
index 0000000..6a45f54
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/convert/schema.go
@@ -0,0 +1,154 @@
1package convert
2
3import (
4 "encoding/json"
5 "reflect"
6 "sort"
7
8 "github.com/hashicorp/terraform/configs/configschema"
9 proto "github.com/hashicorp/terraform/internal/tfplugin5"
10 "github.com/hashicorp/terraform/providers"
11)
12
13// ConfigSchemaToProto takes a *configschema.Block and converts it to a
14// proto.Schema_Block for a grpc response.
15func ConfigSchemaToProto(b *configschema.Block) *proto.Schema_Block {
16 block := &proto.Schema_Block{}
17
18 for _, name := range sortedKeys(b.Attributes) {
19 a := b.Attributes[name]
20 attr := &proto.Schema_Attribute{
21 Name: name,
22 Description: a.Description,
23 Optional: a.Optional,
24 Computed: a.Computed,
25 Required: a.Required,
26 Sensitive: a.Sensitive,
27 }
28
29 ty, err := json.Marshal(a.Type)
30 if err != nil {
31 panic(err)
32 }
33
34 attr.Type = ty
35
36 block.Attributes = append(block.Attributes, attr)
37 }
38
39 for _, name := range sortedKeys(b.BlockTypes) {
40 b := b.BlockTypes[name]
41 block.BlockTypes = append(block.BlockTypes, protoSchemaNestedBlock(name, b))
42 }
43
44 return block
45}
46
47func protoSchemaNestedBlock(name string, b *configschema.NestedBlock) *proto.Schema_NestedBlock {
48 var nesting proto.Schema_NestedBlock_NestingMode
49 switch b.Nesting {
50 case configschema.NestingSingle:
51 nesting = proto.Schema_NestedBlock_SINGLE
52 case configschema.NestingGroup:
53 nesting = proto.Schema_NestedBlock_GROUP
54 case configschema.NestingList:
55 nesting = proto.Schema_NestedBlock_LIST
56 case configschema.NestingSet:
57 nesting = proto.Schema_NestedBlock_SET
58 case configschema.NestingMap:
59 nesting = proto.Schema_NestedBlock_MAP
60 default:
61 nesting = proto.Schema_NestedBlock_INVALID
62 }
63 return &proto.Schema_NestedBlock{
64 TypeName: name,
65 Block: ConfigSchemaToProto(&b.Block),
66 Nesting: nesting,
67 MinItems: int64(b.MinItems),
68 MaxItems: int64(b.MaxItems),
69 }
70}
71
72// ProtoToProviderSchema takes a proto.Schema and converts it to a providers.Schema.
73func ProtoToProviderSchema(s *proto.Schema) providers.Schema {
74 return providers.Schema{
75 Version: s.Version,
76 Block: ProtoToConfigSchema(s.Block),
77 }
78}
79
80// ProtoToConfigSchema takes the GetSchcema_Block from a grpc response and converts it
81// to a terraform *configschema.Block.
82func ProtoToConfigSchema(b *proto.Schema_Block) *configschema.Block {
83 block := &configschema.Block{
84 Attributes: make(map[string]*configschema.Attribute),
85 BlockTypes: make(map[string]*configschema.NestedBlock),
86 }
87
88 for _, a := range b.Attributes {
89 attr := &configschema.Attribute{
90 Description: a.Description,
91 Required: a.Required,
92 Optional: a.Optional,
93 Computed: a.Computed,
94 Sensitive: a.Sensitive,
95 }
96
97 if err := json.Unmarshal(a.Type, &attr.Type); err != nil {
98 panic(err)
99 }
100
101 block.Attributes[a.Name] = attr
102 }
103
104 for _, b := range b.BlockTypes {
105 block.BlockTypes[b.TypeName] = schemaNestedBlock(b)
106 }
107
108 return block
109}
110
111func schemaNestedBlock(b *proto.Schema_NestedBlock) *configschema.NestedBlock {
112 var nesting configschema.NestingMode
113 switch b.Nesting {
114 case proto.Schema_NestedBlock_SINGLE:
115 nesting = configschema.NestingSingle
116 case proto.Schema_NestedBlock_GROUP:
117 nesting = configschema.NestingGroup
118 case proto.Schema_NestedBlock_LIST:
119 nesting = configschema.NestingList
120 case proto.Schema_NestedBlock_MAP:
121 nesting = configschema.NestingMap
122 case proto.Schema_NestedBlock_SET:
123 nesting = configschema.NestingSet
124 default:
125 // In all other cases we'll leave it as the zero value (invalid) and
126 // let the caller validate it and deal with this.
127 }
128
129 nb := &configschema.NestedBlock{
130 Nesting: nesting,
131 MinItems: int(b.MinItems),
132 MaxItems: int(b.MaxItems),
133 }
134
135 nested := ProtoToConfigSchema(b.Block)
136 nb.Block = *nested
137 return nb
138}
139
// sortedKeys returns the lexically sorted keys from the given map, so that
// schema conversions are deterministic. This panics if the map keys are not
// strings.
func sortedKeys(m interface{}) []string {
	mv := reflect.ValueOf(m)
	keys := make([]string, 0, mv.Len())
	for _, k := range mv.MapKeys() {
		keys = append(keys, k.Interface().(string))
	}
	sort.Strings(keys)
	return keys
}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
index df855a7..729e970 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/error.go
@@ -22,9 +22,43 @@ const ErrorNoSuitableVersion = Error("no suitable version is available")
22// version of Terraform. 22// version of Terraform.
23const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform") 23const ErrorNoVersionCompatible = Error("no available version is compatible with this version of Terraform")
24 24
25// ErrorVersionIncompatible indicates that all of the versions within the
26// constraints are not compatible with the current version of Terraform, though
27// there does exist a version outside of the constraints that is compatible.
28const ErrorVersionIncompatible = Error("incompatible provider version")
29
25// ErrorNoSuchProvider indicates that no provider exists with a name given 30// ErrorNoSuchProvider indicates that no provider exists with a name given
26const ErrorNoSuchProvider = Error("no provider exists with the given name") 31const ErrorNoSuchProvider = Error("no provider exists with the given name")
27 32
33// ErrorNoVersionCompatibleWithPlatform indicates that all of the available
34// versions that otherwise met constraints are not compatible with the
35// requested platform
36const ErrorNoVersionCompatibleWithPlatform = Error("no available version is compatible for the requested platform")
37
38// ErrorMissingChecksumVerification indicates that either the provider
39// distribution is missing the SHA256SUMS file or the checksum file does
40// not contain a checksum for the binary plugin
41const ErrorMissingChecksumVerification = Error("unable to verify checksum")
42
43// ErrorChecksumVerification indicates that the current checksum of the
44// provider plugin has changed since the initial release and is not trusted
45// to download
46const ErrorChecksumVerification = Error("unexpected plugin checksum")
47
48// ErrorSignatureVerification indicates that the digital signature for a
49// provider distribution could not be verified for one of the following
50// reasons: missing signature file, missing public key, or the signature
51// was not signed by any known key for the publisher
52const ErrorSignatureVerification = Error("unable to verify signature")
53
54// ErrorServiceUnreachable indicates that the network was unable to connect
55// to the registry service
56const ErrorServiceUnreachable = Error("registry service is unreachable")
57
58// ErrorPublicRegistryUnreachable indicates that the network was unable to connect
59// to the public registry in particular, so we can show a link to the statuspage
60const ErrorPublicRegistryUnreachable = Error("registry service is unreachable, check https://status.hashicorp.com/ for status updates")
61
28func (err Error) Error() string { 62func (err Error) Error() string {
29 return string(err) 63 return string(err)
30} 64}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
index 815640f..b1d01fb 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
@@ -13,28 +13,27 @@ import (
13 "strconv" 13 "strconv"
14 "strings" 14 "strings"
15 15
16 "golang.org/x/net/html" 16 "github.com/hashicorp/errwrap"
17
18 getter "github.com/hashicorp/go-getter" 17 getter "github.com/hashicorp/go-getter"
19 multierror "github.com/hashicorp/go-multierror" 18 multierror "github.com/hashicorp/go-multierror"
20 "github.com/hashicorp/terraform/httpclient" 19 "github.com/hashicorp/terraform/httpclient"
20 "github.com/hashicorp/terraform/registry"
21 "github.com/hashicorp/terraform/registry/regsrc"
22 "github.com/hashicorp/terraform/registry/response"
23 "github.com/hashicorp/terraform/svchost/disco"
24 "github.com/hashicorp/terraform/tfdiags"
25 tfversion "github.com/hashicorp/terraform/version"
21 "github.com/mitchellh/cli" 26 "github.com/mitchellh/cli"
22) 27)
23 28
24// Releases are located by parsing the html listing from releases.hashicorp.com. 29// Releases are located by querying the terraform registry.
25//
26// The URL for releases follows the pattern:
27// https://releases.hashicorp.com/terraform-provider-name/<x.y.z>/terraform-provider-name_<x.y.z>_<os>_<arch>.<ext>
28//
29// The plugin protocol version will be saved with the release and returned in
30// the header X-TERRAFORM_PROTOCOL_VERSION.
31 30
32const protocolVersionHeader = "x-terraform-protocol-version" 31const protocolVersionHeader = "x-terraform-protocol-version"
33 32
34var releaseHost = "https://releases.hashicorp.com"
35
36var httpClient *http.Client 33var httpClient *http.Client
37 34
35var errVersionNotFound = errors.New("version not found")
36
38func init() { 37func init() {
39 httpClient = httpclient.New() 38 httpClient = httpclient.New()
40 39
@@ -50,7 +49,7 @@ func init() {
50// An Installer maintains a local cache of plugins by downloading plugins 49// An Installer maintains a local cache of plugins by downloading plugins
51// from an online repository. 50// from an online repository.
52type Installer interface { 51type Installer interface {
53 Get(name string, req Constraints) (PluginMeta, error) 52 Get(name string, req Constraints) (PluginMeta, tfdiags.Diagnostics, error)
54 PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error) 53 PurgeUnused(used map[string]PluginMeta) (removed PluginMetaSet, err error)
55} 54}
56 55
@@ -79,6 +78,13 @@ type ProviderInstaller struct {
79 SkipVerify bool 78 SkipVerify bool
80 79
81 Ui cli.Ui // Ui for output 80 Ui cli.Ui // Ui for output
81
82 // Services is a required *disco.Disco, which may have services and
83 // credentials pre-loaded.
84 Services *disco.Disco
85
86 // registry client
87 registry *registry.Client
82} 88}
83 89
84// Get is part of an implementation of type Installer, and attempts to download 90// Get is part of an implementation of type Installer, and attempts to download
@@ -100,96 +106,170 @@ type ProviderInstaller struct {
100// are produced under the assumption that if presented to the user they will 106// are produced under the assumption that if presented to the user they will
101// be presented alongside context about what is being installed, and thus the 107// be presented alongside context about what is being installed, and thus the
102// error messages do not redundantly include such information. 108// error messages do not redundantly include such information.
103func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, error) { 109func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, tfdiags.Diagnostics, error) {
104 versions, err := i.listProviderVersions(provider) 110 var diags tfdiags.Diagnostics
111
112 // a little bit of initialization.
113 if i.OS == "" {
114 i.OS = runtime.GOOS
115 }
116 if i.Arch == "" {
117 i.Arch = runtime.GOARCH
118 }
119 if i.registry == nil {
120 i.registry = registry.NewClient(i.Services, nil)
121 }
122
123 // get a full listing of versions for the requested provider
124 allVersions, err := i.listProviderVersions(provider)
125
105 // TODO: return multiple errors 126 // TODO: return multiple errors
106 if err != nil { 127 if err != nil {
107 return PluginMeta{}, err 128 log.Printf("[DEBUG] %s", err)
129 if registry.IsServiceUnreachable(err) {
130 registryHost, err := i.hostname()
131 if err == nil && registryHost == regsrc.PublicRegistryHost.Raw {
132 return PluginMeta{}, diags, ErrorPublicRegistryUnreachable
133 }
134 return PluginMeta{}, diags, ErrorServiceUnreachable
135 }
136 if registry.IsServiceNotProvided(err) {
137 return PluginMeta{}, diags, err
138 }
139 return PluginMeta{}, diags, ErrorNoSuchProvider
108 } 140 }
109 141
110 if len(versions) == 0 { 142 // Add any warnings from the response to diags
111 return PluginMeta{}, ErrorNoSuitableVersion 143 for _, warning := range allVersions.Warnings {
144 hostname, err := i.hostname()
145 if err != nil {
146 return PluginMeta{}, diags, err
147 }
148 diag := tfdiags.SimpleWarning(fmt.Sprintf("%s: %s", hostname, warning))
149 diags = diags.Append(diag)
112 } 150 }
113 151
114 versions = allowedVersions(versions, req) 152 if len(allVersions.Versions) == 0 {
153 return PluginMeta{}, diags, ErrorNoSuitableVersion
154 }
155 providerSource := allVersions.ID
156
157 // Filter the list of plugin versions to those which meet the version constraints
158 versions := allowedVersions(allVersions, req)
115 if len(versions) == 0 { 159 if len(versions) == 0 {
116 return PluginMeta{}, ErrorNoSuitableVersion 160 return PluginMeta{}, diags, ErrorNoSuitableVersion
117 } 161 }
118 162
119 // sort them newest to oldest 163 // sort them newest to oldest. The newest version wins!
120 Versions(versions).Sort() 164 response.ProviderVersionCollection(versions).Sort()
121 165
122 // Ensure that our installation directory exists 166 // if the chosen provider version does not support the requested platform,
123 err = os.MkdirAll(i.Dir, os.ModePerm) 167 // filter the list of acceptable versions to those that support that platform
124 if err != nil { 168 if err := i.checkPlatformCompatibility(versions[0]); err != nil {
125 return PluginMeta{}, fmt.Errorf("failed to create plugin dir %s: %s", i.Dir, err) 169 versions = i.platformCompatibleVersions(versions)
170 if len(versions) == 0 {
171 return PluginMeta{}, diags, ErrorNoVersionCompatibleWithPlatform
172 }
126 } 173 }
127 174
128 // take the first matching plugin we find 175 // we now have a winning platform-compatible version
129 for _, v := range versions { 176 versionMeta := versions[0]
130 url := i.providerURL(provider, v.String()) 177 v := VersionStr(versionMeta.Version).MustParse()
131 178
132 if !i.SkipVerify { 179 // check protocol compatibility
133 sha256, err := i.getProviderChecksum(provider, v.String()) 180 if err := i.checkPluginProtocol(versionMeta); err != nil {
134 if err != nil { 181 closestMatch, err := i.findClosestProtocolCompatibleVersion(allVersions.Versions)
135 return PluginMeta{}, err 182 if err != nil {
136 } 183 // No operation here if we can't find a version with compatible protocol
184 return PluginMeta{}, diags, err
185 }
137 186
138 // add the checksum parameter for go-getter to verify the download for us. 187 // Prompt version suggestion to UI based on closest protocol match
139 if sha256 != "" { 188 var errMsg string
140 url = url + "?checksum=sha256:" + sha256 189 closestVersion := VersionStr(closestMatch.Version).MustParse()
141 } 190 if v.NewerThan(closestVersion) {
191 errMsg = providerProtocolTooNew
192 } else {
193 errMsg = providerProtocolTooOld
142 } 194 }
143 195
144 log.Printf("[DEBUG] fetching provider info for %s version %s", provider, v) 196 constraintStr := req.String()
145 if checkPlugin(url, i.PluginProtocolVersion) { 197 if constraintStr == "" {
146 i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %q (%s)...", provider, v.String())) 198 constraintStr = "(any version)"
147 log.Printf("[DEBUG] getting provider %q version %q", provider, v) 199 }
148 err := i.install(provider, v, url)
149 if err != nil {
150 return PluginMeta{}, err
151 }
152 200
153 // Find what we just installed 201 return PluginMeta{}, diags, errwrap.Wrap(ErrorVersionIncompatible, fmt.Errorf(fmt.Sprintf(
154 // (This is weird, because go-getter doesn't directly return 202 errMsg, provider, v.String(), tfversion.String(),
155 // information about what was extracted, and we just extracted 203 closestVersion.String(), closestVersion.MinorUpgradeConstraintStr(), constraintStr)))
156 // the archive directly into a shared dir here.) 204 }
157 log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, v)
158 metas := FindPlugins("provider", []string{i.Dir})
159 log.Printf("[DEBUG] all plugins found %#v", metas)
160 metas, _ = metas.ValidateVersions()
161 metas = metas.WithName(provider).WithVersion(v)
162 log.Printf("[DEBUG] filtered plugins %#v", metas)
163 if metas.Count() == 0 {
164 // This should never happen. Suggests that the release archive
165 // contains an executable file whose name doesn't match the
166 // expected convention.
167 return PluginMeta{}, fmt.Errorf(
168 "failed to find installed plugin version %s; this is a bug in Terraform and should be reported",
169 v,
170 )
171 }
172 205
173 if metas.Count() > 1 { 206 downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version)
174 // This should also never happen, and suggests that a 207 providerURL := downloadURLs.DownloadURL
175 // particular version was re-released with a different 208
176 // executable filename. We consider releases as immutable, so 209 if !i.SkipVerify {
177 // this is an error. 210 // Terraform verifies the integrity of a provider release before downloading
178 return PluginMeta{}, fmt.Errorf( 211 // the plugin binary. The digital signature (SHA256SUMS.sig) on the
179 "multiple plugins installed for version %s; this is a bug in Terraform and should be reported", 212 // release distribution (SHA256SUMS) is verified with the public key of the
180 v, 213 // publisher provided in the Terraform Registry response, ensuring that
181 ) 214 // everything is as intended by the publisher. The checksum of the provider
182 } 215 // plugin is expected in the SHA256SUMS file and is double checked to match
216 // the checksum of the original published release to the Registry. This
217 // enforces immutability of releases between the Registry and the plugin's
218 // host location. Lastly, the integrity of the binary is verified upon
219 // download matches the Registry and signed checksum.
220 sha256, err := i.getProviderChecksum(downloadURLs)
221 if err != nil {
222 return PluginMeta{}, diags, err
223 }
183 224
184 // By now we know we have exactly one meta, and so "Newest" will 225 // add the checksum parameter for go-getter to verify the download for us.
185 // return that one. 226 if sha256 != "" {
186 return metas.Newest(), nil 227 providerURL = providerURL + "?checksum=sha256:" + sha256
187 } 228 }
229 }
230
231 printedProviderName := fmt.Sprintf("%q (%s)", provider, providerSource)
232 i.Ui.Info(fmt.Sprintf("- Downloading plugin for provider %s %s...", printedProviderName, versionMeta.Version))
233 log.Printf("[DEBUG] getting provider %s version %q", printedProviderName, versionMeta.Version)
234 err = i.install(provider, v, providerURL)
235 if err != nil {
236 return PluginMeta{}, diags, err
237 }
238
239 // Find what we just installed
240 // (This is weird, because go-getter doesn't directly return
241 // information about what was extracted, and we just extracted
242 // the archive directly into a shared dir here.)
243 log.Printf("[DEBUG] looking for the %s %s plugin we just installed", provider, versionMeta.Version)
244 metas := FindPlugins("provider", []string{i.Dir})
245 log.Printf("[DEBUG] all plugins found %#v", metas)
246 metas, _ = metas.ValidateVersions()
247 metas = metas.WithName(provider).WithVersion(v)
248 log.Printf("[DEBUG] filtered plugins %#v", metas)
249 if metas.Count() == 0 {
250 // This should never happen. Suggests that the release archive
251 // contains an executable file whose name doesn't match the
252 // expected convention.
253 return PluginMeta{}, diags, fmt.Errorf(
254 "failed to find installed plugin version %s; this is a bug in Terraform and should be reported",
255 versionMeta.Version,
256 )
257 }
188 258
189 log.Printf("[INFO] incompatible ProtocolVersion for %s version %s", provider, v) 259 if metas.Count() > 1 {
260 // This should also never happen, and suggests that a
261 // particular version was re-released with a different
262 // executable filename. We consider releases as immutable, so
263 // this is an error.
264 return PluginMeta{}, diags, fmt.Errorf(
265 "multiple plugins installed for version %s; this is a bug in Terraform and should be reported",
266 versionMeta.Version,
267 )
190 } 268 }
191 269
192 return PluginMeta{}, ErrorNoVersionCompatible 270 // By now we know we have exactly one meta, and so "Newest" will
271 // return that one.
272 return metas.Newest(), diags, nil
193} 273}
194 274
195func (i *ProviderInstaller) install(provider string, version Version, url string) error { 275func (i *ProviderInstaller) install(provider string, version Version, url string) error {
@@ -215,6 +295,14 @@ func (i *ProviderInstaller) install(provider string, version Version, url string
215 // normal resolution machinery can find it. 295 // normal resolution machinery can find it.
216 filename := filepath.Base(cached) 296 filename := filepath.Base(cached)
217 targetPath := filepath.Join(i.Dir, filename) 297 targetPath := filepath.Join(i.Dir, filename)
298 // check if the target dir exists, and create it if not
299 var err error
300 if _, StatErr := os.Stat(i.Dir); os.IsNotExist(StatErr) {
301 err = os.MkdirAll(i.Dir, 0700)
302 }
303 if err != nil {
304 return err
305 }
218 306
219 log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached) 307 log.Printf("[DEBUG] installing %s %s to %s from local cache %s", provider, version, targetPath, cached)
220 308
@@ -280,7 +368,6 @@ func (i *ProviderInstaller) install(provider string, version Version, url string
280 return err 368 return err
281 } 369 }
282 } 370 }
283
284 return nil 371 return nil
285} 372}
286 373
@@ -316,182 +403,222 @@ func (i *ProviderInstaller) PurgeUnused(used map[string]PluginMeta) (PluginMetaS
316 return removed, errs 403 return removed, errs
317} 404}
318 405
319// Plugins are referred to by the short name, but all URLs and files will use 406func (i *ProviderInstaller) getProviderChecksum(resp *response.TerraformProviderPlatformLocation) (string, error) {
320// the full name prefixed with terraform-<plugin_type>- 407 // Get SHA256SUMS file.
321func (i *ProviderInstaller) providerName(name string) string { 408 shasums, err := getFile(resp.ShasumsURL)
322 return "terraform-provider-" + name 409 if err != nil {
323} 410 log.Printf("[ERROR] error fetching checksums from %q: %s", resp.ShasumsURL, err)
411 return "", ErrorMissingChecksumVerification
412 }
324 413
325func (i *ProviderInstaller) providerFileName(name, version string) string { 414 // Get SHA256SUMS.sig file.
326 os := i.OS 415 signature, err := getFile(resp.ShasumsSignatureURL)
327 arch := i.Arch 416 if err != nil {
328 if os == "" { 417 log.Printf("[ERROR] error fetching checksums signature from %q: %s", resp.ShasumsSignatureURL, err)
329 os = runtime.GOOS 418 return "", ErrorSignatureVerification
330 } 419 }
331 if arch == "" { 420
332 arch = runtime.GOARCH 421 // Verify the GPG signature returned from the Registry.
422 asciiArmor := resp.SigningKeys.GPGASCIIArmor()
423 signer, err := verifySig(shasums, signature, asciiArmor)
424 if err != nil {
425 log.Printf("[ERROR] error verifying signature: %s", err)
426 return "", ErrorSignatureVerification
333 } 427 }
334 return fmt.Sprintf("%s_%s_%s_%s.zip", i.providerName(name), version, os, arch)
335}
336 428
337// providerVersionsURL returns the path to the released versions directory for the provider: 429 // Also verify the GPG signature against the HashiCorp public key. This is
338// https://releases.hashicorp.com/terraform-provider-name/ 430 // a temporary additional check until a more robust key verification
339func (i *ProviderInstaller) providerVersionsURL(name string) string { 431 // process is added in a future release.
340 return releaseHost + "/" + i.providerName(name) + "/" 432 _, err = verifySig(shasums, signature, HashicorpPublicKey)
341} 433 if err != nil {
434 log.Printf("[ERROR] error verifying signature against HashiCorp public key: %s", err)
435 return "", ErrorSignatureVerification
436 }
342 437
343// providerURL returns the full path to the provider file, using the current OS 438 // Display identity for GPG key which succeeded verifying the signature.
344// and ARCH: 439 // This could also be used to display to the user with i.Ui.Info().
345// .../terraform-provider-name_<x.y.z>/terraform-provider-name_<x.y.z>_<os>_<arch>.<ext> 440 identities := []string{}
346func (i *ProviderInstaller) providerURL(name, version string) string { 441 for k := range signer.Identities {
347 return fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, i.providerFileName(name, version)) 442 identities = append(identities, k)
348} 443 }
444 identity := strings.Join(identities, ", ")
445 log.Printf("[DEBUG] verified GPG signature with key from %s", identity)
446
447 // Extract checksum for this os/arch platform binary and verify against Registry
448 checksum := checksumForFile(shasums, resp.Filename)
449 if checksum == "" {
450 log.Printf("[ERROR] missing checksum for %s from source %s", resp.Filename, resp.ShasumsURL)
451 return "", ErrorMissingChecksumVerification
452 } else if checksum != resp.Shasum {
453 log.Printf("[ERROR] unexpected checksum for %s from source %q", resp.Filename, resp.ShasumsURL)
454 return "", ErrorChecksumVerification
455 }
349 456
350func (i *ProviderInstaller) providerChecksumURL(name, version string) string { 457 return checksum, nil
351 fileName := fmt.Sprintf("%s_%s_SHA256SUMS", i.providerName(name), version)
352 u := fmt.Sprintf("%s%s/%s", i.providerVersionsURL(name), version, fileName)
353 return u
354} 458}
355 459
356func (i *ProviderInstaller) getProviderChecksum(name, version string) (string, error) { 460func (i *ProviderInstaller) hostname() (string, error) {
357 checksums, err := getPluginSHA256SUMs(i.providerChecksumURL(name, version)) 461 provider := regsrc.NewTerraformProvider("", i.OS, i.Arch)
462 svchost, err := provider.SvcHost()
358 if err != nil { 463 if err != nil {
359 return "", err 464 return "", err
360 } 465 }
361 466
362 return checksumForFile(checksums, i.providerFileName(name, version)), nil 467 return svchost.ForDisplay(), nil
363} 468}
364 469
365// Return the plugin version by making a HEAD request to the provided url. 470// list all versions available for the named provider
366// If the header is not present, we assume the latest version will be 471func (i *ProviderInstaller) listProviderVersions(name string) (*response.TerraformProviderVersions, error) {
367// compatible, and leave the check for discovery or execution. 472 provider := regsrc.NewTerraformProvider(name, i.OS, i.Arch)
368func checkPlugin(url string, pluginProtocolVersion uint) bool { 473 versions, err := i.registry.TerraformProviderVersions(provider)
369 resp, err := httpClient.Head(url) 474 return versions, err
370 if err != nil { 475}
371 log.Printf("[ERROR] error fetching plugin headers: %s", err)
372 return false
373 }
374 476
375 if resp.StatusCode != http.StatusOK { 477func (i *ProviderInstaller) listProviderDownloadURLs(name, version string) (*response.TerraformProviderPlatformLocation, error) {
376 log.Println("[ERROR] non-200 status fetching plugin headers:", resp.Status) 478 urls, err := i.registry.TerraformProviderLocation(regsrc.NewTerraformProvider(name, i.OS, i.Arch), version)
377 return false 479 if urls == nil {
480 return nil, fmt.Errorf("No download urls found for provider %s", name)
378 } 481 }
482 return urls, err
483}
484
485// findClosestProtocolCompatibleVersion searches for the provider version with the closest protocol match.
486// Prerelease versions are filtered.
487func (i *ProviderInstaller) findClosestProtocolCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) {
488 // Loop through all the provider versions to find the earliest and latest
489 // versions that match the installer protocol to then select the closest of the two
490 var latest, earliest *response.TerraformProviderVersion
491 for _, version := range versions {
492 // Prereleases are filtered and will not be suggested
493 v, err := VersionStr(version.Version).Parse()
494 if err != nil || v.IsPrerelease() {
495 continue
496 }
379 497
380 proto := resp.Header.Get(protocolVersionHeader) 498 if err := i.checkPluginProtocol(version); err == nil {
381 if proto == "" { 499 if earliest == nil {
382 // The header isn't present, but we don't make this error fatal since 500 // Found the first provider version with compatible protocol
383 // the latest version will probably work. 501 earliest = version
384 log.Printf("[WARN] missing %s from: %s", protocolVersionHeader, url) 502 }
385 return true 503 // Update the latest protocol compatible version
504 latest = version
505 }
506 }
507 if earliest == nil {
508 // No compatible protocol was found for any version
509 return nil, ErrorNoVersionCompatible
386 } 510 }
387 511
388 protoVersion, err := strconv.Atoi(proto) 512 // Convert protocols to comparable types
513 protoString := strconv.Itoa(int(i.PluginProtocolVersion))
514 protocolVersion, err := VersionStr(protoString).Parse()
389 if err != nil { 515 if err != nil {
390 log.Printf("[ERROR] invalid ProtocolVersion: %s", proto) 516 return nil, fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion)
391 return false
392 } 517 }
393 518
394 return protoVersion == int(pluginProtocolVersion) 519 earliestVersionProtocol, err := VersionStr(earliest.Protocols[0]).Parse()
395}
396
397// list the version available for the named plugin
398func (i *ProviderInstaller) listProviderVersions(name string) ([]Version, error) {
399 versions, err := listPluginVersions(i.providerVersionsURL(name))
400 if err != nil { 520 if err != nil {
401 // listPluginVersions returns a verbose error message indicating
402 // what was being accessed and what failed
403 return nil, err 521 return nil, err
404 } 522 }
405 return versions, nil
406}
407
408var errVersionNotFound = errors.New("version not found")
409 523
410// take the list of available versions for a plugin, and filter out those that 524 // Compare installer protocol version with the first protocol listed of the earliest match
411// don't fit the constraints. 525 // [A, B] where A is assumed the earliest compatible major version of the protocol pair
412func allowedVersions(available []Version, required Constraints) []Version { 526 if protocolVersion.NewerThan(earliestVersionProtocol) {
413 var allowed []Version 527 // Provider protocols are too old, the closest version is the earliest compatible version
414 528 return earliest, nil
415 for _, v := range available {
416 if required.Allows(v) {
417 allowed = append(allowed, v)
418 }
419 } 529 }
420 530
421 return allowed 531 // Provider protocols are too new, the closest version is the latest compatible version
532 return latest, nil
422} 533}
423 534
424// return a list of the plugin versions at the given URL 535func (i *ProviderInstaller) checkPluginProtocol(versionMeta *response.TerraformProviderVersion) error {
425func listPluginVersions(url string) ([]Version, error) { 536 // TODO: should this be a different error? We should probably differentiate between
426 resp, err := httpClient.Get(url) 537 // no compatible versions and no protocol versions listed at all
538 if len(versionMeta.Protocols) == 0 {
539 return fmt.Errorf("no plugin protocol versions listed")
540 }
541
542 protoString := strconv.Itoa(int(i.PluginProtocolVersion))
543 protocolVersion, err := VersionStr(protoString).Parse()
427 if err != nil { 544 if err != nil {
428 // http library produces a verbose error message that includes the 545 return fmt.Errorf("invalid plugin protocol version: %q", i.PluginProtocolVersion)
429 // URL being accessed, etc. 546 }
430 return nil, err 547 protocolConstraint, err := protocolVersion.MinorUpgradeConstraintStr().Parse()
548 if err != nil {
549 // This should not fail if the preceding function succeeded.
550 return fmt.Errorf("invalid plugin protocol version: %q", protocolVersion.String())
431 } 551 }
432 defer resp.Body.Close()
433 552
434 if resp.StatusCode != http.StatusOK { 553 for _, p := range versionMeta.Protocols {
435 body, _ := ioutil.ReadAll(resp.Body) 554 proPro, err := VersionStr(p).Parse()
436 log.Printf("[ERROR] failed to fetch plugin versions from %s\n%s\n%s", url, resp.Status, body) 555 if err != nil {
437 556 // invalid protocol reported by the registry. Move along.
438 switch resp.StatusCode { 557 log.Printf("[WARN] invalid provider protocol version %q found in the registry", versionMeta.Version)
439 case http.StatusNotFound, http.StatusForbidden: 558 continue
440 // These are treated as indicative of the given name not being 559 }
441 // a valid provider name at all. 560 // success!
442 return nil, ErrorNoSuchProvider 561 if protocolConstraint.Allows(proPro) {
443 562 return nil
444 default:
445 // All other errors are assumed to be operational problems.
446 return nil, fmt.Errorf("error accessing %s: %s", url, resp.Status)
447 } 563 }
448
449 } 564 }
450 565
451 body, err := html.Parse(resp.Body) 566 return ErrorNoVersionCompatible
452 if err != nil { 567}
453 log.Fatal(err) 568
569// REVIEWER QUESTION (again): this ends up swallowing a bunch of errors from
570// checkPluginProtocol. Do they need to be percolated up better, or would
571// debug messages would suffice in these situations?
572func (i *ProviderInstaller) findPlatformCompatibleVersion(versions []*response.TerraformProviderVersion) (*response.TerraformProviderVersion, error) {
573 for _, version := range versions {
574 if err := i.checkPlatformCompatibility(version); err == nil {
575 return version, nil
576 }
454 } 577 }
455 578
456 names := []string{} 579 return nil, ErrorNoVersionCompatibleWithPlatform
580}
457 581
458 // all we need to do is list links on the directory listing page that look like plugins 582// platformCompatibleVersions returns a list of provider versions that are
459 var f func(*html.Node) 583// compatible with the requested platform.
460 f = func(n *html.Node) { 584func (i *ProviderInstaller) platformCompatibleVersions(versions []*response.TerraformProviderVersion) []*response.TerraformProviderVersion {
461 if n.Type == html.ElementNode && n.Data == "a" { 585 var v []*response.TerraformProviderVersion
462 c := n.FirstChild 586 for _, version := range versions {
463 if c != nil && c.Type == html.TextNode && strings.HasPrefix(c.Data, "terraform-") { 587 if err := i.checkPlatformCompatibility(version); err == nil {
464 names = append(names, c.Data) 588 v = append(v, version)
465 return
466 }
467 }
468 for c := n.FirstChild; c != nil; c = c.NextSibling {
469 f(c)
470 } 589 }
471 } 590 }
472 f(body) 591 return v
592}
473 593
474 return versionsFromNames(names), nil 594func (i *ProviderInstaller) checkPlatformCompatibility(versionMeta *response.TerraformProviderVersion) error {
595 if len(versionMeta.Platforms) == 0 {
596 return fmt.Errorf("no supported provider platforms listed")
597 }
598 for _, p := range versionMeta.Platforms {
599 if p.Arch == i.Arch && p.OS == i.OS {
600 return nil
601 }
602 }
603 return fmt.Errorf("version %s does not support the requested platform %s_%s", versionMeta.Version, i.OS, i.Arch)
475} 604}
476 605
477// parse the list of directory names into a sorted list of available versions 606// take the list of available versions for a plugin, and filter out those that
478func versionsFromNames(names []string) []Version { 607// don't fit the constraints.
479 var versions []Version 608func allowedVersions(available *response.TerraformProviderVersions, required Constraints) []*response.TerraformProviderVersion {
480 for _, name := range names { 609 var allowed []*response.TerraformProviderVersion
481 parts := strings.SplitN(name, "_", 2)
482 if len(parts) == 2 && parts[1] != "" {
483 v, err := VersionStr(parts[1]).Parse()
484 if err != nil {
485 // filter invalid versions scraped from the page
486 log.Printf("[WARN] invalid version found for %q: %s", name, err)
487 continue
488 }
489 610
490 versions = append(versions, v) 611 for _, v := range available.Versions {
612 version, err := VersionStr(v.Version).Parse()
613 if err != nil {
614 log.Printf("[WARN] invalid version found for %q: %s", available.ID, err)
615 continue
616 }
617 if required.Allows(version) {
618 allowed = append(allowed, v)
491 } 619 }
492 } 620 }
493 621 return allowed
494 return versions
495} 622}
496 623
497func checksumForFile(sums []byte, name string) string { 624func checksumForFile(sums []byte, name string) string {
@@ -504,27 +631,6 @@ func checksumForFile(sums []byte, name string) string {
504 return "" 631 return ""
505} 632}
506 633
507// fetch the SHA256SUMS file provided, and verify its signature.
508func getPluginSHA256SUMs(sumsURL string) ([]byte, error) {
509 sigURL := sumsURL + ".sig"
510
511 sums, err := getFile(sumsURL)
512 if err != nil {
513 return nil, fmt.Errorf("error fetching checksums: %s", err)
514 }
515
516 sig, err := getFile(sigURL)
517 if err != nil {
518 return nil, fmt.Errorf("error fetching checksums signature: %s", err)
519 }
520
521 if err := verifySig(sums, sig); err != nil {
522 return nil, err
523 }
524
525 return sums, nil
526}
527
528func getFile(url string) ([]byte, error) { 634func getFile(url string) ([]byte, error) {
529 resp, err := httpClient.Get(url) 635 resp, err := httpClient.Get(url)
530 if err != nil { 636 if err != nil {
@@ -543,6 +649,41 @@ func getFile(url string) ([]byte, error) {
543 return data, nil 649 return data, nil
544} 650}
545 651
546func GetReleaseHost() string { 652// providerProtocolTooOld is a message sent to the CLI UI if the provider's
547 return releaseHost 653// supported protocol versions are too old for the user's version of terraform,
548} 654// but an older version of the provider is compatible.
655const providerProtocolTooOld = `
656[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red]
657
658Provider version %s is the earliest compatible version. Select it with
659the following version constraint:
660
661 version = %q
662
663Terraform checked all of the plugin versions matching the given constraint:
664 %s
665
666Consult the documentation for this provider for more information on
667compatibility between provider and Terraform versions.
668`
669
670// providerProtocolTooNew is a message sent to the CLI UI if the provider's
671// supported protocol versions are too new for the user's version of terraform,
672// and the user could either upgrade terraform or choose an older version of the
673// provider
674const providerProtocolTooNew = `
675[reset][bold][red]Provider %q v%s is not compatible with Terraform %s.[reset][red]
676
677Provider version %s is the latest compatible version. Select it with
678the following constraint:
679
680 version = %q
681
682Terraform checked all of the plugin versions matching the given constraint:
683 %s
684
685Consult the documentation for this provider for more information on
686compatibility between provider and Terraform versions.
687
688Alternatively, upgrade to the latest version of Terraform for compatibility with newer provider releases.
689`
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go
new file mode 100644
index 0000000..4622ca0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/hashicorp.go
@@ -0,0 +1,34 @@
1package discovery
2
3// HashicorpPublicKey is the HashiCorp public key, also available at
4// https://www.hashicorp.com/security
5const HashicorpPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
6Version: GnuPG v1
7
8mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f
9W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq
10fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA
113drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca
12KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k
13SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1
14cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG
15CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n
16Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i
17SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi
18psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w
19sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO
20klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW
21WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9
22wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j
232tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM
24skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo
25mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y
260H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA
27CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc
28z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP
290BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG
30unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ
31EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ
32oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C
33=LYpS
34-----END PGP PUBLIC KEY BLOCK-----`
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go
index 181ea1f..3a99289 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/meta_set.go
@@ -63,7 +63,7 @@ func (s PluginMetaSet) WithName(name string) PluginMetaSet {
63// WithVersion returns the subset of metas that have the given version. 63// WithVersion returns the subset of metas that have the given version.
64// 64//
65// This should be used only with the "valid" result from ValidateVersions; 65// This should be used only with the "valid" result from ValidateVersions;
66// it will ignore any plugin metas that have a invalid version strings. 66// it will ignore any plugin metas that have invalid version strings.
67func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet { 67func (s PluginMetaSet) WithVersion(version Version) PluginMetaSet {
68 ns := make(PluginMetaSet) 68 ns := make(PluginMetaSet)
69 for p := range s { 69 for p := range s {
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
index 75430fd..0466ab2 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/requirements.go
@@ -4,6 +4,12 @@ import (
4 "bytes" 4 "bytes"
5) 5)
6 6
7// PluginInstallProtocolVersion is the protocol version TF-core
8// supports to communicate with servers, and is used to resolve
9// plugin discovery with terraform registry, in addition to
10// any specified plugin version constraints
11const PluginInstallProtocolVersion = 5
12
7// PluginRequirements describes a set of plugins (assumed to be of a consistent 13// PluginRequirements describes a set of plugins (assumed to be of a consistent
8// kind) that are required to exist and have versions within the given 14// kind) that are required to exist and have versions within the given
9// corresponding sets. 15// corresponding sets.
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
index b6686a5..7bbae50 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/signature.go
@@ -2,7 +2,6 @@ package discovery
2 2
3import ( 3import (
4 "bytes" 4 "bytes"
5 "log"
6 "strings" 5 "strings"
7 6
8 "golang.org/x/crypto/openpgp" 7 "golang.org/x/crypto/openpgp"
@@ -10,44 +9,11 @@ import (
10 9
11// Verify the data using the provided openpgp detached signature and the 10// Verify the data using the provided openpgp detached signature and the
12// embedded hashicorp public key. 11// embedded hashicorp public key.
13func verifySig(data, sig []byte) error { 12func verifySig(data, sig []byte, armor string) (*openpgp.Entity, error) {
14 el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(hashiPublicKey)) 13 el, err := openpgp.ReadArmoredKeyRing(strings.NewReader(armor))
15 if err != nil { 14 if err != nil {
16 log.Fatal(err) 15 return nil, err
17 } 16 }
18 17
19 _, err = openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig)) 18 return openpgp.CheckDetachedSignature(el, bytes.NewReader(data), bytes.NewReader(sig))
20 return err
21} 19}
22
23// this is the public key that signs the checksums file for releases.
24const hashiPublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK-----
25Version: GnuPG v1
26
27mQENBFMORM0BCADBRyKO1MhCirazOSVwcfTr1xUxjPvfxD3hjUwHtjsOy/bT6p9f
28W2mRPfwnq2JB5As+paL3UGDsSRDnK9KAxQb0NNF4+eVhr/EJ18s3wwXXDMjpIifq
29fIm2WyH3G+aRLTLPIpscUNKDyxFOUbsmgXAmJ46Re1fn8uKxKRHbfa39aeuEYWFA
303drdL1WoUngvED7f+RnKBK2G6ZEpO+LDovQk19xGjiMTtPJrjMjZJ3QXqPvx5wca
31KSZLr4lMTuoTI/ZXyZy5bD4tShiZz6KcyX27cD70q2iRcEZ0poLKHyEIDAi3TM5k
32SwbbWBFd5RNPOR0qzrb/0p9ksKK48IIfH2FvABEBAAG0K0hhc2hpQ29ycCBTZWN1
33cml0eSA8c2VjdXJpdHlAaGFzaGljb3JwLmNvbT6JATgEEwECACIFAlMORM0CGwMG
34CwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEFGFLYc0j/xMyWIIAIPhcVqiQ59n
35Jc07gjUX0SWBJAxEG1lKxfzS4Xp+57h2xxTpdotGQ1fZwsihaIqow337YHQI3q0i
36SqV534Ms+j/tU7X8sq11xFJIeEVG8PASRCwmryUwghFKPlHETQ8jJ+Y8+1asRydi
37psP3B/5Mjhqv/uOK+Vy3zAyIpyDOMtIpOVfjSpCplVRdtSTFWBu9Em7j5I2HMn1w
38sJZnJgXKpybpibGiiTtmnFLOwibmprSu04rsnP4ncdC2XRD4wIjoyA+4PKgX3sCO
39klEzKryWYBmLkJOMDdo52LttP3279s7XrkLEE7ia0fXa2c12EQ0f0DQ1tGUvyVEW
40WmJVccm5bq25AQ0EUw5EzQEIANaPUY04/g7AmYkOMjaCZ6iTp9hB5Rsj/4ee/ln9
41wArzRO9+3eejLWh53FoN1rO+su7tiXJA5YAzVy6tuolrqjM8DBztPxdLBbEi4V+j
422tK0dATdBQBHEh3OJApO2UBtcjaZBT31zrG9K55D+CrcgIVEHAKY8Cb4kLBkb5wM
43skn+DrASKU0BNIV1qRsxfiUdQHZfSqtp004nrql1lbFMLFEuiY8FZrkkQ9qduixo
44mTT6f34/oiY+Jam3zCK7RDN/OjuWheIPGj/Qbx9JuNiwgX6yRj7OE1tjUx6d8g9y
450H1fmLJbb3WZZbuuGFnK6qrE3bGeY8+AWaJAZ37wpWh1p0cAEQEAAYkBHwQYAQIA
46CQUCUw5EzQIbDAAKCRBRhS2HNI/8TJntCAClU7TOO/X053eKF1jqNW4A1qpxctVc
47z8eTcY8Om5O4f6a/rfxfNFKn9Qyja/OG1xWNobETy7MiMXYjaa8uUx5iFy6kMVaP
480BXJ59NLZjMARGw6lVTYDTIvzqqqwLxgliSDfSnqUhubGwvykANPO+93BBx89MRG
49unNoYGXtPlhNFrAsB1VR8+EyKLv2HQtGCPSFBhrjuzH3gxGibNDDdFQLxxuJWepJ
50EK1UbTS4ms0NgZ2Uknqn1WRU1Ki7rE4sTy68iZtWpKQXZEJa0IGnuI2sSINGcXCJ
51oEIgXTMyCILo34Fa/C6VCm2WBgz9zZO8/rHIiQm1J5zqz0DrDwKBUM9C
52=LYpS
53-----END PGP PUBLIC KEY BLOCK-----`
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
index 8fad58d..4311d51 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version.go
@@ -55,6 +55,11 @@ func (v Version) Equal(other Version) bool {
55 return v.raw.Equal(other.raw) 55 return v.raw.Equal(other.raw)
56} 56}
57 57
58// IsPrerelease determines if version is a prerelease
59func (v Version) IsPrerelease() bool {
60 return v.raw.Prerelease() != ""
61}
62
58// MinorUpgradeConstraintStr returns a ConstraintStr that would permit 63// MinorUpgradeConstraintStr returns a ConstraintStr that would permit
59// minor upgrades relative to the receiving version. 64// minor upgrades relative to the receiving version.
60func (v Version) MinorUpgradeConstraintStr() ConstraintStr { 65func (v Version) MinorUpgradeConstraintStr() ConstraintStr {
diff --git a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
index 0aefd75..de02f5e 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/discovery/version_set.go
@@ -36,6 +36,11 @@ type Constraints struct {
36 raw version.Constraints 36 raw version.Constraints
37} 37}
38 38
39// NewConstraints creates a Constraints based on a version.Constraints.
40func NewConstraints(c version.Constraints) Constraints {
41 return Constraints{c}
42}
43
39// AllVersions is a Constraints containing all versions 44// AllVersions is a Constraints containing all versions
40var AllVersions Constraints 45var AllVersions Constraints
41 46
diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
new file mode 100644
index 0000000..ae9a400
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
@@ -0,0 +1,562 @@
1package plugin
2
3import (
4 "context"
5 "errors"
6 "log"
7 "sync"
8
9 "github.com/zclconf/go-cty/cty"
10
11 plugin "github.com/hashicorp/go-plugin"
12 proto "github.com/hashicorp/terraform/internal/tfplugin5"
13 "github.com/hashicorp/terraform/plugin/convert"
14 "github.com/hashicorp/terraform/providers"
15 "github.com/hashicorp/terraform/version"
16 "github.com/zclconf/go-cty/cty/msgpack"
17 "google.golang.org/grpc"
18)
19
20// GRPCProviderPlugin implements plugin.GRPCPlugin for the go-plugin package.
21type GRPCProviderPlugin struct {
22 plugin.Plugin
23 GRPCProvider func() proto.ProviderServer
24}
25
26func (p *GRPCProviderPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
27 return &GRPCProvider{
28 client: proto.NewProviderClient(c),
29 ctx: ctx,
30 }, nil
31}
32
33func (p *GRPCProviderPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
34 proto.RegisterProviderServer(s, p.GRPCProvider())
35 return nil
36}
37
38// GRPCProvider handles the client, or core side of the plugin rpc connection.
39// The GRPCProvider methods are mostly a translation layer between the
40// terraform provioders types and the grpc proto types, directly converting
41// between the two.
42type GRPCProvider struct {
43 // PluginClient provides a reference to the plugin.Client which controls the plugin process.
44 // This allows the GRPCProvider a way to shutdown the plugin process.
45 PluginClient *plugin.Client
46
47 // TestServer contains a grpc.Server to close when the GRPCProvider is being
48 // used in an end to end test of a provider.
49 TestServer *grpc.Server
50
51 // Proto client use to make the grpc service calls.
52 client proto.ProviderClient
53
54 // this context is created by the plugin package, and is canceled when the
55 // plugin process ends.
56 ctx context.Context
57
58 // schema stores the schema for this provider. This is used to properly
59 // serialize the state for requests.
60 mu sync.Mutex
61 schemas providers.GetSchemaResponse
62}
63
64// getSchema is used internally to get the saved provider schema. The schema
65// should have already been fetched from the provider, but we have to
66// synchronize access to avoid being called concurrently with GetSchema.
67func (p *GRPCProvider) getSchema() providers.GetSchemaResponse {
68 p.mu.Lock()
69 // unlock inline in case GetSchema needs to be called
70 if p.schemas.Provider.Block != nil {
71 p.mu.Unlock()
72 return p.schemas
73 }
74 p.mu.Unlock()
75
76 // the schema should have been fetched already, but give it another shot
77 // just in case things are being called out of order. This may happen for
78 // tests.
79 schemas := p.GetSchema()
80 if schemas.Diagnostics.HasErrors() {
81 panic(schemas.Diagnostics.Err())
82 }
83
84 return schemas
85}
86
87// getResourceSchema is a helper to extract the schema for a resource, and
88// panics if the schema is not available.
89func (p *GRPCProvider) getResourceSchema(name string) providers.Schema {
90 schema := p.getSchema()
91 resSchema, ok := schema.ResourceTypes[name]
92 if !ok {
93 panic("unknown resource type " + name)
94 }
95 return resSchema
96}
97
98// gettDatasourceSchema is a helper to extract the schema for a datasource, and
99// panics if that schema is not available.
100func (p *GRPCProvider) getDatasourceSchema(name string) providers.Schema {
101 schema := p.getSchema()
102 dataSchema, ok := schema.DataSources[name]
103 if !ok {
104 panic("unknown data source " + name)
105 }
106 return dataSchema
107}
108
109func (p *GRPCProvider) GetSchema() (resp providers.GetSchemaResponse) {
110 log.Printf("[TRACE] GRPCProvider: GetSchema")
111 p.mu.Lock()
112 defer p.mu.Unlock()
113
114 if p.schemas.Provider.Block != nil {
115 return p.schemas
116 }
117
118 resp.ResourceTypes = make(map[string]providers.Schema)
119 resp.DataSources = make(map[string]providers.Schema)
120
121 // Some providers may generate quite large schemas, and the internal default
122 // grpc response size limit is 4MB. 64MB should cover most any use case, and
123 // if we get providers nearing that we may want to consider a finer-grained
124 // API to fetch individual resource schemas.
125 // Note: this option is marked as EXPERIMENTAL in the grpc API.
126 const maxRecvSize = 64 << 20
127 protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProviderSchema_Request), grpc.MaxRecvMsgSizeCallOption{MaxRecvMsgSize: maxRecvSize})
128 if err != nil {
129 resp.Diagnostics = resp.Diagnostics.Append(err)
130 return resp
131 }
132
133 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
134
135 if protoResp.Provider == nil {
136 resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provider schema"))
137 return resp
138 }
139
140 resp.Provider = convert.ProtoToProviderSchema(protoResp.Provider)
141
142 for name, res := range protoResp.ResourceSchemas {
143 resp.ResourceTypes[name] = convert.ProtoToProviderSchema(res)
144 }
145
146 for name, data := range protoResp.DataSourceSchemas {
147 resp.DataSources[name] = convert.ProtoToProviderSchema(data)
148 }
149
150 p.schemas = resp
151
152 return resp
153}
154
155func (p *GRPCProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) (resp providers.PrepareProviderConfigResponse) {
156 log.Printf("[TRACE] GRPCProvider: PrepareProviderConfig")
157
158 schema := p.getSchema()
159 ty := schema.Provider.Block.ImpliedType()
160
161 mp, err := msgpack.Marshal(r.Config, ty)
162 if err != nil {
163 resp.Diagnostics = resp.Diagnostics.Append(err)
164 return resp
165 }
166
167 protoReq := &proto.PrepareProviderConfig_Request{
168 Config: &proto.DynamicValue{Msgpack: mp},
169 }
170
171 protoResp, err := p.client.PrepareProviderConfig(p.ctx, protoReq)
172 if err != nil {
173 resp.Diagnostics = resp.Diagnostics.Append(err)
174 return resp
175 }
176
177 config := cty.NullVal(ty)
178 if protoResp.PreparedConfig != nil {
179 config, err = msgpack.Unmarshal(protoResp.PreparedConfig.Msgpack, ty)
180 if err != nil {
181 resp.Diagnostics = resp.Diagnostics.Append(err)
182 return resp
183 }
184 }
185 resp.PreparedConfig = config
186
187 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
188 return resp
189}
190
191func (p *GRPCProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) (resp providers.ValidateResourceTypeConfigResponse) {
192 log.Printf("[TRACE] GRPCProvider: ValidateResourceTypeConfig")
193 resourceSchema := p.getResourceSchema(r.TypeName)
194
195 mp, err := msgpack.Marshal(r.Config, resourceSchema.Block.ImpliedType())
196 if err != nil {
197 resp.Diagnostics = resp.Diagnostics.Append(err)
198 return resp
199 }
200
201 protoReq := &proto.ValidateResourceTypeConfig_Request{
202 TypeName: r.TypeName,
203 Config: &proto.DynamicValue{Msgpack: mp},
204 }
205
206 protoResp, err := p.client.ValidateResourceTypeConfig(p.ctx, protoReq)
207 if err != nil {
208 resp.Diagnostics = resp.Diagnostics.Append(err)
209 return resp
210 }
211
212 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
213 return resp
214}
215
216func (p *GRPCProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) (resp providers.ValidateDataSourceConfigResponse) {
217 log.Printf("[TRACE] GRPCProvider: ValidateDataSourceConfig")
218
219 dataSchema := p.getDatasourceSchema(r.TypeName)
220
221 mp, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType())
222 if err != nil {
223 resp.Diagnostics = resp.Diagnostics.Append(err)
224 return resp
225 }
226
227 protoReq := &proto.ValidateDataSourceConfig_Request{
228 TypeName: r.TypeName,
229 Config: &proto.DynamicValue{Msgpack: mp},
230 }
231
232 protoResp, err := p.client.ValidateDataSourceConfig(p.ctx, protoReq)
233 if err != nil {
234 resp.Diagnostics = resp.Diagnostics.Append(err)
235 return resp
236 }
237 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
238 return resp
239}
240
241func (p *GRPCProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) (resp providers.UpgradeResourceStateResponse) {
242 log.Printf("[TRACE] GRPCProvider: UpgradeResourceState")
243
244 resSchema := p.getResourceSchema(r.TypeName)
245
246 protoReq := &proto.UpgradeResourceState_Request{
247 TypeName: r.TypeName,
248 Version: int64(r.Version),
249 RawState: &proto.RawState{
250 Json: r.RawStateJSON,
251 Flatmap: r.RawStateFlatmap,
252 },
253 }
254
255 protoResp, err := p.client.UpgradeResourceState(p.ctx, protoReq)
256 if err != nil {
257 resp.Diagnostics = resp.Diagnostics.Append(err)
258 return resp
259 }
260 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
261
262 state := cty.NullVal(resSchema.Block.ImpliedType())
263 if protoResp.UpgradedState != nil {
264 state, err = msgpack.Unmarshal(protoResp.UpgradedState.Msgpack, resSchema.Block.ImpliedType())
265 if err != nil {
266 resp.Diagnostics = resp.Diagnostics.Append(err)
267 return resp
268 }
269 }
270
271 resp.UpgradedState = state
272 return resp
273}
274
275func (p *GRPCProvider) Configure(r providers.ConfigureRequest) (resp providers.ConfigureResponse) {
276 log.Printf("[TRACE] GRPCProvider: Configure")
277
278 schema := p.getSchema()
279
280 var mp []byte
281
282 // we don't have anything to marshal if there's no config
283 mp, err := msgpack.Marshal(r.Config, schema.Provider.Block.ImpliedType())
284 if err != nil {
285 resp.Diagnostics = resp.Diagnostics.Append(err)
286 return resp
287 }
288
289 protoReq := &proto.Configure_Request{
290 TerraformVersion: version.Version,
291 Config: &proto.DynamicValue{
292 Msgpack: mp,
293 },
294 }
295
296 protoResp, err := p.client.Configure(p.ctx, protoReq)
297 if err != nil {
298 resp.Diagnostics = resp.Diagnostics.Append(err)
299 return resp
300 }
301 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
302 return resp
303}
304
305func (p *GRPCProvider) Stop() error {
306 log.Printf("[TRACE] GRPCProvider: Stop")
307
308 resp, err := p.client.Stop(p.ctx, new(proto.Stop_Request))
309 if err != nil {
310 return err
311 }
312
313 if resp.Error != "" {
314 return errors.New(resp.Error)
315 }
316 return nil
317}
318
319func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
320 log.Printf("[TRACE] GRPCProvider: ReadResource")
321
322 resSchema := p.getResourceSchema(r.TypeName)
323
324 mp, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
325 if err != nil {
326 resp.Diagnostics = resp.Diagnostics.Append(err)
327 return resp
328 }
329
330 protoReq := &proto.ReadResource_Request{
331 TypeName: r.TypeName,
332 CurrentState: &proto.DynamicValue{Msgpack: mp},
333 }
334
335 protoResp, err := p.client.ReadResource(p.ctx, protoReq)
336 if err != nil {
337 resp.Diagnostics = resp.Diagnostics.Append(err)
338 return resp
339 }
340 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
341
342 state := cty.NullVal(resSchema.Block.ImpliedType())
343 if protoResp.NewState != nil {
344 state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType())
345 if err != nil {
346 resp.Diagnostics = resp.Diagnostics.Append(err)
347 return resp
348 }
349 }
350 resp.NewState = state
351
352 return resp
353}
354
355func (p *GRPCProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
356 log.Printf("[TRACE] GRPCProvider: PlanResourceChange")
357
358 resSchema := p.getResourceSchema(r.TypeName)
359
360 priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
361 if err != nil {
362 resp.Diagnostics = resp.Diagnostics.Append(err)
363 return resp
364 }
365
366 configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType())
367 if err != nil {
368 resp.Diagnostics = resp.Diagnostics.Append(err)
369 return resp
370 }
371
372 propMP, err := msgpack.Marshal(r.ProposedNewState, resSchema.Block.ImpliedType())
373 if err != nil {
374 resp.Diagnostics = resp.Diagnostics.Append(err)
375 return resp
376 }
377
378 protoReq := &proto.PlanResourceChange_Request{
379 TypeName: r.TypeName,
380 PriorState: &proto.DynamicValue{Msgpack: priorMP},
381 Config: &proto.DynamicValue{Msgpack: configMP},
382 ProposedNewState: &proto.DynamicValue{Msgpack: propMP},
383 PriorPrivate: r.PriorPrivate,
384 }
385
386 protoResp, err := p.client.PlanResourceChange(p.ctx, protoReq)
387 if err != nil {
388 resp.Diagnostics = resp.Diagnostics.Append(err)
389 return resp
390 }
391 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
392
393 state := cty.NullVal(resSchema.Block.ImpliedType())
394 if protoResp.PlannedState != nil {
395 state, err = msgpack.Unmarshal(protoResp.PlannedState.Msgpack, resSchema.Block.ImpliedType())
396 if err != nil {
397 resp.Diagnostics = resp.Diagnostics.Append(err)
398 return resp
399 }
400 }
401 resp.PlannedState = state
402
403 for _, p := range protoResp.RequiresReplace {
404 resp.RequiresReplace = append(resp.RequiresReplace, convert.AttributePathToPath(p))
405 }
406
407 resp.PlannedPrivate = protoResp.PlannedPrivate
408
409 resp.LegacyTypeSystem = protoResp.LegacyTypeSystem
410
411 return resp
412}
413
414func (p *GRPCProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
415 log.Printf("[TRACE] GRPCProvider: ApplyResourceChange")
416
417 resSchema := p.getResourceSchema(r.TypeName)
418
419 priorMP, err := msgpack.Marshal(r.PriorState, resSchema.Block.ImpliedType())
420 if err != nil {
421 resp.Diagnostics = resp.Diagnostics.Append(err)
422 return resp
423 }
424 plannedMP, err := msgpack.Marshal(r.PlannedState, resSchema.Block.ImpliedType())
425 if err != nil {
426 resp.Diagnostics = resp.Diagnostics.Append(err)
427 return resp
428 }
429 configMP, err := msgpack.Marshal(r.Config, resSchema.Block.ImpliedType())
430 if err != nil {
431 resp.Diagnostics = resp.Diagnostics.Append(err)
432 return resp
433 }
434
435 protoReq := &proto.ApplyResourceChange_Request{
436 TypeName: r.TypeName,
437 PriorState: &proto.DynamicValue{Msgpack: priorMP},
438 PlannedState: &proto.DynamicValue{Msgpack: plannedMP},
439 Config: &proto.DynamicValue{Msgpack: configMP},
440 PlannedPrivate: r.PlannedPrivate,
441 }
442
443 protoResp, err := p.client.ApplyResourceChange(p.ctx, protoReq)
444 if err != nil {
445 resp.Diagnostics = resp.Diagnostics.Append(err)
446 return resp
447 }
448 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
449
450 resp.Private = protoResp.Private
451
452 state := cty.NullVal(resSchema.Block.ImpliedType())
453 if protoResp.NewState != nil {
454 state, err = msgpack.Unmarshal(protoResp.NewState.Msgpack, resSchema.Block.ImpliedType())
455 if err != nil {
456 resp.Diagnostics = resp.Diagnostics.Append(err)
457 return resp
458 }
459 }
460 resp.NewState = state
461
462 resp.LegacyTypeSystem = protoResp.LegacyTypeSystem
463
464 return resp
465}
466
467func (p *GRPCProvider) ImportResourceState(r providers.ImportResourceStateRequest) (resp providers.ImportResourceStateResponse) {
468 log.Printf("[TRACE] GRPCProvider: ImportResourceState")
469
470 protoReq := &proto.ImportResourceState_Request{
471 TypeName: r.TypeName,
472 Id: r.ID,
473 }
474
475 protoResp, err := p.client.ImportResourceState(p.ctx, protoReq)
476 if err != nil {
477 resp.Diagnostics = resp.Diagnostics.Append(err)
478 return resp
479 }
480 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
481
482 for _, imported := range protoResp.ImportedResources {
483 resource := providers.ImportedResource{
484 TypeName: imported.TypeName,
485 Private: imported.Private,
486 }
487
488 resSchema := p.getResourceSchema(resource.TypeName)
489 state := cty.NullVal(resSchema.Block.ImpliedType())
490 if imported.State != nil {
491 state, err = msgpack.Unmarshal(imported.State.Msgpack, resSchema.Block.ImpliedType())
492 if err != nil {
493 resp.Diagnostics = resp.Diagnostics.Append(err)
494 return resp
495 }
496 }
497 resource.State = state
498 resp.ImportedResources = append(resp.ImportedResources, resource)
499 }
500
501 return resp
502}
503
504func (p *GRPCProvider) ReadDataSource(r providers.ReadDataSourceRequest) (resp providers.ReadDataSourceResponse) {
505 log.Printf("[TRACE] GRPCProvider: ReadDataSource")
506
507 dataSchema := p.getDatasourceSchema(r.TypeName)
508
509 config, err := msgpack.Marshal(r.Config, dataSchema.Block.ImpliedType())
510 if err != nil {
511 resp.Diagnostics = resp.Diagnostics.Append(err)
512 return resp
513 }
514
515 protoReq := &proto.ReadDataSource_Request{
516 TypeName: r.TypeName,
517 Config: &proto.DynamicValue{
518 Msgpack: config,
519 },
520 }
521
522 protoResp, err := p.client.ReadDataSource(p.ctx, protoReq)
523 if err != nil {
524 resp.Diagnostics = resp.Diagnostics.Append(err)
525 return resp
526 }
527 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
528
529 state := cty.NullVal(dataSchema.Block.ImpliedType())
530 if protoResp.State != nil {
531 state, err = msgpack.Unmarshal(protoResp.State.Msgpack, dataSchema.Block.ImpliedType())
532 if err != nil {
533 resp.Diagnostics = resp.Diagnostics.Append(err)
534 return resp
535 }
536 }
537 resp.State = state
538
539 return resp
540}
541
542// closing the grpc connection is final, and terraform will call it at the end of every phase.
543func (p *GRPCProvider) Close() error {
544 log.Printf("[TRACE] GRPCProvider: Close")
545
546 // Make sure to stop the server if we're not running within go-plugin.
547 if p.TestServer != nil {
548 p.TestServer.Stop()
549 }
550
551 // Check this since it's not automatically inserted during plugin creation.
552 // It's currently only inserted by the command package, because that is
553 // where the factory is built and is the only point with access to the
554 // plugin.Client.
555 if p.PluginClient == nil {
556 log.Println("[DEBUG] provider has no plugin.Client")
557 return nil
558 }
559
560 p.PluginClient.Kill()
561 return nil
562}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go
new file mode 100644
index 0000000..136c88d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/plugin/grpc_provisioner.go
@@ -0,0 +1,178 @@
1package plugin
2
3import (
4 "context"
5 "errors"
6 "io"
7 "log"
8 "sync"
9
10 plugin "github.com/hashicorp/go-plugin"
11 "github.com/hashicorp/terraform/configs/configschema"
12 proto "github.com/hashicorp/terraform/internal/tfplugin5"
13 "github.com/hashicorp/terraform/plugin/convert"
14 "github.com/hashicorp/terraform/provisioners"
15 "github.com/zclconf/go-cty/cty"
16 "github.com/zclconf/go-cty/cty/msgpack"
17 "google.golang.org/grpc"
18)
19
20// GRPCProvisionerPlugin is the plugin.GRPCPlugin implementation.
21type GRPCProvisionerPlugin struct {
22 plugin.Plugin
23 GRPCProvisioner func() proto.ProvisionerServer
24}
25
26func (p *GRPCProvisionerPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
27 return &GRPCProvisioner{
28 client: proto.NewProvisionerClient(c),
29 ctx: ctx,
30 }, nil
31}
32
33func (p *GRPCProvisionerPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error {
34 proto.RegisterProvisionerServer(s, p.GRPCProvisioner())
35 return nil
36}
37
38// provisioners.Interface grpc implementation
39type GRPCProvisioner struct {
40 // PluginClient provides a reference to the plugin.Client which controls the plugin process.
41 // This allows the GRPCProvider a way to shutdown the plugin process.
42 PluginClient *plugin.Client
43
44 client proto.ProvisionerClient
45 ctx context.Context
46
47 // Cache the schema since we need it for serialization in each method call.
48 mu sync.Mutex
49 schema *configschema.Block
50}
51
52func (p *GRPCProvisioner) GetSchema() (resp provisioners.GetSchemaResponse) {
53 p.mu.Lock()
54 defer p.mu.Unlock()
55
56 if p.schema != nil {
57 return provisioners.GetSchemaResponse{
58 Provisioner: p.schema,
59 }
60 }
61
62 protoResp, err := p.client.GetSchema(p.ctx, new(proto.GetProvisionerSchema_Request))
63 if err != nil {
64 resp.Diagnostics = resp.Diagnostics.Append(err)
65 return resp
66 }
67 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
68
69 if protoResp.Provisioner == nil {
70 resp.Diagnostics = resp.Diagnostics.Append(errors.New("missing provisioner schema"))
71 return resp
72 }
73
74 resp.Provisioner = convert.ProtoToConfigSchema(protoResp.Provisioner.Block)
75
76 p.schema = resp.Provisioner
77
78 return resp
79}
80
81func (p *GRPCProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) (resp provisioners.ValidateProvisionerConfigResponse) {
82 schema := p.GetSchema()
83 if schema.Diagnostics.HasErrors() {
84 resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics)
85 return resp
86 }
87
88 mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType())
89 if err != nil {
90 resp.Diagnostics = resp.Diagnostics.Append(err)
91 return resp
92 }
93
94 protoReq := &proto.ValidateProvisionerConfig_Request{
95 Config: &proto.DynamicValue{Msgpack: mp},
96 }
97 protoResp, err := p.client.ValidateProvisionerConfig(p.ctx, protoReq)
98 if err != nil {
99 resp.Diagnostics = resp.Diagnostics.Append(err)
100 return resp
101 }
102 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(protoResp.Diagnostics))
103 return resp
104}
105
106func (p *GRPCProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) (resp provisioners.ProvisionResourceResponse) {
107 schema := p.GetSchema()
108 if schema.Diagnostics.HasErrors() {
109 resp.Diagnostics = resp.Diagnostics.Append(schema.Diagnostics)
110 return resp
111 }
112
113 mp, err := msgpack.Marshal(r.Config, schema.Provisioner.ImpliedType())
114 if err != nil {
115 resp.Diagnostics = resp.Diagnostics.Append(err)
116 return resp
117 }
118
119 // connection is always assumed to be a simple string map
120 connMP, err := msgpack.Marshal(r.Connection, cty.Map(cty.String))
121 if err != nil {
122 resp.Diagnostics = resp.Diagnostics.Append(err)
123 return resp
124 }
125
126 protoReq := &proto.ProvisionResource_Request{
127 Config: &proto.DynamicValue{Msgpack: mp},
128 Connection: &proto.DynamicValue{Msgpack: connMP},
129 }
130
131 outputClient, err := p.client.ProvisionResource(p.ctx, protoReq)
132 if err != nil {
133 resp.Diagnostics = resp.Diagnostics.Append(err)
134 return resp
135 }
136
137 for {
138 rcv, err := outputClient.Recv()
139 if rcv != nil {
140 r.UIOutput.Output(rcv.Output)
141 }
142 if err != nil {
143 if err != io.EOF {
144 resp.Diagnostics = resp.Diagnostics.Append(err)
145 }
146 break
147 }
148
149 if len(rcv.Diagnostics) > 0 {
150 resp.Diagnostics = resp.Diagnostics.Append(convert.ProtoToDiagnostics(rcv.Diagnostics))
151 break
152 }
153 }
154
155 return resp
156}
157
158func (p *GRPCProvisioner) Stop() error {
159 protoResp, err := p.client.Stop(p.ctx, &proto.Stop_Request{})
160 if err != nil {
161 return err
162 }
163 if protoResp.Error != "" {
164 return errors.New(protoResp.Error)
165 }
166 return nil
167}
168
169func (p *GRPCProvisioner) Close() error {
170 // check this since it's not automatically inserted during plugin creation
171 if p.PluginClient == nil {
172 log.Println("[DEBUG] provider has no plugin.Client")
173 return nil
174 }
175
176 p.PluginClient.Kill()
177 return nil
178}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
index 00fa7b2..e4fb577 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/plugin.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
@@ -6,8 +6,9 @@ import (
6 6
7// See serve.go for serving plugins 7// See serve.go for serving plugins
8 8
9// PluginMap should be used by clients for the map of plugins. 9var VersionedPlugins = map[int]plugin.PluginSet{
10var PluginMap = map[string]plugin.Plugin{ 10 5: {
11 "provider": &ResourceProviderPlugin{}, 11 "provider": &GRPCProviderPlugin{},
12 "provisioner": &ResourceProvisionerPlugin{}, 12 "provisioner": &GRPCProvisionerPlugin{},
13 },
13} 14}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
index d6a433c..459661a 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
@@ -9,11 +9,14 @@ import (
9 9
10// ResourceProviderPlugin is the plugin.Plugin implementation. 10// ResourceProviderPlugin is the plugin.Plugin implementation.
11type ResourceProviderPlugin struct { 11type ResourceProviderPlugin struct {
12 F func() terraform.ResourceProvider 12 ResourceProvider func() terraform.ResourceProvider
13} 13}
14 14
15func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { 15func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
16 return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil 16 return &ResourceProviderServer{
17 Broker: b,
18 Provider: p.ResourceProvider(),
19 }, nil
17} 20}
18 21
19func (p *ResourceProviderPlugin) Client( 22func (p *ResourceProviderPlugin) Client(
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
index 8fce9d8..f0cc341 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
@@ -4,16 +4,20 @@ import (
4 "net/rpc" 4 "net/rpc"
5 5
6 "github.com/hashicorp/go-plugin" 6 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/terraform" 8 "github.com/hashicorp/terraform/terraform"
8) 9)
9 10
10// ResourceProvisionerPlugin is the plugin.Plugin implementation. 11// ResourceProvisionerPlugin is the plugin.Plugin implementation.
11type ResourceProvisionerPlugin struct { 12type ResourceProvisionerPlugin struct {
12 F func() terraform.ResourceProvisioner 13 ResourceProvisioner func() terraform.ResourceProvisioner
13} 14}
14 15
15func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) { 16func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
16 return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil 17 return &ResourceProvisionerServer{
18 Broker: b,
19 Provisioner: p.ResourceProvisioner(),
20 }, nil
17} 21}
18 22
19func (p *ResourceProvisionerPlugin) Client( 23func (p *ResourceProvisionerPlugin) Client(
@@ -28,6 +32,11 @@ type ResourceProvisioner struct {
28 Client *rpc.Client 32 Client *rpc.Client
29} 33}
30 34
35func (p *ResourceProvisioner) GetConfigSchema() (*configschema.Block, error) {
36 panic("not implemented")
37 return nil, nil
38}
39
31func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) { 40func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
32 var resp ResourceProvisionerValidateResponse 41 var resp ResourceProvisionerValidateResponse
33 args := ResourceProvisionerValidateArgs{ 42 args := ResourceProvisionerValidateArgs{
diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go
index 2028a61..8d056c5 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/serve.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/serve.go
@@ -2,14 +2,23 @@ package plugin
2 2
3import ( 3import (
4 "github.com/hashicorp/go-plugin" 4 "github.com/hashicorp/go-plugin"
5 grpcplugin "github.com/hashicorp/terraform/helper/plugin"
6 proto "github.com/hashicorp/terraform/internal/tfplugin5"
5 "github.com/hashicorp/terraform/terraform" 7 "github.com/hashicorp/terraform/terraform"
6) 8)
7 9
8// The constants below are the names of the plugins that can be dispensed
9// from the plugin server.
10const ( 10const (
11 // The constants below are the names of the plugins that can be dispensed
12 // from the plugin server.
11 ProviderPluginName = "provider" 13 ProviderPluginName = "provider"
12 ProvisionerPluginName = "provisioner" 14 ProvisionerPluginName = "provisioner"
15
16 // DefaultProtocolVersion is the protocol version assumed for legacy clients that don't specify
17 // a particular version during their handshake. This is the version used when Terraform 0.10
18 // and 0.11 launch plugins that were built with support for both versions 4 and 5, and must
19 // stay unchanged at 4 until we intentionally build plugins that are not compatible with 0.10 and
20 // 0.11.
21 DefaultProtocolVersion = 4
13) 22)
14 23
15// Handshake is the HandshakeConfig used to configure clients and servers. 24// Handshake is the HandshakeConfig used to configure clients and servers.
@@ -19,7 +28,7 @@ var Handshake = plugin.HandshakeConfig{
19 // one or the other that makes it so that they can't safely communicate. 28 // one or the other that makes it so that they can't safely communicate.
20 // This could be adding a new interface value, it could be how 29 // This could be adding a new interface value, it could be how
21 // helper/schema computes diffs, etc. 30 // helper/schema computes diffs, etc.
22 ProtocolVersion: 4, 31 ProtocolVersion: DefaultProtocolVersion,
23 32
24 // The magic cookie values should NEVER be changed. 33 // The magic cookie values should NEVER be changed.
25 MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE", 34 MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE",
@@ -28,27 +37,85 @@ var Handshake = plugin.HandshakeConfig{
28 37
29type ProviderFunc func() terraform.ResourceProvider 38type ProviderFunc func() terraform.ResourceProvider
30type ProvisionerFunc func() terraform.ResourceProvisioner 39type ProvisionerFunc func() terraform.ResourceProvisioner
40type GRPCProviderFunc func() proto.ProviderServer
41type GRPCProvisionerFunc func() proto.ProvisionerServer
31 42
32// ServeOpts are the configurations to serve a plugin. 43// ServeOpts are the configurations to serve a plugin.
33type ServeOpts struct { 44type ServeOpts struct {
34 ProviderFunc ProviderFunc 45 ProviderFunc ProviderFunc
35 ProvisionerFunc ProvisionerFunc 46 ProvisionerFunc ProvisionerFunc
47
 48 // Wrapped versions of the above plugins will automatically be shimmed and
49 // added to the GRPC functions when possible.
50 GRPCProviderFunc GRPCProviderFunc
51 GRPCProvisionerFunc GRPCProvisionerFunc
36} 52}
37 53
38// Serve serves a plugin. This function never returns and should be the final 54// Serve serves a plugin. This function never returns and should be the final
39// function called in the main function of the plugin. 55// function called in the main function of the plugin.
40func Serve(opts *ServeOpts) { 56func Serve(opts *ServeOpts) {
57 // since the plugins may not yet be aware of the new protocol, we
58 // automatically wrap the plugins in the grpc shims.
59 if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil {
60 provider := grpcplugin.NewGRPCProviderServerShim(opts.ProviderFunc())
61 // this is almost always going to be a *schema.Provider, but check that
62 // we got back a valid provider just in case.
63 if provider != nil {
64 opts.GRPCProviderFunc = func() proto.ProviderServer {
65 return provider
66 }
67 }
68 }
69 if opts.GRPCProvisionerFunc == nil && opts.ProvisionerFunc != nil {
70 provisioner := grpcplugin.NewGRPCProvisionerServerShim(opts.ProvisionerFunc())
71 if provisioner != nil {
72 opts.GRPCProvisionerFunc = func() proto.ProvisionerServer {
73 return provisioner
74 }
75 }
76 }
77
41 plugin.Serve(&plugin.ServeConfig{ 78 plugin.Serve(&plugin.ServeConfig{
42 HandshakeConfig: Handshake, 79 HandshakeConfig: Handshake,
43 Plugins: pluginMap(opts), 80 VersionedPlugins: pluginSet(opts),
81 GRPCServer: plugin.DefaultGRPCServer,
44 }) 82 })
45} 83}
46 84
47// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin 85// pluginMap returns the legacy map[string]plugin.Plugin to use for configuring
48// server or client. 86// a plugin server or client.
49func pluginMap(opts *ServeOpts) map[string]plugin.Plugin { 87func legacyPluginMap(opts *ServeOpts) map[string]plugin.Plugin {
50 return map[string]plugin.Plugin{ 88 return map[string]plugin.Plugin{
51 "provider": &ResourceProviderPlugin{F: opts.ProviderFunc}, 89 "provider": &ResourceProviderPlugin{
52 "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc}, 90 ResourceProvider: opts.ProviderFunc,
91 },
92 "provisioner": &ResourceProvisionerPlugin{
93 ResourceProvisioner: opts.ProvisionerFunc,
94 },
95 }
96}
97
98func pluginSet(opts *ServeOpts) map[int]plugin.PluginSet {
99 // Set the legacy netrpc plugins at version 4.
 100 // The oldest version is returned when executed by a legacy go-plugin
101 // client.
102 plugins := map[int]plugin.PluginSet{
103 4: legacyPluginMap(opts),
104 }
105
106 // add the new protocol versions if they're configured
107 if opts.GRPCProviderFunc != nil || opts.GRPCProvisionerFunc != nil {
108 plugins[5] = plugin.PluginSet{}
109 if opts.GRPCProviderFunc != nil {
110 plugins[5]["provider"] = &GRPCProviderPlugin{
111 GRPCProvider: opts.GRPCProviderFunc,
112 }
113 }
114 if opts.GRPCProvisionerFunc != nil {
115 plugins[5]["provisioner"] = &GRPCProvisionerPlugin{
116 GRPCProvisioner: opts.GRPCProvisionerFunc,
117 }
118 }
53 } 119 }
120 return plugins
54} 121}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
index 493efc0..3469e6a 100644
--- a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
+++ b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
@@ -1,19 +1,20 @@
1package plugin 1package plugin
2 2
3import ( 3import (
4 "context"
4 "net/rpc" 5 "net/rpc"
5 6
6 "github.com/hashicorp/go-plugin" 7 "github.com/hashicorp/go-plugin"
7 "github.com/hashicorp/terraform/terraform" 8 "github.com/hashicorp/terraform/terraform"
8) 9)
9 10
10// UIInput is an implementatin of terraform.UIInput that communicates 11// UIInput is an implementation of terraform.UIInput that communicates
11// over RPC. 12// over RPC.
12type UIInput struct { 13type UIInput struct {
13 Client *rpc.Client 14 Client *rpc.Client
14} 15}
15 16
16func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) { 17func (i *UIInput) Input(ctx context.Context, opts *terraform.InputOpts) (string, error) {
17 var resp UIInputInputResponse 18 var resp UIInputInputResponse
18 err := i.Client.Call("Plugin.Input", opts, &resp) 19 err := i.Client.Call("Plugin.Input", opts, &resp)
19 if err != nil { 20 if err != nil {
@@ -41,7 +42,7 @@ type UIInputServer struct {
41func (s *UIInputServer) Input( 42func (s *UIInputServer) Input(
42 opts *terraform.InputOpts, 43 opts *terraform.InputOpts,
43 reply *UIInputInputResponse) error { 44 reply *UIInputInputResponse) error {
44 value, err := s.UIInput.Input(opts) 45 value, err := s.UIInput.Input(context.Background(), opts)
45 *reply = UIInputInputResponse{ 46 *reply = UIInputInputResponse{
46 Value: value, 47 Value: value,
47 Error: plugin.NewBasicError(err), 48 Error: plugin.NewBasicError(err),
diff --git a/vendor/github.com/hashicorp/terraform/providers/addressed_types.go b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go
new file mode 100644
index 0000000..7ed523f
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/addressed_types.go
@@ -0,0 +1,47 @@
1package providers
2
3import (
4 "sort"
5
6 "github.com/hashicorp/terraform/addrs"
7)
8
9// AddressedTypes is a helper that extracts all of the distinct provider
10// types from the given list of relative provider configuration addresses.
11func AddressedTypes(providerAddrs []addrs.ProviderConfig) []string {
12 if len(providerAddrs) == 0 {
13 return nil
14 }
15 m := map[string]struct{}{}
16 for _, addr := range providerAddrs {
17 m[addr.Type] = struct{}{}
18 }
19
20 names := make([]string, 0, len(m))
21 for typeName := range m {
22 names = append(names, typeName)
23 }
24
25 sort.Strings(names) // Stable result for tests
26 return names
27}
28
29// AddressedTypesAbs is a helper that extracts all of the distinct provider
30// types from the given list of absolute provider configuration addresses.
31func AddressedTypesAbs(providerAddrs []addrs.AbsProviderConfig) []string {
32 if len(providerAddrs) == 0 {
33 return nil
34 }
35 m := map[string]struct{}{}
36 for _, addr := range providerAddrs {
37 m[addr.ProviderConfig.Type] = struct{}{}
38 }
39
40 names := make([]string, 0, len(m))
41 for typeName := range m {
42 names = append(names, typeName)
43 }
44
45 sort.Strings(names) // Stable result for tests
46 return names
47}
diff --git a/vendor/github.com/hashicorp/terraform/providers/doc.go b/vendor/github.com/hashicorp/terraform/providers/doc.go
new file mode 100644
index 0000000..39aa1de
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/doc.go
@@ -0,0 +1,3 @@
1// Package providers contains the interface and primary types required to
2// implement a Terraform resource provider.
3package providers
diff --git a/vendor/github.com/hashicorp/terraform/providers/provider.go b/vendor/github.com/hashicorp/terraform/providers/provider.go
new file mode 100644
index 0000000..1aa08c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/provider.go
@@ -0,0 +1,351 @@
1package providers
2
3import (
4 "github.com/zclconf/go-cty/cty"
5
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/states"
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
11// Interface represents the set of methods required for a complete resource
12// provider plugin.
13type Interface interface {
14 // GetSchema returns the complete schema for the provider.
15 GetSchema() GetSchemaResponse
16
17 // PrepareProviderConfig allows the provider to validate the configuration
18 // values, and set or override any values with defaults.
19 PrepareProviderConfig(PrepareProviderConfigRequest) PrepareProviderConfigResponse
20
21 // ValidateResourceTypeConfig allows the provider to validate the resource
22 // configuration values.
23 ValidateResourceTypeConfig(ValidateResourceTypeConfigRequest) ValidateResourceTypeConfigResponse
24
25 // ValidateDataSource allows the provider to validate the data source
26 // configuration values.
27 ValidateDataSourceConfig(ValidateDataSourceConfigRequest) ValidateDataSourceConfigResponse
28
29 // UpgradeResourceState is called when the state loader encounters an
30 // instance state whose schema version is less than the one reported by the
31 // currently-used version of the corresponding provider, and the upgraded
32 // result is used for any further processing.
33 UpgradeResourceState(UpgradeResourceStateRequest) UpgradeResourceStateResponse
34
 35 // Configure configures and initializes the provider.
36 Configure(ConfigureRequest) ConfigureResponse
37
38 // Stop is called when the provider should halt any in-flight actions.
39 //
40 // Stop should not block waiting for in-flight actions to complete. It
41 // should take any action it wants and return immediately acknowledging it
42 // has received the stop request. Terraform will not make any further API
43 // calls to the provider after Stop is called.
44 //
45 // The error returned, if non-nil, is assumed to mean that signaling the
46 // stop somehow failed and that the user should expect potentially waiting
47 // a longer period of time.
48 Stop() error
49
50 // ReadResource refreshes a resource and returns its current state.
51 ReadResource(ReadResourceRequest) ReadResourceResponse
52
53 // PlanResourceChange takes the current state and proposed state of a
54 // resource, and returns the planned final state.
55 PlanResourceChange(PlanResourceChangeRequest) PlanResourceChangeResponse
56
57 // ApplyResourceChange takes the planned state for a resource, which may
58 // yet contain unknown computed values, and applies the changes returning
59 // the final state.
60 ApplyResourceChange(ApplyResourceChangeRequest) ApplyResourceChangeResponse
61
62 // ImportResourceState requests that the given resource be imported.
63 ImportResourceState(ImportResourceStateRequest) ImportResourceStateResponse
64
65 // ReadDataSource returns the data source's current state.
66 ReadDataSource(ReadDataSourceRequest) ReadDataSourceResponse
67
68 // Close shuts down the plugin process if applicable.
69 Close() error
70}
71
72type GetSchemaResponse struct {
73 // Provider is the schema for the provider itself.
74 Provider Schema
75
76 // ResourceTypes map the resource type name to that type's schema.
77 ResourceTypes map[string]Schema
78
79 // DataSources maps the data source name to that data source's schema.
80 DataSources map[string]Schema
81
82 // Diagnostics contains any warnings or errors from the method call.
83 Diagnostics tfdiags.Diagnostics
84}
85
86// Schema pairs a provider or resource schema with that schema's version.
87// This is used to be able to upgrade the schema in UpgradeResourceState.
88type Schema struct {
89 Version int64
90 Block *configschema.Block
91}
92
93type PrepareProviderConfigRequest struct {
94 // Config is the raw configuration value for the provider.
95 Config cty.Value
96}
97
98type PrepareProviderConfigResponse struct {
99 // PreparedConfig is the configuration as prepared by the provider.
100 PreparedConfig cty.Value
101 // Diagnostics contains any warnings or errors from the method call.
102 Diagnostics tfdiags.Diagnostics
103}
104
105type ValidateResourceTypeConfigRequest struct {
106 // TypeName is the name of the resource type to validate.
107 TypeName string
108
109 // Config is the configuration value to validate, which may contain unknown
110 // values.
111 Config cty.Value
112}
113
114type ValidateResourceTypeConfigResponse struct {
115 // Diagnostics contains any warnings or errors from the method call.
116 Diagnostics tfdiags.Diagnostics
117}
118
119type ValidateDataSourceConfigRequest struct {
120 // TypeName is the name of the data source type to validate.
121 TypeName string
122
123 // Config is the configuration value to validate, which may contain unknown
124 // values.
125 Config cty.Value
126}
127
128type ValidateDataSourceConfigResponse struct {
129 // Diagnostics contains any warnings or errors from the method call.
130 Diagnostics tfdiags.Diagnostics
131}
132
133type UpgradeResourceStateRequest struct {
134 // TypeName is the name of the resource type being upgraded
135 TypeName string
136
137 // Version is version of the schema that created the current state.
138 Version int64
139
 140 // RawStateJSON and RawStateFlatmap contain the state that needs to be
141 // upgraded to match the current schema version. Because the schema is
142 // unknown, this contains only the raw data as stored in the state.
143 // RawStateJSON is the current json state encoding.
144 // RawStateFlatmap is the legacy flatmap encoding.
 145 // Only one of these fields may be set for the upgrade request.
146 RawStateJSON []byte
147 RawStateFlatmap map[string]string
148}
149
150type UpgradeResourceStateResponse struct {
151 // UpgradedState is the newly upgraded resource state.
152 UpgradedState cty.Value
153
154 // Diagnostics contains any warnings or errors from the method call.
155 Diagnostics tfdiags.Diagnostics
156}
157
158type ConfigureRequest struct {
159 // Terraform version is the version string from the running instance of
160 // terraform. Providers can use TerraformVersion to verify compatibility,
161 // and to store for informational purposes.
162 TerraformVersion string
163
164 // Config is the complete configuration value for the provider.
165 Config cty.Value
166}
167
168type ConfigureResponse struct {
169 // Diagnostics contains any warnings or errors from the method call.
170 Diagnostics tfdiags.Diagnostics
171}
172
173type ReadResourceRequest struct {
174 // TypeName is the name of the resource type being read.
175 TypeName string
176
177 // PriorState contains the previously saved state value for this resource.
178 PriorState cty.Value
179}
180
181type ReadResourceResponse struct {
182 // NewState contains the current state of the resource.
183 NewState cty.Value
184
185 // Diagnostics contains any warnings or errors from the method call.
186 Diagnostics tfdiags.Diagnostics
187}
188
189type PlanResourceChangeRequest struct {
190 // TypeName is the name of the resource type to plan.
191 TypeName string
192
193 // PriorState is the previously saved state value for this resource.
194 PriorState cty.Value
195
196 // ProposedNewState is the expected state after the new configuration is
197 // applied. This is created by directly applying the configuration to the
198 // PriorState. The provider is then responsible for applying any further
199 // changes required to create the proposed final state.
200 ProposedNewState cty.Value
201
202 // Config is the resource configuration, before being merged with the
203 // PriorState. Any value not explicitly set in the configuration will be
204 // null. Config is supplied for reference, but Provider implementations
205 // should prefer the ProposedNewState in most circumstances.
206 Config cty.Value
207
208 // PriorPrivate is the previously saved private data returned from the
209 // provider during the last apply.
210 PriorPrivate []byte
211}
212
213type PlanResourceChangeResponse struct {
214 // PlannedState is the expected state of the resource once the current
215 // configuration is applied.
216 PlannedState cty.Value
217
 218 // RequiresReplace is the list of the attributes that require
 219 // resource replacement.
220 RequiresReplace []cty.Path
221
222 // PlannedPrivate is an opaque blob that is not interpreted by terraform
223 // core. This will be saved and relayed back to the provider during
224 // ApplyResourceChange.
225 PlannedPrivate []byte
226
227 // Diagnostics contains any warnings or errors from the method call.
228 Diagnostics tfdiags.Diagnostics
229
230 // LegacyTypeSystem is set only if the provider is using the legacy SDK
231 // whose type system cannot be precisely mapped into the Terraform type
232 // system. We use this to bypass certain consistency checks that would
233 // otherwise fail due to this imprecise mapping. No other provider or SDK
234 // implementation is permitted to set this.
235 LegacyTypeSystem bool
236}
237
238type ApplyResourceChangeRequest struct {
239 // TypeName is the name of the resource type being applied.
240 TypeName string
241
242 // PriorState is the current state of resource.
243 PriorState cty.Value
244
245 // Planned state is the state returned from PlanResourceChange, and should
246 // represent the new state, minus any remaining computed attributes.
247 PlannedState cty.Value
248
249 // Config is the resource configuration, before being merged with the
250 // PriorState. Any value not explicitly set in the configuration will be
251 // null. Config is supplied for reference, but Provider implementations
252 // should prefer the PlannedState in most circumstances.
253 Config cty.Value
254
255 // PlannedPrivate is the same value as returned by PlanResourceChange.
256 PlannedPrivate []byte
257}
258
259type ApplyResourceChangeResponse struct {
260 // NewState is the new complete state after applying the planned change.
261 // In the event of an error, NewState should represent the most recent
262 // known state of the resource, if it exists.
263 NewState cty.Value
264
265 // Private is an opaque blob that will be stored in state along with the
266 // resource. It is intended only for interpretation by the provider itself.
267 Private []byte
268
269 // Diagnostics contains any warnings or errors from the method call.
270 Diagnostics tfdiags.Diagnostics
271
272 // LegacyTypeSystem is set only if the provider is using the legacy SDK
273 // whose type system cannot be precisely mapped into the Terraform type
274 // system. We use this to bypass certain consistency checks that would
275 // otherwise fail due to this imprecise mapping. No other provider or SDK
276 // implementation is permitted to set this.
277 LegacyTypeSystem bool
278}
279
280type ImportResourceStateRequest struct {
281 // TypeName is the name of the resource type to be imported.
282 TypeName string
283
284 // ID is a string with which the provider can identify the resource to be
285 // imported.
286 ID string
287}
288
289type ImportResourceStateResponse struct {
290 // ImportedResources contains one or more state values related to the
291 // imported resource. It is not required that these be complete, only that
292 // there is enough identifying information for the provider to successfully
293 // update the states in ReadResource.
294 ImportedResources []ImportedResource
295
296 // Diagnostics contains any warnings or errors from the method call.
297 Diagnostics tfdiags.Diagnostics
298}
299
300// ImportedResource represents an object being imported into Terraform with the
301// help of a provider. An ImportedObject is a RemoteObject that has been read
302// by the provider's import handler but hasn't yet been committed to state.
303type ImportedResource struct {
304 // TypeName is the name of the resource type associated with the
305 // returned state. It's possible for providers to import multiple related
306 // types with a single import request.
307 TypeName string
308
309 // State is the state of the remote object being imported. This may not be
310 // complete, but must contain enough information to uniquely identify the
311 // resource.
312 State cty.Value
313
314 // Private is an opaque blob that will be stored in state along with the
315 // resource. It is intended only for interpretation by the provider itself.
316 Private []byte
317}
318
319// AsInstanceObject converts the receiving ImportedObject into a
320// ResourceInstanceObject that has status ObjectReady.
321//
322// The returned object does not know its own resource type, so the caller must
323// retain the ResourceType value from the source object if this information is
324// needed.
325//
326// The returned object also has no dependency addresses, but the caller may
327// freely modify the direct fields of the returned object without affecting
328// the receiver.
329func (ir ImportedResource) AsInstanceObject() *states.ResourceInstanceObject {
330 return &states.ResourceInstanceObject{
331 Status: states.ObjectReady,
332 Value: ir.State,
333 Private: ir.Private,
334 }
335}
336
337type ReadDataSourceRequest struct {
338 // TypeName is the name of the data source type to Read.
339 TypeName string
340
341 // Config is the complete configuration for the requested data source.
342 Config cty.Value
343}
344
345type ReadDataSourceResponse struct {
346 // State is the current state of the requested data source.
347 State cty.Value
348
349 // Diagnostics contains any warnings or errors from the method call.
350 Diagnostics tfdiags.Diagnostics
351}
diff --git a/vendor/github.com/hashicorp/terraform/providers/resolver.go b/vendor/github.com/hashicorp/terraform/providers/resolver.go
new file mode 100644
index 0000000..4de8e0a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/providers/resolver.go
@@ -0,0 +1,112 @@
1package providers
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/plugin/discovery"
7)
8
9// Resolver is an interface implemented by objects that are able to resolve
10// a given set of resource provider version constraints into Factory
11// callbacks.
12type Resolver interface {
13 // Given a constraint map, return a Factory for each requested provider.
14 // If some or all of the constraints cannot be satisfied, return a non-nil
15 // slice of errors describing the problems.
16 ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error)
17}
18
19// ResolverFunc wraps a callback function and turns it into a Resolver
20// implementation, for convenience in situations where a function and its
21// associated closure are sufficient as a resolver implementation.
22type ResolverFunc func(reqd discovery.PluginRequirements) (map[string]Factory, []error)
23
24// ResolveProviders implements Resolver by calling the
25// wrapped function.
26func (f ResolverFunc) ResolveProviders(reqd discovery.PluginRequirements) (map[string]Factory, []error) {
27 return f(reqd)
28}
29
30// ResolverFixed returns a Resolver that has a fixed set of provider factories
31// provided by the caller. The returned resolver ignores version constraints
32// entirely and just returns the given factory for each requested provider
33// name.
34//
35// This function is primarily used in tests, to provide mock providers or
36// in-process providers under test.
37func ResolverFixed(factories map[string]Factory) Resolver {
38 return ResolverFunc(func(reqd discovery.PluginRequirements) (map[string]Factory, []error) {
39 ret := make(map[string]Factory, len(reqd))
40 var errs []error
41 for name := range reqd {
42 if factory, exists := factories[name]; exists {
43 ret[name] = factory
44 } else {
45 errs = append(errs, fmt.Errorf("provider %q is not available", name))
46 }
47 }
48 return ret, errs
49 })
50}
51
52// Factory is a function type that creates a new instance of a resource
53// provider, or returns an error if that is impossible.
54type Factory func() (Interface, error)
55
56// FactoryFixed is a helper that creates a Factory that just returns some given
57// single provider.
58//
59// Unlike usual factories, the exact same instance is returned for each call
60// to the factory and so this must be used in only specialized situations where
61// the caller can take care to either not mutate the given provider at all
62// or to mutate it in ways that will not cause unexpected behavior for others
63// holding the same reference.
64func FactoryFixed(p Interface) Factory {
65 return func() (Interface, error) {
66 return p, nil
67 }
68}
69
70// ProviderHasResource is a helper that requests schema from the given provider
71// and checks if it has a resource type of the given name.
72//
73// This function is more expensive than it may first appear since it must
74// retrieve the entire schema from the underlying provider, and so it should
75// be used sparingly and especially not in tight loops.
76//
77// Since retrieving the provider may fail (e.g. if the provider is accessed
78// over an RPC channel that has operational problems), this function will
79// return false if the schema cannot be retrieved, under the assumption that
80// a subsequent call to do anything with the resource type would fail
81// anyway.
82func ProviderHasResource(provider Interface, typeName string) bool {
83 resp := provider.GetSchema()
84 if resp.Diagnostics.HasErrors() {
85 return false
86 }
87
88 _, exists := resp.ResourceTypes[typeName]
89 return exists
90}
91
92// ProviderHasDataSource is a helper that requests schema from the given
93// provider and checks if it has a data source of the given name.
94//
95// This function is more expensive than it may first appear since it must
96// retrieve the entire schema from the underlying provider, and so it should
97// be used sparingly and especially not in tight loops.
98//
99// Since retrieving the provider may fail (e.g. if the provider is accessed
100// over an RPC channel that has operational problems), this function will
101// return false if the schema cannot be retrieved, under the assumption that
102// a subsequent call to do anything with the data source would fail
103// anyway.
104func ProviderHasDataSource(provider Interface, dataSourceName string) bool {
105 resp := provider.GetSchema()
106 if resp.Diagnostics.HasErrors() {
107 return false
108 }
109
110 _, exists := resp.DataSources[dataSourceName]
111 return exists
112}
diff --git a/vendor/github.com/hashicorp/terraform/provisioners/doc.go b/vendor/github.com/hashicorp/terraform/provisioners/doc.go
new file mode 100644
index 0000000..b03ba9a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/provisioners/doc.go
@@ -0,0 +1,3 @@
1// Package provisioners contains the interface and primary types to implement a
2// Terraform resource provisioner.
3package provisioners
diff --git a/vendor/github.com/hashicorp/terraform/provisioners/factory.go b/vendor/github.com/hashicorp/terraform/provisioners/factory.go
new file mode 100644
index 0000000..590b97a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/provisioners/factory.go
@@ -0,0 +1,19 @@
1package provisioners
2
3// Factory is a function type that creates a new instance of a resource
4// provisioner, or returns an error if that is impossible.
5type Factory func() (Interface, error)
6
7// FactoryFixed is a helper that creates a Factory that just returns some given
8// single provisioner.
9//
10// Unlike usual factories, the exact same instance is returned for each call
11// to the factory and so this must be used in only specialized situations where
12// the caller can take care to either not mutate the given provider at all
13// or to mutate it in ways that will not cause unexpected behavior for others
14// holding the same reference.
15func FactoryFixed(p Interface) Factory {
16 return func() (Interface, error) {
17 return p, nil
18 }
19}
diff --git a/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go
new file mode 100644
index 0000000..e53c884
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/provisioners/provisioner.go
@@ -0,0 +1,82 @@
1package provisioners
2
3import (
4 "github.com/hashicorp/terraform/configs/configschema"
5 "github.com/hashicorp/terraform/tfdiags"
6 "github.com/zclconf/go-cty/cty"
7)
8
9// Interface is the set of methods required for a resource provisioner plugin.
10type Interface interface {
11 // GetSchema returns the schema for the provisioner configuration.
12 GetSchema() GetSchemaResponse
13
14 // ValidateProvisionerConfig allows the provisioner to validate the
15 // configuration values.
16 ValidateProvisionerConfig(ValidateProvisionerConfigRequest) ValidateProvisionerConfigResponse
17
18 // ProvisionResource runs the provisioner with provided configuration.
19 // ProvisionResource blocks until the execution is complete.
20 // If the returned diagnostics contain any errors, the resource will be
21 // left in a tainted state.
22 ProvisionResource(ProvisionResourceRequest) ProvisionResourceResponse
23
24 // Stop is called to interrupt the provisioner.
25 //
26 // Stop should not block waiting for in-flight actions to complete. It
27 // should take any action it wants and return immediately acknowledging it
28 // has received the stop request. Terraform will not make any further API
29 // calls to the provisioner after Stop is called.
30 //
31 // The error returned, if non-nil, is assumed to mean that signaling the
32 // stop somehow failed and that the user should expect potentially waiting
33 // a longer period of time.
34 Stop() error
35
36 // Close shuts down the plugin process if applicable.
37 Close() error
38}
39
40type GetSchemaResponse struct {
41 // Provisioner contains the schema for this provisioner.
42 Provisioner *configschema.Block
43
44 // Diagnostics contains any warnings or errors from the method call.
45 Diagnostics tfdiags.Diagnostics
46}
47
48// UIOutput provides the Output method for resource provisioner
49// plugins to write any output to the UI.
50//
51// Provisioners may call the Output method multiple times while Apply is in
52// progress. It is invalid to call Output after Apply returns.
53type UIOutput interface {
54 Output(string)
55}
56
57type ValidateProvisionerConfigRequest struct {
58 // Config is the complete configuration to be used for the provisioner.
59 Config cty.Value
60}
61
62type ValidateProvisionerConfigResponse struct {
63 // Diagnostics contains any warnings or errors from the method call.
64 Diagnostics tfdiags.Diagnostics
65}
66
67type ProvisionResourceRequest struct {
68 // Config is the complete provisioner configuration.
69 Config cty.Value
70
71 // Connection contains any information required to access the resource
72 // instance.
73 Connection cty.Value
74
75 // UIOutput is used to return output during the Apply operation.
76 UIOutput UIOutput
77}
78
79type ProvisionResourceResponse struct {
80 // Diagnostics contains any warnings or errors from the method call.
81 Diagnostics tfdiags.Diagnostics
82}
diff --git a/vendor/github.com/hashicorp/terraform/registry/client.go b/vendor/github.com/hashicorp/terraform/registry/client.go
index a18e6b8..93424d1 100644
--- a/vendor/github.com/hashicorp/terraform/registry/client.go
+++ b/vendor/github.com/hashicorp/terraform/registry/client.go
@@ -20,10 +20,11 @@ import (
20) 20)
21 21
22const ( 22const (
23 xTerraformGet = "X-Terraform-Get" 23 xTerraformGet = "X-Terraform-Get"
24 xTerraformVersion = "X-Terraform-Version" 24 xTerraformVersion = "X-Terraform-Version"
25 requestTimeout = 10 * time.Second 25 requestTimeout = 10 * time.Second
26 serviceID = "modules.v1" 26 modulesServiceID = "modules.v1"
27 providersServiceID = "providers.v1"
27) 28)
28 29
29var tfVersion = version.String() 30var tfVersion = version.String()
@@ -58,10 +59,10 @@ func NewClient(services *disco.Disco, client *http.Client) *Client {
58} 59}
59 60
60// Discover queries the host, and returns the url for the registry. 61// Discover queries the host, and returns the url for the registry.
61func (c *Client) Discover(host svchost.Hostname) (*url.URL, error) { 62func (c *Client) Discover(host svchost.Hostname, serviceID string) (*url.URL, error) {
62 service, err := c.services.DiscoverServiceURL(host, serviceID) 63 service, err := c.services.DiscoverServiceURL(host, serviceID)
63 if err != nil { 64 if err != nil {
64 return nil, err 65 return nil, &ServiceUnreachableError{err}
65 } 66 }
66 if !strings.HasSuffix(service.Path, "/") { 67 if !strings.HasSuffix(service.Path, "/") {
67 service.Path += "/" 68 service.Path += "/"
@@ -69,14 +70,14 @@ func (c *Client) Discover(host svchost.Hostname) (*url.URL, error) {
69 return service, nil 70 return service, nil
70} 71}
71 72
72// Versions queries the registry for a module, and returns the available versions. 73// ModuleVersions queries the registry for a module, and returns the available versions.
73func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, error) { 74func (c *Client) ModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) {
74 host, err := module.SvcHost() 75 host, err := module.SvcHost()
75 if err != nil { 76 if err != nil {
76 return nil, err 77 return nil, err
77 } 78 }
78 79
79 service, err := c.Discover(host) 80 service, err := c.Discover(host, modulesServiceID)
80 if err != nil { 81 if err != nil {
81 return nil, err 82 return nil, err
82 } 83 }
@@ -141,15 +142,15 @@ func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
141 } 142 }
142} 143}
143 144
144// Location find the download location for a specific version module. 145// ModuleLocation find the download location for a specific version module.
145// This returns a string, because the final location may contain special go-getter syntax. 146// This returns a string, because the final location may contain special go-getter syntax.
146func (c *Client) Location(module *regsrc.Module, version string) (string, error) { 147func (c *Client) ModuleLocation(module *regsrc.Module, version string) (string, error) {
147 host, err := module.SvcHost() 148 host, err := module.SvcHost()
148 if err != nil { 149 if err != nil {
149 return "", err 150 return "", err
150 } 151 }
151 152
152 service, err := c.Discover(host) 153 service, err := c.Discover(host, modulesServiceID)
153 if err != nil { 154 if err != nil {
154 return "", err 155 return "", err
155 } 156 }
@@ -225,3 +226,118 @@ func (c *Client) Location(module *regsrc.Module, version string) (string, error)
225 226
226 return location, nil 227 return location, nil
227} 228}
229
230// TerraformProviderVersions queries the registry for a provider, and returns the available versions.
231func (c *Client) TerraformProviderVersions(provider *regsrc.TerraformProvider) (*response.TerraformProviderVersions, error) {
232 host, err := provider.SvcHost()
233 if err != nil {
234 return nil, err
235 }
236
237 service, err := c.Discover(host, providersServiceID)
238 if err != nil {
239 return nil, err
240 }
241
242 p, err := url.Parse(path.Join(provider.TerraformProvider(), "versions"))
243 if err != nil {
244 return nil, err
245 }
246
247 service = service.ResolveReference(p)
248
249 log.Printf("[DEBUG] fetching provider versions from %q", service)
250
251 req, err := http.NewRequest("GET", service.String(), nil)
252 if err != nil {
253 return nil, err
254 }
255
256 c.addRequestCreds(host, req)
257 req.Header.Set(xTerraformVersion, tfVersion)
258
259 resp, err := c.client.Do(req)
260 if err != nil {
261 return nil, err
262 }
263 defer resp.Body.Close()
264
265 switch resp.StatusCode {
266 case http.StatusOK:
267 // OK
268 case http.StatusNotFound:
269 return nil, &errProviderNotFound{addr: provider}
270 default:
271 return nil, fmt.Errorf("error looking up provider versions: %s", resp.Status)
272 }
273
274 var versions response.TerraformProviderVersions
275
276 dec := json.NewDecoder(resp.Body)
277 if err := dec.Decode(&versions); err != nil {
278 return nil, err
279 }
280
281 return &versions, nil
282}
283
284// TerraformProviderLocation queries the registry for a provider download metadata
285func (c *Client) TerraformProviderLocation(provider *regsrc.TerraformProvider, version string) (*response.TerraformProviderPlatformLocation, error) {
286 host, err := provider.SvcHost()
287 if err != nil {
288 return nil, err
289 }
290
291 service, err := c.Discover(host, providersServiceID)
292 if err != nil {
293 return nil, err
294 }
295
296 p, err := url.Parse(path.Join(
297 provider.TerraformProvider(),
298 version,
299 "download",
300 provider.OS,
301 provider.Arch,
302 ))
303 if err != nil {
304 return nil, err
305 }
306
307 service = service.ResolveReference(p)
308
309 log.Printf("[DEBUG] fetching provider location from %q", service)
310
311 req, err := http.NewRequest("GET", service.String(), nil)
312 if err != nil {
313 return nil, err
314 }
315
316 c.addRequestCreds(host, req)
317 req.Header.Set(xTerraformVersion, tfVersion)
318
319 resp, err := c.client.Do(req)
320 if err != nil {
321 return nil, err
322 }
323 defer resp.Body.Close()
324
325 var loc response.TerraformProviderPlatformLocation
326
327 dec := json.NewDecoder(resp.Body)
328 if err := dec.Decode(&loc); err != nil {
329 return nil, err
330 }
331
332 switch resp.StatusCode {
333 case http.StatusOK, http.StatusNoContent:
334 // OK
335 case http.StatusNotFound:
336 return nil, fmt.Errorf("provider %q version %q not found", provider.TerraformProvider(), version)
337 default:
338 // anything else is an error:
339 return nil, fmt.Errorf("error getting download location for %q: %s", provider.TerraformProvider(), resp.Status)
340 }
341
342 return &loc, nil
343}
diff --git a/vendor/github.com/hashicorp/terraform/registry/errors.go b/vendor/github.com/hashicorp/terraform/registry/errors.go
index b8dcd31..5a6a31b 100644
--- a/vendor/github.com/hashicorp/terraform/registry/errors.go
+++ b/vendor/github.com/hashicorp/terraform/registry/errors.go
@@ -4,6 +4,7 @@ import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/registry/regsrc" 6 "github.com/hashicorp/terraform/registry/regsrc"
7 "github.com/hashicorp/terraform/svchost/disco"
7) 8)
8 9
9type errModuleNotFound struct { 10type errModuleNotFound struct {
@@ -21,3 +22,42 @@ func IsModuleNotFound(err error) bool {
21 _, ok := err.(*errModuleNotFound) 22 _, ok := err.(*errModuleNotFound)
22 return ok 23 return ok
23} 24}
25
26type errProviderNotFound struct {
27 addr *regsrc.TerraformProvider
28}
29
30func (e *errProviderNotFound) Error() string {
31 return fmt.Sprintf("provider %s not found", e.addr)
32}
33
34// IsProviderNotFound returns true only if the given error is a "provider not found"
35// error. This allows callers to recognize this particular error condition
36// as distinct from operational errors such as poor network connectivity.
37func IsProviderNotFound(err error) bool {
38 _, ok := err.(*errProviderNotFound)
39 return ok
40}
41
42// IsServiceNotProvided returns true only if the given error is a "service not provided"
43// error. This allows callers to recognize this particular error condition
44// as distinct from operational errors such as poor network connectivity.
45func IsServiceNotProvided(err error) bool {
46 _, ok := err.(*disco.ErrServiceNotProvided)
47 return ok
48}
49
50// ServiceUnreachableError Registry service is unreachable
51type ServiceUnreachableError struct {
52 err error
53}
54
55func (e *ServiceUnreachableError) Error() string {
56 return e.err.Error()
57}
58
59// IsServiceUnreachable returns true if the registry/discovery service was unreachable
60func IsServiceUnreachable(err error) bool {
61 _, ok := err.(*ServiceUnreachableError)
62 return ok
63}
diff --git a/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go
new file mode 100644
index 0000000..58dedee
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/regsrc/terraform_provider.go
@@ -0,0 +1,60 @@
1package regsrc
2
3import (
4 "fmt"
5 "runtime"
6 "strings"
7
8 "github.com/hashicorp/terraform/svchost"
9)
10
11var (
12 // DefaultProviderNamespace represents the namespace for canonical
13 // HashiCorp-controlled providers.
14 DefaultProviderNamespace = "-"
15)
16
17// TerraformProvider describes a Terraform Registry Provider source.
18type TerraformProvider struct {
19 RawHost *FriendlyHost
20 RawNamespace string
21 RawName string
22 OS string
23 Arch string
24}
25
26// NewTerraformProvider constructs a new provider source.
27func NewTerraformProvider(name, os, arch string) *TerraformProvider {
28 if os == "" {
29 os = runtime.GOOS
30 }
31 if arch == "" {
32 arch = runtime.GOARCH
33 }
34
35 // separate namespace if included
36 namespace := DefaultProviderNamespace
37 if names := strings.SplitN(name, "/", 2); len(names) == 2 {
38 namespace, name = names[0], names[1]
39 }
40 p := &TerraformProvider{
41 RawHost: PublicRegistryHost,
42 RawNamespace: namespace,
43 RawName: name,
44 OS: os,
45 Arch: arch,
46 }
47
48 return p
49}
50
51// Provider returns just the registry ID of the provider
52func (p *TerraformProvider) TerraformProvider() string {
53 return fmt.Sprintf("%s/%s", p.RawNamespace, p.RawName)
54}
55
56// SvcHost returns the svchost.Hostname for this provider. The
57// default PublicRegistryHost is returned.
58func (p *TerraformProvider) SvcHost() (svchost.Hostname, error) {
59 return svchost.ForComparison(PublicRegistryHost.Raw)
60}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider.go b/vendor/github.com/hashicorp/terraform/registry/response/provider.go
new file mode 100644
index 0000000..5e8bae3
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/provider.go
@@ -0,0 +1,36 @@
1package response
2
3import (
4 "time"
5)
6
7// Provider is the response structure with the data for a single provider
8// version. This is just the metadata. A full provider response will be
9// ProviderDetail.
10type Provider struct {
11 ID string `json:"id"`
12
13 //---------------------------------------------------------------
14 // Metadata about the overall provider.
15
16 Owner string `json:"owner"`
17 Namespace string `json:"namespace"`
18 Name string `json:"name"`
19 Version string `json:"version"`
20 Description string `json:"description"`
21 Source string `json:"source"`
22 PublishedAt time.Time `json:"published_at"`
23 Downloads int `json:"downloads"`
24}
25
26// ProviderDetail represents a Provider with full detail.
27type ProviderDetail struct {
28 Provider
29
30 //---------------------------------------------------------------
31 // The fields below are only set when requesting this specific
32 // module. They are available to easily know all available versions
33 // without multiple API calls.
34
35 Versions []string `json:"versions"` // All versions
36}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go b/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go
new file mode 100644
index 0000000..1dc7d23
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/provider_list.go
@@ -0,0 +1,7 @@
1package response
2
3// ProviderList is the response structure for a pageable list of providers.
4type ProviderList struct {
5 Meta PaginationMeta `json:"meta"`
6 Providers []*Provider `json:"providers"`
7}
diff --git a/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go b/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go
new file mode 100644
index 0000000..64e454a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/registry/response/terraform_provider.go
@@ -0,0 +1,96 @@
1package response
2
3import (
4 "sort"
5 "strings"
6
7 version "github.com/hashicorp/go-version"
8)
9
10// TerraformProvider is the response structure for all required information for
11// Terraform to choose a download URL. It must include all versions and all
12// platforms for Terraform to perform version and os/arch constraint matching
13// locally.
14type TerraformProvider struct {
15 ID string `json:"id"`
16 Verified bool `json:"verified"`
17
18 Versions []*TerraformProviderVersion `json:"versions"`
19}
20
21// TerraformProviderVersion is the Terraform-specific response structure for a
22// provider version.
23type TerraformProviderVersion struct {
24 Version string `json:"version"`
25 Protocols []string `json:"protocols"`
26
27 Platforms []*TerraformProviderPlatform `json:"platforms"`
28}
29
30// TerraformProviderVersions is the Terraform-specific response structure for an
31// array of provider versions
32type TerraformProviderVersions struct {
33 ID string `json:"id"`
34 Versions []*TerraformProviderVersion `json:"versions"`
35 Warnings []string `json:"warnings"`
36}
37
38// TerraformProviderPlatform is the Terraform-specific response structure for a
39// provider platform.
40type TerraformProviderPlatform struct {
41 OS string `json:"os"`
42 Arch string `json:"arch"`
43}
44
45// TerraformProviderPlatformLocation is the Terraform-specific response
46// structure for a provider platform with all details required to perform a
47// download.
48type TerraformProviderPlatformLocation struct {
49 Protocols []string `json:"protocols"`
50 OS string `json:"os"`
51 Arch string `json:"arch"`
52 Filename string `json:"filename"`
53 DownloadURL string `json:"download_url"`
54 ShasumsURL string `json:"shasums_url"`
55 ShasumsSignatureURL string `json:"shasums_signature_url"`
56 Shasum string `json:"shasum"`
57
58 SigningKeys SigningKeyList `json:"signing_keys"`
59}
60
61// SigningKeyList is the response structure for a list of signing keys.
62type SigningKeyList struct {
63 GPGKeys []*GPGKey `json:"gpg_public_keys"`
64}
65
66// GPGKey is the response structure for a GPG key.
67type GPGKey struct {
68 ASCIIArmor string `json:"ascii_armor"`
69 Source string `json:"source"`
70 SourceURL *string `json:"source_url"`
71}
72
73// Collection type for TerraformProviderVersion
74type ProviderVersionCollection []*TerraformProviderVersion
75
76// GPGASCIIArmor returns an ASCII-armor-formatted string for all of the gpg
77// keys in the response.
78func (signingKeys *SigningKeyList) GPGASCIIArmor() string {
79 keys := []string{}
80
81 for _, gpgKey := range signingKeys.GPGKeys {
82 keys = append(keys, gpgKey.ASCIIArmor)
83 }
84
85 return strings.Join(keys, "\n")
86}
87
88// Sort sorts versions from newest to oldest.
89func (v ProviderVersionCollection) Sort() {
90 sort.Slice(v, func(i, j int) bool {
91 versionA, _ := version.NewVersion(v[i].Version)
92 versionB, _ := version.NewVersion(v[j].Version)
93
94 return versionA.GreaterThan(versionB)
95 })
96}
diff --git a/vendor/github.com/hashicorp/terraform/states/doc.go b/vendor/github.com/hashicorp/terraform/states/doc.go
new file mode 100644
index 0000000..7dd74ac
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/doc.go
@@ -0,0 +1,3 @@
1// Package states contains the types that are used to represent Terraform
2// states.
3package states
diff --git a/vendor/github.com/hashicorp/terraform/states/eachmode_string.go b/vendor/github.com/hashicorp/terraform/states/eachmode_string.go
new file mode 100644
index 0000000..0dc7349
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/eachmode_string.go
@@ -0,0 +1,35 @@
1// Code generated by "stringer -type EachMode"; DO NOT EDIT.
2
3package states
4
5import "strconv"
6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[NoEach-0]
12 _ = x[EachList-76]
13 _ = x[EachMap-77]
14}
15
16const (
17 _EachMode_name_0 = "NoEach"
18 _EachMode_name_1 = "EachListEachMap"
19)
20
21var (
22 _EachMode_index_1 = [...]uint8{0, 8, 15}
23)
24
25func (i EachMode) String() string {
26 switch {
27 case i == 0:
28 return _EachMode_name_0
29 case 76 <= i && i <= 77:
30 i -= 76
31 return _EachMode_name_1[_EachMode_index_1[i]:_EachMode_index_1[i+1]]
32 default:
33 return "EachMode(" + strconv.FormatInt(int64(i), 10) + ")"
34 }
35}
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_generation.go b/vendor/github.com/hashicorp/terraform/states/instance_generation.go
new file mode 100644
index 0000000..617ad4e
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/instance_generation.go
@@ -0,0 +1,24 @@
1package states
2
3// Generation is used to represent multiple objects in a succession of objects
4// represented by a single resource instance address. A resource instance can
5// have multiple generations over its lifetime due to object replacement
6// (when a change can't be applied without destroying and re-creating), and
7// multiple generations can exist at the same time when create_before_destroy
8// is used.
9//
10// A Generation value can either be the value of the variable "CurrentGen" or
11// a value of type DeposedKey. Generation values can be compared for equality
12// using "==" and used as map keys. The zero value of Generation (nil) is not
13// a valid generation and must not be used.
14type Generation interface {
15 generation()
16}
17
18// CurrentGen is the Generation representing the currently-active object for
19// a resource instance.
20var CurrentGen Generation
21
22type currentGen struct{}
23
24func (g currentGen) generation() {}
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object.go b/vendor/github.com/hashicorp/terraform/states/instance_object.go
new file mode 100644
index 0000000..1374c59
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/instance_object.go
@@ -0,0 +1,120 @@
1package states
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 ctyjson "github.com/zclconf/go-cty/cty/json"
6
7 "github.com/hashicorp/terraform/addrs"
8)
9
10// ResourceInstanceObject is the local representation of a specific remote
11// object associated with a resource instance. In practice not all remote
12// objects are actually remote in the sense of being accessed over the network,
13// but this is the most common case.
14//
15// It is not valid to mutate a ResourceInstanceObject once it has been created.
16// Instead, create a new object and replace the existing one.
17type ResourceInstanceObject struct {
18 // Value is the object-typed value representing the remote object within
19 // Terraform.
20 Value cty.Value
21
22 // Private is an opaque value set by the provider when this object was
23 // last created or updated. Terraform Core does not use this value in
24 // any way and it is not exposed anywhere in the user interface, so
25 // a provider can use it for retaining any necessary private state.
26 Private []byte
27
28 // Status represents the "readiness" of the object as of the last time
29 // it was updated.
30 Status ObjectStatus
31
32 // Dependencies is a set of other addresses in the same module which
33 // this instance depended on when the given attributes were evaluated.
34 // This is used to construct the dependency relationships for an object
35 // whose configuration is no longer available, such as if it has been
36 // removed from configuration altogether, or is now deposed.
37 Dependencies []addrs.Referenceable
38}
39
40// ObjectStatus represents the status of a RemoteObject.
41type ObjectStatus rune
42
43//go:generate stringer -type ObjectStatus
44
45const (
46 // ObjectReady is an object status for an object that is ready to use.
47 ObjectReady ObjectStatus = 'R'
48
49 // ObjectTainted is an object status representing an object that is in
50 // an unrecoverable bad state due to a partial failure during a create,
51 // update, or delete operation. Since it cannot be moved into the
52 // ObjectRead state, a tainted object must be replaced.
53 ObjectTainted ObjectStatus = 'T'
54
55 // ObjectPlanned is a special object status used only for the transient
56 // placeholder objects we place into state during the refresh and plan
57 // walks to stand in for objects that will be created during apply.
58 //
59 // Any object of this status must have a corresponding change recorded
60 // in the current plan, whose value must then be used in preference to
61 // the value stored in state when evaluating expressions. A planned
62 // object stored in state will be incomplete if any of its attributes are
63 // not yet known, and the plan must be consulted in order to "see" those
64 // unknown values, because the state is not able to represent them.
65 ObjectPlanned ObjectStatus = 'P'
66)
67
68// Encode marshals the value within the receiver to produce a
69// ResourceInstanceObjectSrc ready to be written to a state file.
70//
71// The given type must be the implied type of the resource type schema, and
72// the given value must conform to it. It is important to pass the schema
73// type and not the object's own type so that dynamically-typed attributes
74// will be stored correctly. The caller must also provide the version number
75// of the schema that the given type was derived from, which will be recorded
76// in the source object so it can be used to detect when schema migration is
77// required on read.
78//
79// The returned object may share internal references with the receiver and
80// so the caller must not mutate the receiver any further once once this
81// method is called.
82func (o *ResourceInstanceObject) Encode(ty cty.Type, schemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
83 // Our state serialization can't represent unknown values, so we convert
84 // them to nulls here. This is lossy, but nobody should be writing unknown
85 // values here and expecting to get them out again later.
86 //
87 // We get unknown values here while we're building out a "planned state"
88 // during the plan phase, but the value stored in the plan takes precedence
89 // for expression evaluation. The apply step should never produce unknown
90 // values, but if it does it's the responsibility of the caller to detect
91 // and raise an error about that.
92 val := cty.UnknownAsNull(o.Value)
93
94 src, err := ctyjson.Marshal(val, ty)
95 if err != nil {
96 return nil, err
97 }
98
99 return &ResourceInstanceObjectSrc{
100 SchemaVersion: schemaVersion,
101 AttrsJSON: src,
102 Private: o.Private,
103 Status: o.Status,
104 Dependencies: o.Dependencies,
105 }, nil
106}
107
108// AsTainted returns a deep copy of the receiver with the status updated to
109// ObjectTainted.
110func (o *ResourceInstanceObject) AsTainted() *ResourceInstanceObject {
111 if o == nil {
112 // A nil object can't be tainted, but we'll allow this anyway to
113 // avoid a crash, since we presumably intend to eventually record
114 // the object has having been deleted anyway.
115 return nil
116 }
117 ret := o.DeepCopy()
118 ret.Status = ObjectTainted
119 return ret
120}
diff --git a/vendor/github.com/hashicorp/terraform/states/instance_object_src.go b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go
new file mode 100644
index 0000000..6cb3c27
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/instance_object_src.go
@@ -0,0 +1,113 @@
1package states
2
3import (
4 "github.com/zclconf/go-cty/cty"
5 ctyjson "github.com/zclconf/go-cty/cty/json"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/config/hcl2shim"
9)
10
11// ResourceInstanceObjectSrc is a not-fully-decoded version of
12// ResourceInstanceObject. Decoding of it can be completed by first handling
13// any schema migration steps to get to the latest schema version and then
14// calling method Decode with the implied type of the latest schema.
15type ResourceInstanceObjectSrc struct {
16 // SchemaVersion is the resource-type-specific schema version number that
17 // was current when either AttrsJSON or AttrsFlat was encoded. Migration
18 // steps are required if this is less than the current version number
19 // reported by the corresponding provider.
20 SchemaVersion uint64
21
22 // AttrsJSON is a JSON-encoded representation of the object attributes,
23 // encoding the value (of the object type implied by the associated resource
24 // type schema) that represents this remote object in Terraform Language
25 // expressions, and is compared with configuration when producing a diff.
26 //
27 // This is retained in JSON format here because it may require preprocessing
28 // before decoding if, for example, the stored attributes are for an older
29 // schema version which the provider must upgrade before use. If the
30 // version is current, it is valid to simply decode this using the
31 // type implied by the current schema, without the need for the provider
32 // to perform an upgrade first.
33 //
34 // When writing a ResourceInstanceObject into the state, AttrsJSON should
35 // always be conformant to the current schema version and the current
36 // schema version should be recorded in the SchemaVersion field.
37 AttrsJSON []byte
38
39 // AttrsFlat is a legacy form of attributes used in older state file
40 // formats, and in the new state format for objects that haven't yet been
41 // upgraded. This attribute is mutually exclusive with Attrs: for any
42 // ResourceInstanceObject, only one of these attributes may be populated
43 // and the other must be nil.
44 //
45 // An instance object with this field populated should be upgraded to use
46 // Attrs at the earliest opportunity, since this legacy flatmap-based
47 // format will be phased out over time. AttrsFlat should not be used when
48 // writing new or updated objects to state; instead, callers must follow
49 // the recommendations in the AttrsJSON documentation above.
50 AttrsFlat map[string]string
51
52 // These fields all correspond to the fields of the same name on
53 // ResourceInstanceObject.
54 Private []byte
55 Status ObjectStatus
56 Dependencies []addrs.Referenceable
57}
58
59// Decode unmarshals the raw representation of the object attributes. Pass the
60// implied type of the corresponding resource type schema for correct operation.
61//
62// Before calling Decode, the caller must check that the SchemaVersion field
63// exactly equals the version number of the schema whose implied type is being
64// passed, or else the result is undefined.
65//
66// The returned object may share internal references with the receiver and
67// so the caller must not mutate the receiver any further once once this
68// method is called.
69func (os *ResourceInstanceObjectSrc) Decode(ty cty.Type) (*ResourceInstanceObject, error) {
70 var val cty.Value
71 var err error
72 if os.AttrsFlat != nil {
73 // Legacy mode. We'll do our best to unpick this from the flatmap.
74 val, err = hcl2shim.HCL2ValueFromFlatmap(os.AttrsFlat, ty)
75 if err != nil {
76 return nil, err
77 }
78 } else {
79 val, err = ctyjson.Unmarshal(os.AttrsJSON, ty)
80 if err != nil {
81 return nil, err
82 }
83 }
84
85 return &ResourceInstanceObject{
86 Value: val,
87 Status: os.Status,
88 Dependencies: os.Dependencies,
89 Private: os.Private,
90 }, nil
91}
92
93// CompleteUpgrade creates a new ResourceInstanceObjectSrc by copying the
94// metadata from the receiver and writing in the given new schema version
95// and attribute value that are presumed to have resulted from upgrading
96// from an older schema version.
97func (os *ResourceInstanceObjectSrc) CompleteUpgrade(newAttrs cty.Value, newType cty.Type, newSchemaVersion uint64) (*ResourceInstanceObjectSrc, error) {
98 new := os.DeepCopy()
99 new.AttrsFlat = nil // We always use JSON after an upgrade, even if the source used flatmap
100
101 // This is the same principle as ResourceInstanceObject.Encode, but
102 // avoiding a decode/re-encode cycle because we don't have type info
103 // available for the "old" attributes.
104 newAttrs = cty.UnknownAsNull(newAttrs)
105 src, err := ctyjson.Marshal(newAttrs, newType)
106 if err != nil {
107 return nil, err
108 }
109
110 new.AttrsJSON = src
111 new.SchemaVersion = newSchemaVersion
112 return new, nil
113}
diff --git a/vendor/github.com/hashicorp/terraform/states/module.go b/vendor/github.com/hashicorp/terraform/states/module.go
new file mode 100644
index 0000000..d89e787
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/module.go
@@ -0,0 +1,285 @@
1package states
2
3import (
4 "github.com/zclconf/go-cty/cty"
5
6 "github.com/hashicorp/terraform/addrs"
7)
8
// Module is a container for the states of objects within a particular module.
type Module struct {
	// Addr is the address of the module instance that this state belongs to.
	Addr addrs.ModuleInstance

	// Resources contains the state for each resource. The keys in this map are
	// an implementation detail and must not be used by outside callers.
	Resources map[string]*Resource

	// OutputValues contains the state for each output value. The keys in this
	// map are output value names.
	OutputValues map[string]*OutputValue

	// LocalValues contains the value for each named local value. The keys
	// in this map are local value names.
	LocalValues map[string]cty.Value
}
25
26// NewModule constructs an empty module state for the given module address.
27func NewModule(addr addrs.ModuleInstance) *Module {
28 return &Module{
29 Addr: addr,
30 Resources: map[string]*Resource{},
31 OutputValues: map[string]*OutputValue{},
32 LocalValues: map[string]cty.Value{},
33 }
34}
35
36// Resource returns the state for the resource with the given address within
37// the receiving module state, or nil if the requested resource is not tracked
38// in the state.
39func (ms *Module) Resource(addr addrs.Resource) *Resource {
40 return ms.Resources[addr.String()]
41}
42
43// ResourceInstance returns the state for the resource instance with the given
44// address within the receiving module state, or nil if the requested instance
45// is not tracked in the state.
46func (ms *Module) ResourceInstance(addr addrs.ResourceInstance) *ResourceInstance {
47 rs := ms.Resource(addr.Resource)
48 if rs == nil {
49 return nil
50 }
51 return rs.Instance(addr.Key)
52}
53
54// SetResourceMeta updates the resource-level metadata for the resource
55// with the given address, creating the resource state for it if it doesn't
56// already exist.
57func (ms *Module) SetResourceMeta(addr addrs.Resource, eachMode EachMode, provider addrs.AbsProviderConfig) {
58 rs := ms.Resource(addr)
59 if rs == nil {
60 rs = &Resource{
61 Addr: addr,
62 Instances: map[addrs.InstanceKey]*ResourceInstance{},
63 }
64 ms.Resources[addr.String()] = rs
65 }
66
67 rs.EachMode = eachMode
68 rs.ProviderConfig = provider
69}
70
71// RemoveResource removes the entire state for the given resource, taking with
72// it any instances associated with the resource. This should generally be
73// called only for resource objects whose instances have all been destroyed.
74func (ms *Module) RemoveResource(addr addrs.Resource) {
75 delete(ms.Resources, addr.String())
76}
77
// SetResourceInstanceCurrent saves the given instance object as the current
// generation of the resource instance with the given address, simultaneously
// updating the recorded provider configuration address, dependencies, and
// resource EachMode.
//
// Any existing current instance object for the given resource is overwritten.
// Set obj to nil to remove the primary generation object altogether. If there
// are no deposed objects then the instance will be removed altogether.
//
// The provider address and "each mode" are resource-wide settings and so they
// are updated for all other instances of the same resource as a side-effect of
// this call.
func (ms *Module) SetResourceInstanceCurrent(addr addrs.ResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
	// Ensure the resource-level record exists, with an each mode implied by
	// the shape of this instance's key.
	ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider)

	rs := ms.Resource(addr.Resource)
	is := rs.EnsureInstance(addr.Key)

	is.Current = obj

	if !is.HasObjects() {
		// If we have no objects at all then we'll clean up.
		delete(rs.Instances, addr.Key)
	}
	if rs.EachMode == NoEach && len(rs.Instances) == 0 {
		// Also clean up if we only expect to have one instance anyway
		// and there are none. We leave the resource behind if an each mode
		// is active because an empty list or map of instances is a valid state.
		delete(ms.Resources, addr.Resource.String())
	}
}
109
// SetResourceInstanceDeposed saves the given instance object as a deposed
// generation of the resource instance with the given address and deposed key.
//
// Call this method only for pre-existing deposed objects that already have
// a known DeposedKey. For example, this method is useful if reloading objects
// that were persisted to a state file. To mark the current object as deposed,
// use DeposeResourceInstanceObject instead.
//
// The resource that contains the given instance must already exist in the
// state, or this method will panic. Use Resource to check first if its
// presence is not already guaranteed.
//
// Any existing current instance object for the given resource and deposed key
// is overwritten. Set obj to nil to remove the deposed object altogether. If
// the instance is left with no objects after this operation then it will
// be removed from its containing resource altogether.
func (ms *Module) SetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
	// Refresh the resource-wide metadata (each mode, provider) as a
	// side-effect, mirroring SetResourceInstanceCurrent.
	ms.SetResourceMeta(addr.Resource, eachModeForInstanceKey(addr.Key), provider)

	rs := ms.Resource(addr.Resource)
	is := rs.EnsureInstance(addr.Key)
	if obj != nil {
		is.Deposed[key] = obj
	} else {
		// nil means "remove": a nil deposed object is never stored.
		delete(is.Deposed, key)
	}

	if !is.HasObjects() {
		// If we have no objects at all then we'll clean up.
		delete(rs.Instances, addr.Key)
	}
	if rs.EachMode == NoEach && len(rs.Instances) == 0 {
		// Also clean up if we only expect to have one instance anyway
		// and there are none. We leave the resource behind if an each mode
		// is active because an empty list or map of instances is a valid state.
		delete(ms.Resources, addr.Resource.String())
	}
}
148
149// ForgetResourceInstanceAll removes the record of all objects associated with
150// the specified resource instance, if present. If not present, this is a no-op.
151func (ms *Module) ForgetResourceInstanceAll(addr addrs.ResourceInstance) {
152 rs := ms.Resource(addr.Resource)
153 if rs == nil {
154 return
155 }
156 delete(rs.Instances, addr.Key)
157
158 if rs.EachMode == NoEach && len(rs.Instances) == 0 {
159 // Also clean up if we only expect to have one instance anyway
160 // and there are none. We leave the resource behind if an each mode
161 // is active because an empty list or map of instances is a valid state.
162 delete(ms.Resources, addr.Resource.String())
163 }
164}
165
166// ForgetResourceInstanceDeposed removes the record of the deposed object with
167// the given address and key, if present. If not present, this is a no-op.
168func (ms *Module) ForgetResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) {
169 rs := ms.Resource(addr.Resource)
170 if rs == nil {
171 return
172 }
173 is := rs.Instance(addr.Key)
174 if is == nil {
175 return
176 }
177 delete(is.Deposed, key)
178
179 if !is.HasObjects() {
180 // If we have no objects at all then we'll clean up.
181 delete(rs.Instances, addr.Key)
182 }
183 if rs.EachMode == NoEach && len(rs.Instances) == 0 {
184 // Also clean up if we only expect to have one instance anyway
185 // and there are none. We leave the resource behind if an each mode
186 // is active because an empty list or map of instances is a valid state.
187 delete(ms.Resources, addr.Resource.String())
188 }
189}
190
191// deposeResourceInstanceObject is the real implementation of
192// SyncState.DeposeResourceInstanceObject.
193func (ms *Module) deposeResourceInstanceObject(addr addrs.ResourceInstance, forceKey DeposedKey) DeposedKey {
194 is := ms.ResourceInstance(addr)
195 if is == nil {
196 return NotDeposed
197 }
198 return is.deposeCurrentObject(forceKey)
199}
200
201// maybeRestoreResourceInstanceDeposed is the real implementation of
202// SyncState.MaybeRestoreResourceInstanceDeposed.
203func (ms *Module) maybeRestoreResourceInstanceDeposed(addr addrs.ResourceInstance, key DeposedKey) bool {
204 rs := ms.Resource(addr.Resource)
205 if rs == nil {
206 return false
207 }
208 is := rs.Instance(addr.Key)
209 if is == nil {
210 return false
211 }
212 if is.Current != nil {
213 return false
214 }
215 if len(is.Deposed) == 0 {
216 return false
217 }
218 is.Current = is.Deposed[key]
219 delete(is.Deposed, key)
220 return true
221}
222
223// SetOutputValue writes an output value into the state, overwriting any
224// existing value of the same name.
225func (ms *Module) SetOutputValue(name string, value cty.Value, sensitive bool) *OutputValue {
226 os := &OutputValue{
227 Value: value,
228 Sensitive: sensitive,
229 }
230 ms.OutputValues[name] = os
231 return os
232}
233
234// RemoveOutputValue removes the output value of the given name from the state,
235// if it exists. This method is a no-op if there is no value of the given
236// name.
237func (ms *Module) RemoveOutputValue(name string) {
238 delete(ms.OutputValues, name)
239}
240
241// SetLocalValue writes a local value into the state, overwriting any
242// existing value of the same name.
243func (ms *Module) SetLocalValue(name string, value cty.Value) {
244 ms.LocalValues[name] = value
245}
246
247// RemoveLocalValue removes the local value of the given name from the state,
248// if it exists. This method is a no-op if there is no value of the given
249// name.
250func (ms *Module) RemoveLocalValue(name string) {
251 delete(ms.LocalValues, name)
252}
253
254// PruneResourceHusks is a specialized method that will remove any Resource
255// objects that do not contain any instances, even if they have an EachMode.
256//
257// You probably shouldn't call this! See the method of the same name on
258// type State for more information on what this is for and the rare situations
259// where it is safe to use.
260func (ms *Module) PruneResourceHusks() {
261 for _, rs := range ms.Resources {
262 if len(rs.Instances) == 0 {
263 ms.RemoveResource(rs.Addr)
264 }
265 }
266}
267
268// empty returns true if the receving module state is contributing nothing
269// to the state. In other words, it returns true if the module could be
270// removed from the state altogether without changing the meaning of the state.
271//
272// In practice a module containing no objects is the same as a non-existent
273// module, and so we can opportunistically clean up once a module becomes
274// empty on the assumption that it will be re-added if needed later.
275func (ms *Module) empty() bool {
276 if ms == nil {
277 return true
278 }
279
280 // This must be updated to cover any new collections added to Module
281 // in future.
282 return (len(ms.Resources) == 0 &&
283 len(ms.OutputValues) == 0 &&
284 len(ms.LocalValues) == 0)
285}
diff --git a/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go
new file mode 100644
index 0000000..96a6db2
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/objectstatus_string.go
@@ -0,0 +1,33 @@
1// Code generated by "stringer -type ObjectStatus"; DO NOT EDIT.
2
3package states
4
5import "strconv"
6
// Compile-time guard emitted by stringer: if any ObjectStatus constant's
// value changes, one of these indexes goes negative or out of range and the
// build fails, prompting regeneration. (Generated code — do not hand-edit.)
func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[ObjectReady-82]
	_ = x[ObjectTainted-84]
	_ = x[ObjectPlanned-80]
}
15
// Name strings for each ObjectStatus value, generated by stringer.
// (Generated code — do not hand-edit.)
const (
	_ObjectStatus_name_0 = "ObjectPlanned"
	_ObjectStatus_name_1 = "ObjectReady"
	_ObjectStatus_name_2 = "ObjectTainted"
)
21
// String returns the constant name for a known ObjectStatus (whose values
// are the rune codes 'P', 'R' and 'T'), or a "ObjectStatus(N)" placeholder
// for unknown values. (Generated code — do not hand-edit.)
func (i ObjectStatus) String() string {
	switch {
	case i == 80: // 'P'
		return _ObjectStatus_name_0
	case i == 82: // 'R'
		return _ObjectStatus_name_1
	case i == 84: // 'T'
		return _ObjectStatus_name_2
	default:
		return "ObjectStatus(" + strconv.FormatInt(int64(i), 10) + ")"
	}
}
diff --git a/vendor/github.com/hashicorp/terraform/states/output_value.go b/vendor/github.com/hashicorp/terraform/states/output_value.go
new file mode 100644
index 0000000..d232b76
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/output_value.go
@@ -0,0 +1,14 @@
1package states
2
3import (
4 "github.com/zclconf/go-cty/cty"
5)
6
// OutputValue represents the state of a particular output value.
//
// It is not valid to mutate an OutputValue object once it has been created.
// Instead, create an entirely new OutputValue to replace the previous one.
type OutputValue struct {
	// Value is the recorded result of the output expression.
	Value cty.Value
	// Sensitive records whether the output was declared sensitive, so that
	// callers can avoid displaying the value.
	Sensitive bool
}
diff --git a/vendor/github.com/hashicorp/terraform/states/resource.go b/vendor/github.com/hashicorp/terraform/states/resource.go
new file mode 100644
index 0000000..e2a2b85
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/resource.go
@@ -0,0 +1,239 @@
1package states
2
3import (
4 "fmt"
5 "math/rand"
6 "time"
7
8 "github.com/hashicorp/terraform/addrs"
9)
10
// Resource represents the state of a resource.
type Resource struct {
	// Addr is the module-relative address for the resource this state object
	// belongs to.
	Addr addrs.Resource

	// EachMode is the multi-instance mode currently in use for this resource,
	// or NoEach if this is a single-instance resource. This dictates what
	// type of value is returned when accessing this resource via expressions
	// in the Terraform language.
	EachMode EachMode

	// Instances contains the potentially-multiple instances associated with
	// this resource. This map can contain a mixture of different key types,
	// but only the ones of InstanceKeyType are considered current.
	Instances map[addrs.InstanceKey]*ResourceInstance

	// ProviderConfig is the absolute address for the provider configuration that
	// most recently managed this resource. This is used to connect a resource
	// with a provider configuration when the resource configuration block is
	// not available, such as if it has been removed from configuration
	// altogether.
	ProviderConfig addrs.AbsProviderConfig
}
35
36// Instance returns the state for the instance with the given key, or nil
37// if no such instance is tracked within the state.
38func (rs *Resource) Instance(key addrs.InstanceKey) *ResourceInstance {
39 return rs.Instances[key]
40}
41
42// EnsureInstance returns the state for the instance with the given key,
43// creating a new empty state for it if one doesn't already exist.
44//
45// Because this may create and save a new state, it is considered to be
46// a write operation.
47func (rs *Resource) EnsureInstance(key addrs.InstanceKey) *ResourceInstance {
48 ret := rs.Instance(key)
49 if ret == nil {
50 ret = NewResourceInstance()
51 rs.Instances[key] = ret
52 }
53 return ret
54}
55
// ResourceInstance represents the state of a particular instance of a resource.
type ResourceInstance struct {
	// Current, if non-nil, is the remote object that is currently represented
	// by the corresponding resource instance.
	Current *ResourceInstanceObjectSrc

	// Deposed, if len > 0, contains any remote objects that were previously
	// represented by the corresponding resource instance but have been
	// replaced and are pending destruction due to the create_before_destroy
	// lifecycle mode. Keys are the randomly-generated DeposedKey values.
	Deposed map[DeposedKey]*ResourceInstanceObjectSrc
}
68
69// NewResourceInstance constructs and returns a new ResourceInstance, ready to
70// use.
71func NewResourceInstance() *ResourceInstance {
72 return &ResourceInstance{
73 Deposed: map[DeposedKey]*ResourceInstanceObjectSrc{},
74 }
75}
76
77// HasCurrent returns true if this resource instance has a "current"-generation
78// object. Most instances do, but this can briefly be false during a
79// create-before-destroy replace operation when the current has been deposed
80// but its replacement has not yet been created.
81func (i *ResourceInstance) HasCurrent() bool {
82 return i != nil && i.Current != nil
83}
84
85// HasDeposed returns true if this resource instance has a deposed object
86// with the given key.
87func (i *ResourceInstance) HasDeposed(key DeposedKey) bool {
88 return i != nil && i.Deposed[key] != nil
89}
90
91// HasAnyDeposed returns true if this resource instance has one or more
92// deposed objects.
93func (i *ResourceInstance) HasAnyDeposed() bool {
94 return i != nil && len(i.Deposed) > 0
95}
96
97// HasObjects returns true if this resource has any objects at all, whether
98// current or deposed.
99func (i *ResourceInstance) HasObjects() bool {
100 return i.Current != nil || len(i.Deposed) != 0
101}
102
// deposeCurrentObject is part of the real implementation of
// SyncState.DeposeResourceInstanceObject. The exported method uses a lock
// to ensure that we can safely allocate an unused deposed key without
// collision.
//
// It moves the current object into the Deposed map under the returned key,
// leaving Current nil. If there is no current object it returns NotDeposed
// and changes nothing. A non-NotDeposed forceKey is used verbatim; the caller
// must guarantee it is unused, or this method panics.
func (i *ResourceInstance) deposeCurrentObject(forceKey DeposedKey) DeposedKey {
	if !i.HasCurrent() {
		return NotDeposed
	}

	key := forceKey
	if key == NotDeposed {
		// No key was forced, so pick a fresh random one.
		key = i.findUnusedDeposedKey()
	} else {
		if _, exists := i.Deposed[key]; exists {
			// Collision on a forced key is a caller bug, not a runtime
			// condition we can recover from.
			panic(fmt.Sprintf("forced key %s is already in use", forceKey))
		}
	}
	i.Deposed[key] = i.Current
	i.Current = nil
	return key
}
124
125// GetGeneration retrieves the object of the given generation from the
126// ResourceInstance, or returns nil if there is no such object.
127//
128// If the given generation is nil or invalid, this method will panic.
129func (i *ResourceInstance) GetGeneration(gen Generation) *ResourceInstanceObjectSrc {
130 if gen == CurrentGen {
131 return i.Current
132 }
133 if dk, ok := gen.(DeposedKey); ok {
134 return i.Deposed[dk]
135 }
136 if gen == nil {
137 panic(fmt.Sprintf("get with nil Generation"))
138 }
139 // Should never fall out here, since the above covers all possible
140 // Generation values.
141 panic(fmt.Sprintf("get invalid Generation %#v", gen))
142}
143
// FindUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to
// already be in use for this instance at the time of the call.
//
// Note that the validity of this result may change if new deposed keys are
// allocated before it is used. To avoid this risk, instead use the
// DeposeResourceInstanceObject method on the SyncState wrapper type, which
// allocates a key and uses it atomically.
func (i *ResourceInstance) FindUnusedDeposedKey() DeposedKey {
	// Exported wrapper over the unexported helper shared with the depose path.
	return i.findUnusedDeposedKey()
}
154
155// findUnusedDeposedKey generates a unique DeposedKey that is guaranteed not to
156// already be in use for this instance.
157func (i *ResourceInstance) findUnusedDeposedKey() DeposedKey {
158 for {
159 key := NewDeposedKey()
160 if _, exists := i.Deposed[key]; !exists {
161 return key
162 }
163 // Spin until we find a unique one. This shouldn't take long, because
164 // we have a 32-bit keyspace and there's rarely more than one deposed
165 // instance.
166 }
167}
168
// EachMode specifies the multi-instance mode for a resource: how its
// instances are keyed (by integer index, by string key, or not at all).
type EachMode rune

const (
	// NoEach is the zero value: a single-instance resource with no key.
	NoEach EachMode = 0
	// EachList indicates count-style instances keyed by integer index.
	EachList EachMode = 'L'
	// EachMap indicates for_each-style instances keyed by string.
	EachMap EachMode = 'M'
)

//go:generate stringer -type EachMode
179
180func eachModeForInstanceKey(key addrs.InstanceKey) EachMode {
181 switch key.(type) {
182 case addrs.IntKey:
183 return EachList
184 case addrs.StringKey:
185 return EachMap
186 default:
187 if key == addrs.NoKey {
188 return NoEach
189 }
190 panic(fmt.Sprintf("don't know an each mode for instance key %#v", key))
191 }
192}
193
// DeposedKey is a 8-character hex string used to uniquely identify deposed
// instance objects in the state.
type DeposedKey string

// NotDeposed is a special invalid value of DeposedKey that is used to represent
// the absence of a deposed key. It must not be used as an actual deposed key.
const NotDeposed = DeposedKey("")

// deposedKeyRand is the package-level PRNG backing NewDeposedKey, seeded
// once at init from the wall clock. Keys need only be unique, not secure.
var deposedKeyRand = rand.New(rand.NewSource(time.Now().UnixNano()))
203
204// NewDeposedKey generates a pseudo-random deposed key. Because of the short
205// length of these keys, uniqueness is not a natural consequence and so the
206// caller should test to see if the generated key is already in use and generate
207// another if so, until a unique key is found.
208func NewDeposedKey() DeposedKey {
209 v := deposedKeyRand.Uint32()
210 return DeposedKey(fmt.Sprintf("%08x", v))
211}
212
// String returns the raw 8-character hex form of the key (empty for NotDeposed).
func (k DeposedKey) String() string {
	return string(k)
}
216
217func (k DeposedKey) GoString() string {
218 ks := string(k)
219 switch {
220 case ks == "":
221 return "states.NotDeposed"
222 default:
223 return fmt.Sprintf("states.DeposedKey(%s)", ks)
224 }
225}
226
// Generation is a helper method to convert a DeposedKey into a Generation.
// If the receiver is anything other than NotDeposed then the result is
// just the same value as a Generation. If the receiver is NotDeposed then
// the result is CurrentGen.
func (k DeposedKey) Generation() Generation {
	if k == NotDeposed {
		return CurrentGen
	}
	return k
}
237
// generation is an implementation of Generation (an unexported marker method,
// so only types in this package can satisfy the interface).
func (k DeposedKey) generation() {}
diff --git a/vendor/github.com/hashicorp/terraform/states/state.go b/vendor/github.com/hashicorp/terraform/states/state.go
new file mode 100644
index 0000000..1f84235
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/state.go
@@ -0,0 +1,229 @@
1package states
2
3import (
4 "sort"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/addrs"
9)
10
// State is the top-level type of a Terraform state.
//
// A state should be mutated only via its accessor methods, to ensure that
// invariants are preserved.
//
// Access to State and the nested values within it is not concurrency-safe,
// so when accessing a State object concurrently it is the caller's
// responsibility to ensure that only one write is in progress at a time
// and that reads only occur when no write is in progress. The most common
// way to achieve this is to wrap the State in a SyncState and use the
// higher-level atomic operations supported by that type.
type State struct {
	// Modules contains the state for each module. The keys in this map are
	// an implementation detail and must not be used by outside callers.
	Modules map[string]*Module
}
27
28// NewState constructs a minimal empty state, containing an empty root module.
29func NewState() *State {
30 modules := map[string]*Module{}
31 modules[addrs.RootModuleInstance.String()] = NewModule(addrs.RootModuleInstance)
32 return &State{
33 Modules: modules,
34 }
35}
36
37// BuildState is a helper -- primarily intended for tests -- to build a state
38// using imperative code against the StateSync type while still acting as
39// an expression of type *State to assign into a containing struct.
40func BuildState(cb func(*SyncState)) *State {
41 s := NewState()
42 cb(s.SyncWrapper())
43 return s
44}
45
46// Empty returns true if there are no resources or populated output values
47// in the receiver. In other words, if this state could be safely replaced
48// with the return value of NewState and be functionally equivalent.
49func (s *State) Empty() bool {
50 if s == nil {
51 return true
52 }
53 for _, ms := range s.Modules {
54 if len(ms.Resources) != 0 {
55 return false
56 }
57 if len(ms.OutputValues) != 0 {
58 return false
59 }
60 }
61 return true
62}
63
64// Module returns the state for the module with the given address, or nil if
65// the requested module is not tracked in the state.
66func (s *State) Module(addr addrs.ModuleInstance) *Module {
67 if s == nil {
68 panic("State.Module on nil *State")
69 }
70 return s.Modules[addr.String()]
71}
72
73// RemoveModule removes the module with the given address from the state,
74// unless it is the root module. The root module cannot be deleted, and so
75// this method will panic if that is attempted.
76//
77// Removing a module implicitly discards all of the resources, outputs and
78// local values within it, and so this should usually be done only for empty
79// modules. For callers accessing the state through a SyncState wrapper, modules
80// are automatically pruned if they are empty after one of their contained
81// elements is removed.
82func (s *State) RemoveModule(addr addrs.ModuleInstance) {
83 if addr.IsRoot() {
84 panic("attempted to remove root module")
85 }
86
87 delete(s.Modules, addr.String())
88}
89
90// RootModule is a convenient alias for Module(addrs.RootModuleInstance).
91func (s *State) RootModule() *Module {
92 if s == nil {
93 panic("RootModule called on nil State")
94 }
95 return s.Modules[addrs.RootModuleInstance.String()]
96}
97
98// EnsureModule returns the state for the module with the given address,
99// creating and adding a new one if necessary.
100//
101// Since this might modify the state to add a new instance, it is considered
102// to be a write operation.
103func (s *State) EnsureModule(addr addrs.ModuleInstance) *Module {
104 ms := s.Module(addr)
105 if ms == nil {
106 ms = NewModule(addr)
107 s.Modules[addr.String()] = ms
108 }
109 return ms
110}
111
112// HasResources returns true if there is at least one resource (of any mode)
113// present in the receiving state.
114func (s *State) HasResources() bool {
115 if s == nil {
116 return false
117 }
118 for _, ms := range s.Modules {
119 if len(ms.Resources) > 0 {
120 return true
121 }
122 }
123 return false
124}
125
126// Resource returns the state for the resource with the given address, or nil
127// if no such resource is tracked in the state.
128func (s *State) Resource(addr addrs.AbsResource) *Resource {
129 ms := s.Module(addr.Module)
130 if ms == nil {
131 return nil
132 }
133 return ms.Resource(addr.Resource)
134}
135
136// ResourceInstance returns the state for the resource instance with the given
137// address, or nil if no such resource is tracked in the state.
138func (s *State) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance {
139 if s == nil {
140 panic("State.ResourceInstance on nil *State")
141 }
142 ms := s.Module(addr.Module)
143 if ms == nil {
144 return nil
145 }
146 return ms.ResourceInstance(addr.Resource)
147}
148
149// OutputValue returns the state for the output value with the given address,
150// or nil if no such output value is tracked in the state.
151func (s *State) OutputValue(addr addrs.AbsOutputValue) *OutputValue {
152 ms := s.Module(addr.Module)
153 if ms == nil {
154 return nil
155 }
156 return ms.OutputValues[addr.OutputValue.Name]
157}
158
159// LocalValue returns the value of the named local value with the given address,
160// or cty.NilVal if no such value is tracked in the state.
161func (s *State) LocalValue(addr addrs.AbsLocalValue) cty.Value {
162 ms := s.Module(addr.Module)
163 if ms == nil {
164 return cty.NilVal
165 }
166 return ms.LocalValues[addr.LocalValue.Name]
167}
168
169// ProviderAddrs returns a list of all of the provider configuration addresses
170// referenced throughout the receiving state.
171//
172// The result is de-duplicated so that each distinct address appears only once.
173func (s *State) ProviderAddrs() []addrs.AbsProviderConfig {
174 if s == nil {
175 return nil
176 }
177
178 m := map[string]addrs.AbsProviderConfig{}
179 for _, ms := range s.Modules {
180 for _, rc := range ms.Resources {
181 m[rc.ProviderConfig.String()] = rc.ProviderConfig
182 }
183 }
184 if len(m) == 0 {
185 return nil
186 }
187
188 // This is mainly just so we'll get stable results for testing purposes.
189 keys := make([]string, 0, len(m))
190 for k := range m {
191 keys = append(keys, k)
192 }
193 sort.Strings(keys)
194
195 ret := make([]addrs.AbsProviderConfig, len(keys))
196 for i, key := range keys {
197 ret[i] = m[key]
198 }
199
200 return ret
201}
202
// PruneResourceHusks is a specialized method that will remove any Resource
// objects that do not contain any instances, even if they have an EachMode.
//
// This should generally be used only after a "terraform destroy" operation,
// to finalize the cleanup of the state. It is not correct to use this after
// other operations because if a resource has "count = 0" or "for_each" over
// an empty collection then we want to retain it in the state so that references
// to it, particularly in "strange" contexts like "terraform console", can be
// properly resolved.
//
// This method MUST NOT be called concurrently with other readers and writers
// of the receiving state.
func (s *State) PruneResourceHusks() {
	// Deleting map entries while ranging is well-defined in Go, so pruning
	// modules inside the loop is safe.
	for _, m := range s.Modules {
		m.PruneResourceHusks()
		// A non-root module that ends up with no resources is removed
		// entirely; the root module is always retained.
		if len(m.Resources) == 0 && !m.Addr.IsRoot() {
			s.RemoveModule(m.Addr)
		}
	}
}
223
224// SyncWrapper returns a SyncState object wrapping the receiver.
225func (s *State) SyncWrapper() *SyncState {
226 return &SyncState{
227 state: s,
228 }
229}
diff --git a/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
new file mode 100644
index 0000000..ea717d0
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
@@ -0,0 +1,218 @@
1package states
2
3import (
4 "github.com/hashicorp/terraform/addrs"
5 "github.com/zclconf/go-cty/cty"
6)
7
8// Taking deep copies of states is an important operation because state is
9// otherwise a mutable data structure that is challenging to share across
10// many separate callers. It is important that the DeepCopy implementations
11// in this file comprehensively copy all parts of the state data structure
12// that could be mutated via pointers.
13
14// DeepCopy returns a new state that contains equivalent data to the reciever
15// but shares no backing memory in common.
16//
17// As with all methods on State, this method is not safe to use concurrently
18// with writing to any portion of the recieving data structure. It is the
19// caller's responsibility to ensure mutual exclusion for the duration of the
20// operation, but may then freely modify the receiver and the returned copy
21// independently once this method returns.
22func (s *State) DeepCopy() *State {
23 if s == nil {
24 return nil
25 }
26
27 modules := make(map[string]*Module, len(s.Modules))
28 for k, m := range s.Modules {
29 modules[k] = m.DeepCopy()
30 }
31 return &State{
32 Modules: modules,
33 }
34}
35
36// DeepCopy returns a new module state that contains equivalent data to the
37// receiver but shares no backing memory in common.
38//
39// As with all methods on Module, this method is not safe to use concurrently
40// with writing to any portion of the recieving data structure. It is the
41// caller's responsibility to ensure mutual exclusion for the duration of the
42// operation, but may then freely modify the receiver and the returned copy
43// independently once this method returns.
44func (ms *Module) DeepCopy() *Module {
45 if ms == nil {
46 return nil
47 }
48
49 resources := make(map[string]*Resource, len(ms.Resources))
50 for k, r := range ms.Resources {
51 resources[k] = r.DeepCopy()
52 }
53 outputValues := make(map[string]*OutputValue, len(ms.OutputValues))
54 for k, v := range ms.OutputValues {
55 outputValues[k] = v.DeepCopy()
56 }
57 localValues := make(map[string]cty.Value, len(ms.LocalValues))
58 for k, v := range ms.LocalValues {
59 // cty.Value is immutable, so we don't need to copy these.
60 localValues[k] = v
61 }
62
63 return &Module{
64 Addr: ms.Addr, // technically mutable, but immutable by convention
65 Resources: resources,
66 OutputValues: outputValues,
67 LocalValues: localValues,
68 }
69}
70
71// DeepCopy returns a new resource state that contains equivalent data to the
72// receiver but shares no backing memory in common.
73//
74// As with all methods on Resource, this method is not safe to use concurrently
75// with writing to any portion of the recieving data structure. It is the
76// caller's responsibility to ensure mutual exclusion for the duration of the
77// operation, but may then freely modify the receiver and the returned copy
78// independently once this method returns.
79func (rs *Resource) DeepCopy() *Resource {
80 if rs == nil {
81 return nil
82 }
83
84 instances := make(map[addrs.InstanceKey]*ResourceInstance, len(rs.Instances))
85 for k, i := range rs.Instances {
86 instances[k] = i.DeepCopy()
87 }
88
89 return &Resource{
90 Addr: rs.Addr,
91 EachMode: rs.EachMode,
92 Instances: instances,
93 ProviderConfig: rs.ProviderConfig, // technically mutable, but immutable by convention
94 }
95}
96
97// DeepCopy returns a new resource instance state that contains equivalent data
98// to the receiver but shares no backing memory in common.
99//
100// As with all methods on ResourceInstance, this method is not safe to use
101// concurrently with writing to any portion of the recieving data structure. It
102// is the caller's responsibility to ensure mutual exclusion for the duration
103// of the operation, but may then freely modify the receiver and the returned
104// copy independently once this method returns.
105func (is *ResourceInstance) DeepCopy() *ResourceInstance {
106 if is == nil {
107 return nil
108 }
109
110 deposed := make(map[DeposedKey]*ResourceInstanceObjectSrc, len(is.Deposed))
111 for k, obj := range is.Deposed {
112 deposed[k] = obj.DeepCopy()
113 }
114
115 return &ResourceInstance{
116 Current: is.Current.DeepCopy(),
117 Deposed: deposed,
118 }
119}
120
121// DeepCopy returns a new resource instance object that contains equivalent data
122// to the receiver but shares no backing memory in common.
123//
124// As with all methods on ResourceInstanceObjectSrc, this method is not safe to
125// use concurrently with writing to any portion of the recieving data structure.
126// It is the caller's responsibility to ensure mutual exclusion for the duration
127// of the operation, but may then freely modify the receiver and the returned
128// copy independently once this method returns.
129func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
130 if obj == nil {
131 return nil
132 }
133
134 var attrsFlat map[string]string
135 if obj.AttrsFlat != nil {
136 attrsFlat = make(map[string]string, len(obj.AttrsFlat))
137 for k, v := range obj.AttrsFlat {
138 attrsFlat[k] = v
139 }
140 }
141
142 var attrsJSON []byte
143 if obj.AttrsJSON != nil {
144 attrsJSON = make([]byte, len(obj.AttrsJSON))
145 copy(attrsJSON, obj.AttrsJSON)
146 }
147
148 var private []byte
149 if obj.Private != nil {
150 private := make([]byte, len(obj.Private))
151 copy(private, obj.Private)
152 }
153
154 // Some addrs.Referencable implementations are technically mutable, but
155 // we treat them as immutable by convention and so we don't deep-copy here.
156 dependencies := make([]addrs.Referenceable, len(obj.Dependencies))
157 copy(dependencies, obj.Dependencies)
158
159 return &ResourceInstanceObjectSrc{
160 Status: obj.Status,
161 SchemaVersion: obj.SchemaVersion,
162 Private: private,
163 AttrsFlat: attrsFlat,
164 AttrsJSON: attrsJSON,
165 Dependencies: dependencies,
166 }
167}
168
169// DeepCopy returns a new resource instance object that contains equivalent data
170// to the receiver but shares no backing memory in common.
171//
172// As with all methods on ResourceInstanceObject, this method is not safe to use
173// concurrently with writing to any portion of the recieving data structure. It
174// is the caller's responsibility to ensure mutual exclusion for the duration
175// of the operation, but may then freely modify the receiver and the returned
176// copy independently once this method returns.
177func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
178 if obj == nil {
179 return nil
180 }
181
182 var private []byte
183 if obj.Private != nil {
184 private := make([]byte, len(obj.Private))
185 copy(private, obj.Private)
186 }
187
188 // Some addrs.Referencable implementations are technically mutable, but
189 // we treat them as immutable by convention and so we don't deep-copy here.
190 dependencies := make([]addrs.Referenceable, len(obj.Dependencies))
191 copy(dependencies, obj.Dependencies)
192
193 return &ResourceInstanceObject{
194 Value: obj.Value,
195 Status: obj.Status,
196 Private: private,
197 Dependencies: dependencies,
198 }
199}
200
201// DeepCopy returns a new output value state that contains equivalent data
202// to the receiver but shares no backing memory in common.
203//
204// As with all methods on OutputValue, this method is not safe to use
205// concurrently with writing to any portion of the recieving data structure. It
206// is the caller's responsibility to ensure mutual exclusion for the duration
207// of the operation, but may then freely modify the receiver and the returned
208// copy independently once this method returns.
209func (os *OutputValue) DeepCopy() *OutputValue {
210 if os == nil {
211 return nil
212 }
213
214 return &OutputValue{
215 Value: os.Value,
216 Sensitive: os.Sensitive,
217 }
218}
diff --git a/vendor/github.com/hashicorp/terraform/states/state_equal.go b/vendor/github.com/hashicorp/terraform/states/state_equal.go
new file mode 100644
index 0000000..ea20967
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/state_equal.go
@@ -0,0 +1,18 @@
1package states
2
3import (
4 "reflect"
5)
6
// Equal returns true if the receiver is functionally equivalent to other,
// including any ephemeral portions of the state that would not be included
// if the state were saved to files.
//
// To test only the persistent portions of two states for equality, instead
// use statefile.StatesMarshalEqual.
func (s *State) Equal(other *State) bool {
	// For the moment this is sufficient, but we may need to do something
	// more elaborate in future if we have any portions of state that require
	// more sophisticated comparisons.
	// Note that reflect.DeepEqual tolerates nil pointers, so two nil states
	// compare as equal here.
	return reflect.DeepEqual(s, other)
}
diff --git a/vendor/github.com/hashicorp/terraform/states/state_string.go b/vendor/github.com/hashicorp/terraform/states/state_string.go
new file mode 100644
index 0000000..bca4581
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/state_string.go
@@ -0,0 +1,279 @@
1package states
2
3import (
4 "bufio"
5 "bytes"
6 "encoding/json"
7 "fmt"
8 "sort"
9 "strings"
10
11 ctyjson "github.com/zclconf/go-cty/cty/json"
12
13 "github.com/hashicorp/terraform/addrs"
14 "github.com/hashicorp/terraform/config/hcl2shim"
15)
16
17// String returns a rather-odd string representation of the entire state.
18//
19// This is intended to match the behavior of the older terraform.State.String
20// method that is used in lots of existing tests. It should not be used in
21// new tests: instead, use "cmp" to directly compare the state data structures
22// and print out a diff if they do not match.
23//
24// This method should never be used in non-test code, whether directly by call
25// or indirectly via a %s or %q verb in package fmt.
26func (s *State) String() string {
27 if s == nil {
28 return "<nil>"
29 }
30
31 // sort the modules by name for consistent output
32 modules := make([]string, 0, len(s.Modules))
33 for m := range s.Modules {
34 modules = append(modules, m)
35 }
36 sort.Strings(modules)
37
38 var buf bytes.Buffer
39 for _, name := range modules {
40 m := s.Modules[name]
41 mStr := m.testString()
42
43 // If we're the root module, we just write the output directly.
44 if m.Addr.IsRoot() {
45 buf.WriteString(mStr + "\n")
46 continue
47 }
48
49 // We need to build out a string that resembles the not-quite-standard
50 // format that terraform.State.String used to use, where there's a
51 // "module." prefix but then just a chain of all of the module names
52 // without any further "module." portions.
53 buf.WriteString("module")
54 for _, step := range m.Addr {
55 buf.WriteByte('.')
56 buf.WriteString(step.Name)
57 if step.InstanceKey != addrs.NoKey {
58 buf.WriteByte('[')
59 buf.WriteString(step.InstanceKey.String())
60 buf.WriteByte(']')
61 }
62 }
63 buf.WriteString(":\n")
64
65 s := bufio.NewScanner(strings.NewReader(mStr))
66 for s.Scan() {
67 text := s.Text()
68 if text != "" {
69 text = " " + text
70 }
71
72 buf.WriteString(fmt.Sprintf("%s\n", text))
73 }
74 }
75
76 return strings.TrimSpace(buf.String())
77}
78
79// testString is used to produce part of the output of State.String. It should
80// never be used directly.
81func (m *Module) testString() string {
82 var buf bytes.Buffer
83
84 if len(m.Resources) == 0 {
85 buf.WriteString("<no state>")
86 }
87
88 // We use AbsResourceInstance here, even though everything belongs to
89 // the same module, just because we have a sorting behavior defined
90 // for those but not for just ResourceInstance.
91 addrsOrder := make([]addrs.AbsResourceInstance, 0, len(m.Resources))
92 for _, rs := range m.Resources {
93 for ik := range rs.Instances {
94 addrsOrder = append(addrsOrder, rs.Addr.Instance(ik).Absolute(addrs.RootModuleInstance))
95 }
96 }
97
98 sort.Slice(addrsOrder, func(i, j int) bool {
99 return addrsOrder[i].Less(addrsOrder[j])
100 })
101
102 for _, fakeAbsAddr := range addrsOrder {
103 addr := fakeAbsAddr.Resource
104 rs := m.Resource(addr.ContainingResource())
105 is := m.ResourceInstance(addr)
106
107 // Here we need to fake up a legacy-style address as the old state
108 // types would've used, since that's what our tests against those
109 // old types expect. The significant difference is that instancekey
110 // is dot-separated rather than using index brackets.
111 k := addr.ContainingResource().String()
112 if addr.Key != addrs.NoKey {
113 switch tk := addr.Key.(type) {
114 case addrs.IntKey:
115 k = fmt.Sprintf("%s.%d", k, tk)
116 default:
117 // No other key types existed for the legacy types, so we
118 // can do whatever we want here. We'll just use our standard
119 // syntax for these.
120 k = k + tk.String()
121 }
122 }
123
124 id := LegacyInstanceObjectID(is.Current)
125
126 taintStr := ""
127 if is.Current != nil && is.Current.Status == ObjectTainted {
128 taintStr = " (tainted)"
129 }
130
131 deposedStr := ""
132 if len(is.Deposed) > 0 {
133 deposedStr = fmt.Sprintf(" (%d deposed)", len(is.Deposed))
134 }
135
136 buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
137 buf.WriteString(fmt.Sprintf(" ID = %s\n", id))
138 buf.WriteString(fmt.Sprintf(" provider = %s\n", rs.ProviderConfig.String()))
139
140 // Attributes were a flatmap before, but are not anymore. To preserve
141 // our old output as closely as possible we need to do a conversion
142 // to flatmap. Normally we'd want to do this with schema for
143 // accuracy, but for our purposes here it only needs to be approximate.
144 // This should produce an identical result for most cases, though
145 // in particular will differ in a few cases:
146 // - The keys used for elements in a set will be different
147 // - Values for attributes of type cty.DynamicPseudoType will be
148 // misinterpreted (but these weren't possible in old world anyway)
149 var attributes map[string]string
150 if obj := is.Current; obj != nil {
151 switch {
152 case obj.AttrsFlat != nil:
153 // Easy (but increasingly unlikely) case: the state hasn't
154 // actually been upgraded to the new form yet.
155 attributes = obj.AttrsFlat
156 case obj.AttrsJSON != nil:
157 ty, err := ctyjson.ImpliedType(obj.AttrsJSON)
158 if err == nil {
159 val, err := ctyjson.Unmarshal(obj.AttrsJSON, ty)
160 if err == nil {
161 attributes = hcl2shim.FlatmapValueFromHCL2(val)
162 }
163 }
164 }
165 }
166 attrKeys := make([]string, 0, len(attributes))
167 for ak, val := range attributes {
168 if ak == "id" {
169 continue
170 }
171
172 // don't show empty containers in the output
173 if val == "0" && (strings.HasSuffix(ak, ".#") || strings.HasSuffix(ak, ".%")) {
174 continue
175 }
176
177 attrKeys = append(attrKeys, ak)
178 }
179
180 sort.Strings(attrKeys)
181
182 for _, ak := range attrKeys {
183 av := attributes[ak]
184 buf.WriteString(fmt.Sprintf(" %s = %s\n", ak, av))
185 }
186
187 // CAUTION: Since deposed keys are now random strings instead of
188 // incrementing integers, this result will not be deterministic
189 // if there is more than one deposed object.
190 i := 1
191 for _, t := range is.Deposed {
192 id := LegacyInstanceObjectID(t)
193 taintStr := ""
194 if t.Status == ObjectTainted {
195 taintStr = " (tainted)"
196 }
197 buf.WriteString(fmt.Sprintf(" Deposed ID %d = %s%s\n", i, id, taintStr))
198 i++
199 }
200
201 if obj := is.Current; obj != nil && len(obj.Dependencies) > 0 {
202 buf.WriteString(fmt.Sprintf("\n Dependencies:\n"))
203 for _, dep := range obj.Dependencies {
204 buf.WriteString(fmt.Sprintf(" %s\n", dep.String()))
205 }
206 }
207 }
208
209 if len(m.OutputValues) > 0 {
210 buf.WriteString("\nOutputs:\n\n")
211
212 ks := make([]string, 0, len(m.OutputValues))
213 for k := range m.OutputValues {
214 ks = append(ks, k)
215 }
216 sort.Strings(ks)
217
218 for _, k := range ks {
219 v := m.OutputValues[k]
220 lv := hcl2shim.ConfigValueFromHCL2(v.Value)
221 switch vTyped := lv.(type) {
222 case string:
223 buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
224 case []interface{}:
225 buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
226 case map[string]interface{}:
227 var mapKeys []string
228 for key := range vTyped {
229 mapKeys = append(mapKeys, key)
230 }
231 sort.Strings(mapKeys)
232
233 var mapBuf bytes.Buffer
234 mapBuf.WriteString("{")
235 for _, key := range mapKeys {
236 mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
237 }
238 mapBuf.WriteString("}")
239
240 buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
241 default:
242 buf.WriteString(fmt.Sprintf("%s = %#v\n", k, lv))
243 }
244 }
245 }
246
247 return buf.String()
248}
249
250// LegacyInstanceObjectID is a helper for extracting an object id value from
251// an instance object in a way that approximates how we used to do this
252// for the old state types. ID is no longer first-class, so this is preserved
253// only for compatibility with old tests that include the id as part of their
254// expected value.
255func LegacyInstanceObjectID(obj *ResourceInstanceObjectSrc) string {
256 if obj == nil {
257 return "<not created>"
258 }
259
260 if obj.AttrsJSON != nil {
261 type WithID struct {
262 ID string `json:"id"`
263 }
264 var withID WithID
265 err := json.Unmarshal(obj.AttrsJSON, &withID)
266 if err == nil {
267 return withID.ID
268 }
269 } else if obj.AttrsFlat != nil {
270 if flatID, exists := obj.AttrsFlat["id"]; exists {
271 return flatID
272 }
273 }
274
275 // For resource types created after we removed id as special there may
276 // not actually be one at all. This is okay because older tests won't
277 // encounter this, and new tests shouldn't be using ids.
278 return "<none>"
279}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go
new file mode 100644
index 0000000..a6d88ec
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/diagnostics.go
@@ -0,0 +1,62 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6
7 "github.com/hashicorp/terraform/tfdiags"
8)
9
10const invalidFormat = "Invalid state file format"
11
12// jsonUnmarshalDiags is a helper that translates errors returned from
13// json.Unmarshal into hopefully-more-helpful diagnostics messages.
14func jsonUnmarshalDiags(err error) tfdiags.Diagnostics {
15 var diags tfdiags.Diagnostics
16 if err == nil {
17 return diags
18 }
19
20 switch tErr := err.(type) {
21 case *json.SyntaxError:
22 // We've usually already successfully parsed a source file as JSON at
23 // least once before we'd use jsonUnmarshalDiags with it (to sniff
24 // the version number) so this particular error should not appear much
25 // in practice.
26 diags = diags.Append(tfdiags.Sourceless(
27 tfdiags.Error,
28 invalidFormat,
29 fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset),
30 ))
31 case *json.UnmarshalTypeError:
32 // This is likely to be the most common area, describing a
33 // non-conformance between the file and the expected file format
34 // at a semantic level.
35 if tErr.Field != "" {
36 diags = diags.Append(tfdiags.Sourceless(
37 tfdiags.Error,
38 invalidFormat,
39 fmt.Sprintf("The state file field %q has invalid value %s", tErr.Field, tErr.Value),
40 ))
41 break
42 } else {
43 // Without a field name, we can't really say anything helpful.
44 diags = diags.Append(tfdiags.Sourceless(
45 tfdiags.Error,
46 invalidFormat,
47 "The state file does not conform to the expected JSON data structure.",
48 ))
49 }
50 default:
51 // Fallback for all other types of errors. This can happen only for
52 // custom UnmarshalJSON implementations, so should be encountered
53 // only rarely.
54 diags = diags.Append(tfdiags.Sourceless(
55 tfdiags.Error,
56 invalidFormat,
57 fmt.Sprintf("The state file does not conform to the expected JSON data structure: %s.", err.Error()),
58 ))
59 }
60
61 return diags
62}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/doc.go b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go
new file mode 100644
index 0000000..625d0cf
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/doc.go
@@ -0,0 +1,3 @@
1// Package statefile deals with the file format used to serialize states for
2// persistent storage and then deserialize them into memory again later.
3package statefile
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/file.go b/vendor/github.com/hashicorp/terraform/states/statefile/file.go
new file mode 100644
index 0000000..6e20240
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/file.go
@@ -0,0 +1,62 @@
1package statefile
2
3import (
4 version "github.com/hashicorp/go-version"
5
6 "github.com/hashicorp/terraform/states"
7 tfversion "github.com/hashicorp/terraform/version"
8)
9
// File is the in-memory representation of a state file. It includes the state
// itself along with various metadata used to track changing state files for
// the same configuration over time.
type File struct {
	// TerraformVersion is the version of Terraform that wrote this state file.
	// New (below) populates it with the current version.
	TerraformVersion *version.Version

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates. Serials are comparable only between files of the same Lineage.
	Serial uint64

	// Lineage is set when a new, blank state file is created and then
	// never updated. This allows us to determine whether the serials
	// of two states can be meaningfully compared.
	// Apart from the guarantee that collisions between two lineages
	// are very unlikely, this value is opaque and external callers
	// should only compare lineage strings byte-for-byte for equality.
	Lineage string

	// State is the actual state represented by this file. New (below)
	// ensures this is non-nil.
	State *states.State
}
33
34func New(state *states.State, lineage string, serial uint64) *File {
35 // To make life easier on callers, we'll accept a nil state here and just
36 // allocate an empty one, which is required for this file to be successfully
37 // written out.
38 if state == nil {
39 state = states.NewState()
40 }
41
42 return &File{
43 TerraformVersion: tfversion.SemVer,
44 State: state,
45 Lineage: lineage,
46 Serial: serial,
47 }
48}
49
50// DeepCopy is a convenience method to create a new File object whose state
51// is a deep copy of the receiver's, as implemented by states.State.DeepCopy.
52func (f *File) DeepCopy() *File {
53 if f == nil {
54 return nil
55 }
56 return &File{
57 TerraformVersion: f.TerraformVersion,
58 Serial: f.Serial,
59 Lineage: f.Lineage,
60 State: f.State.DeepCopy(),
61 }
62}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go
new file mode 100644
index 0000000..4948b39
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/marshal_equal.go
@@ -0,0 +1,40 @@
1package statefile
2
3import (
4 "bytes"
5
6 "github.com/hashicorp/terraform/states"
7)
8
9// StatesMarshalEqual returns true if and only if the two given states have
10// an identical (byte-for-byte) statefile representation.
11//
12// This function compares only the portions of the state that are persisted
13// in state files, so for example it will not return false if the only
14// differences between the two states are local values or descendent module
15// outputs.
16func StatesMarshalEqual(a, b *states.State) bool {
17 var aBuf bytes.Buffer
18 var bBuf bytes.Buffer
19
20 // nil states are not valid states, and so they can never martial equal.
21 if a == nil || b == nil {
22 return false
23 }
24
25 // We write here some temporary files that have no header information
26 // populated, thus ensuring that we're only comparing the state itself
27 // and not any metadata.
28 err := Write(&File{State: a}, &aBuf)
29 if err != nil {
30 // Should never happen, because we're writing to an in-memory buffer
31 panic(err)
32 }
33 err = Write(&File{State: b}, &bBuf)
34 if err != nil {
35 // Should never happen, because we're writing to an in-memory buffer
36 panic(err)
37 }
38
39 return bytes.Equal(aBuf.Bytes(), bBuf.Bytes())
40}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/read.go b/vendor/github.com/hashicorp/terraform/states/statefile/read.go
new file mode 100644
index 0000000..d691c02
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/read.go
@@ -0,0 +1,209 @@
1package statefile
2
3import (
4 "encoding/json"
5 "errors"
6 "fmt"
7 "io"
8 "io/ioutil"
9 "os"
10
11 version "github.com/hashicorp/go-version"
12
13 "github.com/hashicorp/terraform/tfdiags"
14 tfversion "github.com/hashicorp/terraform/version"
15)
16
// ErrNoState is the sentinel error returned by Read when the given source
// contains no state at all: the reader is empty, or is a typed-nil *os.File.
var ErrNoState = errors.New("no state")
19
// Read reads a state from the given reader.
//
// Legacy state format versions 1 through 3 are supported, but the result will
// contain object attributes in the deprecated "flatmap" format and so must
// be upgraded by the caller before use.
//
// If the state file is empty, the special error value ErrNoState is returned.
// Otherwise, the returned error might be a wrapper around tfdiags.Diagnostics
// potentially describing multiple errors.
func Read(r io.Reader) (*File, error) {
	// Some callers provide us a "typed nil" *os.File here, which would
	// cause us to panic below if we tried to use it.
	if f, ok := r.(*os.File); ok && f == nil {
		return nil, ErrNoState
	}

	var diags tfdiags.Diagnostics

	// We actually just buffer the whole thing in memory, because states are
	// generally not huge and we need to be able to sniff for a version
	// number before full parsing.
	src, err := ioutil.ReadAll(r)
	if err != nil {
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			"Failed to read state file",
			fmt.Sprintf("The state file could not be read: %s", err),
		))
		return nil, diags.Err()
	}

	if len(src) == 0 {
		return nil, ErrNoState
	}

	// Note: ":=" here re-uses the diags declared above rather than shadowing
	// it, since "state" is the only new variable in this statement and both
	// are in the same scope.
	state, diags := readState(src)
	if diags.HasErrors() {
		return nil, diags.Err()
	}

	if state == nil {
		// Should never happen
		panic("readState returned nil state with no errors")
	}

	if state.TerraformVersion != nil && state.TerraformVersion.GreaterThan(tfversion.SemVer) {
		// The parsed state is returned alongside the error here, so a caller
		// that chooses to proceed anyway still has the data.
		return state, fmt.Errorf(
			"state snapshot was created by Terraform v%s, which is newer than current v%s; upgrade to Terraform v%s or greater to work with this state",
			state.TerraformVersion,
			tfversion.SemVer,
			state.TerraformVersion,
		)
	}

	return state, diags.Err()
}
76
77func readState(src []byte) (*File, tfdiags.Diagnostics) {
78 var diags tfdiags.Diagnostics
79
80 if looksLikeVersion0(src) {
81 diags = diags.Append(tfdiags.Sourceless(
82 tfdiags.Error,
83 unsupportedFormat,
84 "The state is stored in a legacy binary format that is not supported since Terraform v0.7. To continue, first upgrade the state using Terraform 0.6.16 or earlier.",
85 ))
86 return nil, diags
87 }
88
89 version, versionDiags := sniffJSONStateVersion(src)
90 diags = diags.Append(versionDiags)
91 if versionDiags.HasErrors() {
92 return nil, diags
93 }
94
95 switch version {
96 case 0:
97 diags = diags.Append(tfdiags.Sourceless(
98 tfdiags.Error,
99 unsupportedFormat,
100 "The state file uses JSON syntax but has a version number of zero. There was never a JSON-based state format zero, so this state file is invalid and cannot be processed.",
101 ))
102 return nil, diags
103 case 1:
104 return readStateV1(src)
105 case 2:
106 return readStateV2(src)
107 case 3:
108 return readStateV3(src)
109 case 4:
110 return readStateV4(src)
111 default:
112 thisVersion := tfversion.SemVer.String()
113 creatingVersion := sniffJSONStateTerraformVersion(src)
114 switch {
115 case creatingVersion != "":
116 diags = diags.Append(tfdiags.Sourceless(
117 tfdiags.Error,
118 unsupportedFormat,
119 fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file was created by Terraform %s.", version, thisVersion, creatingVersion),
120 ))
121 default:
122 diags = diags.Append(tfdiags.Sourceless(
123 tfdiags.Error,
124 unsupportedFormat,
125 fmt.Sprintf("The state file uses format version %d, which is not supported by Terraform %s. This state file may have been created by a newer version of Terraform.", version, thisVersion),
126 ))
127 }
128 return nil, diags
129 }
130}
131
132func sniffJSONStateVersion(src []byte) (uint64, tfdiags.Diagnostics) {
133 var diags tfdiags.Diagnostics
134
135 type VersionSniff struct {
136 Version *uint64 `json:"version"`
137 }
138 var sniff VersionSniff
139 err := json.Unmarshal(src, &sniff)
140 if err != nil {
141 switch tErr := err.(type) {
142 case *json.SyntaxError:
143 diags = diags.Append(tfdiags.Sourceless(
144 tfdiags.Error,
145 unsupportedFormat,
146 fmt.Sprintf("The state file could not be parsed as JSON: syntax error at byte offset %d.", tErr.Offset),
147 ))
148 case *json.UnmarshalTypeError:
149 diags = diags.Append(tfdiags.Sourceless(
150 tfdiags.Error,
151 unsupportedFormat,
152 fmt.Sprintf("The version in the state file is %s. A positive whole number is required.", tErr.Value),
153 ))
154 default:
155 diags = diags.Append(tfdiags.Sourceless(
156 tfdiags.Error,
157 unsupportedFormat,
158 "The state file could not be parsed as JSON.",
159 ))
160 }
161 }
162
163 if sniff.Version == nil {
164 diags = diags.Append(tfdiags.Sourceless(
165 tfdiags.Error,
166 unsupportedFormat,
167 "The state file does not have a \"version\" attribute, which is required to identify the format version.",
168 ))
169 return 0, diags
170 }
171
172 return *sniff.Version, diags
173}
174
175// sniffJSONStateTerraformVersion attempts to sniff the Terraform version
176// specification from the given state file source code. The result is either
177// a version string or an empty string if no version number could be extracted.
178//
179// This is a best-effort function intended to produce nicer error messages. It
180// should not be used for any real processing.
181func sniffJSONStateTerraformVersion(src []byte) string {
182 type VersionSniff struct {
183 Version string `json:"terraform_version"`
184 }
185 var sniff VersionSniff
186
187 err := json.Unmarshal(src, &sniff)
188 if err != nil {
189 return ""
190 }
191
192 // Attempt to parse the string as a version so we won't report garbage
193 // as a version number.
194 _, err = version.NewVersion(sniff.Version)
195 if err != nil {
196 return ""
197 }
198
199 return sniff.Version
200}
201
// unsupportedFormat is a diagnostic summary message for when the state file
// seems to not be a state file at all, or is not a supported version.
//
// Use invalidFormat instead for the subtly-different case of "this looks like
// it's intended to be a state file but it's not structured correctly".
const unsupportedFormat = "Unsupported state file format"

// upgradeFailed is the diagnostic summary used when a legacy-format state
// file was recognized but could not be upgraded to the current format.
const upgradeFailed = "State format upgrade failed"
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version0.go b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go
new file mode 100644
index 0000000..9b53331
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version0.go
@@ -0,0 +1,23 @@
1package statefile
2
// looksLikeVersion0 sniffs for the signature indicating a version 0 state
// file.
//
// Version 0 was the number retroactively assigned to Terraform's initial
// (unversioned) binary state file format, which was later superseded by the
// version 1 format in JSON.
//
// Version 0 is no longer supported, so this is used only to detect it and
// return a nice error to the user.
func looksLikeVersion0(src []byte) bool {
	// Version 0 files begin with the magic prefix "tfstate".
	const magic = "tfstate"
	return len(src) >= len(magic) && string(src[:len(magic)]) == magic
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go
new file mode 100644
index 0000000..80d711b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version1.go
@@ -0,0 +1,174 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6
7 "github.com/hashicorp/terraform/tfdiags"
8)
9
10func readStateV1(src []byte) (*File, tfdiags.Diagnostics) {
11 var diags tfdiags.Diagnostics
12 sV1 := &stateV1{}
13 err := json.Unmarshal(src, sV1)
14 if err != nil {
15 diags = diags.Append(jsonUnmarshalDiags(err))
16 return nil, diags
17 }
18
19 file, prepDiags := prepareStateV1(sV1)
20 diags = diags.Append(prepDiags)
21 return file, diags
22}
23
24func prepareStateV1(sV1 *stateV1) (*File, tfdiags.Diagnostics) {
25 var diags tfdiags.Diagnostics
26 sV2, err := upgradeStateV1ToV2(sV1)
27 if err != nil {
28 diags = diags.Append(tfdiags.Sourceless(
29 tfdiags.Error,
30 upgradeFailed,
31 fmt.Sprintf("Error upgrading state file format from version 1 to version 2: %s.", err),
32 ))
33 return nil, diags
34 }
35
36 file, prepDiags := prepareStateV2(sV2)
37 diags = diags.Append(prepDiags)
38 return file, diags
39}
40
// stateV1 is a representation of the legacy JSON state format version 1.
//
// It is only used to read version 1 JSON files prior to upgrading them to
// the current format.
type stateV1 struct {
	// Version is the protocol version. "1" for a StateV1.
	Version int `json:"version"`

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial int64 `json:"serial"`

	// Remote is used to track the metadata required to
	// pull and push state files from a remote storage endpoint.
	// It is omitted from the JSON encoding when nil.
	Remote *remoteStateV1 `json:"remote,omitempty"`

	// Modules contains all the modules in a breadth-first order
	Modules []*moduleStateV1 `json:"modules"`
}
61
// remoteStateV1 records where a V1 state was stored remotely, if anywhere.
type remoteStateV1 struct {
	// Type controls the client we use for the remote state
	Type string `json:"type"`

	// Config is used to store arbitrary configuration that
	// is type specific
	Config map[string]string `json:"config"`
}
70
// moduleStateV1 is the V1 representation of the state of a single module.
type moduleStateV1 struct {
	// Path is the import path from the root module. Module imports are
	// always disjoint, so the path represents a module tree
	Path []string `json:"path"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	// In V1 output values are always plain strings.
	Outputs map[string]string `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*resourceStateV1 `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: a module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`
}
101
// resourceStateV1 is the V1 representation of the state of a single resource,
// holding its primary instance plus any tainted or deposed instances.
type resourceStateV1 struct {
	// This is filled in and managed by Terraform, and is the resource
	// type itself such as "mycloud_instance". If a resource provider sets
	// this value, it won't be persisted.
	Type string `json:"type"`

	// Dependencies are a list of things that this resource relies on
	// existing to remain intact. For example: an AWS instance might
	// depend on a subnet (which itself might depend on a VPC, and so
	// on).
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a resource that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on,omitempty"`

	// Primary is the current active instance for this resource.
	// It can be replaced but only after a successful creation.
	// This is the instances on which providers will act.
	Primary *instanceStateV1 `json:"primary"`

	// Tainted is used to track any underlying instances that
	// have been created but are in a bad or unknown state and
	// need to be cleaned up subsequently. In the
	// standard case, there is only at most a single instance.
	// However, in pathological cases, it is possible for the number
	// of instances to accumulate.
	Tainted []*instanceStateV1 `json:"tainted,omitempty"`

	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
	// Primary is Deposed to get it out of the way for the replacement Primary to
	// be created by Apply. If the replacement Primary creates successfully, the
	// Deposed instance is cleaned up. If there were problems creating the
	// replacement, the instance remains in the Deposed list so it can be
	// destroyed in a future run. Functionally, Deposed instances are very
	// similar to Tainted instances in that Terraform is only tracking them in
	// order to remember to destroy them.
	Deposed []*instanceStateV1 `json:"deposed,omitempty"`

	// Provider is used when a resource is connected to a provider with an alias.
	// If this string is empty, the resource is connected to the default provider,
	// e.g. "aws_instance" goes with the "aws" provider.
	// If the resource block contained a "provider" key, that value will be set here.
	Provider string `json:"provider,omitempty"`
}
152
// instanceStateV1 is the V1 representation of a single resource instance.
type instanceStateV1 struct {
	// A unique ID for this resource. This is opaque to Terraform
	// and is only meant as a lookup mechanism for the providers.
	ID string `json:"id"`

	// Attributes are basic information about the resource. Any keys here
	// are accessible in variable format within Terraform configurations:
	// ${resourcetype.name.attribute}.
	Attributes map[string]string `json:"attributes,omitempty"`

	// Meta is a simple K/V map that is persisted to the State but otherwise
	// ignored by Terraform core. It's meant to be used for accounting by
	// external client code. In V1 the values are restricted to strings.
	Meta map[string]string `json:"meta,omitempty"`
}
168
// ephemeralStateV1 holds transient per-instance data that is never
// persisted: the json:"-" tag excludes it from serialization.
type ephemeralStateV1 struct {
	// ConnInfo is used for the providers to export information which is
	// used to connect to the resource for provisioning. For example,
	// this could contain SSH or WinRM credentials.
	ConnInfo map[string]string `json:"-"`
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go
new file mode 100644
index 0000000..0b417e1
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version1_upgrade.go
@@ -0,0 +1,172 @@
1package statefile
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/mitchellh/copystructure"
8)
9
10// upgradeStateV1ToV2 is used to upgrade a V1 state representation
11// into a V2 state representation
12func upgradeStateV1ToV2(old *stateV1) (*stateV2, error) {
13 log.Printf("[TRACE] statefile.Read: upgrading format from v1 to v2")
14 if old == nil {
15 return nil, nil
16 }
17
18 remote, err := old.Remote.upgradeToV2()
19 if err != nil {
20 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
21 }
22
23 modules := make([]*moduleStateV2, len(old.Modules))
24 for i, module := range old.Modules {
25 upgraded, err := module.upgradeToV2()
26 if err != nil {
27 return nil, fmt.Errorf("Error upgrading State V1: %v", err)
28 }
29 modules[i] = upgraded
30 }
31 if len(modules) == 0 {
32 modules = nil
33 }
34
35 newState := &stateV2{
36 Version: 2,
37 Serial: old.Serial,
38 Remote: remote,
39 Modules: modules,
40 }
41
42 return newState, nil
43}
44
45func (old *remoteStateV1) upgradeToV2() (*remoteStateV2, error) {
46 if old == nil {
47 return nil, nil
48 }
49
50 config, err := copystructure.Copy(old.Config)
51 if err != nil {
52 return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
53 }
54
55 return &remoteStateV2{
56 Type: old.Type,
57 Config: config.(map[string]string),
58 }, nil
59}
60
// upgradeToV2 converts a V1 module state into its V2 equivalent, deep-copying
// path and dependency data and restructuring outputs into the richer V2
// output form. A nil receiver upgrades to a nil result.
func (old *moduleStateV1) upgradeToV2() (*moduleStateV2, error) {
	if old == nil {
		return nil, nil
	}

	// Deep-copy the path so the new state shares no memory with the old.
	pathRaw, err := copystructure.Copy(old.Path)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
	}
	path, ok := pathRaw.([]string)
	if !ok {
		return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
	}
	if len(path) == 0 {
		// We found some V1 states with a nil path. Assume root.
		path = []string{"root"}
	}

	// Outputs needs upgrading to use the new structure. V1 outputs were
	// plain strings, so every upgraded output is a non-sensitive string.
	outputs := make(map[string]*outputStateV2)
	for key, output := range old.Outputs {
		outputs[key] = &outputStateV2{
			Type:      "string",
			Value:     output,
			Sensitive: false,
		}
	}

	resources := make(map[string]*resourceStateV2)
	for key, oldResource := range old.Resources {
		upgraded, err := oldResource.upgradeToV2()
		if err != nil {
			return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
		}
		resources[key] = upgraded
	}

	dependencies, err := copystructure.Copy(old.Dependencies)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
	}

	return &moduleStateV2{
		Path:         path,
		Outputs:      outputs,
		Resources:    resources,
		Dependencies: dependencies.([]string),
	}, nil
}
110
// upgradeToV2 converts a V1 resource state into its V2 equivalent, upgrading
// the primary and deposed instances in turn. Note that the V1 Tainted list is
// not referenced by this function, so it is not carried into the V2 value.
// A nil receiver upgrades to a nil result.
func (old *resourceStateV1) upgradeToV2() (*resourceStateV2, error) {
	if old == nil {
		return nil, nil
	}

	// Deep-copy the dependency list so the new state shares no memory
	// with the old.
	dependencies, err := copystructure.Copy(old.Dependencies)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
	}

	primary, err := old.Primary.upgradeToV2()
	if err != nil {
		return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
	}

	deposed := make([]*instanceStateV2, len(old.Deposed))
	for i, v := range old.Deposed {
		upgraded, err := v.upgradeToV2()
		if err != nil {
			return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
		}
		deposed[i] = upgraded
	}
	// Normalize an empty deposed list to nil rather than an empty slice.
	if len(deposed) == 0 {
		deposed = nil
	}

	return &resourceStateV2{
		Type:         old.Type,
		Dependencies: dependencies.([]string),
		Primary:      primary,
		Deposed:      deposed,
		Provider:     old.Provider,
	}, nil
}
146
// upgradeToV2 converts a V1 instance state into its V2 equivalent,
// deep-copying the attributes and widening Meta from map[string]string to
// map[string]interface{} as required by the V2 structure. A nil receiver
// upgrades to a nil result.
func (old *instanceStateV1) upgradeToV2() (*instanceStateV2, error) {
	if old == nil {
		return nil, nil
	}

	attributes, err := copystructure.Copy(old.Attributes)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
	}

	meta, err := copystructure.Copy(old.Meta)
	if err != nil {
		return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
	}

	// Re-key the copied meta map into the wider V2 value type.
	newMeta := make(map[string]interface{})
	for k, v := range meta.(map[string]string) {
		newMeta[k] = v
	}

	return &instanceStateV2{
		ID:         old.ID,
		Attributes: attributes.(map[string]string),
		Meta:       newMeta,
	}, nil
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go
new file mode 100644
index 0000000..6fe2ab8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version2.go
@@ -0,0 +1,209 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6 "sync"
7
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
11func readStateV2(src []byte) (*File, tfdiags.Diagnostics) {
12 var diags tfdiags.Diagnostics
13 sV2 := &stateV2{}
14 err := json.Unmarshal(src, sV2)
15 if err != nil {
16 diags = diags.Append(jsonUnmarshalDiags(err))
17 return nil, diags
18 }
19
20 file, prepDiags := prepareStateV2(sV2)
21 diags = diags.Append(prepDiags)
22 return file, diags
23}
24
25func prepareStateV2(sV2 *stateV2) (*File, tfdiags.Diagnostics) {
26 var diags tfdiags.Diagnostics
27 sV3, err := upgradeStateV2ToV3(sV2)
28 if err != nil {
29 diags = diags.Append(tfdiags.Sourceless(
30 tfdiags.Error,
31 upgradeFailed,
32 fmt.Sprintf("Error upgrading state file format from version 2 to version 3: %s.", err),
33 ))
34 return nil, diags
35 }
36
37 file, prepDiags := prepareStateV3(sV3)
38 diags = diags.Append(prepDiags)
39 return file, diags
40}
41
// stateV2 is a representation of the legacy JSON state format version 2.
//
// It is only used to read version 2 JSON files prior to upgrading them to
// the current format.
type stateV2 struct {
	// Version is the state file protocol version.
	Version int `json:"version"`

	// TFVersion is the version of Terraform that wrote this state.
	TFVersion string `json:"terraform_version,omitempty"`

	// Serial is incremented on any operation that modifies
	// the State file. It is used to detect potentially conflicting
	// updates.
	Serial int64 `json:"serial"`

	// Lineage is set when a new, blank state is created and then
	// never updated. This allows us to determine whether the serials
	// of two states can be meaningfully compared.
	// Apart from the guarantee that collisions between two lineages
	// are very unlikely, this value is opaque and external callers
	// should only compare lineage strings byte-for-byte for equality.
	Lineage string `json:"lineage"`

	// Remote is used to track the metadata required to
	// pull and push state files from a remote storage endpoint.
	Remote *remoteStateV2 `json:"remote,omitempty"`

	// Backend tracks the configuration for the backend in use with
	// this state. This is used to track any changes in the backend
	// configuration.
	Backend *backendStateV2 `json:"backend,omitempty"`

	// Modules contains all the modules in a breadth-first order
	Modules []*moduleStateV2 `json:"modules"`
}
78
// remoteStateV2 records where a V2 state was stored remotely, if anywhere.
type remoteStateV2 struct {
	// Type controls the client we use for the remote state
	Type string `json:"type"`

	// Config is used to store arbitrary configuration that
	// is type specific
	Config map[string]string `json:"config"`
}
87
// outputStateV2 is the V2 representation of a single module output value.
type outputStateV2 struct {
	// Sensitive describes whether the output is considered sensitive,
	// which may lead to masking the value on screen in some cases.
	Sensitive bool `json:"sensitive"`
	// Type describes the structure of Value. Valid values are "string",
	// "map" and "list"
	Type string `json:"type"`
	// Value contains the value of the output, in the structure described
	// by the Type field.
	Value interface{} `json:"value"`

	// mu guards concurrent access; its presence makes this struct
	// unsafe to copy by value.
	mu sync.Mutex
}
101
// moduleStateV2 is the V2 representation of the state of a single module.
type moduleStateV2 struct {
	// Path is the import path from the root module. Module imports are
	// always disjoint, so the path represents a module tree
	Path []string `json:"path"`

	// Locals are kept only transiently in-memory, because we can always
	// re-compute them.
	Locals map[string]interface{} `json:"-"`

	// Outputs declared by the module and maintained for each module
	// even though only the root module technically needs to be kept.
	// This allows operators to inspect values at the boundaries.
	Outputs map[string]*outputStateV2 `json:"outputs"`

	// Resources is a mapping of the logically named resource to
	// the state of the resource. Each resource may actually have
	// N instances underneath, although a user only needs to think
	// about the 1:1 case.
	Resources map[string]*resourceStateV2 `json:"resources"`

	// Dependencies are a list of things that this module relies on
	// existing to remain intact. For example: a module may depend
	// on a VPC ID given by an aws_vpc resource.
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a module that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on"`
}
136
// resourceStateV2 is the V2 representation of the state of a single
// resource, holding its primary instance plus any deposed instances.
type resourceStateV2 struct {
	// This is filled in and managed by Terraform, and is the resource
	// type itself such as "mycloud_instance". If a resource provider sets
	// this value, it won't be persisted.
	Type string `json:"type"`

	// Dependencies are a list of things that this resource relies on
	// existing to remain intact. For example: an AWS instance might
	// depend on a subnet (which itself might depend on a VPC, and so
	// on).
	//
	// Terraform uses this information to build valid destruction
	// orders and to warn the user if they're destroying a resource that
	// another resource depends on.
	//
	// Things can be put into this list that may not be managed by
	// Terraform. If Terraform doesn't find a matching ID in the
	// overall state, then it assumes it isn't managed and doesn't
	// worry about it.
	Dependencies []string `json:"depends_on"`

	// Primary is the current active instance for this resource.
	// It can be replaced but only after a successful creation.
	// This is the instances on which providers will act.
	Primary *instanceStateV2 `json:"primary"`

	// Deposed is used in the mechanics of CreateBeforeDestroy: the existing
	// Primary is Deposed to get it out of the way for the replacement Primary to
	// be created by Apply. If the replacement Primary creates successfully, the
	// Deposed instance is cleaned up.
	//
	// If there were problems creating the replacement Primary, the Deposed
	// instance and the (now tainted) replacement Primary will be swapped so the
	// tainted replacement will be cleaned up instead.
	//
	// An instance will remain in the Deposed list until it is successfully
	// destroyed and purged.
	Deposed []*instanceStateV2 `json:"deposed"`

	// Provider is used when a resource is connected to a provider with an alias.
	// If this string is empty, the resource is connected to the default provider,
	// e.g. "aws_instance" goes with the "aws" provider.
	// If the resource block contained a "provider" key, that value will be set here.
	Provider string `json:"provider"`

	// mu guards concurrent access; its presence makes this struct
	// unsafe to copy by value.
	mu sync.Mutex
}
184
// instanceStateV2 is the V2 representation of a single resource instance.
type instanceStateV2 struct {
	// A unique ID for this resource. This is opaque to Terraform
	// and is only meant as a lookup mechanism for the providers.
	ID string `json:"id"`

	// Attributes are basic information about the resource. Any keys here
	// are accessible in variable format within Terraform configurations:
	// ${resourcetype.name.attribute}.
	Attributes map[string]string `json:"attributes"`

	// Meta is a simple K/V map that is persisted to the State but otherwise
	// ignored by Terraform core. It's meant to be used for accounting by
	// external client code. The value here must only contain Go primitives
	// and collections.
	Meta map[string]interface{} `json:"meta"`

	// Tainted is used to mark a resource for recreation.
	Tainted bool `json:"tainted"`
}
204
// backendStateV2 tracks the backend configuration recorded in a V2 state.
type backendStateV2 struct {
	Type      string          `json:"type"`   // Backend type
	ConfigRaw json.RawMessage `json:"config"` // Backend raw config, decoded lazily by the consumer
	Hash      int             `json:"hash"`   // Hash of portion of configuration from config files
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go
new file mode 100644
index 0000000..2d03c07
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version2_upgrade.go
@@ -0,0 +1,145 @@
1package statefile
2
3import (
4 "fmt"
5 "log"
6 "regexp"
7 "sort"
8 "strconv"
9 "strings"
10
11 "github.com/mitchellh/copystructure"
12)
13
// upgradeStateV2ToV3 upgrades a V2 state representation into a V3 one.
//
// stateV3 is a defined type over stateV2, so the structure is unchanged: the
// upgrade deep-copies the old state, bumps the version number, and rewrites
// each instance's flatmap attributes to the V3 collection-key conventions.
// A nil input upgrades to a typed-nil *stateV3.
func upgradeStateV2ToV3(old *stateV2) (*stateV3, error) {
	if old == nil {
		return (*stateV3)(nil), nil
	}

	var new *stateV3
	{
		// Deep-copy with Lock enabled (stateV2 values embed sync.Mutex
		// fields) so the result shares no memory with the input. A copy
		// failure here indicates a programming error, not bad user input,
		// hence the panic rather than an error return.
		copy, err := copystructure.Config{Lock: true}.Copy(old)
		if err != nil {
			panic(err)
		}
		newWrongType := copy.(*stateV2)
		newRightType := (stateV3)(*newWrongType)
		new = &newRightType
	}

	// Set the new version number
	new.Version = 3

	// Change the counts for things which look like maps to use the %
	// syntax. Remove counts for empty collections - they will be added
	// back in later. (upgradeAttributesV2ToV3 currently always returns a
	// nil error, so its result is deliberately ignored here.)
	for _, module := range new.Modules {
		for _, resource := range module.Resources {
			// Upgrade Primary
			if resource.Primary != nil {
				upgradeAttributesV2ToV3(resource.Primary)
			}

			// Upgrade Deposed
			for _, deposed := range resource.Deposed {
				upgradeAttributesV2ToV3(deposed)
			}
		}
	}

	return new, nil
}
52
// upgradeAttributesV2ToV3 rewrites an instance's flatmap attributes in place
// to the V3 conventions: collections detected as maps have their count key
// renamed from "prefix.#" to "prefix.%", and count keys for empty collections
// are deleted entirely. The heuristics here can misfire, so each decision is
// logged. The error result is currently always nil.
func upgradeAttributesV2ToV3(instanceState *instanceStateV2) error {
	// "prefix.#" marks a collection; the first submatch captures "prefix.".
	collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
	// Captures the first dot-separated segment of a subkey.
	collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)

	// Identify the key prefix of anything which is a collection
	var collectionKeyPrefixes []string
	for key := range instanceState.Attributes {
		if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
			collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
		}
	}
	sort.Strings(collectionKeyPrefixes)

	log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)

	// This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
	// run very often.
	for _, prefix := range collectionKeyPrefixes {
		// First get the actual keys that belong to this prefix
		var potentialKeysMatching []string
		for key := range instanceState.Attributes {
			if strings.HasPrefix(key, prefix) {
				potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
			}
		}
		sort.Strings(potentialKeysMatching)

		// Reduce each subkey to its first segment, dropping the count
		// marker "#" itself.
		var actualKeysMatching []string
		for _, key := range potentialKeysMatching {
			if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
				actualKeysMatching = append(actualKeysMatching, submatches[0][1])
			} else {
				if key != "#" {
					actualKeysMatching = append(actualKeysMatching, key)
				}
			}
		}
		actualKeysMatching = uniqueSortedStrings(actualKeysMatching)

		// Now inspect the keys in order to determine whether this is most likely to be
		// a map, list or set. There is room for error here, so we log in each case. If
		// there is no method of telling, we remove the key from the InstanceState in
		// order that it will be recreated. Again, this could be rolled into fewer loops
		// but we prefer clarity.

		oldCountKey := fmt.Sprintf("%s#", prefix)

		// First, detect "obvious" maps - which have non-numeric keys (mostly).
		hasNonNumericKeys := false
		for _, key := range actualKeysMatching {
			if _, err := strconv.Atoi(key); err != nil {
				hasNonNumericKeys = true
			}
		}
		if hasNonNumericKeys {
			newCountKey := fmt.Sprintf("%s%%", prefix)

			instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
			delete(instanceState.Attributes, oldCountKey)
			log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
				strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
		}

		// Now detect empty collections and remove them from state.
		if len(actualKeysMatching) == 0 {
			delete(instanceState.Attributes, oldCountKey)
			log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
				strings.TrimSuffix(prefix, "."))
		}
	}

	return nil
}
126
// uniqueSortedStrings removes duplicates from a slice of strings and returns
// a sorted slice of the unique strings.
func uniqueSortedStrings(input []string) []string {
	seen := make(map[string]struct{}, len(input))
	output := make([]string, 0, len(input))

	// Single pass: record each string the first time it appears.
	for _, str := range input {
		if _, dup := seen[str]; dup {
			continue
		}
		seen[str] = struct{}{}
		output = append(output, str)
	}

	sort.Strings(output)
	return output
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go
new file mode 100644
index 0000000..ab6414b
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version3.go
@@ -0,0 +1,50 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6
7 "github.com/hashicorp/terraform/tfdiags"
8)
9
10func readStateV3(src []byte) (*File, tfdiags.Diagnostics) {
11 var diags tfdiags.Diagnostics
12 sV3 := &stateV3{}
13 err := json.Unmarshal(src, sV3)
14 if err != nil {
15 diags = diags.Append(jsonUnmarshalDiags(err))
16 return nil, diags
17 }
18
19 file, prepDiags := prepareStateV3(sV3)
20 diags = diags.Append(prepDiags)
21 return file, diags
22}
23
24func prepareStateV3(sV3 *stateV3) (*File, tfdiags.Diagnostics) {
25 var diags tfdiags.Diagnostics
26 sV4, err := upgradeStateV3ToV4(sV3)
27 if err != nil {
28 diags = diags.Append(tfdiags.Sourceless(
29 tfdiags.Error,
30 upgradeFailed,
31 fmt.Sprintf("Error upgrading state file format from version 3 to version 4: %s.", err),
32 ))
33 return nil, diags
34 }
35
36 file, prepDiags := prepareStateV4(sV4)
37 diags = diags.Append(prepDiags)
38 return file, diags
39}
40
// stateV3 is a representation of the legacy JSON state format version 3.
//
// It is only used to read version 3 JSON files prior to upgrading them to
// the current format.
//
// The differences between version 2 and version 3 are only in the data and
// not in the structure, so stateV3 actually shares the same structs as
// stateV2. Type stateV3 represents that the data within is formatted as
// expected by the V3 format, rather than the V2 format.
type stateV3 stateV2
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
new file mode 100644
index 0000000..2cbe8a5
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
@@ -0,0 +1,431 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6 "strconv"
7 "strings"
8
9 "github.com/zclconf/go-cty/cty"
10 ctyjson "github.com/zclconf/go-cty/cty/json"
11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/states"
14 "github.com/hashicorp/terraform/tfdiags"
15)
16
// upgradeStateV3ToV4 migrates a decoded version 3 state into the version 4
// in-memory model: the old per-module resource maps (keyed by legacy
// instance addresses) are flattened into the v4 resource list, and root
// module outputs are re-encoded with explicit cty types.
func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {

	if old.Serial < 0 {
		// The new format is using uint64 here, which should be fine for any
		// real state (we only used positive integers in practice) but we'll
		// catch this explicitly here to avoid weird behavior if a state file
		// has been tampered with in some way.
		return nil, fmt.Errorf("state has serial less than zero, which is invalid")
	}

	new := &stateV4{
		TerraformVersion: old.TFVersion,
		Serial:           uint64(old.Serial),
		Lineage:          old.Lineage,
		RootOutputs:      map[string]outputStateV4{},
		Resources:        []resourceStateV4{},
	}

	if new.TerraformVersion == "" {
		// Older formats considered this to be optional, but now it's required
		// and so we'll stub it out with something that's definitely older
		// than the version that really created this state.
		new.TerraformVersion = "0.0.0"
	}

	for _, msOld := range old.Modules {
		// Every legacy module path begins with the literal element "root".
		if len(msOld.Path) < 1 || msOld.Path[0] != "root" {
			return nil, fmt.Errorf("state contains invalid module path %#v", msOld.Path)
		}

		// Convert legacy-style module address into our newer address type.
		// Since these old formats are only generated by versions of Terraform
		// that don't support count and for_each on modules, we can just assume
		// all of the modules are unkeyed.
		moduleAddr := make(addrs.ModuleInstance, len(msOld.Path)-1)
		for i, name := range msOld.Path[1:] {
			moduleAddr[i] = addrs.ModuleInstanceStep{
				Name:        name,
				InstanceKey: addrs.NoKey,
			}
		}

		// In a v3 state file, a "resource state" is actually an instance
		// state, so we need to fill in a missing level of hierarchy here
		// by lazily creating resource states as we encounter them.
		// We'll track them in here, keyed on the string representation of
		// the resource address.
		resourceStates := map[string]*resourceStateV4{}

		for legacyAddr, rsOld := range msOld.Resources {
			instAddr, err := parseLegacyResourceAddress(legacyAddr)
			if err != nil {
				return nil, err
			}

			resAddr := instAddr.Resource
			rs, exists := resourceStates[resAddr.String()]
			if !exists {
				var modeStr string
				switch resAddr.Mode {
				case addrs.ManagedResourceMode:
					modeStr = "managed"
				case addrs.DataResourceMode:
					modeStr = "data"
				default:
					return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode", resAddr)
				}

				// In state versions prior to 4 we allowed each instance of a
				// resource to have its own provider configuration address,
				// which makes no real sense in practice because providers
				// are associated with resources in the configuration. We
				// elevate that to the resource level during this upgrade,
				// implicitly taking the provider address of the first instance
				// we encounter for each resource. While this is lossy in
				// theory, in practice there is no reason for these values to
				// differ between instances.
				var providerAddr addrs.AbsProviderConfig
				oldProviderAddr := rsOld.Provider
				if strings.Contains(oldProviderAddr, "provider.") {
					// Smells like a new-style provider address, but we'll test it.
					var diags tfdiags.Diagnostics
					providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr)
					if diags.HasErrors() {
						return nil, diags.Err()
					}
				} else {
					// Smells like an old-style module-local provider address,
					// which we'll need to migrate. We'll assume it's referring
					// to the same module the resource is in, which might be
					// incorrect but it'll get fixed up next time any updates
					// are made to an instance.
					if oldProviderAddr != "" {
						localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr)
						if diags.HasErrors() {
							return nil, diags.Err()
						}
						providerAddr = localAddr.Absolute(moduleAddr)
					} else {
						providerAddr = resAddr.DefaultProviderConfig().Absolute(moduleAddr)
					}
				}

				rs = &resourceStateV4{
					Module:         moduleAddr.String(),
					Mode:           modeStr,
					Type:           resAddr.Type,
					Name:           resAddr.Name,
					Instances:      []instanceObjectStateV4{},
					ProviderConfig: providerAddr.String(),
				}
				resourceStates[resAddr.String()] = rs
			}

			// Now we'll deal with the instance itself, which may either be
			// the first instance in a resource we just created or an additional
			// instance for a resource added on a prior loop.
			instKey := instAddr.Key
			if isOld := rsOld.Primary; isOld != nil {
				isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, states.NotDeposed)
				if err != nil {
					return nil, fmt.Errorf("failed to migrate primary generation of %s: %s", instAddr, err)
				}
				rs.Instances = append(rs.Instances, *isNew)
			}
			for i, isOld := range rsOld.Deposed {
				// When we migrate old instances we'll use sequential deposed
				// keys just so that the upgrade result is deterministic. New
				// deposed keys allocated moving forward will be pseudorandomly
				// selected, but we check for collisions and so these
				// non-random ones won't hurt.
				deposedKey := states.DeposedKey(fmt.Sprintf("%08x", i+1))
				isNew, err := upgradeInstanceObjectV3ToV4(rsOld, isOld, instKey, deposedKey)
				if err != nil {
					return nil, fmt.Errorf("failed to migrate deposed generation index %d of %s: %s", i, instAddr, err)
				}
				rs.Instances = append(rs.Instances, *isNew)
			}

			// A keyed instance implies the resource used "count"; the legacy
			// formats only supported list-style repetition.
			if instKey != addrs.NoKey && rs.EachMode == "" {
				rs.EachMode = "list"
			}
		}

		for _, rs := range resourceStates {
			new.Resources = append(new.Resources, *rs)
		}

		if len(msOld.Path) == 1 && msOld.Path[0] == "root" {
			// We'll migrate the outputs for this module too, then.
			for name, oldOS := range msOld.Outputs {
				newOS := outputStateV4{
					Sensitive: oldOS.Sensitive,
				}

				valRaw := oldOS.Value
				valSrc, err := json.Marshal(valRaw)
				if err != nil {
					// Should never happen, because this value came from JSON
					// in the first place and so we're just round-tripping here.
					return nil, fmt.Errorf("failed to serialize output %q value as JSON: %s", name, err)
				}

				// The "type" field in state V2 wasn't really that useful
				// since it was only able to capture string vs. list vs. map.
				// For this reason, during upgrade we'll just discard it
				// altogether and use cty's idea of the implied type of
				// turning our old value into JSON.
				ty, err := ctyjson.ImpliedType(valSrc)
				if err != nil {
					// REALLY should never happen, because we literally just
					// encoded this as JSON above!
					return nil, fmt.Errorf("failed to parse output %q value from JSON: %s", name, err)
				}

				// ImpliedType tends to produce structural types, but since older
				// version of Terraform didn't support those a collection type
				// is probably what was intended, so we'll see if we can
				// interpret our value as one.
				ty = simplifyImpliedValueType(ty)

				tySrc, err := ctyjson.MarshalType(ty)
				if err != nil {
					return nil, fmt.Errorf("failed to serialize output %q type as JSON: %s", name, err)
				}

				newOS.ValueRaw = json.RawMessage(valSrc)
				newOS.ValueTypeRaw = json.RawMessage(tySrc)

				new.RootOutputs[name] = newOS
			}
		}
	}

	new.normalize()

	return new, nil
}
215
216func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2, instKey addrs.InstanceKey, deposedKey states.DeposedKey) (*instanceObjectStateV4, error) {
217
218 // Schema versions were, in prior formats, a private concern of the provider
219 // SDK, and not a first-class concept in the state format. Here we're
220 // sniffing for the pre-0.12 SDK's way of representing schema versions
221 // and promoting it to our first-class field if we find it. We'll ignore
222 // it if it doesn't look like what the SDK would've written. If this
223 // sniffing fails then we'll assume schema version 0.
224 var schemaVersion uint64
225 migratedSchemaVersion := false
226 if raw, exists := isOld.Meta["schema_version"]; exists {
227 switch tv := raw.(type) {
228 case string:
229 v, err := strconv.ParseUint(tv, 10, 64)
230 if err == nil {
231 schemaVersion = v
232 migratedSchemaVersion = true
233 }
234 case int:
235 schemaVersion = uint64(tv)
236 migratedSchemaVersion = true
237 case float64:
238 schemaVersion = uint64(tv)
239 migratedSchemaVersion = true
240 }
241 }
242
243 private := map[string]interface{}{}
244 for k, v := range isOld.Meta {
245 if k == "schema_version" && migratedSchemaVersion {
246 // We're gonna promote this into our first-class schema version field
247 continue
248 }
249 private[k] = v
250 }
251 var privateJSON []byte
252 if len(private) != 0 {
253 var err error
254 privateJSON, err = json.Marshal(private)
255 if err != nil {
256 // This shouldn't happen, because the Meta values all came from JSON
257 // originally anyway.
258 return nil, fmt.Errorf("cannot serialize private instance object data: %s", err)
259 }
260 }
261
262 var status string
263 if isOld.Tainted {
264 status = "tainted"
265 }
266
267 var instKeyRaw interface{}
268 switch tk := instKey.(type) {
269 case addrs.IntKey:
270 instKeyRaw = int(tk)
271 case addrs.StringKey:
272 instKeyRaw = string(tk)
273 default:
274 if instKeyRaw != nil {
275 return nil, fmt.Errorf("insupported instance key: %#v", instKey)
276 }
277 }
278
279 var attributes map[string]string
280 if isOld.Attributes != nil {
281 attributes = make(map[string]string, len(isOld.Attributes))
282 for k, v := range isOld.Attributes {
283 attributes[k] = v
284 }
285 }
286 if isOld.ID != "" {
287 // As a special case, if we don't already have an "id" attribute and
288 // yet there's a non-empty first-class ID on the old object then we'll
289 // create a synthetic id attribute to avoid losing that first-class id.
290 // In practice this generally arises only in tests where state literals
291 // are hand-written in a non-standard way; real code prior to 0.12
292 // would always force the first-class ID to be copied into the
293 // id attribute before storing.
294 if attributes == nil {
295 attributes = make(map[string]string, len(isOld.Attributes))
296 }
297 if idVal := attributes["id"]; idVal == "" {
298 attributes["id"] = isOld.ID
299 }
300 }
301
302 dependencies := make([]string, len(rsOld.Dependencies))
303 for i, v := range rsOld.Dependencies {
304 dependencies[i] = parseLegacyDependency(v)
305 }
306
307 return &instanceObjectStateV4{
308 IndexKey: instKeyRaw,
309 Status: status,
310 Deposed: string(deposedKey),
311 AttributesFlat: attributes,
312 Dependencies: dependencies,
313 SchemaVersion: schemaVersion,
314 PrivateRaw: privateJSON,
315 }, nil
316}
317
318// parseLegacyResourceAddress parses the different identifier format used
319// state formats before version 4, like "instance.name.0".
320func parseLegacyResourceAddress(s string) (addrs.ResourceInstance, error) {
321 var ret addrs.ResourceInstance
322
323 // Split based on ".". Every resource address should have at least two
324 // elements (type and name).
325 parts := strings.Split(s, ".")
326 if len(parts) < 2 || len(parts) > 4 {
327 return ret, fmt.Errorf("invalid internal resource address format: %s", s)
328 }
329
330 // Data resource if we have at least 3 parts and the first one is data
331 ret.Resource.Mode = addrs.ManagedResourceMode
332 if len(parts) > 2 && parts[0] == "data" {
333 ret.Resource.Mode = addrs.DataResourceMode
334 parts = parts[1:]
335 }
336
337 // If we're not a data resource and we have more than 3, then it is an error
338 if len(parts) > 3 && ret.Resource.Mode != addrs.DataResourceMode {
339 return ret, fmt.Errorf("invalid internal resource address format: %s", s)
340 }
341
342 // Build the parts of the resource address that are guaranteed to exist
343 ret.Resource.Type = parts[0]
344 ret.Resource.Name = parts[1]
345 ret.Key = addrs.NoKey
346
347 // If we have more parts, then we have an index. Parse that.
348 if len(parts) > 2 {
349 idx, err := strconv.ParseInt(parts[2], 0, 0)
350 if err != nil {
351 return ret, fmt.Errorf("error parsing resource address %q: %s", s, err)
352 }
353
354 ret.Key = addrs.IntKey(idx)
355 }
356
357 return ret, nil
358}
359
360// simplifyImpliedValueType attempts to heuristically simplify a value type
361// derived from a legacy stored output value into something simpler that
362// is closer to what would've fitted into the pre-v0.12 value type system.
363func simplifyImpliedValueType(ty cty.Type) cty.Type {
364 switch {
365 case ty.IsTupleType():
366 // If all of the element types are the same then we'll make this
367 // a list instead. This is very likely to be true, since prior versions
368 // of Terraform did not officially support mixed-type collections.
369
370 if ty.Equals(cty.EmptyTuple) {
371 // Don't know what the element type would be, then.
372 return ty
373 }
374
375 etys := ty.TupleElementTypes()
376 ety := etys[0]
377 for _, other := range etys[1:] {
378 if !other.Equals(ety) {
379 // inconsistent types
380 return ty
381 }
382 }
383 ety = simplifyImpliedValueType(ety)
384 return cty.List(ety)
385
386 case ty.IsObjectType():
387 // If all of the attribute types are the same then we'll make this
388 // a map instead. This is very likely to be true, since prior versions
389 // of Terraform did not officially support mixed-type collections.
390
391 if ty.Equals(cty.EmptyObject) {
392 // Don't know what the element type would be, then.
393 return ty
394 }
395
396 atys := ty.AttributeTypes()
397 var ety cty.Type
398 for _, other := range atys {
399 if ety == cty.NilType {
400 ety = other
401 continue
402 }
403 if !other.Equals(ety) {
404 // inconsistent types
405 return ty
406 }
407 }
408 ety = simplifyImpliedValueType(ety)
409 return cty.Map(ety)
410
411 default:
412 // No other normalizations are possible
413 return ty
414 }
415}
416
// parseLegacyDependency rewrites a pre-0.12 dependency address into the
// newer reference syntax: a numeric element becomes an "[n]" index suffix
// and terminates the address, a "*" element terminates it without a suffix,
// and all other elements are kept dot-separated.
func parseLegacyDependency(s string) string {
	parts := strings.Split(s, ".")

	var buf strings.Builder
	buf.WriteString(parts[0])
	for _, part := range parts[1:] {
		if part == "*" {
			break
		}
		if idx, err := strconv.Atoi(part); err == nil {
			fmt.Fprintf(&buf, "[%d]", idx)
			break
		}
		buf.WriteString(".")
		buf.WriteString(part)
	}
	return buf.String()
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/version4.go b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go
new file mode 100644
index 0000000..ee8b652
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/version4.go
@@ -0,0 +1,604 @@
1package statefile
2
3import (
4 "encoding/json"
5 "fmt"
6 "io"
7 "sort"
8
9 version "github.com/hashicorp/go-version"
10 ctyjson "github.com/zclconf/go-cty/cty/json"
11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/states"
14 "github.com/hashicorp/terraform/tfdiags"
15)
16
17func readStateV4(src []byte) (*File, tfdiags.Diagnostics) {
18 var diags tfdiags.Diagnostics
19 sV4 := &stateV4{}
20 err := json.Unmarshal(src, sV4)
21 if err != nil {
22 diags = diags.Append(jsonUnmarshalDiags(err))
23 return nil, diags
24 }
25
26 file, prepDiags := prepareStateV4(sV4)
27 diags = diags.Append(prepDiags)
28 return file, diags
29}
30
31func prepareStateV4(sV4 *stateV4) (*File, tfdiags.Diagnostics) {
32 var diags tfdiags.Diagnostics
33
34 var tfVersion *version.Version
35 if sV4.TerraformVersion != "" {
36 var err error
37 tfVersion, err = version.NewVersion(sV4.TerraformVersion)
38 if err != nil {
39 diags = diags.Append(tfdiags.Sourceless(
40 tfdiags.Error,
41 "Invalid Terraform version string",
42 fmt.Sprintf("State file claims to have been written by Terraform version %q, which is not a valid version string.", sV4.TerraformVersion),
43 ))
44 }
45 }
46
47 file := &File{
48 TerraformVersion: tfVersion,
49 Serial: sV4.Serial,
50 Lineage: sV4.Lineage,
51 }
52
53 state := states.NewState()
54
55 for _, rsV4 := range sV4.Resources {
56 rAddr := addrs.Resource{
57 Type: rsV4.Type,
58 Name: rsV4.Name,
59 }
60 switch rsV4.Mode {
61 case "managed":
62 rAddr.Mode = addrs.ManagedResourceMode
63 case "data":
64 rAddr.Mode = addrs.DataResourceMode
65 default:
66 diags = diags.Append(tfdiags.Sourceless(
67 tfdiags.Error,
68 "Invalid resource mode in state",
69 fmt.Sprintf("State contains a resource with mode %q (%q %q) which is not supported.", rsV4.Mode, rAddr.Type, rAddr.Name),
70 ))
71 continue
72 }
73
74 moduleAddr := addrs.RootModuleInstance
75 if rsV4.Module != "" {
76 var addrDiags tfdiags.Diagnostics
77 moduleAddr, addrDiags = addrs.ParseModuleInstanceStr(rsV4.Module)
78 diags = diags.Append(addrDiags)
79 if addrDiags.HasErrors() {
80 continue
81 }
82 }
83
84 providerAddr, addrDiags := addrs.ParseAbsProviderConfigStr(rsV4.ProviderConfig)
85 diags.Append(addrDiags)
86 if addrDiags.HasErrors() {
87 continue
88 }
89
90 var eachMode states.EachMode
91 switch rsV4.EachMode {
92 case "":
93 eachMode = states.NoEach
94 case "list":
95 eachMode = states.EachList
96 case "map":
97 eachMode = states.EachMap
98 default:
99 diags = diags.Append(tfdiags.Sourceless(
100 tfdiags.Error,
101 "Invalid resource metadata in state",
102 fmt.Sprintf("Resource %s has invalid \"each\" value %q in state.", rAddr.Absolute(moduleAddr), eachMode),
103 ))
104 continue
105 }
106
107 ms := state.EnsureModule(moduleAddr)
108
109 // Ensure the resource container object is present in the state.
110 ms.SetResourceMeta(rAddr, eachMode, providerAddr)
111
112 for _, isV4 := range rsV4.Instances {
113 keyRaw := isV4.IndexKey
114 var key addrs.InstanceKey
115 switch tk := keyRaw.(type) {
116 case int:
117 key = addrs.IntKey(tk)
118 case float64:
119 // Since JSON only has one number type, reading from encoding/json
120 // gives us a float64 here even if the number is whole.
121 // float64 has a smaller integer range than int, but in practice
122 // we rarely have more than a few tens of instances and so
123 // it's unlikely that we'll exhaust the 52 bits in a float64.
124 key = addrs.IntKey(int(tk))
125 case string:
126 key = addrs.StringKey(tk)
127 default:
128 if keyRaw != nil {
129 diags = diags.Append(tfdiags.Sourceless(
130 tfdiags.Error,
131 "Invalid resource instance metadata in state",
132 fmt.Sprintf("Resource %s has an instance with the invalid instance key %#v.", rAddr.Absolute(moduleAddr), keyRaw),
133 ))
134 continue
135 }
136 key = addrs.NoKey
137 }
138
139 instAddr := rAddr.Instance(key)
140
141 obj := &states.ResourceInstanceObjectSrc{
142 SchemaVersion: isV4.SchemaVersion,
143 }
144
145 {
146 // Instance attributes
147 switch {
148 case isV4.AttributesRaw != nil:
149 obj.AttrsJSON = isV4.AttributesRaw
150 case isV4.AttributesFlat != nil:
151 obj.AttrsFlat = isV4.AttributesFlat
152 default:
153 // This is odd, but we'll accept it and just treat the
154 // object has being empty. In practice this should arise
155 // only from the contrived sort of state objects we tend
156 // to hand-write inline in tests.
157 obj.AttrsJSON = []byte{'{', '}'}
158 }
159 }
160
161 {
162 // Status
163 raw := isV4.Status
164 switch raw {
165 case "":
166 obj.Status = states.ObjectReady
167 case "tainted":
168 obj.Status = states.ObjectTainted
169 default:
170 diags = diags.Append(tfdiags.Sourceless(
171 tfdiags.Error,
172 "Invalid resource instance metadata in state",
173 fmt.Sprintf("Instance %s has invalid status %q.", instAddr.Absolute(moduleAddr), raw),
174 ))
175 continue
176 }
177 }
178
179 if raw := isV4.PrivateRaw; len(raw) > 0 {
180 obj.Private = raw
181 }
182
183 {
184 depsRaw := isV4.Dependencies
185 deps := make([]addrs.Referenceable, 0, len(depsRaw))
186 for _, depRaw := range depsRaw {
187 ref, refDiags := addrs.ParseRefStr(depRaw)
188 diags = diags.Append(refDiags)
189 if refDiags.HasErrors() {
190 continue
191 }
192 if len(ref.Remaining) != 0 {
193 diags = diags.Append(tfdiags.Sourceless(
194 tfdiags.Error,
195 "Invalid resource instance metadata in state",
196 fmt.Sprintf("Instance %s declares dependency on %q, which is not a reference to a dependable object.", instAddr.Absolute(moduleAddr), depRaw),
197 ))
198 }
199 if ref.Subject == nil {
200 // Should never happen
201 panic(fmt.Sprintf("parsing dependency %q for instance %s returned a nil address", depRaw, instAddr.Absolute(moduleAddr)))
202 }
203 deps = append(deps, ref.Subject)
204 }
205 obj.Dependencies = deps
206 }
207
208 switch {
209 case isV4.Deposed != "":
210 dk := states.DeposedKey(isV4.Deposed)
211 if len(dk) != 8 {
212 diags = diags.Append(tfdiags.Sourceless(
213 tfdiags.Error,
214 "Invalid resource instance metadata in state",
215 fmt.Sprintf("Instance %s has an object with deposed key %q, which is not correctly formatted.", instAddr.Absolute(moduleAddr), isV4.Deposed),
216 ))
217 continue
218 }
219 is := ms.ResourceInstance(instAddr)
220 if is.HasDeposed(dk) {
221 diags = diags.Append(tfdiags.Sourceless(
222 tfdiags.Error,
223 "Duplicate resource instance in state",
224 fmt.Sprintf("Instance %s deposed object %q appears multiple times in the state file.", instAddr.Absolute(moduleAddr), dk),
225 ))
226 continue
227 }
228
229 ms.SetResourceInstanceDeposed(instAddr, dk, obj, providerAddr)
230 default:
231 is := ms.ResourceInstance(instAddr)
232 if is.HasCurrent() {
233 diags = diags.Append(tfdiags.Sourceless(
234 tfdiags.Error,
235 "Duplicate resource instance in state",
236 fmt.Sprintf("Instance %s appears multiple times in the state file.", instAddr.Absolute(moduleAddr)),
237 ))
238 continue
239 }
240
241 ms.SetResourceInstanceCurrent(instAddr, obj, providerAddr)
242 }
243 }
244
245 // We repeat this after creating the instances because
246 // SetResourceInstanceCurrent automatically resets this metadata based
247 // on the incoming objects. That behavior is useful when we're making
248 // piecemeal updates to the state during an apply, but when we're
249 // reading the state file we want to reflect its contents exactly.
250 ms.SetResourceMeta(rAddr, eachMode, providerAddr)
251 }
252
253 // The root module is special in that we persist its attributes and thus
254 // need to reload them now. (For descendent modules we just re-calculate
255 // them based on the latest configuration on each run.)
256 {
257 rootModule := state.RootModule()
258 for name, fos := range sV4.RootOutputs {
259 os := &states.OutputValue{}
260 os.Sensitive = fos.Sensitive
261
262 ty, err := ctyjson.UnmarshalType([]byte(fos.ValueTypeRaw))
263 if err != nil {
264 diags = diags.Append(tfdiags.Sourceless(
265 tfdiags.Error,
266 "Invalid output value type in state",
267 fmt.Sprintf("The state file has an invalid type specification for output %q: %s.", name, err),
268 ))
269 continue
270 }
271
272 val, err := ctyjson.Unmarshal([]byte(fos.ValueRaw), ty)
273 if err != nil {
274 diags = diags.Append(tfdiags.Sourceless(
275 tfdiags.Error,
276 "Invalid output value saved in state",
277 fmt.Sprintf("The state file has an invalid value for output %q: %s.", name, err),
278 ))
279 continue
280 }
281
282 os.Value = val
283 rootModule.OutputValues[name] = os
284 }
285 }
286
287 file.State = state
288 return file, diags
289}
290
291func writeStateV4(file *File, w io.Writer) tfdiags.Diagnostics {
292 // Here we'll convert back from the "File" representation to our
293 // stateV4 struct representation and write that.
294 //
295 // While we support legacy state formats for reading, we only support the
296 // latest for writing and so if a V5 is added in future then this function
297 // should be deleted and replaced with a writeStateV5, even though the
298 // read/prepare V4 functions above would stick around.
299
300 var diags tfdiags.Diagnostics
301 if file == nil || file.State == nil {
302 panic("attempt to write nil state to file")
303 }
304
305 var terraformVersion string
306 if file.TerraformVersion != nil {
307 terraformVersion = file.TerraformVersion.String()
308 }
309
310 sV4 := &stateV4{
311 TerraformVersion: terraformVersion,
312 Serial: file.Serial,
313 Lineage: file.Lineage,
314 RootOutputs: map[string]outputStateV4{},
315 Resources: []resourceStateV4{},
316 }
317
318 for name, os := range file.State.RootModule().OutputValues {
319 src, err := ctyjson.Marshal(os.Value, os.Value.Type())
320 if err != nil {
321 diags = diags.Append(tfdiags.Sourceless(
322 tfdiags.Error,
323 "Failed to serialize output value in state",
324 fmt.Sprintf("An error occured while serializing output value %q: %s.", name, err),
325 ))
326 continue
327 }
328
329 typeSrc, err := ctyjson.MarshalType(os.Value.Type())
330 if err != nil {
331 diags = diags.Append(tfdiags.Sourceless(
332 tfdiags.Error,
333 "Failed to serialize output value in state",
334 fmt.Sprintf("An error occured while serializing the type of output value %q: %s.", name, err),
335 ))
336 continue
337 }
338
339 sV4.RootOutputs[name] = outputStateV4{
340 Sensitive: os.Sensitive,
341 ValueRaw: json.RawMessage(src),
342 ValueTypeRaw: json.RawMessage(typeSrc),
343 }
344 }
345
346 for _, ms := range file.State.Modules {
347 moduleAddr := ms.Addr
348 for _, rs := range ms.Resources {
349 resourceAddr := rs.Addr
350
351 var mode string
352 switch resourceAddr.Mode {
353 case addrs.ManagedResourceMode:
354 mode = "managed"
355 case addrs.DataResourceMode:
356 mode = "data"
357 default:
358 diags = diags.Append(tfdiags.Sourceless(
359 tfdiags.Error,
360 "Failed to serialize resource in state",
361 fmt.Sprintf("Resource %s has mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), resourceAddr.Mode),
362 ))
363 continue
364 }
365
366 var eachMode string
367 switch rs.EachMode {
368 case states.NoEach:
369 eachMode = ""
370 case states.EachList:
371 eachMode = "list"
372 case states.EachMap:
373 eachMode = "map"
374 default:
375 diags = diags.Append(tfdiags.Sourceless(
376 tfdiags.Error,
377 "Failed to serialize resource in state",
378 fmt.Sprintf("Resource %s has \"each\" mode %s, which cannot be serialized in state", resourceAddr.Absolute(moduleAddr), rs.EachMode),
379 ))
380 continue
381 }
382
383 sV4.Resources = append(sV4.Resources, resourceStateV4{
384 Module: moduleAddr.String(),
385 Mode: mode,
386 Type: resourceAddr.Type,
387 Name: resourceAddr.Name,
388 EachMode: eachMode,
389 ProviderConfig: rs.ProviderConfig.String(),
390 Instances: []instanceObjectStateV4{},
391 })
392 rsV4 := &(sV4.Resources[len(sV4.Resources)-1])
393
394 for key, is := range rs.Instances {
395 if is.HasCurrent() {
396 var objDiags tfdiags.Diagnostics
397 rsV4.Instances, objDiags = appendInstanceObjectStateV4(
398 rs, is, key, is.Current, states.NotDeposed,
399 rsV4.Instances,
400 )
401 diags = diags.Append(objDiags)
402 }
403 for dk, obj := range is.Deposed {
404 var objDiags tfdiags.Diagnostics
405 rsV4.Instances, objDiags = appendInstanceObjectStateV4(
406 rs, is, key, obj, dk,
407 rsV4.Instances,
408 )
409 diags = diags.Append(objDiags)
410 }
411 }
412 }
413 }
414
415 sV4.normalize()
416
417 src, err := json.MarshalIndent(sV4, "", " ")
418 if err != nil {
419 // Shouldn't happen if we do our conversion to *stateV4 correctly above.
420 diags = diags.Append(tfdiags.Sourceless(
421 tfdiags.Error,
422 "Failed to serialize state",
423 fmt.Sprintf("An error occured while serializing the state to save it. This is a bug in Terraform and should be reported: %s.", err),
424 ))
425 return diags
426 }
427 src = append(src, '\n')
428
429 _, err = w.Write(src)
430 if err != nil {
431 diags = diags.Append(tfdiags.Sourceless(
432 tfdiags.Error,
433 "Failed to write state",
434 fmt.Sprintf("An error occured while writing the serialized state: %s.", err),
435 ))
436 return diags
437 }
438
439 return diags
440}
441
442func appendInstanceObjectStateV4(rs *states.Resource, is *states.ResourceInstance, key addrs.InstanceKey, obj *states.ResourceInstanceObjectSrc, deposed states.DeposedKey, isV4s []instanceObjectStateV4) ([]instanceObjectStateV4, tfdiags.Diagnostics) {
443 var diags tfdiags.Diagnostics
444
445 var status string
446 switch obj.Status {
447 case states.ObjectReady:
448 status = ""
449 case states.ObjectTainted:
450 status = "tainted"
451 default:
452 diags = diags.Append(tfdiags.Sourceless(
453 tfdiags.Error,
454 "Failed to serialize resource instance in state",
455 fmt.Sprintf("Instance %s has status %s, which cannot be saved in state.", rs.Addr.Instance(key), obj.Status),
456 ))
457 }
458
459 var privateRaw []byte
460 if len(obj.Private) > 0 {
461 privateRaw = obj.Private
462 }
463
464 deps := make([]string, len(obj.Dependencies))
465 for i, depAddr := range obj.Dependencies {
466 deps[i] = depAddr.String()
467 }
468
469 var rawKey interface{}
470 switch tk := key.(type) {
471 case addrs.IntKey:
472 rawKey = int(tk)
473 case addrs.StringKey:
474 rawKey = string(tk)
475 default:
476 if key != addrs.NoKey {
477 diags = diags.Append(tfdiags.Sourceless(
478 tfdiags.Error,
479 "Failed to serialize resource instance in state",
480 fmt.Sprintf("Instance %s has an unsupported instance key: %#v.", rs.Addr.Instance(key), key),
481 ))
482 }
483 }
484
485 return append(isV4s, instanceObjectStateV4{
486 IndexKey: rawKey,
487 Deposed: string(deposed),
488 Status: status,
489 SchemaVersion: obj.SchemaVersion,
490 AttributesFlat: obj.AttrsFlat,
491 AttributesRaw: obj.AttrsJSON,
492 PrivateRaw: privateRaw,
493 Dependencies: deps,
494 }), diags
495}
496
// stateV4 is the top-level object of the version 4 state serialization.
type stateV4 struct {
	// Version always serializes as the literal number 4; see stateVersionV4.
	Version          stateVersionV4           `json:"version"`
	TerraformVersion string                   `json:"terraform_version"`
	Serial           uint64                   `json:"serial"`
	Lineage          string                   `json:"lineage"`
	// RootOutputs holds only the root module's outputs; descendent module
	// outputs are not persisted (see prepareStateV4).
	RootOutputs map[string]outputStateV4 `json:"outputs"`
	// Resources is a flat list across all modules; each entry records its
	// own module address.
	Resources []resourceStateV4 `json:"resources"`
}
505
506// normalize makes some in-place changes to normalize the way items are
507// stored to ensure that two functionally-equivalent states will be stored
508// identically.
509func (s *stateV4) normalize() {
510 sort.Stable(sortResourcesV4(s.Resources))
511 for _, rs := range s.Resources {
512 sort.Stable(sortInstancesV4(rs.Instances))
513 }
514}
515
// outputStateV4 is the v4 serialization of a single root module output:
// the value and its cty type are stored as raw JSON, deferred for decoding
// in prepareStateV4.
type outputStateV4 struct {
	ValueRaw     json.RawMessage `json:"value"`
	ValueTypeRaw json.RawMessage `json:"type"`
	Sensitive    bool            `json:"sensitive,omitempty"`
}
521
// resourceStateV4 is the v4 serialization of one resource, along with all
// of its instance objects.
type resourceStateV4 struct {
	// Module is the string form of the containing module instance address;
	// empty for the root module.
	Module string `json:"module,omitempty"`
	// Mode is "managed" or "data".
	Mode string `json:"mode"`
	Type string `json:"type"`
	Name string `json:"name"`
	// EachMode is "", "list", or "map" (see prepareStateV4/writeStateV4).
	EachMode       string                  `json:"each,omitempty"`
	ProviderConfig string                  `json:"provider"`
	Instances      []instanceObjectStateV4 `json:"instances"`
}
531
// instanceObjectStateV4 is the v4 serialization of a single object
// (current or deposed generation) of a resource instance.
type instanceObjectStateV4 struct {
	// IndexKey is an int or string instance key, or nil for an unkeyed
	// (single) instance.
	IndexKey interface{} `json:"index_key,omitempty"`
	// Status is "" for ready objects or "tainted".
	Status string `json:"status,omitempty"`
	// Deposed is empty for the current object, or an 8-character deposed key.
	Deposed string `json:"deposed,omitempty"`

	SchemaVersion uint64 `json:"schema_version"`
	// AttributesRaw holds modern JSON-encoded attributes; AttributesFlat
	// holds the legacy flatmap form carried over from v3 upgrades. At most
	// one of the two is set.
	AttributesRaw  json.RawMessage   `json:"attributes,omitempty"`
	AttributesFlat map[string]string `json:"attributes_flat,omitempty"`

	// PrivateRaw is opaque provider-private data, preserved verbatim.
	PrivateRaw []byte `json:"private,omitempty"`

	Dependencies []string `json:"depends_on,omitempty"`
}
545
// stateVersionV4 is a weird special type we use to produce our hard-coded
// "version": 4 in the JSON serialization.
type stateVersionV4 struct{}

// MarshalJSON always emits the bare JSON number 4.
func (sv stateVersionV4) MarshalJSON() ([]byte, error) {
	return []byte("4"), nil
}

// UnmarshalJSON ignores its input: by the time we're decoding into a
// stateV4 we already know the file is version 4.
func (sv stateVersionV4) UnmarshalJSON([]byte) error {
	return nil
}
558
559type sortResourcesV4 []resourceStateV4
560
561func (sr sortResourcesV4) Len() int { return len(sr) }
562func (sr sortResourcesV4) Swap(i, j int) { sr[i], sr[j] = sr[j], sr[i] }
563func (sr sortResourcesV4) Less(i, j int) bool {
564 switch {
565 case sr[i].Mode != sr[j].Mode:
566 return sr[i].Mode < sr[j].Mode
567 case sr[i].Type != sr[j].Type:
568 return sr[i].Type < sr[j].Type
569 case sr[i].Name != sr[j].Name:
570 return sr[i].Name < sr[j].Name
571 default:
572 return false
573 }
574}
575
// sortInstancesV4 implements sort.Interface to give a resource's instance
// objects a stable serialization order: by index key, then by deposed key.
type sortInstancesV4 []instanceObjectStateV4

func (si sortInstancesV4) Len() int { return len(si) }
func (si sortInstancesV4) Swap(i, j int) { si[i], si[j] = si[j], si[i] }

// Less orders nil (unkeyed) index keys first, then compares int keys
// numerically and string keys lexically, falling back to the deposed key
// as a tiebreaker.
//
// NOTE(review): when the two keys have different non-nil dynamic types
// (one int, one string), Less returns true for both orderings, which
// violates the sort contract; presumably a single resource's instances
// always share one key type so this case never arises — confirm.
func (si sortInstancesV4) Less(i, j int) bool {
	ki := si[i].IndexKey
	kj := si[j].IndexKey
	if ki != kj {
		if (ki == nil) != (kj == nil) {
			// Exactly one key is nil; nil sorts first.
			return ki == nil
		}
		if kii, isInt := ki.(int); isInt {
			if kji, isInt := kj.(int); isInt {
				return kii < kji
			}
			return true
		}
		if kis, isStr := ki.(string); isStr {
			if kjs, isStr := kj.(string); isStr {
				return kis < kjs
			}
			return true
		}
	}
	if si[i].Deposed != si[j].Deposed {
		return si[i].Deposed < si[j].Deposed
	}
	return false
}
diff --git a/vendor/github.com/hashicorp/terraform/states/statefile/write.go b/vendor/github.com/hashicorp/terraform/states/statefile/write.go
new file mode 100644
index 0000000..548ba8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/statefile/write.go
@@ -0,0 +1,17 @@
1package statefile
2
3import (
4 "io"
5
6 tfversion "github.com/hashicorp/terraform/version"
7)
8
9// Write writes the given state to the given writer in the current state
10// serialization format.
11func Write(s *File, w io.Writer) error {
12 // Always record the current terraform version in the state.
13 s.TerraformVersion = tfversion.SemVer
14
15 diags := writeStateV4(s, w)
16 return diags.Err()
17}
diff --git a/vendor/github.com/hashicorp/terraform/states/sync.go b/vendor/github.com/hashicorp/terraform/states/sync.go
new file mode 100644
index 0000000..a377446
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/states/sync.go
@@ -0,0 +1,537 @@
1package states
2
3import (
4 "log"
5 "sync"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/zclconf/go-cty/cty"
9)
10
11// SyncState is a wrapper around State that provides concurrency-safe access to
12// various common operations that occur during a Terraform graph walk, or other
13// similar concurrent contexts.
14//
15// When a SyncState wrapper is in use, no concurrent direct access to the
16// underlying objects is permitted unless the caller first acquires an explicit
17// lock, using the Lock and Unlock methods. Most callers should _not_
18// explicitly lock, and should instead use the other methods of this type that
19// handle locking automatically.
20//
21// Since SyncState is able to safely consolidate multiple updates into a single
22// atomic operation, many of its methods are at a higher level than those
23// of the underlying types, and operate on the state as a whole rather than
24// on individual sub-structures of the state.
25//
26// SyncState can only protect against races within its own methods. It cannot
27// provide any guarantees about the order in which concurrent operations will
28// be processed, so callers may still need to employ higher-level techniques
29// for ensuring correct operation sequencing, such as building and walking
30// a dependency graph.
31type SyncState struct {
32 state *State
33 lock sync.RWMutex
34}
35
36// Module returns a snapshot of the state of the module instance with the given
37// address, or nil if no such module is tracked.
38//
39// The return value is a pointer to a copy of the module state, which the
40// caller may then freely access and mutate. However, since the module state
41// tends to be a large data structure with many child objects, where possible
42// callers should prefer to use a more granular accessor to access a child
43// module directly, and thus reduce the amount of copying required.
44func (s *SyncState) Module(addr addrs.ModuleInstance) *Module {
45 s.lock.RLock()
46 ret := s.state.Module(addr).DeepCopy()
47 s.lock.RUnlock()
48 return ret
49}
50
51// RemoveModule removes the entire state for the given module, taking with
52// it any resources associated with the module. This should generally be
53// called only for modules whose resources have all been destroyed, but
54// that is not enforced by this method.
55func (s *SyncState) RemoveModule(addr addrs.ModuleInstance) {
56 s.lock.Lock()
57 defer s.lock.Unlock()
58
59 s.state.RemoveModule(addr)
60}
61
62// OutputValue returns a snapshot of the state of the output value with the
63// given address, or nil if no such output value is tracked.
64//
65// The return value is a pointer to a copy of the output value state, which the
66// caller may then freely access and mutate.
67func (s *SyncState) OutputValue(addr addrs.AbsOutputValue) *OutputValue {
68 s.lock.RLock()
69 ret := s.state.OutputValue(addr).DeepCopy()
70 s.lock.RUnlock()
71 return ret
72}
73
74// SetOutputValue writes a given output value into the state, overwriting
75// any existing value of the same name.
76//
77// If the module containing the output is not yet tracked in state then it
78// be added as a side-effect.
79func (s *SyncState) SetOutputValue(addr addrs.AbsOutputValue, value cty.Value, sensitive bool) {
80 s.lock.Lock()
81 defer s.lock.Unlock()
82
83 ms := s.state.EnsureModule(addr.Module)
84 ms.SetOutputValue(addr.OutputValue.Name, value, sensitive)
85}
86
87// RemoveOutputValue removes the stored value for the output value with the
88// given address.
89//
90// If this results in its containing module being empty, the module will be
91// pruned from the state as a side-effect.
92func (s *SyncState) RemoveOutputValue(addr addrs.AbsOutputValue) {
93 s.lock.Lock()
94 defer s.lock.Unlock()
95
96 ms := s.state.Module(addr.Module)
97 if ms == nil {
98 return
99 }
100 ms.RemoveOutputValue(addr.OutputValue.Name)
101 s.maybePruneModule(addr.Module)
102}
103
104// LocalValue returns the current value associated with the given local value
105// address.
106func (s *SyncState) LocalValue(addr addrs.AbsLocalValue) cty.Value {
107 s.lock.RLock()
108 // cty.Value is immutable, so we don't need any extra copying here.
109 ret := s.state.LocalValue(addr)
110 s.lock.RUnlock()
111 return ret
112}
113
114// SetLocalValue writes a given output value into the state, overwriting
115// any existing value of the same name.
116//
117// If the module containing the local value is not yet tracked in state then it
118// will be added as a side-effect.
119func (s *SyncState) SetLocalValue(addr addrs.AbsLocalValue, value cty.Value) {
120 s.lock.Lock()
121 defer s.lock.Unlock()
122
123 ms := s.state.EnsureModule(addr.Module)
124 ms.SetLocalValue(addr.LocalValue.Name, value)
125}
126
127// RemoveLocalValue removes the stored value for the local value with the
128// given address.
129//
130// If this results in its containing module being empty, the module will be
131// pruned from the state as a side-effect.
132func (s *SyncState) RemoveLocalValue(addr addrs.AbsLocalValue) {
133 s.lock.Lock()
134 defer s.lock.Unlock()
135
136 ms := s.state.Module(addr.Module)
137 if ms == nil {
138 return
139 }
140 ms.RemoveLocalValue(addr.LocalValue.Name)
141 s.maybePruneModule(addr.Module)
142}
143
144// Resource returns a snapshot of the state of the resource with the given
145// address, or nil if no such resource is tracked.
146//
147// The return value is a pointer to a copy of the resource state, which the
148// caller may then freely access and mutate.
149func (s *SyncState) Resource(addr addrs.AbsResource) *Resource {
150 s.lock.RLock()
151 ret := s.state.Resource(addr).DeepCopy()
152 s.lock.RUnlock()
153 return ret
154}
155
156// ResourceInstance returns a snapshot of the state the resource instance with
157// the given address, or nil if no such instance is tracked.
158//
159// The return value is a pointer to a copy of the instance state, which the
160// caller may then freely access and mutate.
161func (s *SyncState) ResourceInstance(addr addrs.AbsResourceInstance) *ResourceInstance {
162 s.lock.RLock()
163 ret := s.state.ResourceInstance(addr).DeepCopy()
164 s.lock.RUnlock()
165 return ret
166}
167
168// ResourceInstanceObject returns a snapshot of the current instance object
169// of the given generation belonging to the instance with the given address,
170// or nil if no such object is tracked..
171//
172// The return value is a pointer to a copy of the object, which the caller may
173// then freely access and mutate.
174func (s *SyncState) ResourceInstanceObject(addr addrs.AbsResourceInstance, gen Generation) *ResourceInstanceObjectSrc {
175 s.lock.RLock()
176 defer s.lock.RUnlock()
177
178 inst := s.state.ResourceInstance(addr)
179 if inst == nil {
180 return nil
181 }
182 return inst.GetGeneration(gen).DeepCopy()
183}
184
185// SetResourceMeta updates the resource-level metadata for the resource at
186// the given address, creating the containing module state and resource state
187// as a side-effect if not already present.
188func (s *SyncState) SetResourceMeta(addr addrs.AbsResource, eachMode EachMode, provider addrs.AbsProviderConfig) {
189 s.lock.Lock()
190 defer s.lock.Unlock()
191
192 ms := s.state.EnsureModule(addr.Module)
193 ms.SetResourceMeta(addr.Resource, eachMode, provider)
194}
195
196// RemoveResource removes the entire state for the given resource, taking with
197// it any instances associated with the resource. This should generally be
198// called only for resource objects whose instances have all been destroyed,
199// but that is not enforced by this method. (Use RemoveResourceIfEmpty instead
200// to safely check first.)
201func (s *SyncState) RemoveResource(addr addrs.AbsResource) {
202 s.lock.Lock()
203 defer s.lock.Unlock()
204
205 ms := s.state.EnsureModule(addr.Module)
206 ms.RemoveResource(addr.Resource)
207 s.maybePruneModule(addr.Module)
208}
209
210// RemoveResourceIfEmpty is similar to RemoveResource but first checks to
211// make sure there are no instances or objects left in the resource.
212//
213// Returns true if the resource was removed, or false if remaining child
214// objects prevented its removal. Returns true also if the resource was
215// already absent, and thus no action needed to be taken.
216func (s *SyncState) RemoveResourceIfEmpty(addr addrs.AbsResource) bool {
217 s.lock.Lock()
218 defer s.lock.Unlock()
219
220 ms := s.state.Module(addr.Module)
221 if ms == nil {
222 return true // nothing to do
223 }
224 rs := ms.Resource(addr.Resource)
225 if rs == nil {
226 return true // nothing to do
227 }
228 if len(rs.Instances) != 0 {
229 // We don't check here for the possibility of instances that exist
230 // but don't have any objects because it's the responsibility of the
231 // instance-mutation methods to prune those away automatically.
232 return false
233 }
234 ms.RemoveResource(addr.Resource)
235 s.maybePruneModule(addr.Module)
236 return true
237}
238
239// MaybeFixUpResourceInstanceAddressForCount deals with the situation where a
240// resource has changed from having "count" set to not set, or vice-versa, and
241// so we need to rename the zeroth instance key to no key at all, or vice-versa.
242//
243// Set countEnabled to true if the resource has count set in its new
244// configuration, or false if it does not.
245//
246// The state is modified in-place if necessary, moving a resource instance
247// between the two addresses. The return value is true if a change was made,
248// and false otherwise.
249func (s *SyncState) MaybeFixUpResourceInstanceAddressForCount(addr addrs.AbsResource, countEnabled bool) bool {
250 s.lock.Lock()
251 defer s.lock.Unlock()
252
253 ms := s.state.Module(addr.Module)
254 if ms == nil {
255 return false
256 }
257
258 relAddr := addr.Resource
259 rs := ms.Resource(relAddr)
260 if rs == nil {
261 return false
262 }
263 huntKey := addrs.NoKey
264 replaceKey := addrs.InstanceKey(addrs.IntKey(0))
265 if !countEnabled {
266 huntKey, replaceKey = replaceKey, huntKey
267 }
268
269 is, exists := rs.Instances[huntKey]
270 if !exists {
271 return false
272 }
273
274 if _, exists := rs.Instances[replaceKey]; exists {
275 // If the replacement key also exists then we'll do nothing and keep both.
276 return false
277 }
278
279 // If we get here then we need to "rename" from hunt to replace
280 rs.Instances[replaceKey] = is
281 delete(rs.Instances, huntKey)
282 return true
283}
284
285// SetResourceInstanceCurrent saves the given instance object as the current
286// generation of the resource instance with the given address, simulataneously
287// updating the recorded provider configuration address, dependencies, and
288// resource EachMode.
289//
290// Any existing current instance object for the given resource is overwritten.
291// Set obj to nil to remove the primary generation object altogether. If there
292// are no deposed objects then the instance as a whole will be removed, which
293// may in turn also remove the containing module if it becomes empty.
294//
295// The caller must ensure that the given ResourceInstanceObject is not
296// concurrently mutated during this call, but may be freely used again once
297// this function returns.
298//
299// The provider address and "each mode" are resource-wide settings and so they
300// are updated for all other instances of the same resource as a side-effect of
301// this call.
302//
303// If the containing module for this resource or the resource itself are not
304// already tracked in state then they will be added as a side-effect.
305func (s *SyncState) SetResourceInstanceCurrent(addr addrs.AbsResourceInstance, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
306 s.lock.Lock()
307 defer s.lock.Unlock()
308
309 ms := s.state.EnsureModule(addr.Module)
310 ms.SetResourceInstanceCurrent(addr.Resource, obj.DeepCopy(), provider)
311 s.maybePruneModule(addr.Module)
312}
313
314// SetResourceInstanceDeposed saves the given instance object as a deposed
315// generation of the resource instance with the given address and deposed key.
316//
317// Call this method only for pre-existing deposed objects that already have
318// a known DeposedKey. For example, this method is useful if reloading objects
319// that were persisted to a state file. To mark the current object as deposed,
320// use DeposeResourceInstanceObject instead.
321//
322// The caller must ensure that the given ResourceInstanceObject is not
323// concurrently mutated during this call, but may be freely used again once
324// this function returns.
325//
326// The resource that contains the given instance must already exist in the
327// state, or this method will panic. Use Resource to check first if its
328// presence is not already guaranteed.
329//
330// Any existing current instance object for the given resource and deposed key
331// is overwritten. Set obj to nil to remove the deposed object altogether. If
332// the instance is left with no objects after this operation then it will
333// be removed from its containing resource altogether.
334//
335// If the containing module for this resource or the resource itself are not
336// already tracked in state then they will be added as a side-effect.
337func (s *SyncState) SetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey, obj *ResourceInstanceObjectSrc, provider addrs.AbsProviderConfig) {
338 s.lock.Lock()
339 defer s.lock.Unlock()
340
341 ms := s.state.EnsureModule(addr.Module)
342 ms.SetResourceInstanceDeposed(addr.Resource, key, obj.DeepCopy(), provider)
343 s.maybePruneModule(addr.Module)
344}
345
346// DeposeResourceInstanceObject moves the current instance object for the
347// given resource instance address into the deposed set, leaving the instance
348// without a current object.
349//
350// The return value is the newly-allocated deposed key, or NotDeposed if the
351// given instance is already lacking a current object.
352//
353// If the containing module for this resource or the resource itself are not
354// already tracked in state then there cannot be a current object for the
355// given instance, and so NotDeposed will be returned without modifying the
356// state at all.
357func (s *SyncState) DeposeResourceInstanceObject(addr addrs.AbsResourceInstance) DeposedKey {
358 s.lock.Lock()
359 defer s.lock.Unlock()
360
361 ms := s.state.Module(addr.Module)
362 if ms == nil {
363 return NotDeposed
364 }
365
366 return ms.deposeResourceInstanceObject(addr.Resource, NotDeposed)
367}
368
369// DeposeResourceInstanceObjectForceKey is like DeposeResourceInstanceObject
370// but uses a pre-allocated key. It's the caller's responsibility to ensure
371// that there aren't any races to use a particular key; this method will panic
372// if the given key is already in use.
373func (s *SyncState) DeposeResourceInstanceObjectForceKey(addr addrs.AbsResourceInstance, forcedKey DeposedKey) {
374 s.lock.Lock()
375 defer s.lock.Unlock()
376
377 if forcedKey == NotDeposed {
378 // Usage error: should use DeposeResourceInstanceObject in this case
379 panic("DeposeResourceInstanceObjectForceKey called without forced key")
380 }
381
382 ms := s.state.Module(addr.Module)
383 if ms == nil {
384 return // Nothing to do, since there can't be any current object either.
385 }
386
387 ms.deposeResourceInstanceObject(addr.Resource, forcedKey)
388}
389
390// ForgetResourceInstanceAll removes the record of all objects associated with
391// the specified resource instance, if present. If not present, this is a no-op.
392func (s *SyncState) ForgetResourceInstanceAll(addr addrs.AbsResourceInstance) {
393 s.lock.Lock()
394 defer s.lock.Unlock()
395
396 ms := s.state.Module(addr.Module)
397 if ms == nil {
398 return
399 }
400 ms.ForgetResourceInstanceAll(addr.Resource)
401 s.maybePruneModule(addr.Module)
402}
403
404// ForgetResourceInstanceDeposed removes the record of the deposed object with
405// the given address and key, if present. If not present, this is a no-op.
406func (s *SyncState) ForgetResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) {
407 s.lock.Lock()
408 defer s.lock.Unlock()
409
410 ms := s.state.Module(addr.Module)
411 if ms == nil {
412 return
413 }
414 ms.ForgetResourceInstanceDeposed(addr.Resource, key)
415 s.maybePruneModule(addr.Module)
416}
417
418// MaybeRestoreResourceInstanceDeposed will restore the deposed object with the
419// given key on the specified resource as the current object for that instance
420// if and only if that would not cause us to forget an existing current
421// object for that instance.
422//
423// Returns true if the object was restored to current, or false if no change
424// was made at all.
425func (s *SyncState) MaybeRestoreResourceInstanceDeposed(addr addrs.AbsResourceInstance, key DeposedKey) bool {
426 s.lock.Lock()
427 defer s.lock.Unlock()
428
429 if key == NotDeposed {
430 panic("MaybeRestoreResourceInstanceDeposed called without DeposedKey")
431 }
432
433 ms := s.state.Module(addr.Module)
434 if ms == nil {
435 // Nothing to do, since the specified deposed object cannot exist.
436 return false
437 }
438
439 return ms.maybeRestoreResourceInstanceDeposed(addr.Resource, key)
440}
441
442// RemovePlannedResourceInstanceObjects removes from the state any resource
443// instance objects that have the status ObjectPlanned, indiciating that they
444// are just transient placeholders created during planning.
445//
446// Note that this does not restore any "ready" or "tainted" object that might
447// have been present before the planned object was written. The only real use
448// for this method is in preparing the state created during a refresh walk,
449// where we run the planning step for certain instances just to create enough
450// information to allow correct expression evaluation within provider and
451// data resource blocks. Discarding planned instances in that case is okay
452// because the refresh phase only creates planned objects to stand in for
453// objects that don't exist yet, and thus the planned object must have been
454// absent before by definition.
455func (s *SyncState) RemovePlannedResourceInstanceObjects() {
456 // TODO: Merge together the refresh and plan phases into a single walk,
457 // so we can remove the need to create this "partial plan" during refresh
458 // that we then need to clean up before proceeding.
459
460 s.lock.Lock()
461 defer s.lock.Unlock()
462
463 for _, ms := range s.state.Modules {
464 moduleAddr := ms.Addr
465
466 for _, rs := range ms.Resources {
467 resAddr := rs.Addr
468
469 for ik, is := range rs.Instances {
470 instAddr := resAddr.Instance(ik)
471
472 if is.Current != nil && is.Current.Status == ObjectPlanned {
473 // Setting the current instance to nil removes it from the
474 // state altogether if there are not also deposed instances.
475 ms.SetResourceInstanceCurrent(instAddr, nil, rs.ProviderConfig)
476 }
477
478 for dk, obj := range is.Deposed {
479 // Deposed objects should never be "planned", but we'll
480 // do this anyway for the sake of completeness.
481 if obj.Status == ObjectPlanned {
482 ms.ForgetResourceInstanceDeposed(instAddr, dk)
483 }
484 }
485 }
486 }
487
488 // We may have deleted some objects, which means that we may have
489 // left a module empty, and so we must prune to preserve the invariant
490 // that only the root module is allowed to be empty.
491 s.maybePruneModule(moduleAddr)
492 }
493}
494
495// Lock acquires an explicit lock on the state, allowing direct read and write
496// access to the returned state object. The caller must call Unlock once
497// access is no longer needed, and then immediately discard the state pointer
498// pointer.
499//
500// Most callers should not use this. Instead, use the concurrency-safe
501// accessors and mutators provided directly on SyncState.
502func (s *SyncState) Lock() *State {
503 s.lock.Lock()
504 return s.state
505}
506
507// Unlock releases a lock previously acquired by Lock, at which point the
508// caller must cease all use of the state pointer that was returned.
509//
510// Do not call this method except to end an explicit lock acquired by
511// Lock. If a caller calls Unlock without first holding the lock, behavior
512// is undefined.
513func (s *SyncState) Unlock() {
514 s.lock.Unlock()
515}
516
517// maybePruneModule will remove a module from the state altogether if it is
518// empty, unless it's the root module which must always be present.
519//
520// This helper method is not concurrency-safe on its own, so must only be
521// called while the caller is already holding the lock for writing.
522func (s *SyncState) maybePruneModule(addr addrs.ModuleInstance) {
523 if addr.IsRoot() {
524 // We never prune the root.
525 return
526 }
527
528 ms := s.state.Module(addr)
529 if ms == nil {
530 return
531 }
532
533 if ms.empty() {
534 log.Printf("[TRACE] states.SyncState: pruning %s because it is empty", addr)
535 s.state.RemoveModule(addr)
536 }
537}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
index f133cc2..afdba99 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context.go
@@ -1,20 +1,26 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "bytes"
4 "context" 5 "context"
5 "fmt" 6 "fmt"
6 "log" 7 "log"
7 "sort"
8 "strings" 8 "strings"
9 "sync" 9 "sync"
10 10
11 "github.com/hashicorp/terraform/tfdiags"
12
13 "github.com/hashicorp/go-multierror"
14 "github.com/hashicorp/hcl" 11 "github.com/hashicorp/hcl"
12 "github.com/zclconf/go-cty/cty"
13
14 "github.com/hashicorp/terraform/addrs"
15 "github.com/hashicorp/terraform/config" 15 "github.com/hashicorp/terraform/config"
16 "github.com/hashicorp/terraform/config/module" 16 "github.com/hashicorp/terraform/configs"
17 "github.com/hashicorp/terraform/version" 17 "github.com/hashicorp/terraform/lang"
18 "github.com/hashicorp/terraform/plans"
19 "github.com/hashicorp/terraform/providers"
20 "github.com/hashicorp/terraform/provisioners"
21 "github.com/hashicorp/terraform/states"
22 "github.com/hashicorp/terraform/states/statefile"
23 "github.com/hashicorp/terraform/tfdiags"
18) 24)
19 25
20// InputMode defines what sort of input will be asked for when Input 26// InputMode defines what sort of input will be asked for when Input
@@ -51,19 +57,18 @@ var (
51// ContextOpts are the user-configurable options to create a context with 57// ContextOpts are the user-configurable options to create a context with
52// NewContext. 58// NewContext.
53type ContextOpts struct { 59type ContextOpts struct {
54 Meta *ContextMeta 60 Config *configs.Config
55 Destroy bool 61 Changes *plans.Changes
56 Diff *Diff 62 State *states.State
57 Hooks []Hook 63 Targets []addrs.Targetable
58 Module *module.Tree 64 Variables InputValues
59 Parallelism int 65 Meta *ContextMeta
60 State *State 66 Destroy bool
61 StateFutureAllowed bool 67
62 ProviderResolver ResourceProviderResolver 68 Hooks []Hook
63 Provisioners map[string]ResourceProvisionerFactory 69 Parallelism int
64 Shadow bool 70 ProviderResolver providers.Resolver
65 Targets []string 71 Provisioners map[string]ProvisionerFactory
66 Variables map[string]interface{}
67 72
68 // If non-nil, will apply as additional constraints on the provider 73 // If non-nil, will apply as additional constraints on the provider
69 // plugins that will be requested from the provider resolver. 74 // plugins that will be requested from the provider resolver.
@@ -83,32 +88,25 @@ type ContextMeta struct {
83 88
84// Context represents all the context that Terraform needs in order to 89// Context represents all the context that Terraform needs in order to
85// perform operations on infrastructure. This structure is built using 90// perform operations on infrastructure. This structure is built using
86// NewContext. See the documentation for that. 91// NewContext.
87//
88// Extra functions on Context can be found in context_*.go files.
89type Context struct { 92type Context struct {
90 // Maintainer note: Anytime this struct is changed, please verify 93 config *configs.Config
91 // that newShadowContext still does the right thing. Tests should 94 changes *plans.Changes
92 // fail regardless but putting this note here as well. 95 state *states.State
96 targets []addrs.Targetable
97 variables InputValues
98 meta *ContextMeta
99 destroy bool
93 100
94 components contextComponentFactory
95 destroy bool
96 diff *Diff
97 diffLock sync.RWMutex
98 hooks []Hook 101 hooks []Hook
99 meta *ContextMeta 102 components contextComponentFactory
100 module *module.Tree 103 schemas *Schemas
101 sh *stopHook 104 sh *stopHook
102 shadow bool
103 state *State
104 stateLock sync.RWMutex
105 targets []string
106 uiInput UIInput 105 uiInput UIInput
107 variables map[string]interface{}
108 106
109 l sync.Mutex // Lock acquired during any task 107 l sync.Mutex // Lock acquired during any task
110 parallelSem Semaphore 108 parallelSem Semaphore
111 providerInputConfig map[string]map[string]interface{} 109 providerInputConfig map[string]map[string]cty.Value
112 providerSHA256s map[string][]byte 110 providerSHA256s map[string][]byte
113 runLock sync.Mutex 111 runLock sync.Mutex
114 runCond *sync.Cond 112 runCond *sync.Cond
@@ -117,17 +115,23 @@ type Context struct {
117 shadowErr error 115 shadowErr error
118} 116}
119 117
118// (additional methods on Context can be found in context_*.go files.)
119
120// NewContext creates a new Context structure. 120// NewContext creates a new Context structure.
121// 121//
122// Once a Context is creator, the pointer values within ContextOpts 122// Once a Context is created, the caller must not access or mutate any of
123// should not be mutated in any way, since the pointers are copied, not 123// the objects referenced (directly or indirectly) by the ContextOpts fields.
124// the values themselves. 124//
125func NewContext(opts *ContextOpts) (*Context, error) { 125// If the returned diagnostics contains errors then the resulting context is
126 // Validate the version requirement if it is given 126// invalid and must not be used.
127 if opts.Module != nil { 127func NewContext(opts *ContextOpts) (*Context, tfdiags.Diagnostics) {
128 if err := CheckRequiredVersion(opts.Module); err != nil { 128 log.Printf("[TRACE] terraform.NewContext: starting")
129 return nil, err 129 diags := CheckCoreVersionRequirements(opts.Config)
130 } 130 // If version constraints are not met then we'll bail early since otherwise
131 // we're likely to just see a bunch of other errors related to
132 // incompatibilities, which could be overwhelming for the user.
133 if diags.HasErrors() {
134 return nil, diags
131 } 135 }
132 136
133 // Copy all the hooks and add our stop hook. We don't append directly 137 // Copy all the hooks and add our stop hook. We don't append directly
@@ -139,21 +143,9 @@ func NewContext(opts *ContextOpts) (*Context, error) {
139 143
140 state := opts.State 144 state := opts.State
141 if state == nil { 145 if state == nil {
142 state = new(State) 146 state = states.NewState()
143 state.init()
144 }
145
146 // If our state is from the future, then error. Callers can avoid
147 // this error by explicitly setting `StateFutureAllowed`.
148 if err := CheckStateVersion(state); err != nil && !opts.StateFutureAllowed {
149 return nil, err
150 } 147 }
151 148
152 // Explicitly reset our state version to our current version so that
153 // any operations we do will write out that our latest version
154 // has run.
155 state.TFVersion = version.Version
156
157 // Determine parallelism, default to 10. We do this both to limit 149 // Determine parallelism, default to 10. We do this both to limit
158 // CPU pressure but also to have an extra guard against rate throttling 150 // CPU pressure but also to have an extra guard against rate throttling
159 // from providers. 151 // from providers.
@@ -168,60 +160,84 @@ func NewContext(opts *ContextOpts) (*Context, error) {
168 // 2 - Take values specified in -var flags, overriding values 160 // 2 - Take values specified in -var flags, overriding values
169 // set by environment variables if necessary. This includes 161 // set by environment variables if necessary. This includes
170 // values taken from -var-file in addition. 162 // values taken from -var-file in addition.
171 variables := make(map[string]interface{}) 163 var variables InputValues
172 if opts.Module != nil { 164 if opts.Config != nil {
173 var err error 165 // Default variables from the configuration seed our map.
174 variables, err = Variables(opts.Module, opts.Variables) 166 variables = DefaultVariableValues(opts.Config.Module.Variables)
175 if err != nil {
176 return nil, err
177 }
178 } 167 }
168 // Variables provided by the caller (from CLI, environment, etc) can
169 // override the defaults.
170 variables = variables.Override(opts.Variables)
179 171
180 // Bind available provider plugins to the constraints in config 172 // Bind available provider plugins to the constraints in config
181 var providers map[string]ResourceProviderFactory 173 var providerFactories map[string]providers.Factory
182 if opts.ProviderResolver != nil { 174 if opts.ProviderResolver != nil {
183 var err error 175 deps := ConfigTreeDependencies(opts.Config, state)
184 deps := ModuleTreeDependencies(opts.Module, state)
185 reqd := deps.AllPluginRequirements() 176 reqd := deps.AllPluginRequirements()
186 if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify { 177 if opts.ProviderSHA256s != nil && !opts.SkipProviderVerify {
187 reqd.LockExecutables(opts.ProviderSHA256s) 178 reqd.LockExecutables(opts.ProviderSHA256s)
188 } 179 }
189 providers, err = resourceProviderFactories(opts.ProviderResolver, reqd) 180 log.Printf("[TRACE] terraform.NewContext: resolving provider version selections")
190 if err != nil { 181
191 return nil, err 182 var providerDiags tfdiags.Diagnostics
183 providerFactories, providerDiags = resourceProviderFactories(opts.ProviderResolver, reqd)
184 diags = diags.Append(providerDiags)
185
186 if diags.HasErrors() {
187 return nil, diags
192 } 188 }
193 } else { 189 } else {
194 providers = make(map[string]ResourceProviderFactory) 190 providerFactories = make(map[string]providers.Factory)
195 } 191 }
196 192
197 diff := opts.Diff 193 components := &basicComponentFactory{
198 if diff == nil { 194 providers: providerFactories,
199 diff = &Diff{} 195 provisioners: opts.Provisioners,
200 } 196 }
201 197
198 log.Printf("[TRACE] terraform.NewContext: loading provider schemas")
199 schemas, err := LoadSchemas(opts.Config, opts.State, components)
200 if err != nil {
201 diags = diags.Append(err)
202 return nil, diags
203 }
204
205 changes := opts.Changes
206 if changes == nil {
207 changes = plans.NewChanges()
208 }
209
210 config := opts.Config
211 if config == nil {
212 config = configs.NewEmptyConfig()
213 }
214
215 log.Printf("[TRACE] terraform.NewContext: complete")
216
202 return &Context{ 217 return &Context{
203 components: &basicComponentFactory{ 218 components: components,
204 providers: providers, 219 schemas: schemas,
205 provisioners: opts.Provisioners, 220 destroy: opts.Destroy,
206 }, 221 changes: changes,
207 destroy: opts.Destroy, 222 hooks: hooks,
208 diff: diff, 223 meta: opts.Meta,
209 hooks: hooks, 224 config: config,
210 meta: opts.Meta, 225 state: state,
211 module: opts.Module, 226 targets: opts.Targets,
212 shadow: opts.Shadow, 227 uiInput: opts.UIInput,
213 state: state, 228 variables: variables,
214 targets: opts.Targets,
215 uiInput: opts.UIInput,
216 variables: variables,
217 229
218 parallelSem: NewSemaphore(par), 230 parallelSem: NewSemaphore(par),
219 providerInputConfig: make(map[string]map[string]interface{}), 231 providerInputConfig: make(map[string]map[string]cty.Value),
220 providerSHA256s: opts.ProviderSHA256s, 232 providerSHA256s: opts.ProviderSHA256s,
221 sh: sh, 233 sh: sh,
222 }, nil 234 }, nil
223} 235}
224 236
237func (c *Context) Schemas() *Schemas {
238 return c.schemas
239}
240
225type ContextGraphOpts struct { 241type ContextGraphOpts struct {
226 // If true, validates the graph structure (checks for cycles). 242 // If true, validates the graph structure (checks for cycles).
227 Validate bool 243 Validate bool
@@ -233,7 +249,7 @@ type ContextGraphOpts struct {
233// Graph returns the graph used for the given operation type. 249// Graph returns the graph used for the given operation type.
234// 250//
235// The most extensive or complex graph type is GraphTypePlan. 251// The most extensive or complex graph type is GraphTypePlan.
236func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) { 252func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, tfdiags.Diagnostics) {
237 if opts == nil { 253 if opts == nil {
238 opts = &ContextGraphOpts{Validate: true} 254 opts = &ContextGraphOpts{Validate: true}
239 } 255 }
@@ -242,65 +258,71 @@ func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
242 switch typ { 258 switch typ {
243 case GraphTypeApply: 259 case GraphTypeApply:
244 return (&ApplyGraphBuilder{ 260 return (&ApplyGraphBuilder{
245 Module: c.module, 261 Config: c.config,
246 Diff: c.diff, 262 Changes: c.changes,
247 State: c.state, 263 State: c.state,
248 Providers: c.components.ResourceProviders(), 264 Components: c.components,
249 Provisioners: c.components.ResourceProvisioners(), 265 Schemas: c.schemas,
250 Targets: c.targets, 266 Targets: c.targets,
251 Destroy: c.destroy, 267 Destroy: c.destroy,
252 Validate: opts.Validate, 268 Validate: opts.Validate,
253 }).Build(RootModulePath) 269 }).Build(addrs.RootModuleInstance)
254 270
255 case GraphTypeInput:
256 // The input graph is just a slightly modified plan graph
257 fallthrough
258 case GraphTypeValidate: 271 case GraphTypeValidate:
259 // The validate graph is just a slightly modified plan graph 272 // The validate graph is just a slightly modified plan graph
260 fallthrough 273 fallthrough
261 case GraphTypePlan: 274 case GraphTypePlan:
262 // Create the plan graph builder 275 // Create the plan graph builder
263 p := &PlanGraphBuilder{ 276 p := &PlanGraphBuilder{
264 Module: c.module, 277 Config: c.config,
265 State: c.state, 278 State: c.state,
266 Providers: c.components.ResourceProviders(), 279 Components: c.components,
267 Targets: c.targets, 280 Schemas: c.schemas,
268 Validate: opts.Validate, 281 Targets: c.targets,
282 Validate: opts.Validate,
269 } 283 }
270 284
271 // Some special cases for other graph types shared with plan currently 285 // Some special cases for other graph types shared with plan currently
272 var b GraphBuilder = p 286 var b GraphBuilder = p
273 switch typ { 287 switch typ {
274 case GraphTypeInput:
275 b = InputGraphBuilder(p)
276 case GraphTypeValidate: 288 case GraphTypeValidate:
277 // We need to set the provisioners so those can be validated
278 p.Provisioners = c.components.ResourceProvisioners()
279
280 b = ValidateGraphBuilder(p) 289 b = ValidateGraphBuilder(p)
281 } 290 }
282 291
283 return b.Build(RootModulePath) 292 return b.Build(addrs.RootModuleInstance)
284 293
285 case GraphTypePlanDestroy: 294 case GraphTypePlanDestroy:
286 return (&DestroyPlanGraphBuilder{ 295 return (&DestroyPlanGraphBuilder{
287 Module: c.module, 296 Config: c.config,
288 State: c.state, 297 State: c.state,
289 Targets: c.targets, 298 Components: c.components,
290 Validate: opts.Validate, 299 Schemas: c.schemas,
291 }).Build(RootModulePath) 300 Targets: c.targets,
301 Validate: opts.Validate,
302 }).Build(addrs.RootModuleInstance)
292 303
293 case GraphTypeRefresh: 304 case GraphTypeRefresh:
294 return (&RefreshGraphBuilder{ 305 return (&RefreshGraphBuilder{
295 Module: c.module, 306 Config: c.config,
296 State: c.state, 307 State: c.state,
297 Providers: c.components.ResourceProviders(), 308 Components: c.components,
298 Targets: c.targets, 309 Schemas: c.schemas,
299 Validate: opts.Validate, 310 Targets: c.targets,
300 }).Build(RootModulePath) 311 Validate: opts.Validate,
301 } 312 }).Build(addrs.RootModuleInstance)
313
314 case GraphTypeEval:
315 return (&EvalGraphBuilder{
316 Config: c.config,
317 State: c.state,
318 Components: c.components,
319 Schemas: c.schemas,
320 }).Build(addrs.RootModuleInstance)
302 321
303 return nil, fmt.Errorf("unknown graph type: %s", typ) 322 default:
323 // Should never happen, because the above is exhaustive for all graph types.
324 panic(fmt.Errorf("unsupported graph type %s", typ))
325 }
304} 326}
305 327
306// ShadowError returns any errors caught during a shadow operation. 328// ShadowError returns any errors caught during a shadow operation.
@@ -333,141 +355,72 @@ func (c *Context) ShadowError() error {
333// State returns a copy of the current state associated with this context. 355// State returns a copy of the current state associated with this context.
334// 356//
335// This cannot safely be called in parallel with any other Context function. 357// This cannot safely be called in parallel with any other Context function.
336func (c *Context) State() *State { 358func (c *Context) State() *states.State {
337 return c.state.DeepCopy() 359 return c.state.DeepCopy()
338} 360}
339 361
340// Interpolater returns an Interpolater built on a copy of the state 362// Eval produces a scope in which expressions can be evaluated for
341// that can be used to test interpolation values. 363// the given module path.
342func (c *Context) Interpolater() *Interpolater { 364//
343 var varLock sync.Mutex 365// This method must first evaluate any ephemeral values (input variables, local
344 var stateLock sync.RWMutex 366// values, and output values) in the configuration. These ephemeral values are
345 return &Interpolater{ 367// not included in the persisted state, so they must be re-computed using other
346 Operation: walkApply, 368// values in the state before they can be properly evaluated. The updated
347 Meta: c.meta, 369// values are retained in the main state associated with the receiving context.
348 Module: c.module, 370//
349 State: c.state.DeepCopy(), 371// This function takes no action against remote APIs but it does need access
350 StateLock: &stateLock, 372// to all provider and provisioner instances in order to obtain their schemas
351 VariableValues: c.variables, 373// for type checking.
352 VariableValuesLock: &varLock, 374//
353 } 375// The result is an evaluation scope that can be used to resolve references
354} 376// against the root module. If the returned diagnostics contains errors then
355 377// the returned scope may be nil. If it is not nil then it may still be used
356// Input asks for input to fill variables and provider configurations. 378// to attempt expression evaluation or other analysis, but some expressions
357// This modifies the configuration in-place, so asking for Input twice 379// may not behave as expected.
358// may result in different UI output showing different current values. 380func (c *Context) Eval(path addrs.ModuleInstance) (*lang.Scope, tfdiags.Diagnostics) {
359func (c *Context) Input(mode InputMode) error { 381 // This is intended for external callers such as the "terraform console"
360 defer c.acquireRun("input")() 382 // command. Internally, we create an evaluator in c.walk before walking
361 383 // the graph, and create scopes in ContextGraphWalker.
362 if mode&InputModeVar != 0 {
363 // Walk the variables first for the root module. We walk them in
364 // alphabetical order for UX reasons.
365 rootConf := c.module.Config()
366 names := make([]string, len(rootConf.Variables))
367 m := make(map[string]*config.Variable)
368 for i, v := range rootConf.Variables {
369 names[i] = v.Name
370 m[v.Name] = v
371 }
372 sort.Strings(names)
373 for _, n := range names {
374 // If we only care about unset variables, then if the variable
375 // is set, continue on.
376 if mode&InputModeVarUnset != 0 {
377 if _, ok := c.variables[n]; ok {
378 continue
379 }
380 }
381
382 var valueType config.VariableType
383
384 v := m[n]
385 switch valueType = v.Type(); valueType {
386 case config.VariableTypeUnknown:
387 continue
388 case config.VariableTypeMap:
389 // OK
390 case config.VariableTypeList:
391 // OK
392 case config.VariableTypeString:
393 // OK
394 default:
395 panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
396 }
397
398 // If the variable is not already set, and the variable defines a
399 // default, use that for the value.
400 if _, ok := c.variables[n]; !ok {
401 if v.Default != nil {
402 c.variables[n] = v.Default.(string)
403 continue
404 }
405 }
406
407 // this should only happen during tests
408 if c.uiInput == nil {
409 log.Println("[WARN] Content.uiInput is nil")
410 continue
411 }
412
413 // Ask the user for a value for this variable
414 var value string
415 retry := 0
416 for {
417 var err error
418 value, err = c.uiInput.Input(&InputOpts{
419 Id: fmt.Sprintf("var.%s", n),
420 Query: fmt.Sprintf("var.%s", n),
421 Description: v.Description,
422 })
423 if err != nil {
424 return fmt.Errorf(
425 "Error asking for %s: %s", n, err)
426 }
427
428 if value == "" && v.Required() {
429 // Redo if it is required, but abort if we keep getting
430 // blank entries
431 if retry > 2 {
432 return fmt.Errorf("missing required value for %q", n)
433 }
434 retry++
435 continue
436 }
437
438 break
439 }
440
441 // no value provided, so don't set the variable at all
442 if value == "" {
443 continue
444 }
445
446 decoded, err := parseVariableAsHCL(n, value, valueType)
447 if err != nil {
448 return err
449 }
450
451 if decoded != nil {
452 c.variables[n] = decoded
453 }
454 }
455 }
456 384
457 if mode&InputModeProvider != 0 { 385 var diags tfdiags.Diagnostics
458 // Build the graph 386 defer c.acquireRun("eval")()
459 graph, err := c.Graph(GraphTypeInput, nil)
460 if err != nil {
461 return err
462 }
463 387
464 // Do the walk 388 // Start with a copy of state so that we don't affect any instances
465 if _, err := c.walk(graph, walkInput); err != nil { 389 // that other methods may have already returned.
466 return err 390 c.state = c.state.DeepCopy()
467 } 391 var walker *ContextGraphWalker
468 } 392
393 graph, graphDiags := c.Graph(GraphTypeEval, nil)
394 diags = diags.Append(graphDiags)
395 if !diags.HasErrors() {
396 var walkDiags tfdiags.Diagnostics
397 walker, walkDiags = c.walk(graph, walkEval)
398 diags = diags.Append(walker.NonFatalDiagnostics)
399 diags = diags.Append(walkDiags)
400 }
401
402 if walker == nil {
403 // If we skipped walking the graph (due to errors) then we'll just
404 // use a placeholder graph walker here, which'll refer to the
405 // unmodified state.
406 walker = c.graphWalker(walkEval)
407 }
408
409 // This is a bit weird since we don't normally evaluate outside of
410 // the context of a walk, but we'll "re-enter" our desired path here
411 // just to get hold of an EvalContext for it. GraphContextBuiltin
412 // caches its contexts, so we should get hold of the context that was
413 // previously used for evaluation here, unless we skipped walking.
414 evalCtx := walker.EnterPath(path)
415 return evalCtx.EvaluationScope(nil, EvalDataForNoInstanceKey), diags
416}
469 417
470 return nil 418// Interpolater is no longer used. Use Evaluator instead.
419//
420// The interpolator returned from this function will return an error on any use.
421func (c *Context) Interpolater() *Interpolater {
422 // FIXME: Remove this once all callers are updated to no longer use it.
423 return &Interpolater{}
471} 424}
472 425
473// Apply applies the changes represented by this context and returns 426// Apply applies the changes represented by this context and returns
@@ -484,23 +437,16 @@ func (c *Context) Input(mode InputMode) error {
484// State() method. Currently the helper/resource testing framework relies 437// State() method. Currently the helper/resource testing framework relies
485// on the absence of a returned state to determine if Destroy can be 438// on the absence of a returned state to determine if Destroy can be
486// called, so that will need to be refactored before this can be changed. 439// called, so that will need to be refactored before this can be changed.
487func (c *Context) Apply() (*State, error) { 440func (c *Context) Apply() (*states.State, tfdiags.Diagnostics) {
488 defer c.acquireRun("apply")() 441 defer c.acquireRun("apply")()
489 442
490 // Check there are no empty target parameter values
491 for _, target := range c.targets {
492 if target == "" {
493 return nil, fmt.Errorf("Target parameter must not have empty value")
494 }
495 }
496
497 // Copy our own state 443 // Copy our own state
498 c.state = c.state.DeepCopy() 444 c.state = c.state.DeepCopy()
499 445
500 // Build the graph. 446 // Build the graph.
501 graph, err := c.Graph(GraphTypeApply, nil) 447 graph, diags := c.Graph(GraphTypeApply, nil)
502 if err != nil { 448 if diags.HasErrors() {
503 return nil, err 449 return nil, diags
504 } 450 }
505 451
506 // Determine the operation 452 // Determine the operation
@@ -510,15 +456,30 @@ func (c *Context) Apply() (*State, error) {
510 } 456 }
511 457
512 // Walk the graph 458 // Walk the graph
513 walker, err := c.walk(graph, operation) 459 walker, walkDiags := c.walk(graph, operation)
514 if len(walker.ValidationErrors) > 0 { 460 diags = diags.Append(walker.NonFatalDiagnostics)
515 err = multierror.Append(err, walker.ValidationErrors...) 461 diags = diags.Append(walkDiags)
516 } 462
517 463 if c.destroy && !diags.HasErrors() {
518 // Clean out any unused things 464 // If we know we were trying to destroy objects anyway, and we
519 c.state.prune() 465 // completed without any errors, then we'll also prune out any
520 466 // leftover empty resource husks (left after all of the instances
521 return c.state, err 467 // of a resource with "count" or "for_each" are destroyed) to
468 // help ensure we end up with an _actually_ empty state, assuming
469 // we weren't destroying with -target here.
470 //
471 // (This doesn't actually take into account -target, but that should
472 // be okay because it doesn't throw away anything we can't recompute
473 // on a subsequent "terraform plan" run, if the resources are still
474 // present in the configuration. However, this _will_ cause "count = 0"
475 // resources to read as unknown during the next refresh walk, which
476 // may cause some additional churn if used in a data resource or
477 // provider block, until we remove refreshing as a separate walk and
478 // just do it as part of the plan walk.)
479 c.state.PruneResourceHusks()
480 }
481
482 return c.state, diags
522} 483}
523 484
524// Plan generates an execution plan for the given context. 485// Plan generates an execution plan for the given context.
@@ -528,38 +489,45 @@ func (c *Context) Apply() (*State, error) {
528// 489//
529// Plan also updates the diff of this context to be the diff generated 490// Plan also updates the diff of this context to be the diff generated
530// by the plan, so Apply can be called after. 491// by the plan, so Apply can be called after.
531func (c *Context) Plan() (*Plan, error) { 492func (c *Context) Plan() (*plans.Plan, tfdiags.Diagnostics) {
532 defer c.acquireRun("plan")() 493 defer c.acquireRun("plan")()
494 c.changes = plans.NewChanges()
533 495
534 // Check there are no empty target parameter values 496 var diags tfdiags.Diagnostics
535 for _, target := range c.targets { 497
536 if target == "" { 498 varVals := make(map[string]plans.DynamicValue, len(c.variables))
537 return nil, fmt.Errorf("Target parameter must not have empty value") 499 for k, iv := range c.variables {
500 // We use cty.DynamicPseudoType here so that we'll save both the
501 // value _and_ its dynamic type in the plan, so we can recover
502 // exactly the same value later.
503 dv, err := plans.NewDynamicValue(iv.Value, cty.DynamicPseudoType)
504 if err != nil {
505 diags = diags.Append(tfdiags.Sourceless(
506 tfdiags.Error,
507 "Failed to prepare variable value for plan",
508 fmt.Sprintf("The value for variable %q could not be serialized to store in the plan: %s.", k, err),
509 ))
510 continue
538 } 511 }
512 varVals[k] = dv
539 } 513 }
540 514
541 p := &Plan{ 515 p := &plans.Plan{
542 Module: c.module, 516 VariableValues: varVals,
543 Vars: c.variables, 517 TargetAddrs: c.targets,
544 State: c.state, 518 ProviderSHA256s: c.providerSHA256s,
545 Targets: c.targets,
546
547 TerraformVersion: version.String(),
548 ProviderSHA256s: c.providerSHA256s,
549 } 519 }
550 520
551 var operation walkOperation 521 var operation walkOperation
552 if c.destroy { 522 if c.destroy {
553 operation = walkPlanDestroy 523 operation = walkPlanDestroy
554 p.Destroy = true
555 } else { 524 } else {
556 // Set our state to be something temporary. We do this so that 525 // Set our state to be something temporary. We do this so that
557 // the plan can update a fake state so that variables work, then 526 // the plan can update a fake state so that variables work, then
558 // we replace it back with our old state. 527 // we replace it back with our old state.
559 old := c.state 528 old := c.state
560 if old == nil { 529 if old == nil {
561 c.state = &State{} 530 c.state = states.NewState()
562 c.state.init()
563 } else { 531 } else {
564 c.state = old.DeepCopy() 532 c.state = old.DeepCopy()
565 } 533 }
@@ -570,57 +538,27 @@ func (c *Context) Plan() (*Plan, error) {
570 operation = walkPlan 538 operation = walkPlan
571 } 539 }
572 540
573 // Setup our diff
574 c.diffLock.Lock()
575 c.diff = new(Diff)
576 c.diff.init()
577 c.diffLock.Unlock()
578
579 // Build the graph. 541 // Build the graph.
580 graphType := GraphTypePlan 542 graphType := GraphTypePlan
581 if c.destroy { 543 if c.destroy {
582 graphType = GraphTypePlanDestroy 544 graphType = GraphTypePlanDestroy
583 } 545 }
584 graph, err := c.Graph(graphType, nil) 546 graph, graphDiags := c.Graph(graphType, nil)
585 if err != nil { 547 diags = diags.Append(graphDiags)
586 return nil, err 548 if graphDiags.HasErrors() {
549 return nil, diags
587 } 550 }
588 551
589 // Do the walk 552 // Do the walk
590 walker, err := c.walk(graph, operation) 553 walker, walkDiags := c.walk(graph, operation)
591 if err != nil { 554 diags = diags.Append(walker.NonFatalDiagnostics)
592 return nil, err 555 diags = diags.Append(walkDiags)
556 if walkDiags.HasErrors() {
557 return nil, diags
593 } 558 }
594 p.Diff = c.diff 559 p.Changes = c.changes
595
596 // If this is true, it means we're running unit tests. In this case,
597 // we perform a deep copy just to ensure that all context tests also
598 // test that a diff is copy-able. This will panic if it fails. This
599 // is enabled during unit tests.
600 //
601 // This should never be true during production usage, but even if it is,
602 // it can't do any real harm.
603 if contextTestDeepCopyOnPlan {
604 p.Diff.DeepCopy()
605 }
606
607 /*
608 // We don't do the reverification during the new destroy plan because
609 // it will use a different apply process.
610 if X_legacyGraph {
611 // Now that we have a diff, we can build the exact graph that Apply will use
612 // and catch any possible cycles during the Plan phase.
613 if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
614 return nil, err
615 }
616 }
617 */
618 560
619 var errs error 561 return p, diags
620 if len(walker.ValidationErrors) > 0 {
621 errs = multierror.Append(errs, walker.ValidationErrors...)
622 }
623 return p, errs
624} 562}
625 563
626// Refresh goes through all the resources in the state and refreshes them 564// Refresh goes through all the resources in the state and refreshes them
@@ -629,27 +567,46 @@ func (c *Context) Plan() (*Plan, error) {
629// 567//
630// Even in the case an error is returned, the state may be returned and 568// Even in the case an error is returned, the state may be returned and
631// will potentially be partially updated. 569// will potentially be partially updated.
632func (c *Context) Refresh() (*State, error) { 570func (c *Context) Refresh() (*states.State, tfdiags.Diagnostics) {
633 defer c.acquireRun("refresh")() 571 defer c.acquireRun("refresh")()
634 572
635 // Copy our own state 573 // Copy our own state
636 c.state = c.state.DeepCopy() 574 c.state = c.state.DeepCopy()
637 575
576 // Refresh builds a partial changeset as part of its work because it must
577 // create placeholder stubs for any resource instances that'll be created
578 // in subsequent plan so that provider configurations and data resources
579 // can interpolate from them. This plan is always thrown away after
580 // the operation completes, restoring any existing changeset.
581 oldChanges := c.changes
582 defer func() { c.changes = oldChanges }()
583 c.changes = plans.NewChanges()
584
638 // Build the graph. 585 // Build the graph.
639 graph, err := c.Graph(GraphTypeRefresh, nil) 586 graph, diags := c.Graph(GraphTypeRefresh, nil)
640 if err != nil { 587 if diags.HasErrors() {
641 return nil, err 588 return nil, diags
642 } 589 }
643 590
644 // Do the walk 591 // Do the walk
645 if _, err := c.walk(graph, walkRefresh); err != nil { 592 _, walkDiags := c.walk(graph, walkRefresh)
646 return nil, err 593 diags = diags.Append(walkDiags)
647 } 594 if walkDiags.HasErrors() {
648 595 return nil, diags
649 // Clean out any unused things 596 }
650 c.state.prune() 597
651 598 // During our walk we will have created planned object placeholders in
652 return c.state, nil 599 // state for resource instances that are in configuration but not yet
600 // created. These were created only to allow expression evaluation to
601 // work properly in provider and data blocks during the walk and must
602 // now be discarded, since a subsequent plan walk is responsible for
603 // creating these "for real".
604 // TODO: Consolidate refresh and plan into a single walk, so that the
605 // refresh walk doesn't need to emulate various aspects of the plan
606 // walk in order to properly evaluate provider and data blocks.
607 c.state.SyncWrapper().RemovePlannedResourceInstanceObjects()
608
609 return c.state, diags
653} 610}
654 611
655// Stop stops the running task. 612// Stop stops the running task.
@@ -675,32 +632,33 @@ func (c *Context) Stop() {
675 632
676 // Grab the condition var before we exit 633 // Grab the condition var before we exit
677 if cond := c.runCond; cond != nil { 634 if cond := c.runCond; cond != nil {
635 log.Printf("[INFO] terraform: waiting for graceful stop to complete")
678 cond.Wait() 636 cond.Wait()
679 } 637 }
680 638
681 log.Printf("[WARN] terraform: stop complete") 639 log.Printf("[WARN] terraform: stop complete")
682} 640}
683 641
684// Validate validates the configuration and returns any warnings or errors. 642// Validate performs semantic validation of the configuration, and returning
643// any warnings or errors.
644//
645// Syntax and structural checks are performed by the configuration loader,
646// and so are not repeated here.
685func (c *Context) Validate() tfdiags.Diagnostics { 647func (c *Context) Validate() tfdiags.Diagnostics {
686 defer c.acquireRun("validate")() 648 defer c.acquireRun("validate")()
687 649
688 var diags tfdiags.Diagnostics 650 var diags tfdiags.Diagnostics
689 651
690 // Validate the configuration itself 652 // Validate input variables. We do this only for the values supplied
691 diags = diags.Append(c.module.Validate()) 653 // by the root module, since child module calls are validated when we
692 654 // visit their graph nodes.
693 // This only needs to be done for the root module, since inter-module 655 if c.config != nil {
694 // variables are validated in the module tree. 656 varDiags := checkInputVariables(c.config.Module.Variables, c.variables)
695 if config := c.module.Config(); config != nil { 657 diags = diags.Append(varDiags)
696 // Validate the user variables
697 for _, err := range smcUserVariables(config, c.variables) {
698 diags = diags.Append(err)
699 }
700 } 658 }
701 659
702 // If we have errors at this point, the graphing has no chance, 660 // If we have errors at this point then we probably won't be able to
703 // so just bail early. 661 // construct a graph without producing redundant errors, so we'll halt early.
704 if diags.HasErrors() { 662 if diags.HasErrors() {
705 return diags 663 return diags
706 } 664 }
@@ -709,48 +667,41 @@ func (c *Context) Validate() tfdiags.Diagnostics {
709 // We also validate the graph generated here, but this graph doesn't 667 // We also validate the graph generated here, but this graph doesn't
710 // necessarily match the graph that Plan will generate, so we'll validate the 668 // necessarily match the graph that Plan will generate, so we'll validate the
711 // graph again later after Planning. 669 // graph again later after Planning.
712 graph, err := c.Graph(GraphTypeValidate, nil) 670 graph, graphDiags := c.Graph(GraphTypeValidate, nil)
713 if err != nil { 671 diags = diags.Append(graphDiags)
714 diags = diags.Append(err) 672 if graphDiags.HasErrors() {
715 return diags 673 return diags
716 } 674 }
717 675
718 // Walk 676 // Walk
719 walker, err := c.walk(graph, walkValidate) 677 walker, walkDiags := c.walk(graph, walkValidate)
720 if err != nil { 678 diags = diags.Append(walker.NonFatalDiagnostics)
721 diags = diags.Append(err) 679 diags = diags.Append(walkDiags)
722 } 680 if walkDiags.HasErrors() {
723 681 return diags
724 sort.Strings(walker.ValidationWarnings)
725 sort.Slice(walker.ValidationErrors, func(i, j int) bool {
726 return walker.ValidationErrors[i].Error() < walker.ValidationErrors[j].Error()
727 })
728
729 for _, warn := range walker.ValidationWarnings {
730 diags = diags.Append(tfdiags.SimpleWarning(warn))
731 }
732 for _, err := range walker.ValidationErrors {
733 diags = diags.Append(err)
734 } 682 }
735 683
736 return diags 684 return diags
737} 685}
738 686
739// Module returns the module tree associated with this context. 687// Config returns the configuration tree associated with this context.
740func (c *Context) Module() *module.Tree { 688func (c *Context) Config() *configs.Config {
741 return c.module 689 return c.config
742} 690}
743 691
744// Variables will return the mapping of variables that were defined 692// Variables will return the mapping of variables that were defined
745// for this Context. If Input was called, this mapping may be different 693// for this Context. If Input was called, this mapping may be different
746// than what was given. 694// than what was given.
747func (c *Context) Variables() map[string]interface{} { 695func (c *Context) Variables() InputValues {
748 return c.variables 696 return c.variables
749} 697}
750 698
751// SetVariable sets a variable after a context has already been built. 699// SetVariable sets a variable after a context has already been built.
752func (c *Context) SetVariable(k string, v interface{}) { 700func (c *Context) SetVariable(k string, v cty.Value) {
753 c.variables[k] = v 701 c.variables[k] = &InputValue{
702 Value: v,
703 SourceType: ValueFromCaller,
704 }
754} 705}
755 706
756func (c *Context) acquireRun(phase string) func() { 707func (c *Context) acquireRun(phase string) func() {
@@ -767,9 +718,6 @@ func (c *Context) acquireRun(phase string) func() {
767 // Build our lock 718 // Build our lock
768 c.runCond = sync.NewCond(&c.l) 719 c.runCond = sync.NewCond(&c.l)
769 720
770 // Setup debugging
771 dbug.SetPhase(phase)
772
773 // Create a new run context 721 // Create a new run context
774 c.runContext, c.runContextCancel = context.WithCancel(context.Background()) 722 c.runContext, c.runContextCancel = context.WithCancel(context.Background())
775 723
@@ -787,11 +735,6 @@ func (c *Context) releaseRun() {
787 c.l.Lock() 735 c.l.Lock()
788 defer c.l.Unlock() 736 defer c.l.Unlock()
789 737
790 // setting the phase to "INVALID" lets us easily detect if we have
791 // operations happening outside of a run, or we missed setting the proper
792 // phase
793 dbug.SetPhase("INVALID")
794
795 // End our run. We check if runContext is non-nil because it can be 738 // End our run. We check if runContext is non-nil because it can be
796 // set to nil if it was cancelled via Stop() 739 // set to nil if it was cancelled via Stop()
797 if c.runContextCancel != nil { 740 if c.runContextCancel != nil {
@@ -807,30 +750,33 @@ func (c *Context) releaseRun() {
807 c.runContext = nil 750 c.runContext = nil
808} 751}
809 752
810func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, error) { 753func (c *Context) walk(graph *Graph, operation walkOperation) (*ContextGraphWalker, tfdiags.Diagnostics) {
811 // Keep track of the "real" context which is the context that does
812 // the real work: talking to real providers, modifying real state, etc.
813 realCtx := c
814
815 log.Printf("[DEBUG] Starting graph walk: %s", operation.String()) 754 log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
816 755
817 walker := &ContextGraphWalker{ 756 walker := c.graphWalker(operation)
818 Context: realCtx,
819 Operation: operation,
820 StopContext: c.runContext,
821 }
822 757
823 // Watch for a stop so we can call the provider Stop() API. 758 // Watch for a stop so we can call the provider Stop() API.
824 watchStop, watchWait := c.watchStop(walker) 759 watchStop, watchWait := c.watchStop(walker)
825 760
826 // Walk the real graph, this will block until it completes 761 // Walk the real graph, this will block until it completes
827 realErr := graph.Walk(walker) 762 diags := graph.Walk(walker)
828 763
829 // Close the channel so the watcher stops, and wait for it to return. 764 // Close the channel so the watcher stops, and wait for it to return.
830 close(watchStop) 765 close(watchStop)
831 <-watchWait 766 <-watchWait
832 767
833 return walker, realErr 768 return walker, diags
769}
770
771func (c *Context) graphWalker(operation walkOperation) *ContextGraphWalker {
772 return &ContextGraphWalker{
773 Context: c,
774 State: c.state.SyncWrapper(),
775 Changes: c.changes.SyncWrapper(),
776 Operation: operation,
777 StopContext: c.runContext,
778 RootVariableValues: c.variables,
779 }
834} 780}
835 781
836// watchStop immediately returns a `stop` and a `wait` chan after dispatching 782// watchStop immediately returns a `stop` and a `wait` chan after dispatching
@@ -863,12 +809,13 @@ func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan s
863 } 809 }
864 810
865 // If we're here, we're stopped, trigger the call. 811 // If we're here, we're stopped, trigger the call.
812 log.Printf("[TRACE] Context: requesting providers and provisioners to gracefully stop")
866 813
867 { 814 {
868 // Copy the providers so that a misbehaved blocking Stop doesn't 815 // Copy the providers so that a misbehaved blocking Stop doesn't
869 // completely hang Terraform. 816 // completely hang Terraform.
870 walker.providerLock.Lock() 817 walker.providerLock.Lock()
871 ps := make([]ResourceProvider, 0, len(walker.providerCache)) 818 ps := make([]providers.Interface, 0, len(walker.providerCache))
872 for _, p := range walker.providerCache { 819 for _, p := range walker.providerCache {
873 ps = append(ps, p) 820 ps = append(ps, p)
874 } 821 }
@@ -885,7 +832,7 @@ func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan s
885 { 832 {
886 // Call stop on all the provisioners 833 // Call stop on all the provisioners
887 walker.provisionerLock.Lock() 834 walker.provisionerLock.Lock()
888 ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache)) 835 ps := make([]provisioners.Interface, 0, len(walker.provisionerCache))
889 for _, p := range walker.provisionerCache { 836 for _, p := range walker.provisionerCache {
890 ps = append(ps, p) 837 ps = append(ps, p)
891 } 838 }
@@ -955,3 +902,37 @@ func parseVariableAsHCL(name string, input string, targetType config.VariableTyp
955 panic(fmt.Errorf("unknown type %s", targetType.Printable())) 902 panic(fmt.Errorf("unknown type %s", targetType.Printable()))
956 } 903 }
957} 904}
905
906// ShimLegacyState is a helper that takes the legacy state type and
907// converts it to the new state type.
908//
909// This is implemented as a state file upgrade, so it will not preserve
910// parts of the state structure that are not included in a serialized state,
911// such as the resolved results of any local values, outputs in non-root
912// modules, etc.
913func ShimLegacyState(legacy *State) (*states.State, error) {
914 if legacy == nil {
915 return nil, nil
916 }
917 var buf bytes.Buffer
918 err := WriteState(legacy, &buf)
919 if err != nil {
920 return nil, err
921 }
922 f, err := statefile.Read(&buf)
923 if err != nil {
924 return nil, err
925 }
926 return f.State, err
927}
928
929// MustShimLegacyState is a wrapper around ShimLegacyState that panics if
930// the conversion does not succeed. This is primarily intended for tests where
931// the given legacy state is an object constructed within the test.
932func MustShimLegacyState(legacy *State) *states.State {
933 ret, err := ShimLegacyState(legacy)
934 if err != nil {
935 panic(err)
936 }
937 return ret
938}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
index 6f50744..26ec995 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context_components.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
@@ -2,6 +2,9 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/providers"
7 "github.com/hashicorp/terraform/provisioners"
5) 8)
6 9
7// contextComponentFactory is the interface that Context uses 10// contextComponentFactory is the interface that Context uses
@@ -12,25 +15,25 @@ type contextComponentFactory interface {
12 // ResourceProvider creates a new ResourceProvider with the given 15 // ResourceProvider creates a new ResourceProvider with the given
13 // type. The "uid" is a unique identifier for this provider being 16 // type. The "uid" is a unique identifier for this provider being
14 // initialized that can be used for internal tracking. 17 // initialized that can be used for internal tracking.
15 ResourceProvider(typ, uid string) (ResourceProvider, error) 18 ResourceProvider(typ, uid string) (providers.Interface, error)
16 ResourceProviders() []string 19 ResourceProviders() []string
17 20
18 // ResourceProvisioner creates a new ResourceProvisioner with the 21 // ResourceProvisioner creates a new ResourceProvisioner with the
19 // given type. The "uid" is a unique identifier for this provisioner 22 // given type. The "uid" is a unique identifier for this provisioner
20 // being initialized that can be used for internal tracking. 23 // being initialized that can be used for internal tracking.
21 ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) 24 ResourceProvisioner(typ, uid string) (provisioners.Interface, error)
22 ResourceProvisioners() []string 25 ResourceProvisioners() []string
23} 26}
24 27
25// basicComponentFactory just calls a factory from a map directly. 28// basicComponentFactory just calls a factory from a map directly.
26type basicComponentFactory struct { 29type basicComponentFactory struct {
27 providers map[string]ResourceProviderFactory 30 providers map[string]providers.Factory
28 provisioners map[string]ResourceProvisionerFactory 31 provisioners map[string]ProvisionerFactory
29} 32}
30 33
31func (c *basicComponentFactory) ResourceProviders() []string { 34func (c *basicComponentFactory) ResourceProviders() []string {
32 result := make([]string, len(c.providers)) 35 result := make([]string, len(c.providers))
33 for k, _ := range c.providers { 36 for k := range c.providers {
34 result = append(result, k) 37 result = append(result, k)
35 } 38 }
36 39
@@ -39,14 +42,14 @@ func (c *basicComponentFactory) ResourceProviders() []string {
39 42
40func (c *basicComponentFactory) ResourceProvisioners() []string { 43func (c *basicComponentFactory) ResourceProvisioners() []string {
41 result := make([]string, len(c.provisioners)) 44 result := make([]string, len(c.provisioners))
42 for k, _ := range c.provisioners { 45 for k := range c.provisioners {
43 result = append(result, k) 46 result = append(result, k)
44 } 47 }
45 48
46 return result 49 return result
47} 50}
48 51
49func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) { 52func (c *basicComponentFactory) ResourceProvider(typ, uid string) (providers.Interface, error) {
50 f, ok := c.providers[typ] 53 f, ok := c.providers[typ]
51 if !ok { 54 if !ok {
52 return nil, fmt.Errorf("unknown provider %q", typ) 55 return nil, fmt.Errorf("unknown provider %q", typ)
@@ -55,7 +58,7 @@ func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvi
55 return f() 58 return f()
56} 59}
57 60
58func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) { 61func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (provisioners.Interface, error) {
59 f, ok := c.provisioners[typ] 62 f, ok := c.provisioners[typ]
60 if !ok { 63 if !ok {
61 return nil, fmt.Errorf("unknown provisioner %q", typ) 64 return nil, fmt.Errorf("unknown provisioner %q", typ)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
index 084f010..0a424a0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
@@ -14,8 +14,8 @@ const (
14 GraphTypePlan 14 GraphTypePlan
15 GraphTypePlanDestroy 15 GraphTypePlanDestroy
16 GraphTypeApply 16 GraphTypeApply
17 GraphTypeInput
18 GraphTypeValidate 17 GraphTypeValidate
18 GraphTypeEval // only visits in-memory elements such as variables, locals, and outputs.
19) 19)
20 20
21// GraphTypeMap is a mapping of human-readable string to GraphType. This 21// GraphTypeMap is a mapping of human-readable string to GraphType. This
@@ -23,10 +23,10 @@ const (
23// graph types. 23// graph types.
24var GraphTypeMap = map[string]GraphType{ 24var GraphTypeMap = map[string]GraphType{
25 "apply": GraphTypeApply, 25 "apply": GraphTypeApply,
26 "input": GraphTypeInput,
27 "plan": GraphTypePlan, 26 "plan": GraphTypePlan,
28 "plan-destroy": GraphTypePlanDestroy, 27 "plan-destroy": GraphTypePlanDestroy,
29 "refresh": GraphTypeRefresh, 28 "refresh": GraphTypeRefresh,
30 "legacy": GraphTypeLegacy, 29 "legacy": GraphTypeLegacy,
31 "validate": GraphTypeValidate, 30 "validate": GraphTypeValidate,
31 "eval": GraphTypeEval,
32} 32}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
index e940143..313e909 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/context_import.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
@@ -1,7 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/states"
7 "github.com/hashicorp/terraform/tfdiags"
5) 8)
6 9
7// ImportOpts are used as the configuration for Import. 10// ImportOpts are used as the configuration for Import.
@@ -9,23 +12,23 @@ type ImportOpts struct {
9 // Targets are the targets to import 12 // Targets are the targets to import
10 Targets []*ImportTarget 13 Targets []*ImportTarget
11 14
12 // Module is optional, and specifies a config module that is loaded 15 // Config is optional, and specifies a config tree that will be loaded
13 // into the graph and evaluated. The use case for this is to provide 16 // into the graph and evaluated. This is the source for provider
14 // provider configuration. 17 // configurations.
15 Module *module.Tree 18 Config *configs.Config
16} 19}
17 20
18// ImportTarget is a single resource to import. 21// ImportTarget is a single resource to import.
19type ImportTarget struct { 22type ImportTarget struct {
20 // Addr is the full resource address of the resource to import. 23 // Addr is the address for the resource instance that the new object should
21 // Example: "module.foo.aws_instance.bar" 24 // be imported into.
22 Addr string 25 Addr addrs.AbsResourceInstance
23 26
24 // ID is the ID of the resource to import. This is resource-specific. 27 // ID is the ID of the resource to import. This is resource-specific.
25 ID string 28 ID string
26 29
27 // Provider string 30 // ProviderAddr is the address of the provider that should handle the import.
28 Provider string 31 ProviderAddr addrs.AbsProviderConfig
29} 32}
30 33
31// Import takes already-created external resources and brings them 34// Import takes already-created external resources and brings them
@@ -38,7 +41,9 @@ type ImportTarget struct {
38// Further, this operation also gracefully handles partial state. If during 41// Further, this operation also gracefully handles partial state. If during
39// an import there is a failure, all previously imported resources remain 42// an import there is a failure, all previously imported resources remain
40// imported. 43// imported.
41func (c *Context) Import(opts *ImportOpts) (*State, error) { 44func (c *Context) Import(opts *ImportOpts) (*states.State, tfdiags.Diagnostics) {
45 var diags tfdiags.Diagnostics
46
42 // Hold a lock since we can modify our own state here 47 // Hold a lock since we can modify our own state here
43 defer c.acquireRun("import")() 48 defer c.acquireRun("import")()
44 49
@@ -47,31 +52,32 @@ func (c *Context) Import(opts *ImportOpts) (*State, error) {
47 52
48 // If no module is given, default to the module configured with 53 // If no module is given, default to the module configured with
49 // the Context. 54 // the Context.
50 module := opts.Module 55 config := opts.Config
51 if module == nil { 56 if config == nil {
52 module = c.module 57 config = c.config
53 } 58 }
54 59
55 // Initialize our graph builder 60 // Initialize our graph builder
56 builder := &ImportGraphBuilder{ 61 builder := &ImportGraphBuilder{
57 ImportTargets: opts.Targets, 62 ImportTargets: opts.Targets,
58 Module: module, 63 Config: config,
59 Providers: c.components.ResourceProviders(), 64 Components: c.components,
65 Schemas: c.schemas,
60 } 66 }
61 67
62 // Build the graph! 68 // Build the graph!
63 graph, err := builder.Build(RootModulePath) 69 graph, graphDiags := builder.Build(addrs.RootModuleInstance)
64 if err != nil { 70 diags = diags.Append(graphDiags)
65 return c.state, err 71 if graphDiags.HasErrors() {
72 return c.state, diags
66 } 73 }
67 74
68 // Walk it 75 // Walk it
69 if _, err := c.walk(graph, walkImport); err != nil { 76 _, walkDiags := c.walk(graph, walkImport)
70 return c.state, err 77 diags = diags.Append(walkDiags)
78 if walkDiags.HasErrors() {
79 return c.state, diags
71 } 80 }
72 81
73 // Clean the state 82 return c.state, diags
74 c.state.prune()
75
76 return c.state, nil
77} 83}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_input.go b/vendor/github.com/hashicorp/terraform/terraform/context_input.go
new file mode 100644
index 0000000..6c7be88
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/context_input.go
@@ -0,0 +1,251 @@
1package terraform
2
3import (
4 "context"
5 "fmt"
6 "log"
7 "sort"
8
9 "github.com/hashicorp/hcl2/hcl"
10 "github.com/hashicorp/hcl2/hcldec"
11 "github.com/zclconf/go-cty/cty"
12
13 "github.com/hashicorp/terraform/addrs"
14 "github.com/hashicorp/terraform/configs"
15 "github.com/hashicorp/terraform/tfdiags"
16)
17
18// Input asks for input to fill variables and provider configurations.
19// This modifies the configuration in-place, so asking for Input twice
20// may result in different UI output showing different current values.
21func (c *Context) Input(mode InputMode) tfdiags.Diagnostics {
22 var diags tfdiags.Diagnostics
23 defer c.acquireRun("input")()
24
25 if c.uiInput == nil {
26 log.Printf("[TRACE] Context.Input: uiInput is nil, so skipping")
27 return diags
28 }
29
30 ctx := context.Background()
31
32 if mode&InputModeVar != 0 {
33 log.Printf("[TRACE] Context.Input: Prompting for variables")
34
35 // Walk the variables first for the root module. We walk them in
36 // alphabetical order for UX reasons.
37 configs := c.config.Module.Variables
38 names := make([]string, 0, len(configs))
39 for name := range configs {
40 names = append(names, name)
41 }
42 sort.Strings(names)
43 Variables:
44 for _, n := range names {
45 v := configs[n]
46
47 // If we only care about unset variables, then we should set any
48 // variable that is already set.
49 if mode&InputModeVarUnset != 0 {
50 if _, isSet := c.variables[n]; isSet {
51 continue
52 }
53 }
54
55 // this should only happen during tests
56 if c.uiInput == nil {
57 log.Println("[WARN] Context.uiInput is nil during input walk")
58 continue
59 }
60
61 // Ask the user for a value for this variable
62 var rawValue string
63 retry := 0
64 for {
65 var err error
66 rawValue, err = c.uiInput.Input(ctx, &InputOpts{
67 Id: fmt.Sprintf("var.%s", n),
68 Query: fmt.Sprintf("var.%s", n),
69 Description: v.Description,
70 })
71 if err != nil {
72 diags = diags.Append(tfdiags.Sourceless(
73 tfdiags.Error,
74 "Failed to request interactive input",
75 fmt.Sprintf("Terraform attempted to request a value for var.%s interactively, but encountered an error: %s.", n, err),
76 ))
77 return diags
78 }
79
80 if rawValue == "" && v.Default == cty.NilVal {
81 // Redo if it is required, but abort if we keep getting
82 // blank entries
83 if retry > 2 {
84 diags = diags.Append(tfdiags.Sourceless(
85 tfdiags.Error,
86 "Required variable not assigned",
87 fmt.Sprintf("The variable %q is required, so Terraform cannot proceed without a defined value for it.", n),
88 ))
89 continue Variables
90 }
91 retry++
92 continue
93 }
94
95 break
96 }
97
98 val, valDiags := v.ParsingMode.Parse(n, rawValue)
99 diags = diags.Append(valDiags)
100 if diags.HasErrors() {
101 continue
102 }
103
104 c.variables[n] = &InputValue{
105 Value: val,
106 SourceType: ValueFromInput,
107 }
108 }
109 }
110
111 if mode&InputModeProvider != 0 {
112 log.Printf("[TRACE] Context.Input: Prompting for provider arguments")
113
114 // We prompt for input only for provider configurations defined in
115 // the root module. At the time of writing that is an arbitrary
116 // restriction, but we have future plans to support "count" and
117 // "for_each" on modules that will then prevent us from supporting
118 // input for child module configurations anyway (since we'd need to
119 // dynamic-expand first), and provider configurations in child modules
120 // are not recommended since v0.11 anyway, so this restriction allows
121 // us to keep this relatively simple without significant hardship.
122
123 pcs := make(map[string]*configs.Provider)
124 pas := make(map[string]addrs.ProviderConfig)
125 for _, pc := range c.config.Module.ProviderConfigs {
126 addr := pc.Addr()
127 pcs[addr.String()] = pc
128 pas[addr.String()] = addr
129 log.Printf("[TRACE] Context.Input: Provider %s declared at %s", addr, pc.DeclRange)
130 }
131 // We also need to detect _implied_ provider configs from resources.
132 // These won't have *configs.Provider objects, but they will still
133 // exist in the map and we'll just treat them as empty below.
134 for _, rc := range c.config.Module.ManagedResources {
135 pa := rc.ProviderConfigAddr()
136 if pa.Alias != "" {
137 continue // alias configurations cannot be implied
138 }
139 if _, exists := pcs[pa.String()]; !exists {
140 pcs[pa.String()] = nil
141 pas[pa.String()] = pa
142 log.Printf("[TRACE] Context.Input: Provider %s implied by resource block at %s", pa, rc.DeclRange)
143 }
144 }
145 for _, rc := range c.config.Module.DataResources {
146 pa := rc.ProviderConfigAddr()
147 if pa.Alias != "" {
148 continue // alias configurations cannot be implied
149 }
150 if _, exists := pcs[pa.String()]; !exists {
151 pcs[pa.String()] = nil
152 pas[pa.String()] = pa
153 log.Printf("[TRACE] Context.Input: Provider %s implied by data block at %s", pa, rc.DeclRange)
154 }
155 }
156
157 for pk, pa := range pas {
158 pc := pcs[pk] // will be nil if this is an implied config
159
160 // Wrap the input into a namespace
161 input := &PrefixUIInput{
162 IdPrefix: pk,
163 QueryPrefix: pk + ".",
164 UIInput: c.uiInput,
165 }
166
167 schema := c.schemas.ProviderConfig(pa.Type)
168 if schema == nil {
169 // Could either be an incorrect config or just an incomplete
170 // mock in tests. We'll let a later pass decide, and just
171 // ignore this for the purposes of gathering input.
172 log.Printf("[TRACE] Context.Input: No schema available for provider type %q", pa.Type)
173 continue
174 }
175
176 // For our purposes here we just want to detect if attrbutes are
177 // set in config at all, so rather than doing a full decode
178 // (which would require us to prepare an evalcontext, etc) we'll
179 // use the low-level HCL API to process only the top-level
180 // structure.
181 var attrExprs hcl.Attributes // nil if there is no config
182 if pc != nil && pc.Config != nil {
183 lowLevelSchema := schemaForInputSniffing(hcldec.ImpliedSchema(schema.DecoderSpec()))
184 content, _, diags := pc.Config.PartialContent(lowLevelSchema)
185 if diags.HasErrors() {
186 log.Printf("[TRACE] Context.Input: %s has decode error, so ignoring: %s", pa, diags.Error())
187 continue
188 }
189 attrExprs = content.Attributes
190 }
191
192 keys := make([]string, 0, len(schema.Attributes))
193 for key := range schema.Attributes {
194 keys = append(keys, key)
195 }
196 sort.Strings(keys)
197
198 vals := map[string]cty.Value{}
199 for _, key := range keys {
200 attrS := schema.Attributes[key]
201 if attrS.Optional {
202 continue
203 }
204 if attrExprs != nil {
205 if _, exists := attrExprs[key]; exists {
206 continue
207 }
208 }
209 if !attrS.Type.Equals(cty.String) {
210 continue
211 }
212
213 log.Printf("[TRACE] Context.Input: Prompting for %s argument %s", pa, key)
214 rawVal, err := input.Input(ctx, &InputOpts{
215 Id: key,
216 Query: key,
217 Description: attrS.Description,
218 })
219 if err != nil {
220 log.Printf("[TRACE] Context.Input: Failed to prompt for %s argument %s: %s", pa, key, err)
221 continue
222 }
223
224 vals[key] = cty.StringVal(rawVal)
225 }
226
227 c.providerInputConfig[pk] = vals
228 log.Printf("[TRACE] Context.Input: Input for %s: %#v", pk, vals)
229 }
230 }
231
232 return diags
233}
234
235// schemaForInputSniffing returns a transformed version of a given schema
236// that marks all attributes as optional, which the Context.Input method can
237// use to detect whether a required argument is set without missing arguments
238// themselves generating errors.
239func schemaForInputSniffing(schema *hcl.BodySchema) *hcl.BodySchema {
240 ret := &hcl.BodySchema{
241 Attributes: make([]hcl.AttributeSchema, len(schema.Attributes)),
242 Blocks: schema.Blocks,
243 }
244
245 for i, attrS := range schema.Attributes {
246 ret.Attributes[i] = attrS
247 ret.Attributes[i].Required = false
248 }
249
250 return ret
251}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go
deleted file mode 100644
index 265339f..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/debug.go
+++ /dev/null
@@ -1,523 +0,0 @@
1package terraform
2
3import (
4 "archive/tar"
5 "bytes"
6 "compress/gzip"
7 "encoding/json"
8 "fmt"
9 "io"
10 "os"
11 "path/filepath"
12 "sync"
13 "time"
14)
15
16// DebugInfo is the global handler for writing the debug archive. All methods
17// are safe to call concurrently. Setting DebugInfo to nil will disable writing
18// the debug archive. All methods are safe to call on the nil value.
19var dbug *debugInfo
20
21// SetDebugInfo initializes the debug handler with a backing file in the
22// provided directory. This must be called before any other terraform package
23// operations or not at all. Once his is called, CloseDebugInfo should be
24// called before program exit.
25func SetDebugInfo(path string) error {
26 if os.Getenv("TF_DEBUG") == "" {
27 return nil
28 }
29
30 di, err := newDebugInfoFile(path)
31 if err != nil {
32 return err
33 }
34
35 dbug = di
36 return nil
37}
38
39// CloseDebugInfo is the exported interface to Close the debug info handler.
40// The debug handler needs to be closed before program exit, so we export this
41// function to be deferred in the appropriate entrypoint for our executable.
42func CloseDebugInfo() error {
43 return dbug.Close()
44}
45
46// newDebugInfoFile initializes the global debug handler with a backing file in
47// the provided directory.
48func newDebugInfoFile(dir string) (*debugInfo, error) {
49 err := os.MkdirAll(dir, 0755)
50 if err != nil {
51 return nil, err
52 }
53
54 // FIXME: not guaranteed unique, but good enough for now
55 name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999"))
56 archivePath := filepath.Join(dir, name+".tar.gz")
57
58 f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
59 if err != nil {
60 return nil, err
61 }
62 return newDebugInfo(name, f)
63}
64
65// newDebugInfo initializes the global debug handler.
66func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
67 gz := gzip.NewWriter(w)
68
69 d := &debugInfo{
70 name: name,
71 w: w,
72 gz: gz,
73 tar: tar.NewWriter(gz),
74 }
75
76 // create the subdirs we need
77 topHdr := &tar.Header{
78 Name: name,
79 Typeflag: tar.TypeDir,
80 Mode: 0755,
81 }
82 graphsHdr := &tar.Header{
83 Name: name + "/graphs",
84 Typeflag: tar.TypeDir,
85 Mode: 0755,
86 }
87 err := d.tar.WriteHeader(topHdr)
88 // if the first errors, the second will too
89 err = d.tar.WriteHeader(graphsHdr)
90 if err != nil {
91 return nil, err
92 }
93
94 return d, nil
95}
96
97// debugInfo provides various methods for writing debug information to a
98// central archive. The debugInfo struct should be initialized once before any
99// output is written, and Close should be called before program exit. All
100// exported methods on debugInfo will be safe for concurrent use. The exported
101// methods are also all safe to call on a nil pointer, so that there is no need
102// for conditional blocks before writing debug information.
103//
104// Each write operation done by the debugInfo will flush the gzip.Writer and
105// tar.Writer, and call Sync() or Flush() on the output writer as needed. This
106// ensures that as much data as possible is written to storage in the event of
107// a crash. The append format of the tar file, and the stream format of the
108// gzip writer allow easy recovery f the data in the event that the debugInfo
109// is not closed before program exit.
110type debugInfo struct {
111 sync.Mutex
112
113 // archive root directory name
114 name string
115
116 // current operation phase
117 phase string
118
119 // step is monotonic counter for for recording the order of operations
120 step int
121
122 // flag to protect Close()
123 closed bool
124
125 // the debug log output is in a tar.gz format, written to the io.Writer w
126 w io.Writer
127 gz *gzip.Writer
128 tar *tar.Writer
129}
130
131// Set the name of the current operational phase in the debug handler. Each file
132// in the archive will contain the name of the phase in which it was created,
133// i.e. "input", "apply", "plan", "refresh", "validate"
134func (d *debugInfo) SetPhase(phase string) {
135 if d == nil {
136 return
137 }
138 d.Lock()
139 defer d.Unlock()
140
141 d.phase = phase
142}
143
144// Close the debugInfo, finalizing the data in storage. This closes the
145// tar.Writer, the gzip.Wrtier, and if the output writer is an io.Closer, it is
146// also closed.
147func (d *debugInfo) Close() error {
148 if d == nil {
149 return nil
150 }
151
152 d.Lock()
153 defer d.Unlock()
154
155 if d.closed {
156 return nil
157 }
158 d.closed = true
159
160 d.tar.Close()
161 d.gz.Close()
162
163 if c, ok := d.w.(io.Closer); ok {
164 return c.Close()
165 }
166 return nil
167}
168
169// debug buffer is an io.WriteCloser that will write itself to the debug
170// archive when closed.
171type debugBuffer struct {
172 debugInfo *debugInfo
173 name string
174 buf bytes.Buffer
175}
176
177func (b *debugBuffer) Write(d []byte) (int, error) {
178 return b.buf.Write(d)
179}
180
181func (b *debugBuffer) Close() error {
182 return b.debugInfo.WriteFile(b.name, b.buf.Bytes())
183}
184
185// ioutils only has a noop ReadCloser
186type nopWriteCloser struct{}
187
188func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil }
189func (nopWriteCloser) Close() error { return nil }
190
191// NewFileWriter returns an io.WriteClose that will be buffered and written to
192// the debug archive when closed.
193func (d *debugInfo) NewFileWriter(name string) io.WriteCloser {
194 if d == nil {
195 return nopWriteCloser{}
196 }
197
198 return &debugBuffer{
199 debugInfo: d,
200 name: name,
201 }
202}
203
204type syncer interface {
205 Sync() error
206}
207
208type flusher interface {
209 Flush() error
210}
211
212// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called
213// on the output writer if they are available.
214func (d *debugInfo) flush() {
215 d.tar.Flush()
216 d.gz.Flush()
217
218 if f, ok := d.w.(flusher); ok {
219 f.Flush()
220 }
221
222 if s, ok := d.w.(syncer); ok {
223 s.Sync()
224 }
225}
226
227// WriteFile writes data as a single file to the debug arhive.
228func (d *debugInfo) WriteFile(name string, data []byte) error {
229 if d == nil {
230 return nil
231 }
232
233 d.Lock()
234 defer d.Unlock()
235 return d.writeFile(name, data)
236}
237
238func (d *debugInfo) writeFile(name string, data []byte) error {
239 defer d.flush()
240 path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name)
241 d.step++
242
243 hdr := &tar.Header{
244 Name: path,
245 Mode: 0644,
246 Size: int64(len(data)),
247 }
248 err := d.tar.WriteHeader(hdr)
249 if err != nil {
250 return err
251 }
252
253 _, err = d.tar.Write(data)
254 return err
255}
256
257// DebugHook implements all methods of the terraform.Hook interface, and writes
258// the arguments to a file in the archive. When a suitable format for the
259// argument isn't available, the argument is encoded using json.Marshal. If the
260// debug handler is nil, all DebugHook methods are noop, so no time is spent in
261// marshaling the data structures.
262type DebugHook struct{}
263
264func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) {
265 if dbug == nil {
266 return HookActionContinue, nil
267 }
268
269 var buf bytes.Buffer
270
271 if ii != nil {
272 buf.WriteString(ii.HumanId() + "\n")
273 }
274
275 if is != nil {
276 buf.WriteString(is.String() + "\n")
277 }
278
279 idCopy, err := id.Copy()
280 if err != nil {
281 return HookActionContinue, err
282 }
283 js, err := json.MarshalIndent(idCopy, "", " ")
284 if err != nil {
285 return HookActionContinue, err
286 }
287 buf.Write(js)
288
289 dbug.WriteFile("hook-PreApply", buf.Bytes())
290
291 return HookActionContinue, nil
292}
293
294func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) {
295 if dbug == nil {
296 return HookActionContinue, nil
297 }
298
299 var buf bytes.Buffer
300
301 if ii != nil {
302 buf.WriteString(ii.HumanId() + "\n")
303 }
304
305 if is != nil {
306 buf.WriteString(is.String() + "\n")
307 }
308
309 if err != nil {
310 buf.WriteString(err.Error())
311 }
312
313 dbug.WriteFile("hook-PostApply", buf.Bytes())
314
315 return HookActionContinue, nil
316}
317
318func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
319 if dbug == nil {
320 return HookActionContinue, nil
321 }
322
323 var buf bytes.Buffer
324 if ii != nil {
325 buf.WriteString(ii.HumanId() + "\n")
326 }
327
328 if is != nil {
329 buf.WriteString(is.String())
330 buf.WriteString("\n")
331 }
332 dbug.WriteFile("hook-PreDiff", buf.Bytes())
333
334 return HookActionContinue, nil
335}
336
337func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) {
338 if dbug == nil {
339 return HookActionContinue, nil
340 }
341
342 var buf bytes.Buffer
343 if ii != nil {
344 buf.WriteString(ii.HumanId() + "\n")
345 }
346
347 idCopy, err := id.Copy()
348 if err != nil {
349 return HookActionContinue, err
350 }
351 js, err := json.MarshalIndent(idCopy, "", " ")
352 if err != nil {
353 return HookActionContinue, err
354 }
355 buf.Write(js)
356
357 dbug.WriteFile("hook-PostDiff", buf.Bytes())
358
359 return HookActionContinue, nil
360}
361
362func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
363 if dbug == nil {
364 return HookActionContinue, nil
365 }
366
367 var buf bytes.Buffer
368 if ii != nil {
369 buf.WriteString(ii.HumanId() + "\n")
370 }
371
372 if is != nil {
373 buf.WriteString(is.String())
374 buf.WriteString("\n")
375 }
376 dbug.WriteFile("hook-PreProvisionResource", buf.Bytes())
377
378 return HookActionContinue, nil
379}
380
381func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
382 if dbug == nil {
383 return HookActionContinue, nil
384 }
385
386 var buf bytes.Buffer
387 if ii != nil {
388 buf.WriteString(ii.HumanId())
389 buf.WriteString("\n")
390 }
391
392 if is != nil {
393 buf.WriteString(is.String())
394 buf.WriteString("\n")
395 }
396 dbug.WriteFile("hook-PostProvisionResource", buf.Bytes())
397 return HookActionContinue, nil
398}
399
400func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) {
401 if dbug == nil {
402 return HookActionContinue, nil
403 }
404
405 var buf bytes.Buffer
406 if ii != nil {
407 buf.WriteString(ii.HumanId())
408 buf.WriteString("\n")
409 }
410 buf.WriteString(s + "\n")
411
412 dbug.WriteFile("hook-PreProvision", buf.Bytes())
413 return HookActionContinue, nil
414}
415
416func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) {
417 if dbug == nil {
418 return HookActionContinue, nil
419 }
420
421 var buf bytes.Buffer
422 if ii != nil {
423 buf.WriteString(ii.HumanId() + "\n")
424 }
425 buf.WriteString(s + "\n")
426
427 dbug.WriteFile("hook-PostProvision", buf.Bytes())
428 return HookActionContinue, nil
429}
430
431func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) {
432 if dbug == nil {
433 return
434 }
435
436 var buf bytes.Buffer
437 if ii != nil {
438 buf.WriteString(ii.HumanId())
439 buf.WriteString("\n")
440 }
441 buf.WriteString(s1 + "\n")
442 buf.WriteString(s2 + "\n")
443
444 dbug.WriteFile("hook-ProvisionOutput", buf.Bytes())
445}
446
447func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
448 if dbug == nil {
449 return HookActionContinue, nil
450 }
451
452 var buf bytes.Buffer
453 if ii != nil {
454 buf.WriteString(ii.HumanId() + "\n")
455 }
456
457 if is != nil {
458 buf.WriteString(is.String())
459 buf.WriteString("\n")
460 }
461 dbug.WriteFile("hook-PreRefresh", buf.Bytes())
462 return HookActionContinue, nil
463}
464
465func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
466 if dbug == nil {
467 return HookActionContinue, nil
468 }
469
470 var buf bytes.Buffer
471 if ii != nil {
472 buf.WriteString(ii.HumanId())
473 buf.WriteString("\n")
474 }
475
476 if is != nil {
477 buf.WriteString(is.String())
478 buf.WriteString("\n")
479 }
480 dbug.WriteFile("hook-PostRefresh", buf.Bytes())
481 return HookActionContinue, nil
482}
483
484func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) {
485 if dbug == nil {
486 return HookActionContinue, nil
487 }
488
489 var buf bytes.Buffer
490 if ii != nil {
491 buf.WriteString(ii.HumanId())
492 buf.WriteString("\n")
493 }
494 buf.WriteString(s + "\n")
495
496 dbug.WriteFile("hook-PreImportState", buf.Bytes())
497 return HookActionContinue, nil
498}
499
500func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) {
501 if dbug == nil {
502 return HookActionContinue, nil
503 }
504
505 var buf bytes.Buffer
506
507 if ii != nil {
508 buf.WriteString(ii.HumanId() + "\n")
509 }
510
511 for _, is := range iss {
512 if is != nil {
513 buf.WriteString(is.String() + "\n")
514 }
515 }
516 dbug.WriteFile("hook-PostImportState", buf.Bytes())
517 return HookActionContinue, nil
518}
519
520// skip logging this for now, since it could be huge
521func (*DebugHook) PostStateUpdate(*State) (HookAction, error) {
522 return HookActionContinue, nil
523}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
index d6dc550..7a6ef3d 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go
@@ -4,12 +4,20 @@ import (
4 "bufio" 4 "bufio"
5 "bytes" 5 "bytes"
6 "fmt" 6 "fmt"
7 "log"
7 "reflect" 8 "reflect"
8 "regexp" 9 "regexp"
9 "sort" 10 "sort"
11 "strconv"
10 "strings" 12 "strings"
11 "sync" 13 "sync"
12 14
15 "github.com/hashicorp/terraform/addrs"
16 "github.com/hashicorp/terraform/config"
17 "github.com/hashicorp/terraform/config/hcl2shim"
18 "github.com/hashicorp/terraform/configs/configschema"
19 "github.com/zclconf/go-cty/cty"
20
13 "github.com/mitchellh/copystructure" 21 "github.com/mitchellh/copystructure"
14) 22)
15 23
@@ -69,8 +77,24 @@ func (d *Diff) Prune() {
69// 77//
70// This should be the preferred method to add module diffs since it 78// This should be the preferred method to add module diffs since it
71// allows us to optimize lookups later as well as control sorting. 79// allows us to optimize lookups later as well as control sorting.
72func (d *Diff) AddModule(path []string) *ModuleDiff { 80func (d *Diff) AddModule(path addrs.ModuleInstance) *ModuleDiff {
73 m := &ModuleDiff{Path: path} 81 // Lower the new-style address into a legacy-style address.
82 // This requires that none of the steps have instance keys, which is
83 // true for all addresses at the time of implementing this because
84 // "count" and "for_each" are not yet implemented for modules.
85 legacyPath := make([]string, len(path))
86 for i, step := range path {
87 if step.InstanceKey != addrs.NoKey {
88 // FIXME: Once the rest of Terraform is ready to use count and
89 // for_each, remove all of this and just write the addrs.ModuleInstance
90 // value itself into the ModuleState.
91 panic("diff cannot represent modules with count or for_each keys")
92 }
93
94 legacyPath[i] = step.Name
95 }
96
97 m := &ModuleDiff{Path: legacyPath}
74 m.init() 98 m.init()
75 d.Modules = append(d.Modules, m) 99 d.Modules = append(d.Modules, m)
76 return m 100 return m
@@ -79,7 +103,7 @@ func (d *Diff) AddModule(path []string) *ModuleDiff {
79// ModuleByPath is used to lookup the module diff for the given path. 103// ModuleByPath is used to lookup the module diff for the given path.
80// This should be the preferred lookup mechanism as it allows for future 104// This should be the preferred lookup mechanism as it allows for future
81// lookup optimizations. 105// lookup optimizations.
82func (d *Diff) ModuleByPath(path []string) *ModuleDiff { 106func (d *Diff) ModuleByPath(path addrs.ModuleInstance) *ModuleDiff {
83 if d == nil { 107 if d == nil {
84 return nil 108 return nil
85 } 109 }
@@ -87,7 +111,8 @@ func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
87 if mod.Path == nil { 111 if mod.Path == nil {
88 panic("missing module path") 112 panic("missing module path")
89 } 113 }
90 if reflect.DeepEqual(mod.Path, path) { 114 modPath := normalizeModulePath(mod.Path)
115 if modPath.String() == path.String() {
91 return mod 116 return mod
92 } 117 }
93 } 118 }
@@ -96,7 +121,7 @@ func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
96 121
97// RootModule returns the ModuleState for the root module 122// RootModule returns the ModuleState for the root module
98func (d *Diff) RootModule() *ModuleDiff { 123func (d *Diff) RootModule() *ModuleDiff {
99 root := d.ModuleByPath(rootModulePath) 124 root := d.ModuleByPath(addrs.RootModuleInstance)
100 if root == nil { 125 if root == nil {
101 panic("missing root module") 126 panic("missing root module")
102 } 127 }
@@ -166,7 +191,8 @@ func (d *Diff) String() string {
166 keys := make([]string, 0, len(d.Modules)) 191 keys := make([]string, 0, len(d.Modules))
167 lookup := make(map[string]*ModuleDiff) 192 lookup := make(map[string]*ModuleDiff)
168 for _, m := range d.Modules { 193 for _, m := range d.Modules {
169 key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], ".")) 194 addr := normalizeModulePath(m.Path)
195 key := addr.String()
170 keys = append(keys, key) 196 keys = append(keys, key)
171 lookup[key] = m 197 lookup[key] = m
172 } 198 }
@@ -384,6 +410,541 @@ type InstanceDiff struct {
384func (d *InstanceDiff) Lock() { d.mu.Lock() } 410func (d *InstanceDiff) Lock() { d.mu.Lock() }
385func (d *InstanceDiff) Unlock() { d.mu.Unlock() } 411func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
386 412
413// ApplyToValue merges the receiver into the given base value, returning a
414// new value that incorporates the planned changes. The given value must
415// conform to the given schema, or this method will panic.
416//
417// This method is intended for shimming old subsystems that still use this
418// legacy diff type to work with the new-style types.
419func (d *InstanceDiff) ApplyToValue(base cty.Value, schema *configschema.Block) (cty.Value, error) {
420 // Create an InstanceState attributes from our existing state.
421 // We can use this to more easily apply the diff changes.
422 attrs := hcl2shim.FlatmapValueFromHCL2(base)
423 applied, err := d.Apply(attrs, schema)
424 if err != nil {
425 return base, err
426 }
427
428 val, err := hcl2shim.HCL2ValueFromFlatmap(applied, schema.ImpliedType())
429 if err != nil {
430 return base, err
431 }
432
433 return schema.CoerceValue(val)
434}
435
436// Apply applies the diff to the provided flatmapped attributes,
437// returning the new instance attributes.
438//
439// This method is intended for shimming old subsystems that still use this
440// legacy diff type to work with the new-style types.
441func (d *InstanceDiff) Apply(attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
442 // We always build a new value here, even if the given diff is "empty",
443 // because we might be planning to create a new instance that happens
444 // to have no attributes set, and so we want to produce an empty object
445 // rather than just echoing back the null old value.
446 if attrs == nil {
447 attrs = map[string]string{}
448 }
449
450 // Rather applying the diff to mutate the attrs, we'll copy new values into
451 // here to avoid the possibility of leaving stale values.
452 result := map[string]string{}
453
454 if d.Destroy || d.DestroyDeposed || d.DestroyTainted {
455 return result, nil
456 }
457
458 return d.applyBlockDiff(nil, attrs, schema)
459}
460
461func (d *InstanceDiff) applyBlockDiff(path []string, attrs map[string]string, schema *configschema.Block) (map[string]string, error) {
462 result := map[string]string{}
463 name := ""
464 if len(path) > 0 {
465 name = path[len(path)-1]
466 }
467
468 // localPrefix is used to build the local result map
469 localPrefix := ""
470 if name != "" {
471 localPrefix = name + "."
472 }
473
474 // iterate over the schema rather than the attributes, so we can handle
475 // different block types separately from plain attributes
476 for n, attrSchema := range schema.Attributes {
477 var err error
478 newAttrs, err := d.applyAttrDiff(append(path, n), attrs, attrSchema)
479
480 if err != nil {
481 return result, err
482 }
483
484 for k, v := range newAttrs {
485 result[localPrefix+k] = v
486 }
487 }
488
489 blockPrefix := strings.Join(path, ".")
490 if blockPrefix != "" {
491 blockPrefix += "."
492 }
493 for n, block := range schema.BlockTypes {
494 // we need to find the set of all keys that traverse this block
495 candidateKeys := map[string]bool{}
496 blockKey := blockPrefix + n + "."
497 localBlockPrefix := localPrefix + n + "."
498
499 // we can only trust the diff for sets, since the path changes, so don't
500 // count existing values as candidate keys. If it turns out we're
501 // keeping the attributes, we will catch it down below with "keepBlock"
502 // after we check the set count.
503 if block.Nesting != configschema.NestingSet {
504 for k := range attrs {
505 if strings.HasPrefix(k, blockKey) {
506 nextDot := strings.Index(k[len(blockKey):], ".")
507 if nextDot < 0 {
508 continue
509 }
510 nextDot += len(blockKey)
511 candidateKeys[k[len(blockKey):nextDot]] = true
512 }
513 }
514 }
515
516 for k, diff := range d.Attributes {
517 if strings.HasPrefix(k, blockKey) {
518 nextDot := strings.Index(k[len(blockKey):], ".")
519 if nextDot < 0 {
520 continue
521 }
522
523 if diff.NewRemoved {
524 continue
525 }
526
527 nextDot += len(blockKey)
528 candidateKeys[k[len(blockKey):nextDot]] = true
529 }
530 }
531
532 // check each set candidate to see if it was removed.
533 // we need to do this, because when entire sets are removed, they may
534 // have the wrong key, and ony show diffs going to ""
535 if block.Nesting == configschema.NestingSet {
536 for k := range candidateKeys {
537 indexPrefix := strings.Join(append(path, n, k), ".") + "."
538 keep := false
539 // now check each set element to see if it's a new diff, or one
540 // that we're dropping. Since we're only applying the "New"
541 // portion of the set, we can ignore diffs that only contain "Old"
542 for attr, diff := range d.Attributes {
543 if !strings.HasPrefix(attr, indexPrefix) {
544 continue
545 }
546
547 // check for empty "count" keys
548 if (strings.HasSuffix(attr, ".#") || strings.HasSuffix(attr, ".%")) && diff.New == "0" {
549 continue
550 }
551
552 // removed items don't count either
553 if diff.NewRemoved {
554 continue
555 }
556
557 // this must be a diff to keep
558 keep = true
559 break
560 }
561 if !keep {
562 delete(candidateKeys, k)
563 }
564 }
565 }
566
567 for k := range candidateKeys {
568 newAttrs, err := d.applyBlockDiff(append(path, n, k), attrs, &block.Block)
569 if err != nil {
570 return result, err
571 }
572
573 for attr, v := range newAttrs {
574 result[localBlockPrefix+attr] = v
575 }
576 }
577
578 keepBlock := true
579 // check this block's count diff directly first, since we may not
580 // have candidates because it was removed and only set to "0"
581 if diff, ok := d.Attributes[blockKey+"#"]; ok {
582 if diff.New == "0" || diff.NewRemoved {
583 keepBlock = false
584 }
585 }
586
587 // if there was no diff at all, then we need to keep the block attributes
588 if len(candidateKeys) == 0 && keepBlock {
589 for k, v := range attrs {
590 if strings.HasPrefix(k, blockKey) {
591 // we need the key relative to this block, so remove the
592 // entire prefix, then re-insert the block name.
593 localKey := localBlockPrefix + k[len(blockKey):]
594 result[localKey] = v
595 }
596 }
597 }
598
599 countAddr := strings.Join(append(path, n, "#"), ".")
600 if countDiff, ok := d.Attributes[countAddr]; ok {
601 if countDiff.NewComputed {
602 result[localBlockPrefix+"#"] = hcl2shim.UnknownVariableValue
603 } else {
604 result[localBlockPrefix+"#"] = countDiff.New
605
606 // While sets are complete, list are not, and we may not have all the
607 // information to track removals. If the list was truncated, we need to
608 // remove the extra items from the result.
609 if block.Nesting == configschema.NestingList &&
610 countDiff.New != "" && countDiff.New != hcl2shim.UnknownVariableValue {
611 length, _ := strconv.Atoi(countDiff.New)
612 for k := range result {
613 if !strings.HasPrefix(k, localBlockPrefix) {
614 continue
615 }
616
617 index := k[len(localBlockPrefix):]
618 nextDot := strings.Index(index, ".")
619 if nextDot < 1 {
620 continue
621 }
622 index = index[:nextDot]
623 i, err := strconv.Atoi(index)
624 if err != nil {
625 // this shouldn't happen since we added these
626 // ourself, but make note of it just in case.
627 log.Printf("[ERROR] bad list index in %q: %s", k, err)
628 continue
629 }
630 if i >= length {
631 delete(result, k)
632 }
633 }
634 }
635 }
636 } else if origCount, ok := attrs[countAddr]; ok && keepBlock {
637 result[localBlockPrefix+"#"] = origCount
638 } else {
639 result[localBlockPrefix+"#"] = countFlatmapContainerValues(localBlockPrefix+"#", result)
640 }
641 }
642
643 return result, nil
644}
645
646func (d *InstanceDiff) applyAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
647 ty := attrSchema.Type
648 switch {
649 case ty.IsListType(), ty.IsTupleType(), ty.IsMapType():
650 return d.applyCollectionDiff(path, attrs, attrSchema)
651 case ty.IsSetType():
652 return d.applySetDiff(path, attrs, attrSchema)
653 default:
654 return d.applySingleAttrDiff(path, attrs, attrSchema)
655 }
656}
657
658func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
659 currentKey := strings.Join(path, ".")
660
661 attr := path[len(path)-1]
662
663 result := map[string]string{}
664 diff := d.Attributes[currentKey]
665 old, exists := attrs[currentKey]
666
667 if diff != nil && diff.NewComputed {
668 result[attr] = config.UnknownVariableValue
669 return result, nil
670 }
671
672 // "id" must exist and not be an empty string, or it must be unknown.
673 // This only applied to top-level "id" fields.
674 if attr == "id" && len(path) == 1 {
675 if old == "" {
676 result[attr] = config.UnknownVariableValue
677 } else {
678 result[attr] = old
679 }
680 return result, nil
681 }
682
683 // attribute diffs are sometimes missed, so assume no diff means keep the
684 // old value
685 if diff == nil {
686 if exists {
687 result[attr] = old
688 } else {
689 // We need required values, so set those with an empty value. It
690 // must be set in the config, since if it were missing it would have
691 // failed validation.
692 if attrSchema.Required {
693 // we only set a missing string here, since bool or number types
694 // would have distinct zero value which shouldn't have been
695 // lost.
696 if attrSchema.Type == cty.String {
697 result[attr] = ""
698 }
699 }
700 }
701 return result, nil
702 }
703
704 // check for missmatched diff values
705 if exists &&
706 old != diff.Old &&
707 old != config.UnknownVariableValue &&
708 diff.Old != config.UnknownVariableValue {
709 return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old)
710 }
711
712 if diff.NewRemoved {
713 // don't set anything in the new value
714 return map[string]string{}, nil
715 }
716
717 if diff.Old == diff.New && diff.New == "" {
718 // this can only be a valid empty string
719 if attrSchema.Type == cty.String {
720 result[attr] = ""
721 }
722 return result, nil
723 }
724
725 if attrSchema.Computed && diff.NewComputed {
726 result[attr] = config.UnknownVariableValue
727 return result, nil
728 }
729
730 result[attr] = diff.New
731
732 return result, nil
733}
734
735func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
736 result := map[string]string{}
737
738 prefix := ""
739 if len(path) > 1 {
740 prefix = strings.Join(path[:len(path)-1], ".") + "."
741 }
742
743 name := ""
744 if len(path) > 0 {
745 name = path[len(path)-1]
746 }
747
748 currentKey := prefix + name
749
750 // check the index first for special handling
751 for k, diff := range d.Attributes {
752 // check the index value, which can be set, and 0
753 if k == currentKey+".#" || k == currentKey+".%" || k == currentKey {
754 if diff.NewRemoved {
755 return result, nil
756 }
757
758 if diff.NewComputed {
759 result[k[len(prefix):]] = config.UnknownVariableValue
760 return result, nil
761 }
762
763 // do what the diff tells us to here, so that it's consistent with applies
764 if diff.New == "0" {
765 result[k[len(prefix):]] = "0"
766 return result, nil
767 }
768 }
769 }
770
771 // collect all the keys from the diff and the old state
772 noDiff := true
773 keys := map[string]bool{}
774 for k := range d.Attributes {
775 if !strings.HasPrefix(k, currentKey+".") {
776 continue
777 }
778 noDiff = false
779 keys[k] = true
780 }
781
782 noAttrs := true
783 for k := range attrs {
784 if !strings.HasPrefix(k, currentKey+".") {
785 continue
786 }
787 noAttrs = false
788 keys[k] = true
789 }
790
791 // If there's no diff and no attrs, then there's no value at all.
792 // This prevents an unexpected zero-count attribute in the attributes.
793 if noDiff && noAttrs {
794 return result, nil
795 }
796
797 idx := "#"
798 if attrSchema.Type.IsMapType() {
799 idx = "%"
800 }
801
802 for k := range keys {
803 // generate an schema placeholder for the values
804 elSchema := &configschema.Attribute{
805 Type: attrSchema.Type.ElementType(),
806 }
807
808 res, err := d.applySingleAttrDiff(append(path, k[len(currentKey)+1:]), attrs, elSchema)
809 if err != nil {
810 return result, err
811 }
812
813 for k, v := range res {
814 result[name+"."+k] = v
815 }
816 }
817
818 // Just like in nested list blocks, for simple lists we may need to fill in
819 // missing empty strings.
820 countKey := name + "." + idx
821 count := result[countKey]
822 length, _ := strconv.Atoi(count)
823
824 if count != "" && count != hcl2shim.UnknownVariableValue &&
825 attrSchema.Type.Equals(cty.List(cty.String)) {
826 // insert empty strings into missing indexes
827 for i := 0; i < length; i++ {
828 key := fmt.Sprintf("%s.%d", name, i)
829 if _, ok := result[key]; !ok {
830 result[key] = ""
831 }
832 }
833 }
834
835 // now check for truncation in any type of list
836 if attrSchema.Type.IsListType() {
837 for key := range result {
838 if key == countKey {
839 continue
840 }
841
842 if len(key) <= len(name)+1 {
843 // not sure what this is, but don't panic
844 continue
845 }
846
847 index := key[len(name)+1:]
848
849 // It is possible to have nested sets or maps, so look for another dot
850 dot := strings.Index(index, ".")
851 if dot > 0 {
852 index = index[:dot]
853 }
854
855 // This shouldn't have any more dots, since the element type is only string.
856 num, err := strconv.Atoi(index)
857 if err != nil {
858 log.Printf("[ERROR] bad list index in %q: %s", currentKey, err)
859 continue
860 }
861
862 if num >= length {
863 delete(result, key)
864 }
865 }
866 }
867
868 // Fill in the count value if it wasn't present in the diff for some reason,
869 // or if there is no count at all.
870 _, countDiff := d.Attributes[countKey]
871 if result[countKey] == "" || (!countDiff && len(keys) != len(result)) {
872 result[countKey] = countFlatmapContainerValues(countKey, result)
873 }
874
875 return result, nil
876}
877
878func (d *InstanceDiff) applySetDiff(path []string, attrs map[string]string, attrSchema *configschema.Attribute) (map[string]string, error) {
879 // We only need this special behavior for sets of object.
880 if !attrSchema.Type.ElementType().IsObjectType() {
881 // The normal collection apply behavior will work okay for this one, then.
882 return d.applyCollectionDiff(path, attrs, attrSchema)
883 }
884
885 // When we're dealing with a set of an object type we actually want to
886 // use our normal _block type_ apply behaviors, so we'll construct ourselves
887 // a synthetic schema that treats the object type as a block type and
888 // then delegate to our block apply method.
889 synthSchema := &configschema.Block{
890 Attributes: make(map[string]*configschema.Attribute),
891 }
892
893 for name, ty := range attrSchema.Type.ElementType().AttributeTypes() {
894 // We can safely make everything into an attribute here because in the
895 // event that there are nested set attributes we'll end up back in
896 // here again recursively and can then deal with the next level of
897 // expansion.
898 synthSchema.Attributes[name] = &configschema.Attribute{
899 Type: ty,
900 Optional: true,
901 }
902 }
903
904 parentPath := path[:len(path)-1]
905 childName := path[len(path)-1]
906 containerSchema := &configschema.Block{
907 BlockTypes: map[string]*configschema.NestedBlock{
908 childName: {
909 Nesting: configschema.NestingSet,
910 Block: *synthSchema,
911 },
912 },
913 }
914
915 return d.applyBlockDiff(parentPath, attrs, containerSchema)
916}
917
918// countFlatmapContainerValues returns the number of values in the flatmapped container
919// (set, map, list) indexed by key. The key argument is expected to include the
920// trailing ".#", or ".%".
921func countFlatmapContainerValues(key string, attrs map[string]string) string {
922 if len(key) < 3 || !(strings.HasSuffix(key, ".#") || strings.HasSuffix(key, ".%")) {
923 panic(fmt.Sprintf("invalid index value %q", key))
924 }
925
926 prefix := key[:len(key)-1]
927 items := map[string]int{}
928
929 for k := range attrs {
930 if k == key {
931 continue
932 }
933 if !strings.HasPrefix(k, prefix) {
934 continue
935 }
936
937 suffix := k[len(prefix):]
938 dot := strings.Index(suffix, ".")
939 if dot > 0 {
940 suffix = suffix[:dot]
941 }
942
943 items[suffix]++
944 }
945 return strconv.Itoa(len(items))
946}
947
387// ResourceAttrDiff is the diff of a single attribute of a resource. 948// ResourceAttrDiff is the diff of a single attribute of a resource.
388type ResourceAttrDiff struct { 949type ResourceAttrDiff struct {
389 Old string // Old Value 950 Old string // Old Value
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
index 10d9c22..48ed353 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval.go
@@ -2,7 +2,8 @@ package terraform
2 2
3import ( 3import (
4 "log" 4 "log"
5 "strings" 5
6 "github.com/hashicorp/terraform/tfdiags"
6) 7)
7 8
8// EvalNode is the interface that must be implemented by graph nodes to 9// EvalNode is the interface that must be implemented by graph nodes to
@@ -46,15 +47,21 @@ func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
46func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) { 47func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
47 path := "unknown" 48 path := "unknown"
48 if ctx != nil { 49 if ctx != nil {
49 path = strings.Join(ctx.Path(), ".") 50 path = ctx.Path().String()
51 }
52 if path == "" {
53 path = "<root>"
50 } 54 }
51 55
52 log.Printf("[TRACE] %s: eval: %T", path, n) 56 log.Printf("[TRACE] %s: eval: %T", path, n)
53 output, err := n.Eval(ctx) 57 output, err := n.Eval(ctx)
54 if err != nil { 58 if err != nil {
55 if _, ok := err.(EvalEarlyExitError); ok { 59 switch err.(type) {
56 log.Printf("[TRACE] %s: eval: %T, err: %s", path, n, err) 60 case EvalEarlyExitError:
57 } else { 61 log.Printf("[TRACE] %s: eval: %T, early exit err: %s", path, n, err)
62 case tfdiags.NonFatalError:
63 log.Printf("[WARN] %s: eval: %T, non-fatal err: %s", path, n, err)
64 default:
58 log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err) 65 log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
59 } 66 }
60 } 67 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
index b9b4806..09313f7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
@@ -3,119 +3,316 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "strconv" 6 "strings"
7 7
8 "github.com/hashicorp/go-multierror" 8 "github.com/hashicorp/go-multierror"
9 "github.com/hashicorp/terraform/config" 9 "github.com/hashicorp/hcl2/hcl"
10 "github.com/zclconf/go-cty/cty"
11
12 "github.com/hashicorp/terraform/addrs"
13 "github.com/hashicorp/terraform/configs"
14 "github.com/hashicorp/terraform/plans"
15 "github.com/hashicorp/terraform/plans/objchange"
16 "github.com/hashicorp/terraform/providers"
17 "github.com/hashicorp/terraform/provisioners"
18 "github.com/hashicorp/terraform/states"
19 "github.com/hashicorp/terraform/tfdiags"
10) 20)
11 21
12// EvalApply is an EvalNode implementation that writes the diff to 22// EvalApply is an EvalNode implementation that writes the diff to
13// the full diff. 23// the full diff.
14type EvalApply struct { 24type EvalApply struct {
15 Info *InstanceInfo 25 Addr addrs.ResourceInstance
16 State **InstanceState 26 Config *configs.Resource
17 Diff **InstanceDiff 27 Dependencies []addrs.Referenceable
18 Provider *ResourceProvider 28 State **states.ResourceInstanceObject
19 Output **InstanceState 29 Change **plans.ResourceInstanceChange
20 CreateNew *bool 30 ProviderAddr addrs.AbsProviderConfig
21 Error *error 31 Provider *providers.Interface
32 ProviderSchema **ProviderSchema
33 Output **states.ResourceInstanceObject
34 CreateNew *bool
35 Error *error
22} 36}
23 37
24// TODO: test 38// TODO: test
25func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { 39func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
26 diff := *n.Diff 40 var diags tfdiags.Diagnostics
41
42 change := *n.Change
27 provider := *n.Provider 43 provider := *n.Provider
28 state := *n.State 44 state := *n.State
45 absAddr := n.Addr.Absolute(ctx.Path())
29 46
30 // If we have no diff, we have nothing to do! 47 if state == nil {
31 if diff.Empty() { 48 state = &states.ResourceInstanceObject{}
32 log.Printf( 49 }
33 "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id) 50
34 return nil, nil 51 schema, _ := (*n.ProviderSchema).SchemaForResourceType(n.Addr.Resource.Mode, n.Addr.Resource.Type)
52 if schema == nil {
53 // Should be caught during validation, so we don't bother with a pretty error here
54 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
55 }
56
57 if n.CreateNew != nil {
58 *n.CreateNew = (change.Action == plans.Create || change.Action.IsReplace())
35 } 59 }
36 60
37 // Remove any output values from the diff 61 configVal := cty.NullVal(cty.DynamicPseudoType)
38 for k, ad := range diff.CopyAttributes() { 62 if n.Config != nil {
39 if ad.Type == DiffAttrOutput { 63 var configDiags tfdiags.Diagnostics
40 diff.DelAttribute(k) 64 keyData := EvalDataForInstanceKey(n.Addr.Key)
65 configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData)
66 diags = diags.Append(configDiags)
67 if configDiags.HasErrors() {
68 return nil, diags.Err()
41 } 69 }
42 } 70 }
43 71
44 // If the state is nil, make it non-nil 72 log.Printf("[DEBUG] %s: applying the planned %s change", n.Addr.Absolute(ctx.Path()), change.Action)
45 if state == nil { 73 resp := provider.ApplyResourceChange(providers.ApplyResourceChangeRequest{
46 state = new(InstanceState) 74 TypeName: n.Addr.Resource.Type,
75 PriorState: change.Before,
76 Config: configVal,
77 PlannedState: change.After,
78 PlannedPrivate: change.Private,
79 })
80 applyDiags := resp.Diagnostics
81 if n.Config != nil {
82 applyDiags = applyDiags.InConfigBody(n.Config.Config)
47 } 83 }
48 state.init() 84 diags = diags.Append(applyDiags)
85
86 // Even if there are errors in the returned diagnostics, the provider may
87 // have returned a _partial_ state for an object that already exists but
88 // failed to fully configure, and so the remaining code must always run
89 // to completion but must be defensive against the new value being
90 // incomplete.
91 newVal := resp.NewState
92
93 if newVal == cty.NilVal {
94 // Providers are supposed to return a partial new value even when errors
95 // occur, but sometimes they don't and so in that case we'll patch that up
96 // by just using the prior state, so we'll at least keep track of the
97 // object for the user to retry.
98 newVal = change.Before
99
100 // As a special case, we'll set the new value to null if it looks like
101 // we were trying to execute a delete, because the provider in this case
102 // probably left the newVal unset intending it to be interpreted as "null".
103 if change.After.IsNull() {
104 newVal = cty.NullVal(schema.ImpliedType())
105 }
49 106
50 // Flag if we're creating a new instance 107 // Ideally we'd produce an error or warning here if newVal is nil and
51 if n.CreateNew != nil { 108 // there are no errors in diags, because that indicates a buggy
52 *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew() 109 // provider not properly reporting its result, but unfortunately many
110 // of our historical test mocks behave in this way and so producing
111 // a diagnostic here fails hundreds of tests. Instead, we must just
112 // silently retain the old value for now. Returning a nil value with
113 // no errors is still always considered a bug in the provider though,
114 // and should be fixed for any "real" providers that do it.
53 } 115 }
54 116
55 // With the completed diff, apply! 117 var conformDiags tfdiags.Diagnostics
56 log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id) 118 for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) {
57 state, err := provider.Apply(n.Info, state, diff) 119 conformDiags = conformDiags.Append(tfdiags.Sourceless(
58 if state == nil { 120 tfdiags.Error,
59 state = new(InstanceState) 121 "Provider produced invalid object",
122 fmt.Sprintf(
123 "Provider %q produced an invalid value after apply for %s. The result cannot not be saved in the Terraform state.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
124 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
125 ),
126 ))
127 }
128 diags = diags.Append(conformDiags)
129 if conformDiags.HasErrors() {
130 // Bail early in this particular case, because an object that doesn't
131 // conform to the schema can't be saved in the state anyway -- the
132 // serializer will reject it.
133 return nil, diags.Err()
134 }
135
136 // After this point we have a type-conforming result object and so we
137 // must always run to completion to ensure it can be saved. If n.Error
138 // is set then we must not return a non-nil error, in order to allow
139 // evaluation to continue to a later point where our state object will
140 // be saved.
141
142 // By this point there must not be any unknown values remaining in our
143 // object, because we've applied the change and we can't save unknowns
144 // in our persistent state. If any are present then we will indicate an
145 // error (which is always a bug in the provider) but we will also replace
146 // them with nulls so that we can successfully save the portions of the
147 // returned value that are known.
148 if !newVal.IsWhollyKnown() {
149 // To generate better error messages, we'll go for a walk through the
150 // value and make a separate diagnostic for each unknown value we
151 // find.
152 cty.Walk(newVal, func(path cty.Path, val cty.Value) (bool, error) {
153 if !val.IsKnown() {
154 pathStr := tfdiags.FormatCtyPath(path)
155 diags = diags.Append(tfdiags.Sourceless(
156 tfdiags.Error,
157 "Provider returned invalid result object after apply",
158 fmt.Sprintf(
159 "After the apply operation, the provider still indicated an unknown value for %s%s. All values must be known after apply, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save the other known object values in the state.",
160 n.Addr.Absolute(ctx.Path()), pathStr,
161 ),
162 ))
163 }
164 return true, nil
165 })
166
167 // NOTE: This operation can potentially be lossy if there are multiple
168 // elements in a set that differ only by unknown values: after
169 // replacing with null these will be merged together into a single set
170 // element. Since we can only get here in the presence of a provider
171 // bug, we accept this because storing a result here is always a
172 // best-effort sort of thing.
173 newVal = cty.UnknownAsNull(newVal)
174 }
175
176 if change.Action != plans.Delete && !diags.HasErrors() {
177 // Only values that were marked as unknown in the planned value are allowed
178 // to change during the apply operation. (We do this after the unknown-ness
179 // check above so that we also catch anything that became unknown after
180 // being known during plan.)
181 //
182 // If we are returning other errors anyway then we'll give this
183 // a pass since the other errors are usually the explanation for
184 // this one and so it's more helpful to let the user focus on the
185 // root cause rather than distract with this extra problem.
186 if errs := objchange.AssertObjectCompatible(schema, change.After, newVal); len(errs) > 0 {
187 if resp.LegacyTypeSystem {
188 // The shimming of the old type system in the legacy SDK is not precise
189 // enough to pass this consistency check, so we'll give it a pass here,
190 // but we will generate a warning about it so that we are more likely
191 // to notice in the logs if an inconsistency beyond the type system
192 // leads to a downstream provider failure.
193 var buf strings.Builder
194 fmt.Fprintf(&buf, "[WARN] Provider %q produced an unexpected new value for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr)
195 for _, err := range errs {
196 fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
197 }
198 log.Print(buf.String())
199
200 // The sort of inconsistency we won't catch here is if a known value
201 // in the plan is changed during apply. That can cause downstream
202 // problems because a dependent resource would make its own plan based
203 // on the planned value, and thus get a different result during the
204 // apply phase. This will usually lead to a "Provider produced invalid plan"
205 // error that incorrectly blames the downstream resource for the change.
206
207 } else {
208 for _, err := range errs {
209 diags = diags.Append(tfdiags.Sourceless(
210 tfdiags.Error,
211 "Provider produced inconsistent result after apply",
212 fmt.Sprintf(
213 "When applying changes to %s, provider %q produced an unexpected new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
214 absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err),
215 ),
216 ))
217 }
218 }
219 }
220 }
221
222 // If a provider returns a null or non-null object at the wrong time then
223 // we still want to save that but it often causes some confusing behaviors
224 // where it seems like Terraform is failing to take any action at all,
225 // so we'll generate some errors to draw attention to it.
226 if !diags.HasErrors() {
227 if change.Action == plans.Delete && !newVal.IsNull() {
228 diags = diags.Append(tfdiags.Sourceless(
229 tfdiags.Error,
230 "Provider returned invalid result object after apply",
231 fmt.Sprintf(
232 "After applying a %s plan, the provider returned a non-null object for %s. Destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository. Terraform will still save this errant object in the state for debugging and recovery.",
233 change.Action, n.Addr.Absolute(ctx.Path()),
234 ),
235 ))
236 }
237 if change.Action != plans.Delete && newVal.IsNull() {
238 diags = diags.Append(tfdiags.Sourceless(
239 tfdiags.Error,
240 "Provider returned invalid result object after apply",
241 fmt.Sprintf(
242 "After applying a %s plan, the provider returned a null object for %s. Only destroying should always produce a null value, so this is always a bug in the provider and should be reported in the provider's own repository.",
243 change.Action, n.Addr.Absolute(ctx.Path()),
244 ),
245 ))
246 }
60 } 247 }
61 state.init()
62 248
63 // Force the "id" attribute to be our ID 249 // Sometimes providers return a null value when an operation fails for some
64 if state.ID != "" { 250 // reason, but we'd rather keep the prior state so that the error can be
65 state.Attributes["id"] = state.ID 251 // corrected on a subsequent run. We must only do this for null new value
252 // though, or else we may discard partial updates the provider was able to
253 // complete.
254 if diags.HasErrors() && newVal.IsNull() {
255 // Otherwise, we'll continue but using the prior state as the new value,
256 // making this effectively a no-op. If the item really _has_ been
257 // deleted then our next refresh will detect that and fix it up.
258 // If change.Action is Create then change.Before will also be null,
259 // which is fine.
260 newVal = change.Before
66 } 261 }
67 262
68 // If the value is the unknown variable value, then it is an error. 263 var newState *states.ResourceInstanceObject
69 // In this case we record the error and remove it from the state 264 if !newVal.IsNull() { // null value indicates that the object is deleted, so we won't set a new state in that case
70 for ak, av := range state.Attributes { 265 newState = &states.ResourceInstanceObject{
71 if av == config.UnknownVariableValue { 266 Status: states.ObjectReady,
72 err = multierror.Append(err, fmt.Errorf( 267 Value: newVal,
73 "Attribute with unknown value: %s", ak)) 268 Private: resp.Private,
74 delete(state.Attributes, ak) 269 Dependencies: n.Dependencies, // Should be populated by the caller from the StateDependencies method on the resource instance node
75 } 270 }
76 } 271 }
77 272
78 // Write the final state 273 // Write the final state
79 if n.Output != nil { 274 if n.Output != nil {
80 *n.Output = state 275 *n.Output = newState
81 } 276 }
82 277
83 // If there are no errors, then we append it to our output error 278 if diags.HasErrors() {
84 // if we have one, otherwise we just output it. 279 // If the caller provided an error pointer then they are expected to
85 if err != nil { 280 // handle the error some other way and we treat our own result as
281 // success.
86 if n.Error != nil { 282 if n.Error != nil {
87 helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error()) 283 err := diags.Err()
88 *n.Error = multierror.Append(*n.Error, helpfulErr) 284 *n.Error = err
89 } else { 285 log.Printf("[DEBUG] %s: apply errored, but we're indicating that via the Error pointer rather than returning it: %s", n.Addr.Absolute(ctx.Path()), err)
90 return nil, err 286 return nil, nil
91 } 287 }
92 } 288 }
93 289
94 return nil, nil 290 return nil, diags.ErrWithWarnings()
95} 291}
96 292
97// EvalApplyPre is an EvalNode implementation that does the pre-Apply work 293// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
98type EvalApplyPre struct { 294type EvalApplyPre struct {
99 Info *InstanceInfo 295 Addr addrs.ResourceInstance
100 State **InstanceState 296 Gen states.Generation
101 Diff **InstanceDiff 297 State **states.ResourceInstanceObject
298 Change **plans.ResourceInstanceChange
102} 299}
103 300
104// TODO: test 301// TODO: test
105func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) { 302func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
106 state := *n.State 303 change := *n.Change
107 diff := *n.Diff 304 absAddr := n.Addr.Absolute(ctx.Path())
108 305
109 // If the state is nil, make it non-nil 306 if change == nil {
110 if state == nil { 307 panic(fmt.Sprintf("EvalApplyPre for %s called with nil Change", absAddr))
111 state = new(InstanceState)
112 } 308 }
113 state.init()
114 309
115 if resourceHasUserVisibleApply(n.Info) { 310 if resourceHasUserVisibleApply(n.Addr) {
116 // Call post-apply hook 311 priorState := change.Before
312 plannedNewState := change.After
313
117 err := ctx.Hook(func(h Hook) (HookAction, error) { 314 err := ctx.Hook(func(h Hook) (HookAction, error) {
118 return h.PreApply(n.Info, state, diff) 315 return h.PreApply(absAddr, n.Gen, change.Action, priorState, plannedNewState)
119 }) 316 })
120 if err != nil { 317 if err != nil {
121 return nil, err 318 return nil, err
@@ -127,8 +324,9 @@ func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
127 324
128// EvalApplyPost is an EvalNode implementation that does the post-Apply work 325// EvalApplyPost is an EvalNode implementation that does the post-Apply work
129type EvalApplyPost struct { 326type EvalApplyPost struct {
130 Info *InstanceInfo 327 Addr addrs.ResourceInstance
131 State **InstanceState 328 Gen states.Generation
329 State **states.ResourceInstanceObject
132 Error *error 330 Error *error
133} 331}
134 332
@@ -136,33 +334,93 @@ type EvalApplyPost struct {
136func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) { 334func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
137 state := *n.State 335 state := *n.State
138 336
139 if resourceHasUserVisibleApply(n.Info) { 337 if resourceHasUserVisibleApply(n.Addr) {
140 // Call post-apply hook 338 absAddr := n.Addr.Absolute(ctx.Path())
141 err := ctx.Hook(func(h Hook) (HookAction, error) { 339 var newState cty.Value
142 return h.PostApply(n.Info, state, *n.Error) 340 if state != nil {
341 newState = state.Value
342 } else {
343 newState = cty.NullVal(cty.DynamicPseudoType)
344 }
345 var err error
346 if n.Error != nil {
347 err = *n.Error
348 }
349
350 hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
351 return h.PostApply(absAddr, n.Gen, newState, err)
143 }) 352 })
144 if err != nil { 353 if hookErr != nil {
145 return nil, err 354 return nil, hookErr
146 } 355 }
147 } 356 }
148 357
149 return nil, *n.Error 358 return nil, *n.Error
150} 359}
151 360
361// EvalMaybeTainted is an EvalNode that takes the planned change, new value,
362// and possible error from an apply operation and produces a new instance
363// object marked as tainted if it appears that a create operation has failed.
364//
365// This EvalNode never returns an error, to ensure that a subsequent EvalNode
366// can still record the possibly-tainted object in the state.
367type EvalMaybeTainted struct {
368 Addr addrs.ResourceInstance
369 Gen states.Generation
370 Change **plans.ResourceInstanceChange
371 State **states.ResourceInstanceObject
372 Error *error
373
374 // If StateOutput is not nil, its referent will be assigned either the same
375 // pointer as State or a new object with its status set as Tainted,
376 // depending on whether an error is given and if this was a create action.
377 StateOutput **states.ResourceInstanceObject
378}
379
380// TODO: test
381func (n *EvalMaybeTainted) Eval(ctx EvalContext) (interface{}, error) {
382 state := *n.State
383 change := *n.Change
384 err := *n.Error
385
386 if state != nil && state.Status == states.ObjectTainted {
387 log.Printf("[TRACE] EvalMaybeTainted: %s was already tainted, so nothing to do", n.Addr.Absolute(ctx.Path()))
388 return nil, nil
389 }
390
391 if n.StateOutput != nil {
392 if err != nil && change.Action == plans.Create {
393 // If there are errors during a _create_ then the object is
394 // in an undefined state, and so we'll mark it as tainted so
395 // we can try again on the next run.
396 //
397 // We don't do this for other change actions because errors
398 // during updates will often not change the remote object at all.
399 // If there _were_ changes prior to the error, it's the provider's
400 // responsibility to record the effect of those changes in the
401 // object value it returned.
402 log.Printf("[TRACE] EvalMaybeTainted: %s encountered an error during creation, so it is now marked as tainted", n.Addr.Absolute(ctx.Path()))
403 *n.StateOutput = state.AsTainted()
404 } else {
405 *n.StateOutput = state
406 }
407 }
408
409 return nil, nil
410}
411
152// resourceHasUserVisibleApply returns true if the given resource is one where 412// resourceHasUserVisibleApply returns true if the given resource is one where
153// apply actions should be exposed to the user. 413// apply actions should be exposed to the user.
154// 414//
155// Certain resources do apply actions only as an implementation detail, so 415// Certain resources do apply actions only as an implementation detail, so
156// these should not be advertised to code outside of this package. 416// these should not be advertised to code outside of this package.
157func resourceHasUserVisibleApply(info *InstanceInfo) bool { 417func resourceHasUserVisibleApply(addr addrs.ResourceInstance) bool {
158 addr := info.ResourceAddress()
159
160 // Only managed resources have user-visible apply actions. 418 // Only managed resources have user-visible apply actions.
161 // In particular, this excludes data resources since we "apply" these 419 // In particular, this excludes data resources since we "apply" these
162 // only as an implementation detail of removing them from state when 420 // only as an implementation detail of removing them from state when
163 // they are destroyed. (When reading, they don't get here at all because 421 // they are destroyed. (When reading, they don't get here at all because
164 // we present them as "Refresh" actions.) 422 // we present them as "Refresh" actions.)
165 return addr.Mode == config.ManagedResourceMode 423 return addr.ContainingResource().Mode == addrs.ManagedResourceMode
166} 424}
167 425
168// EvalApplyProvisioners is an EvalNode implementation that executes 426// EvalApplyProvisioners is an EvalNode implementation that executes
@@ -171,23 +429,33 @@ func resourceHasUserVisibleApply(info *InstanceInfo) bool {
171// TODO(mitchellh): This should probably be split up into a more fine-grained 429// TODO(mitchellh): This should probably be split up into a more fine-grained
172// ApplyProvisioner (single) that is looped over. 430// ApplyProvisioner (single) that is looped over.
173type EvalApplyProvisioners struct { 431type EvalApplyProvisioners struct {
174 Info *InstanceInfo 432 Addr addrs.ResourceInstance
175 State **InstanceState 433 State **states.ResourceInstanceObject
176 Resource *config.Resource 434 ResourceConfig *configs.Resource
177 InterpResource *Resource
178 CreateNew *bool 435 CreateNew *bool
179 Error *error 436 Error *error
180 437
181 // When is the type of provisioner to run at this point 438 // When is the type of provisioner to run at this point
182 When config.ProvisionerWhen 439 When configs.ProvisionerWhen
183} 440}
184 441
185// TODO: test 442// TODO: test
186func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) { 443func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
444 absAddr := n.Addr.Absolute(ctx.Path())
187 state := *n.State 445 state := *n.State
188 446 if state == nil {
189 if n.CreateNew != nil && !*n.CreateNew { 447 log.Printf("[TRACE] EvalApplyProvisioners: %s has no state, so skipping provisioners", n.Addr)
448 return nil, nil
449 }
450 if n.When == configs.ProvisionerWhenCreate && n.CreateNew != nil && !*n.CreateNew {
190 // If we're not creating a new resource, then don't run provisioners 451 // If we're not creating a new resource, then don't run provisioners
452 log.Printf("[TRACE] EvalApplyProvisioners: %s is not freshly-created, so no provisioning is required", n.Addr)
453 return nil, nil
454 }
455 if state.Status == states.ObjectTainted {
456 // No point in provisioning an object that is already tainted, since
457 // it's going to get recreated on the next apply anyway.
458 log.Printf("[TRACE] EvalApplyProvisioners: %s is tainted, so skipping provisioning", n.Addr)
191 return nil, nil 459 return nil, nil
192 } 460 }
193 461
@@ -197,14 +465,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
197 return nil, nil 465 return nil, nil
198 } 466 }
199 467
200 // taint tells us whether to enable tainting.
201 taint := n.When == config.ProvisionerWhenCreate
202
203 if n.Error != nil && *n.Error != nil { 468 if n.Error != nil && *n.Error != nil {
204 if taint {
205 state.Tainted = true
206 }
207
208 // We're already tainted, so just return out 469 // We're already tainted, so just return out
209 return nil, nil 470 return nil, nil
210 } 471 }
@@ -212,7 +473,7 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
212 { 473 {
213 // Call pre hook 474 // Call pre hook
214 err := ctx.Hook(func(h Hook) (HookAction, error) { 475 err := ctx.Hook(func(h Hook) (HookAction, error) {
215 return h.PreProvisionResource(n.Info, state) 476 return h.PreProvisionInstance(absAddr, state.Value)
216 }) 477 })
217 if err != nil { 478 if err != nil {
218 return nil, err 479 return nil, err
@@ -223,18 +484,19 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
223 // if we have one, otherwise we just output it. 484 // if we have one, otherwise we just output it.
224 err := n.apply(ctx, provs) 485 err := n.apply(ctx, provs)
225 if err != nil { 486 if err != nil {
226 if taint {
227 state.Tainted = true
228 }
229
230 *n.Error = multierror.Append(*n.Error, err) 487 *n.Error = multierror.Append(*n.Error, err)
231 return nil, err 488 if n.Error == nil {
489 return nil, err
490 } else {
491 log.Printf("[TRACE] EvalApplyProvisioners: %s provisioning failed, but we will continue anyway at the caller's request", absAddr)
492 return nil, nil
493 }
232 } 494 }
233 495
234 { 496 {
235 // Call post hook 497 // Call post hook
236 err := ctx.Hook(func(h Hook) (HookAction, error) { 498 err := ctx.Hook(func(h Hook) (HookAction, error) {
237 return h.PostProvisionResource(n.Info, state) 499 return h.PostProvisionInstance(absAddr, state.Value)
238 }) 500 })
239 if err != nil { 501 if err != nil {
240 return nil, err 502 return nil, err
@@ -246,18 +508,18 @@ func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
246 508
247// filterProvisioners filters the provisioners on the resource to only 509// filterProvisioners filters the provisioners on the resource to only
248// the provisioners specified by the "when" option. 510// the provisioners specified by the "when" option.
249func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner { 511func (n *EvalApplyProvisioners) filterProvisioners() []*configs.Provisioner {
250 // Fast path the zero case 512 // Fast path the zero case
251 if n.Resource == nil { 513 if n.ResourceConfig == nil || n.ResourceConfig.Managed == nil {
252 return nil 514 return nil
253 } 515 }
254 516
255 if len(n.Resource.Provisioners) == 0 { 517 if len(n.ResourceConfig.Managed.Provisioners) == 0 {
256 return nil 518 return nil
257 } 519 }
258 520
259 result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners)) 521 result := make([]*configs.Provisioner, 0, len(n.ResourceConfig.Managed.Provisioners))
260 for _, p := range n.Resource.Provisioners { 522 for _, p := range n.ResourceConfig.Managed.Provisioners {
261 if p.When == n.When { 523 if p.When == n.When {
262 result = append(result, p) 524 result = append(result, p)
263 } 525 }
@@ -266,64 +528,71 @@ func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
266 return result 528 return result
267} 529}
268 530
269func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error { 531func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisioner) error {
270 state := *n.State 532 var diags tfdiags.Diagnostics
271 533 instanceAddr := n.Addr
272 // Store the original connection info, restore later 534 absAddr := instanceAddr.Absolute(ctx.Path())
273 origConnInfo := state.Ephemeral.ConnInfo 535
274 defer func() { 536 // If there's a connection block defined directly inside the resource block
275 state.Ephemeral.ConnInfo = origConnInfo 537 // then it'll serve as a base connection configuration for all of the
276 }() 538 // provisioners.
539 var baseConn hcl.Body
540 if n.ResourceConfig.Managed != nil && n.ResourceConfig.Managed.Connection != nil {
541 baseConn = n.ResourceConfig.Managed.Connection.Config
542 }
277 543
278 for _, prov := range provs { 544 for _, prov := range provs {
545 log.Printf("[TRACE] EvalApplyProvisioners: provisioning %s with %q", absAddr, prov.Type)
546
279 // Get the provisioner 547 // Get the provisioner
280 provisioner := ctx.Provisioner(prov.Type) 548 provisioner := ctx.Provisioner(prov.Type)
549 schema := ctx.ProvisionerSchema(prov.Type)
281 550
282 // Interpolate the provisioner config 551 keyData := EvalDataForInstanceKey(instanceAddr.Key)
283 provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
284 if err != nil {
285 return err
286 }
287 552
288 // Interpolate the conn info, since it may contain variables 553 // Evaluate the main provisioner configuration.
289 connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource) 554 config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData)
290 if err != nil { 555 diags = diags.Append(configDiags)
291 return err 556
557 // If the provisioner block contains a connection block of its own then
558 // it can override the base connection configuration, if any.
559 var localConn hcl.Body
560 if prov.Connection != nil {
561 localConn = prov.Connection.Config
292 } 562 }
293 563
294 // Merge the connection information 564 var connBody hcl.Body
295 overlay := make(map[string]string) 565 switch {
296 if origConnInfo != nil { 566 case baseConn != nil && localConn != nil:
297 for k, v := range origConnInfo { 567 // Our standard merging logic applies here, similar to what we do
298 overlay[k] = v 568 // with _override.tf configuration files: arguments from the
299 } 569 // base connection block will be masked by any arguments of the
570 // same name in the local connection block.
571 connBody = configs.MergeBodies(baseConn, localConn)
572 case baseConn != nil:
573 connBody = baseConn
574 case localConn != nil:
575 connBody = localConn
300 } 576 }
301 for k, v := range connInfo.Config { 577
302 switch vt := v.(type) { 578 // start with an empty connInfo
303 case string: 579 connInfo := cty.NullVal(connectionBlockSupersetSchema.ImpliedType())
304 overlay[k] = vt 580
305 case int64: 581 if connBody != nil {
306 overlay[k] = strconv.FormatInt(vt, 10) 582 var connInfoDiags tfdiags.Diagnostics
307 case int32: 583 connInfo, _, connInfoDiags = ctx.EvaluateBlock(connBody, connectionBlockSupersetSchema, instanceAddr, keyData)
308 overlay[k] = strconv.FormatInt(int64(vt), 10) 584 diags = diags.Append(connInfoDiags)
309 case int: 585 if diags.HasErrors() {
310 overlay[k] = strconv.FormatInt(int64(vt), 10) 586 // "on failure continue" setting only applies to failures of the
311 case float32: 587 // provisioner itself, not to invalid configuration.
312 overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32) 588 return diags.Err()
313 case float64:
314 overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
315 case bool:
316 overlay[k] = strconv.FormatBool(vt)
317 default:
318 overlay[k] = fmt.Sprintf("%v", vt)
319 } 589 }
320 } 590 }
321 state.Ephemeral.ConnInfo = overlay
322 591
323 { 592 {
324 // Call pre hook 593 // Call pre hook
325 err := ctx.Hook(func(h Hook) (HookAction, error) { 594 err := ctx.Hook(func(h Hook) (HookAction, error) {
326 return h.PreProvision(n.Info, prov.Type) 595 return h.PreProvisionInstanceStep(absAddr, prov.Type)
327 }) 596 })
328 if err != nil { 597 if err != nil {
329 return err 598 return err
@@ -333,31 +602,37 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision
333 // The output function 602 // The output function
334 outputFn := func(msg string) { 603 outputFn := func(msg string) {
335 ctx.Hook(func(h Hook) (HookAction, error) { 604 ctx.Hook(func(h Hook) (HookAction, error) {
336 h.ProvisionOutput(n.Info, prov.Type, msg) 605 h.ProvisionOutput(absAddr, prov.Type, msg)
337 return HookActionContinue, nil 606 return HookActionContinue, nil
338 }) 607 })
339 } 608 }
340 609
341 // Invoke the Provisioner
342 output := CallbackUIOutput{OutputFn: outputFn} 610 output := CallbackUIOutput{OutputFn: outputFn}
343 applyErr := provisioner.Apply(&output, state, provConfig) 611 resp := provisioner.ProvisionResource(provisioners.ProvisionResourceRequest{
612 Config: config,
613 Connection: connInfo,
614 UIOutput: &output,
615 })
616 applyDiags := resp.Diagnostics.InConfigBody(prov.Config)
344 617
345 // Call post hook 618 // Call post hook
346 hookErr := ctx.Hook(func(h Hook) (HookAction, error) { 619 hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
347 return h.PostProvision(n.Info, prov.Type, applyErr) 620 return h.PostProvisionInstanceStep(absAddr, prov.Type, applyDiags.Err())
348 }) 621 })
349 622
350 // Handle the error before we deal with the hook 623 switch prov.OnFailure {
351 if applyErr != nil { 624 case configs.ProvisionerOnFailureContinue:
352 // Determine failure behavior 625 if applyDiags.HasErrors() {
353 switch prov.OnFailure { 626 log.Printf("[WARN] Errors while provisioning %s with %q, but continuing as requested in configuration", n.Addr, prov.Type)
354 case config.ProvisionerOnFailureContinue: 627 } else {
355 log.Printf( 628 // Maybe there are warnings that we still want to see
356 "[INFO] apply: %s [%s]: error during provision, continue requested", 629 diags = diags.Append(applyDiags)
357 n.Info.Id, prov.Type) 630 }
358 631 default:
359 case config.ProvisionerOnFailureFail: 632 diags = diags.Append(applyDiags)
360 return applyErr 633 if applyDiags.HasErrors() {
634 log.Printf("[WARN] Errors while provisioning %s with %q, so aborting", n.Addr, prov.Type)
635 return diags.Err()
361 } 636 }
362 } 637 }
363 638
@@ -367,6 +642,5 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provision
367 } 642 }
368 } 643 }
369 644
370 return nil 645 return diags.ErrWithWarnings()
371
372} 646}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
index 715e79e..4dff0c8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
@@ -3,33 +3,44 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/plans"
7
8 "github.com/hashicorp/hcl2/hcl"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/configs"
12 "github.com/hashicorp/terraform/tfdiags"
7) 13)
8 14
9// EvalPreventDestroy is an EvalNode implementation that returns an 15// EvalPreventDestroy is an EvalNode implementation that returns an
10// error if a resource has PreventDestroy configured and the diff 16// error if a resource has PreventDestroy configured and the diff
11// would destroy the resource. 17// would destroy the resource.
12type EvalCheckPreventDestroy struct { 18type EvalCheckPreventDestroy struct {
13 Resource *config.Resource 19 Addr addrs.ResourceInstance
14 ResourceId string 20 Config *configs.Resource
15 Diff **InstanceDiff 21 Change **plans.ResourceInstanceChange
16} 22}
17 23
18func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { 24func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
19 if n.Diff == nil || *n.Diff == nil || n.Resource == nil { 25 if n.Change == nil || *n.Change == nil || n.Config == nil || n.Config.Managed == nil {
20 return nil, nil 26 return nil, nil
21 } 27 }
22 28
23 diff := *n.Diff 29 change := *n.Change
24 preventDestroy := n.Resource.Lifecycle.PreventDestroy 30 preventDestroy := n.Config.Managed.PreventDestroy
25 31
26 if diff.GetDestroy() && preventDestroy { 32 if (change.Action == plans.Delete || change.Action.IsReplace()) && preventDestroy {
27 resourceId := n.ResourceId 33 var diags tfdiags.Diagnostics
28 if resourceId == "" { 34 diags = diags.Append(&hcl.Diagnostic{
29 resourceId = n.Resource.Id() 35 Severity: hcl.DiagError,
30 } 36 Summary: "Instance cannot be destroyed",
31 37 Detail: fmt.Sprintf(
32 return nil, fmt.Errorf(preventDestroyErrStr, resourceId) 38 "Resource %s has lifecycle.prevent_destroy set, but the plan calls for this resource to be destroyed. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or reduce the scope of the plan using the -target flag.",
39 n.Addr.Absolute(ctx.Path()).String(),
40 ),
41 Subject: &n.Config.DeclRange,
42 })
43 return nil, diags.Err()
33 } 44 }
34 45
35 return nil, nil 46 return nil, nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
index 86481de..08f3059 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
@@ -1,9 +1,16 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "sync" 4 "github.com/hashicorp/hcl2/hcl"
5 5 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/lang"
8 "github.com/hashicorp/terraform/plans"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/provisioners"
11 "github.com/hashicorp/terraform/states"
12 "github.com/hashicorp/terraform/tfdiags"
13 "github.com/zclconf/go-cty/cty"
7) 14)
8 15
9// EvalContext is the interface that is given to eval nodes to execute. 16// EvalContext is the interface that is given to eval nodes to execute.
@@ -13,7 +20,7 @@ type EvalContext interface {
13 Stopped() <-chan struct{} 20 Stopped() <-chan struct{}
14 21
15 // Path is the current module path. 22 // Path is the current module path.
16 Path() []string 23 Path() addrs.ModuleInstance
17 24
18 // Hook is used to call hook methods. The callback is called for each 25 // Hook is used to call hook methods. The callback is called for each
19 // hook and should return the hook action to take and the error. 26 // hook and should return the hook action to take and the error.
@@ -22,68 +29,105 @@ type EvalContext interface {
22 // Input is the UIInput object for interacting with the UI. 29 // Input is the UIInput object for interacting with the UI.
23 Input() UIInput 30 Input() UIInput
24 31
25 // InitProvider initializes the provider with the given type and name, and 32 // InitProvider initializes the provider with the given type and address, and
26 // returns the implementation of the resource provider or an error. 33 // returns the implementation of the resource provider or an error.
27 // 34 //
28 // It is an error to initialize the same provider more than once. 35 // It is an error to initialize the same provider more than once.
29 InitProvider(typ string, name string) (ResourceProvider, error) 36 InitProvider(typ string, addr addrs.ProviderConfig) (providers.Interface, error)
30 37
31 // Provider gets the provider instance with the given name (already 38 // Provider gets the provider instance with the given address (already
32 // initialized) or returns nil if the provider isn't initialized. 39 // initialized) or returns nil if the provider isn't initialized.
33 Provider(string) ResourceProvider 40 //
41 // This method expects an _absolute_ provider configuration address, since
42 // resources in one module are able to use providers from other modules.
43 // InitProvider must've been called on the EvalContext of the module
44 // that owns the given provider before calling this method.
45 Provider(addrs.AbsProviderConfig) providers.Interface
46
47 // ProviderSchema retrieves the schema for a particular provider, which
48 // must have already been initialized with InitProvider.
49 //
50 // This method expects an _absolute_ provider configuration address, since
51 // resources in one module are able to use providers from other modules.
52 ProviderSchema(addrs.AbsProviderConfig) *ProviderSchema
34 53
35 // CloseProvider closes provider connections that aren't needed anymore. 54 // CloseProvider closes provider connections that aren't needed anymore.
36 CloseProvider(string) error 55 CloseProvider(addrs.ProviderConfig) error
37 56
38 // ConfigureProvider configures the provider with the given 57 // ConfigureProvider configures the provider with the given
39 // configuration. This is a separate context call because this call 58 // configuration. This is a separate context call because this call
40 // is used to store the provider configuration for inheritance lookups 59 // is used to store the provider configuration for inheritance lookups
41 // with ParentProviderConfig(). 60 // with ParentProviderConfig().
42 ConfigureProvider(string, *ResourceConfig) error 61 ConfigureProvider(addrs.ProviderConfig, cty.Value) tfdiags.Diagnostics
43 62
44 // ProviderInput and SetProviderInput are used to configure providers 63 // ProviderInput and SetProviderInput are used to configure providers
45 // from user input. 64 // from user input.
46 ProviderInput(string) map[string]interface{} 65 ProviderInput(addrs.ProviderConfig) map[string]cty.Value
47 SetProviderInput(string, map[string]interface{}) 66 SetProviderInput(addrs.ProviderConfig, map[string]cty.Value)
48 67
49 // InitProvisioner initializes the provisioner with the given name and 68 // InitProvisioner initializes the provisioner with the given name and
50 // returns the implementation of the resource provisioner or an error. 69 // returns the implementation of the resource provisioner or an error.
51 // 70 //
52 // It is an error to initialize the same provisioner more than once. 71 // It is an error to initialize the same provisioner more than once.
53 InitProvisioner(string) (ResourceProvisioner, error) 72 InitProvisioner(string) (provisioners.Interface, error)
54 73
55 // Provisioner gets the provisioner instance with the given name (already 74 // Provisioner gets the provisioner instance with the given name (already
56 // initialized) or returns nil if the provisioner isn't initialized. 75 // initialized) or returns nil if the provisioner isn't initialized.
57 Provisioner(string) ResourceProvisioner 76 Provisioner(string) provisioners.Interface
77
78 // ProvisionerSchema retrieves the main configuration schema for a
79 // particular provisioner, which must have already been initialized with
80 // InitProvisioner.
81 ProvisionerSchema(string) *configschema.Block
58 82
59 // CloseProvisioner closes provisioner connections that aren't needed 83 // CloseProvisioner closes provisioner connections that aren't needed
60 // anymore. 84 // anymore.
61 CloseProvisioner(string) error 85 CloseProvisioner(string) error
62 86
63 // Interpolate takes the given raw configuration and completes 87 // EvaluateBlock takes the given raw configuration block and associated
64 // the interpolations, returning the processed ResourceConfig. 88 // schema and evaluates it to produce a value of an object type that
89 // conforms to the implied type of the schema.
90 //
91 // The "self" argument is optional. If given, it is the referenceable
92 // address that the name "self" should behave as an alias for when
93 // evaluating. Set this to nil if the "self" object should not be available.
94 //
95 // The "key" argument is also optional. If given, it is the instance key
96 // of the current object within the multi-instance container it belongs
97 // to. For example, on a resource block with "count" set this should be
98 // set to a different addrs.IntKey for each instance created from that
99 // block. Set this to addrs.NoKey if not appropriate.
100 //
101 // The returned body is an expanded version of the given body, with any
102 // "dynamic" blocks replaced with zero or more static blocks. This can be
103 // used to extract correct source location information about attributes of
104 // the returned object value.
105 EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics)
106
107 // EvaluateExpr takes the given HCL expression and evaluates it to produce
108 // a value.
109 //
110 // The "self" argument is optional. If given, it is the referenceable
111 // address that the name "self" should behave as an alias for when
112 // evaluating. Set this to nil if the "self" object should not be available.
113 EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics)
114
115 // EvaluationScope returns a scope that can be used to evaluate reference
116 // addresses in this context.
117 EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope
118
119 // SetModuleCallArguments defines values for the variables of a particular
120 // child module call.
65 // 121 //
66 // The resource argument is optional. If given, it is the resource 122 // Calling this function multiple times has merging behavior, keeping any
67 // that is currently being acted upon. 123 // previously-set keys that are not present in the new map.
68 Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error) 124 SetModuleCallArguments(addrs.ModuleCallInstance, map[string]cty.Value)
69 125
70 // InterpolateProvider takes a ProviderConfig and interpolates it with the 126 // Changes returns the writer object that can be used to write new proposed
71 // stored interpolation scope. Since provider configurations can be 127 // changes into the global changes set.
72 // inherited, the interpolation scope may be different from the current 128 Changes() *plans.ChangesSync
73 // context path. Interplation is otherwise executed the same as in the 129
74 // Interpolation method. 130 // State returns a wrapper object that provides safe concurrent access to
75 InterpolateProvider(*config.ProviderConfig, *Resource) (*ResourceConfig, error) 131 // the global state.
76 132 State() *states.SyncState
77 // SetVariables sets the variables for the module within
78 // this context with the name n. This function call is additive:
79 // the second parameter is merged with any previous call.
80 SetVariables(string, map[string]interface{})
81
82 // Diff returns the global diff as well as the lock that should
83 // be used to modify that diff.
84 Diff() (*Diff, *sync.RWMutex)
85
86 // State returns the global state as well as the lock that should
87 // be used to modify that state.
88 State() (*State, *sync.RWMutex)
89} 133}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
index 1b6ee5a..20b3793 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
@@ -6,7 +6,20 @@ import (
6 "log" 6 "log"
7 "sync" 7 "sync"
8 8
9 "github.com/hashicorp/terraform/config" 9 "github.com/hashicorp/terraform/plans"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/provisioners"
12 "github.com/hashicorp/terraform/version"
13
14 "github.com/hashicorp/terraform/states"
15
16 "github.com/hashicorp/hcl2/hcl"
17 "github.com/hashicorp/terraform/configs/configschema"
18 "github.com/hashicorp/terraform/lang"
19 "github.com/hashicorp/terraform/tfdiags"
20
21 "github.com/hashicorp/terraform/addrs"
22 "github.com/zclconf/go-cty/cty"
10) 23)
11 24
12// BuiltinEvalContext is an EvalContext implementation that is used by 25// BuiltinEvalContext is an EvalContext implementation that is used by
@@ -16,35 +29,47 @@ type BuiltinEvalContext struct {
16 StopContext context.Context 29 StopContext context.Context
17 30
18 // PathValue is the Path that this context is operating within. 31 // PathValue is the Path that this context is operating within.
19 PathValue []string 32 PathValue addrs.ModuleInstance
20 33
21 // Interpolater setting below affect the interpolation of variables. 34 // Evaluator is used for evaluating expressions within the scope of this
35 // eval context.
36 Evaluator *Evaluator
37
38 // Schemas is a repository of all of the schemas we should need to
39 // decode configuration blocks and expressions. This must be constructed by
40 // the caller to include schemas for all of the providers, resource types,
41 // data sources and provisioners used by the given configuration and
42 // state.
22 // 43 //
23 // The InterpolaterVars are the exact value for ${var.foo} values. 44 // This must not be mutated during evaluation.
24 // The map is shared between all contexts and is a mapping of 45 Schemas *Schemas
25 // PATH to KEY to VALUE. Because it is shared by all contexts as well 46
26 // as the Interpolater itself, it is protected by InterpolaterVarLock 47 // VariableValues contains the variable values across all modules. This
27 // which must be locked during any access to the map. 48 // structure is shared across the entire containing context, and so it
28 Interpolater *Interpolater 49 // may be accessed only when holding VariableValuesLock.
29 InterpolaterVars map[string]map[string]interface{} 50 // The keys of the first level of VariableValues are the string
30 InterpolaterVarLock *sync.Mutex 51 // representations of addrs.ModuleInstance values. The second-level keys
52 // are variable names within each module instance.
53 VariableValues map[string]map[string]cty.Value
54 VariableValuesLock *sync.Mutex
31 55
32 Components contextComponentFactory 56 Components contextComponentFactory
33 Hooks []Hook 57 Hooks []Hook
34 InputValue UIInput 58 InputValue UIInput
35 ProviderCache map[string]ResourceProvider 59 ProviderCache map[string]providers.Interface
36 ProviderInputConfig map[string]map[string]interface{} 60 ProviderInputConfig map[string]map[string]cty.Value
37 ProviderLock *sync.Mutex 61 ProviderLock *sync.Mutex
38 ProvisionerCache map[string]ResourceProvisioner 62 ProvisionerCache map[string]provisioners.Interface
39 ProvisionerLock *sync.Mutex 63 ProvisionerLock *sync.Mutex
40 DiffValue *Diff 64 ChangesValue *plans.ChangesSync
41 DiffLock *sync.RWMutex 65 StateValue *states.SyncState
42 StateValue *State
43 StateLock *sync.RWMutex
44 66
45 once sync.Once 67 once sync.Once
46} 68}
47 69
70// BuiltinEvalContext implements EvalContext
71var _ EvalContext = (*BuiltinEvalContext)(nil)
72
48func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} { 73func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
49 // This can happen during tests. During tests, we just block forever. 74 // This can happen during tests. During tests, we just block forever.
50 if ctx.StopContext == nil { 75 if ctx.StopContext == nil {
@@ -78,12 +103,13 @@ func (ctx *BuiltinEvalContext) Input() UIInput {
78 return ctx.InputValue 103 return ctx.InputValue
79} 104}
80 105
81func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProvider, error) { 106func (ctx *BuiltinEvalContext) InitProvider(typeName string, addr addrs.ProviderConfig) (providers.Interface, error) {
82 ctx.once.Do(ctx.init) 107 ctx.once.Do(ctx.init)
108 absAddr := addr.Absolute(ctx.Path())
83 109
84 // If we already initialized, it is an error 110 // If we already initialized, it is an error
85 if p := ctx.Provider(name); p != nil { 111 if p := ctx.Provider(absAddr); p != nil {
86 return nil, fmt.Errorf("Provider '%s' already initialized", name) 112 return nil, fmt.Errorf("%s is already initialized", addr)
87 } 113 }
88 114
89 // Warning: make sure to acquire these locks AFTER the call to Provider 115 // Warning: make sure to acquire these locks AFTER the call to Provider
@@ -91,85 +117,102 @@ func (ctx *BuiltinEvalContext) InitProvider(typeName, name string) (ResourceProv
91 ctx.ProviderLock.Lock() 117 ctx.ProviderLock.Lock()
92 defer ctx.ProviderLock.Unlock() 118 defer ctx.ProviderLock.Unlock()
93 119
94 p, err := ctx.Components.ResourceProvider(typeName, name) 120 key := absAddr.String()
121
122 p, err := ctx.Components.ResourceProvider(typeName, key)
95 if err != nil { 123 if err != nil {
96 return nil, err 124 return nil, err
97 } 125 }
98 126
99 ctx.ProviderCache[name] = p 127 log.Printf("[TRACE] BuiltinEvalContext: Initialized %q provider for %s", typeName, absAddr)
128 ctx.ProviderCache[key] = p
129
100 return p, nil 130 return p, nil
101} 131}
102 132
103func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider { 133func (ctx *BuiltinEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface {
104 ctx.once.Do(ctx.init) 134 ctx.once.Do(ctx.init)
105 135
106 ctx.ProviderLock.Lock() 136 ctx.ProviderLock.Lock()
107 defer ctx.ProviderLock.Unlock() 137 defer ctx.ProviderLock.Unlock()
108 138
109 return ctx.ProviderCache[n] 139 return ctx.ProviderCache[addr.String()]
140}
141
142func (ctx *BuiltinEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema {
143 ctx.once.Do(ctx.init)
144
145 return ctx.Schemas.ProviderSchema(addr.ProviderConfig.Type)
110} 146}
111 147
112func (ctx *BuiltinEvalContext) CloseProvider(n string) error { 148func (ctx *BuiltinEvalContext) CloseProvider(addr addrs.ProviderConfig) error {
113 ctx.once.Do(ctx.init) 149 ctx.once.Do(ctx.init)
114 150
115 ctx.ProviderLock.Lock() 151 ctx.ProviderLock.Lock()
116 defer ctx.ProviderLock.Unlock() 152 defer ctx.ProviderLock.Unlock()
117 153
118 var provider interface{} 154 key := addr.Absolute(ctx.Path()).String()
119 provider = ctx.ProviderCache[n] 155 provider := ctx.ProviderCache[key]
120 if provider != nil { 156 if provider != nil {
121 if p, ok := provider.(ResourceProviderCloser); ok { 157 delete(ctx.ProviderCache, key)
122 delete(ctx.ProviderCache, n) 158 return provider.Close()
123 return p.Close()
124 }
125 } 159 }
126 160
127 return nil 161 return nil
128} 162}
129 163
130func (ctx *BuiltinEvalContext) ConfigureProvider( 164func (ctx *BuiltinEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics {
131 n string, cfg *ResourceConfig) error { 165 var diags tfdiags.Diagnostics
132 p := ctx.Provider(n) 166 absAddr := addr.Absolute(ctx.Path())
167 p := ctx.Provider(absAddr)
133 if p == nil { 168 if p == nil {
134 return fmt.Errorf("Provider '%s' not initialized", n) 169 diags = diags.Append(fmt.Errorf("%s not initialized", addr))
170 return diags
135 } 171 }
136 return p.Configure(cfg) 172
173 providerSchema := ctx.ProviderSchema(absAddr)
174 if providerSchema == nil {
175 diags = diags.Append(fmt.Errorf("schema for %s is not available", absAddr))
176 return diags
177 }
178
179 req := providers.ConfigureRequest{
180 TerraformVersion: version.String(),
181 Config: cfg,
182 }
183
184 resp := p.Configure(req)
185 return resp.Diagnostics
137} 186}
138 187
139func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} { 188func (ctx *BuiltinEvalContext) ProviderInput(pc addrs.ProviderConfig) map[string]cty.Value {
140 ctx.ProviderLock.Lock() 189 ctx.ProviderLock.Lock()
141 defer ctx.ProviderLock.Unlock() 190 defer ctx.ProviderLock.Unlock()
142 191
143 // Make a copy of the path so we can safely edit it 192 if !ctx.Path().IsRoot() {
144 path := ctx.Path() 193 // Only root module provider configurations can have input.
145 pathCopy := make([]string, len(path)+1) 194 return nil
146 copy(pathCopy, path)
147
148 // Go up the tree.
149 for i := len(path) - 1; i >= 0; i-- {
150 pathCopy[i+1] = n
151 k := PathCacheKey(pathCopy[:i+2])
152 if v, ok := ctx.ProviderInputConfig[k]; ok {
153 return v
154 }
155 } 195 }
156 196
157 return nil 197 return ctx.ProviderInputConfig[pc.String()]
158} 198}
159 199
160func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) { 200func (ctx *BuiltinEvalContext) SetProviderInput(pc addrs.ProviderConfig, c map[string]cty.Value) {
161 providerPath := make([]string, len(ctx.Path())+1) 201 absProvider := pc.Absolute(ctx.Path())
162 copy(providerPath, ctx.Path()) 202
163 providerPath[len(providerPath)-1] = n 203 if !ctx.Path().IsRoot() {
204 // Only root module provider configurations can have input.
205 log.Printf("[WARN] BuiltinEvalContext: attempt to SetProviderInput for non-root module")
206 return
207 }
164 208
165 // Save the configuration 209 // Save the configuration
166 ctx.ProviderLock.Lock() 210 ctx.ProviderLock.Lock()
167 ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c 211 ctx.ProviderInputConfig[absProvider.String()] = c
168 ctx.ProviderLock.Unlock() 212 ctx.ProviderLock.Unlock()
169} 213}
170 214
171func (ctx *BuiltinEvalContext) InitProvisioner( 215func (ctx *BuiltinEvalContext) InitProvisioner(n string) (provisioners.Interface, error) {
172 n string) (ResourceProvisioner, error) {
173 ctx.once.Do(ctx.init) 216 ctx.once.Do(ctx.init)
174 217
175 // If we already initialized, it is an error 218 // If we already initialized, it is an error
@@ -182,10 +225,7 @@ func (ctx *BuiltinEvalContext) InitProvisioner(
182 ctx.ProvisionerLock.Lock() 225 ctx.ProvisionerLock.Lock()
183 defer ctx.ProvisionerLock.Unlock() 226 defer ctx.ProvisionerLock.Unlock()
184 227
185 provPath := make([]string, len(ctx.Path())+1) 228 key := PathObjectCacheKey(ctx.Path(), n)
186 copy(provPath, ctx.Path())
187 provPath[len(provPath)-1] = n
188 key := PathCacheKey(provPath)
189 229
190 p, err := ctx.Components.ResourceProvisioner(n, key) 230 p, err := ctx.Components.ResourceProvisioner(n, key)
191 if err != nil { 231 if err != nil {
@@ -193,20 +233,24 @@ func (ctx *BuiltinEvalContext) InitProvisioner(
193 } 233 }
194 234
195 ctx.ProvisionerCache[key] = p 235 ctx.ProvisionerCache[key] = p
236
196 return p, nil 237 return p, nil
197} 238}
198 239
199func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner { 240func (ctx *BuiltinEvalContext) Provisioner(n string) provisioners.Interface {
200 ctx.once.Do(ctx.init) 241 ctx.once.Do(ctx.init)
201 242
202 ctx.ProvisionerLock.Lock() 243 ctx.ProvisionerLock.Lock()
203 defer ctx.ProvisionerLock.Unlock() 244 defer ctx.ProvisionerLock.Unlock()
204 245
205 provPath := make([]string, len(ctx.Path())+1) 246 key := PathObjectCacheKey(ctx.Path(), n)
206 copy(provPath, ctx.Path()) 247 return ctx.ProvisionerCache[key]
207 provPath[len(provPath)-1] = n 248}
249
250func (ctx *BuiltinEvalContext) ProvisionerSchema(n string) *configschema.Block {
251 ctx.once.Do(ctx.init)
208 252
209 return ctx.ProvisionerCache[PathCacheKey(provPath)] 253 return ctx.Schemas.ProvisionerConfig(n)
210} 254}
211 255
212func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error { 256func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
@@ -215,106 +259,70 @@ func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
215 ctx.ProvisionerLock.Lock() 259 ctx.ProvisionerLock.Lock()
216 defer ctx.ProvisionerLock.Unlock() 260 defer ctx.ProvisionerLock.Unlock()
217 261
218 provPath := make([]string, len(ctx.Path())+1) 262 key := PathObjectCacheKey(ctx.Path(), n)
219 copy(provPath, ctx.Path())
220 provPath[len(provPath)-1] = n
221 263
222 var prov interface{} 264 prov := ctx.ProvisionerCache[key]
223 prov = ctx.ProvisionerCache[PathCacheKey(provPath)]
224 if prov != nil { 265 if prov != nil {
225 if p, ok := prov.(ResourceProvisionerCloser); ok { 266 return prov.Close()
226 delete(ctx.ProvisionerCache, PathCacheKey(provPath))
227 return p.Close()
228 }
229 } 267 }
230 268
231 return nil 269 return nil
232} 270}
233 271
234func (ctx *BuiltinEvalContext) Interpolate( 272func (ctx *BuiltinEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
235 cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) { 273 var diags tfdiags.Diagnostics
236 274 scope := ctx.EvaluationScope(self, keyData)
237 if cfg != nil { 275 body, evalDiags := scope.ExpandBlock(body, schema)
238 scope := &InterpolationScope{ 276 diags = diags.Append(evalDiags)
239 Path: ctx.Path(), 277 val, evalDiags := scope.EvalBlock(body, schema)
240 Resource: r, 278 diags = diags.Append(evalDiags)
241 } 279 return val, body, diags
242
243 vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
244 if err != nil {
245 return nil, err
246 }
247
248 // Do the interpolation
249 if err := cfg.Interpolate(vs); err != nil {
250 return nil, err
251 }
252 }
253
254 result := NewResourceConfig(cfg)
255 result.interpolateForce()
256 return result, nil
257} 280}
258 281
259func (ctx *BuiltinEvalContext) InterpolateProvider( 282func (ctx *BuiltinEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) {
260 pc *config.ProviderConfig, r *Resource) (*ResourceConfig, error) { 283 scope := ctx.EvaluationScope(self, EvalDataForNoInstanceKey)
261 284 return scope.EvalExpr(expr, wantType)
262 var cfg *config.RawConfig 285}
263
264 if pc != nil && pc.RawConfig != nil {
265 scope := &InterpolationScope{
266 Path: ctx.Path(),
267 Resource: r,
268 }
269
270 cfg = pc.RawConfig
271
272 vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
273 if err != nil {
274 return nil, err
275 }
276 286
277 // Do the interpolation 287func (ctx *BuiltinEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope {
278 if err := cfg.Interpolate(vs); err != nil { 288 data := &evaluationStateData{
279 return nil, err 289 Evaluator: ctx.Evaluator,
280 } 290 ModulePath: ctx.PathValue,
291 InstanceKeyData: keyData,
292 Operation: ctx.Evaluator.Operation,
281 } 293 }
282 294 return ctx.Evaluator.Scope(data, self)
283 result := NewResourceConfig(cfg)
284 result.interpolateForce()
285 return result, nil
286} 295}
287 296
288func (ctx *BuiltinEvalContext) Path() []string { 297func (ctx *BuiltinEvalContext) Path() addrs.ModuleInstance {
289 return ctx.PathValue 298 return ctx.PathValue
290} 299}
291 300
292func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) { 301func (ctx *BuiltinEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, vals map[string]cty.Value) {
293 ctx.InterpolaterVarLock.Lock() 302 ctx.VariableValuesLock.Lock()
294 defer ctx.InterpolaterVarLock.Unlock() 303 defer ctx.VariableValuesLock.Unlock()
295 304
296 path := make([]string, len(ctx.Path())+1) 305 childPath := n.ModuleInstance(ctx.PathValue)
297 copy(path, ctx.Path()) 306 key := childPath.String()
298 path[len(path)-1] = n
299 key := PathCacheKey(path)
300 307
301 vars := ctx.InterpolaterVars[key] 308 args := ctx.VariableValues[key]
302 if vars == nil { 309 if args == nil {
303 vars = make(map[string]interface{}) 310 args = make(map[string]cty.Value)
304 ctx.InterpolaterVars[key] = vars 311 ctx.VariableValues[key] = vals
312 return
305 } 313 }
306 314
307 for k, v := range vs { 315 for k, v := range vals {
308 vars[k] = v 316 args[k] = v
309 } 317 }
310} 318}
311 319
312func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) { 320func (ctx *BuiltinEvalContext) Changes() *plans.ChangesSync {
313 return ctx.DiffValue, ctx.DiffLock 321 return ctx.ChangesValue
314} 322}
315 323
316func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) { 324func (ctx *BuiltinEvalContext) State() *states.SyncState {
317 return ctx.StateValue, ctx.StateLock 325 return ctx.StateValue
318} 326}
319 327
320func (ctx *BuiltinEvalContext) init() { 328func (ctx *BuiltinEvalContext) init() {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
index 6464517..195ecc5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
@@ -1,9 +1,20 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "sync" 4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/hashicorp/hcl2/hcldec"
6 "github.com/zclconf/go-cty/cty"
7 "github.com/zclconf/go-cty/cty/convert"
5 8
9 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/config" 10 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/configs/configschema"
12 "github.com/hashicorp/terraform/lang"
13 "github.com/hashicorp/terraform/plans"
14 "github.com/hashicorp/terraform/providers"
15 "github.com/hashicorp/terraform/provisioners"
16 "github.com/hashicorp/terraform/states"
17 "github.com/hashicorp/terraform/tfdiags"
7) 18)
8 19
9// MockEvalContext is a mock version of EvalContext that can be used 20// MockEvalContext is a mock version of EvalContext that can be used
@@ -20,43 +31,84 @@ type MockEvalContext struct {
20 InputInput UIInput 31 InputInput UIInput
21 32
22 InitProviderCalled bool 33 InitProviderCalled bool
23 InitProviderName string 34 InitProviderType string
24 InitProviderProvider ResourceProvider 35 InitProviderAddr addrs.ProviderConfig
36 InitProviderProvider providers.Interface
25 InitProviderError error 37 InitProviderError error
26 38
27 ProviderCalled bool 39 ProviderCalled bool
28 ProviderName string 40 ProviderAddr addrs.AbsProviderConfig
29 ProviderProvider ResourceProvider 41 ProviderProvider providers.Interface
42
43 ProviderSchemaCalled bool
44 ProviderSchemaAddr addrs.AbsProviderConfig
45 ProviderSchemaSchema *ProviderSchema
30 46
31 CloseProviderCalled bool 47 CloseProviderCalled bool
32 CloseProviderName string 48 CloseProviderAddr addrs.ProviderConfig
33 CloseProviderProvider ResourceProvider 49 CloseProviderProvider providers.Interface
34 50
35 ProviderInputCalled bool 51 ProviderInputCalled bool
36 ProviderInputName string 52 ProviderInputAddr addrs.ProviderConfig
37 ProviderInputConfig map[string]interface{} 53 ProviderInputValues map[string]cty.Value
38 54
39 SetProviderInputCalled bool 55 SetProviderInputCalled bool
40 SetProviderInputName string 56 SetProviderInputAddr addrs.ProviderConfig
41 SetProviderInputConfig map[string]interface{} 57 SetProviderInputValues map[string]cty.Value
42 58
43 ConfigureProviderCalled bool 59 ConfigureProviderCalled bool
44 ConfigureProviderName string 60 ConfigureProviderAddr addrs.ProviderConfig
45 ConfigureProviderConfig *ResourceConfig 61 ConfigureProviderConfig cty.Value
46 ConfigureProviderError error 62 ConfigureProviderDiags tfdiags.Diagnostics
47 63
48 InitProvisionerCalled bool 64 InitProvisionerCalled bool
49 InitProvisionerName string 65 InitProvisionerName string
50 InitProvisionerProvisioner ResourceProvisioner 66 InitProvisionerProvisioner provisioners.Interface
51 InitProvisionerError error 67 InitProvisionerError error
52 68
53 ProvisionerCalled bool 69 ProvisionerCalled bool
54 ProvisionerName string 70 ProvisionerName string
55 ProvisionerProvisioner ResourceProvisioner 71 ProvisionerProvisioner provisioners.Interface
72
73 ProvisionerSchemaCalled bool
74 ProvisionerSchemaName string
75 ProvisionerSchemaSchema *configschema.Block
56 76
57 CloseProvisionerCalled bool 77 CloseProvisionerCalled bool
58 CloseProvisionerName string 78 CloseProvisionerName string
59 CloseProvisionerProvisioner ResourceProvisioner 79 CloseProvisionerProvisioner provisioners.Interface
80
81 EvaluateBlockCalled bool
82 EvaluateBlockBody hcl.Body
83 EvaluateBlockSchema *configschema.Block
84 EvaluateBlockSelf addrs.Referenceable
85 EvaluateBlockKeyData InstanceKeyEvalData
86 EvaluateBlockResultFunc func(
87 body hcl.Body,
88 schema *configschema.Block,
89 self addrs.Referenceable,
90 keyData InstanceKeyEvalData,
91 ) (cty.Value, hcl.Body, tfdiags.Diagnostics) // overrides the other values below, if set
92 EvaluateBlockResult cty.Value
93 EvaluateBlockExpandedBody hcl.Body
94 EvaluateBlockDiags tfdiags.Diagnostics
95
96 EvaluateExprCalled bool
97 EvaluateExprExpr hcl.Expression
98 EvaluateExprWantType cty.Type
99 EvaluateExprSelf addrs.Referenceable
100 EvaluateExprResultFunc func(
101 expr hcl.Expression,
102 wantType cty.Type,
103 self addrs.Referenceable,
104 ) (cty.Value, tfdiags.Diagnostics) // overrides the other values below, if set
105 EvaluateExprResult cty.Value
106 EvaluateExprDiags tfdiags.Diagnostics
107
108 EvaluationScopeCalled bool
109 EvaluationScopeSelf addrs.Referenceable
110 EvaluationScopeKeyData InstanceKeyEvalData
111 EvaluationScopeScope *lang.Scope
60 112
61 InterpolateCalled bool 113 InterpolateCalled bool
62 InterpolateConfig *config.RawConfig 114 InterpolateConfig *config.RawConfig
@@ -71,21 +123,22 @@ type MockEvalContext struct {
71 InterpolateProviderError error 123 InterpolateProviderError error
72 124
73 PathCalled bool 125 PathCalled bool
74 PathPath []string 126 PathPath addrs.ModuleInstance
75 127
76 SetVariablesCalled bool 128 SetModuleCallArgumentsCalled bool
77 SetVariablesModule string 129 SetModuleCallArgumentsModule addrs.ModuleCallInstance
78 SetVariablesVariables map[string]interface{} 130 SetModuleCallArgumentsValues map[string]cty.Value
79 131
80 DiffCalled bool 132 ChangesCalled bool
81 DiffDiff *Diff 133 ChangesChanges *plans.ChangesSync
82 DiffLock *sync.RWMutex
83 134
84 StateCalled bool 135 StateCalled bool
85 StateState *State 136 StateState *states.SyncState
86 StateLock *sync.RWMutex
87} 137}
88 138
139// MockEvalContext implements EvalContext
140var _ EvalContext = (*MockEvalContext)(nil)
141
89func (c *MockEvalContext) Stopped() <-chan struct{} { 142func (c *MockEvalContext) Stopped() <-chan struct{} {
90 c.StoppedCalled = true 143 c.StoppedCalled = true
91 return c.StoppedValue 144 return c.StoppedValue
@@ -107,61 +160,157 @@ func (c *MockEvalContext) Input() UIInput {
107 return c.InputInput 160 return c.InputInput
108} 161}
109 162
110func (c *MockEvalContext) InitProvider(t, n string) (ResourceProvider, error) { 163func (c *MockEvalContext) InitProvider(t string, addr addrs.ProviderConfig) (providers.Interface, error) {
111 c.InitProviderCalled = true 164 c.InitProviderCalled = true
112 c.InitProviderName = n 165 c.InitProviderType = t
166 c.InitProviderAddr = addr
113 return c.InitProviderProvider, c.InitProviderError 167 return c.InitProviderProvider, c.InitProviderError
114} 168}
115 169
116func (c *MockEvalContext) Provider(n string) ResourceProvider { 170func (c *MockEvalContext) Provider(addr addrs.AbsProviderConfig) providers.Interface {
117 c.ProviderCalled = true 171 c.ProviderCalled = true
118 c.ProviderName = n 172 c.ProviderAddr = addr
119 return c.ProviderProvider 173 return c.ProviderProvider
120} 174}
121 175
122func (c *MockEvalContext) CloseProvider(n string) error { 176func (c *MockEvalContext) ProviderSchema(addr addrs.AbsProviderConfig) *ProviderSchema {
177 c.ProviderSchemaCalled = true
178 c.ProviderSchemaAddr = addr
179 return c.ProviderSchemaSchema
180}
181
182func (c *MockEvalContext) CloseProvider(addr addrs.ProviderConfig) error {
123 c.CloseProviderCalled = true 183 c.CloseProviderCalled = true
124 c.CloseProviderName = n 184 c.CloseProviderAddr = addr
125 return nil 185 return nil
126} 186}
127 187
128func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error { 188func (c *MockEvalContext) ConfigureProvider(addr addrs.ProviderConfig, cfg cty.Value) tfdiags.Diagnostics {
129 c.ConfigureProviderCalled = true 189 c.ConfigureProviderCalled = true
130 c.ConfigureProviderName = n 190 c.ConfigureProviderAddr = addr
131 c.ConfigureProviderConfig = cfg 191 c.ConfigureProviderConfig = cfg
132 return c.ConfigureProviderError 192 return c.ConfigureProviderDiags
133} 193}
134 194
135func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} { 195func (c *MockEvalContext) ProviderInput(addr addrs.ProviderConfig) map[string]cty.Value {
136 c.ProviderInputCalled = true 196 c.ProviderInputCalled = true
137 c.ProviderInputName = n 197 c.ProviderInputAddr = addr
138 return c.ProviderInputConfig 198 return c.ProviderInputValues
139} 199}
140 200
141func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) { 201func (c *MockEvalContext) SetProviderInput(addr addrs.ProviderConfig, vals map[string]cty.Value) {
142 c.SetProviderInputCalled = true 202 c.SetProviderInputCalled = true
143 c.SetProviderInputName = n 203 c.SetProviderInputAddr = addr
144 c.SetProviderInputConfig = cfg 204 c.SetProviderInputValues = vals
145} 205}
146 206
147func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) { 207func (c *MockEvalContext) InitProvisioner(n string) (provisioners.Interface, error) {
148 c.InitProvisionerCalled = true 208 c.InitProvisionerCalled = true
149 c.InitProvisionerName = n 209 c.InitProvisionerName = n
150 return c.InitProvisionerProvisioner, c.InitProvisionerError 210 return c.InitProvisionerProvisioner, c.InitProvisionerError
151} 211}
152 212
153func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner { 213func (c *MockEvalContext) Provisioner(n string) provisioners.Interface {
154 c.ProvisionerCalled = true 214 c.ProvisionerCalled = true
155 c.ProvisionerName = n 215 c.ProvisionerName = n
156 return c.ProvisionerProvisioner 216 return c.ProvisionerProvisioner
157} 217}
158 218
219func (c *MockEvalContext) ProvisionerSchema(n string) *configschema.Block {
220 c.ProvisionerSchemaCalled = true
221 c.ProvisionerSchemaName = n
222 return c.ProvisionerSchemaSchema
223}
224
159func (c *MockEvalContext) CloseProvisioner(n string) error { 225func (c *MockEvalContext) CloseProvisioner(n string) error {
160 c.CloseProvisionerCalled = true 226 c.CloseProvisionerCalled = true
161 c.CloseProvisionerName = n 227 c.CloseProvisionerName = n
162 return nil 228 return nil
163} 229}
164 230
231func (c *MockEvalContext) EvaluateBlock(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
232 c.EvaluateBlockCalled = true
233 c.EvaluateBlockBody = body
234 c.EvaluateBlockSchema = schema
235 c.EvaluateBlockSelf = self
236 c.EvaluateBlockKeyData = keyData
237 if c.EvaluateBlockResultFunc != nil {
238 return c.EvaluateBlockResultFunc(body, schema, self, keyData)
239 }
240 return c.EvaluateBlockResult, c.EvaluateBlockExpandedBody, c.EvaluateBlockDiags
241}
242
243func (c *MockEvalContext) EvaluateExpr(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) {
244 c.EvaluateExprCalled = true
245 c.EvaluateExprExpr = expr
246 c.EvaluateExprWantType = wantType
247 c.EvaluateExprSelf = self
248 if c.EvaluateExprResultFunc != nil {
249 return c.EvaluateExprResultFunc(expr, wantType, self)
250 }
251 return c.EvaluateExprResult, c.EvaluateExprDiags
252}
253
254// installSimpleEval is a helper to install a simple mock implementation of
255// both EvaluateBlock and EvaluateExpr into the receiver.
256//
257// These default implementations will either evaluate the given input against
258// the scope in field EvaluationScopeScope or, if it is nil, with no eval
259// context at all so that only constant values may be used.
260//
261// This function overwrites any existing functions installed in fields
262// EvaluateBlockResultFunc and EvaluateExprResultFunc.
263func (c *MockEvalContext) installSimpleEval() {
264 c.EvaluateBlockResultFunc = func(body hcl.Body, schema *configschema.Block, self addrs.Referenceable, keyData InstanceKeyEvalData) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
265 if scope := c.EvaluationScopeScope; scope != nil {
266 // Fully-functional codepath.
267 var diags tfdiags.Diagnostics
268 body, diags = scope.ExpandBlock(body, schema)
269 if diags.HasErrors() {
270 return cty.DynamicVal, body, diags
271 }
272 val, evalDiags := c.EvaluationScopeScope.EvalBlock(body, schema)
273 diags = diags.Append(evalDiags)
274 if evalDiags.HasErrors() {
275 return cty.DynamicVal, body, diags
276 }
277 return val, body, diags
278 }
279
280 // Fallback codepath supporting constant values only.
281 val, hclDiags := hcldec.Decode(body, schema.DecoderSpec(), nil)
282 return val, body, tfdiags.Diagnostics(nil).Append(hclDiags)
283 }
284 c.EvaluateExprResultFunc = func(expr hcl.Expression, wantType cty.Type, self addrs.Referenceable) (cty.Value, tfdiags.Diagnostics) {
285 if scope := c.EvaluationScopeScope; scope != nil {
286 // Fully-functional codepath.
287 return scope.EvalExpr(expr, wantType)
288 }
289
290 // Fallback codepath supporting constant values only.
291 var diags tfdiags.Diagnostics
292 val, hclDiags := expr.Value(nil)
293 diags = diags.Append(hclDiags)
294 if hclDiags.HasErrors() {
295 return cty.DynamicVal, diags
296 }
297 var err error
298 val, err = convert.Convert(val, wantType)
299 if err != nil {
300 diags = diags.Append(err)
301 return cty.DynamicVal, diags
302 }
303 return val, diags
304 }
305}
306
307func (c *MockEvalContext) EvaluationScope(self addrs.Referenceable, keyData InstanceKeyEvalData) *lang.Scope {
308 c.EvaluationScopeCalled = true
309 c.EvaluationScopeSelf = self
310 c.EvaluationScopeKeyData = keyData
311 return c.EvaluationScopeScope
312}
313
165func (c *MockEvalContext) Interpolate( 314func (c *MockEvalContext) Interpolate(
166 config *config.RawConfig, resource *Resource) (*ResourceConfig, error) { 315 config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
167 c.InterpolateCalled = true 316 c.InterpolateCalled = true
@@ -178,23 +327,23 @@ func (c *MockEvalContext) InterpolateProvider(
178 return c.InterpolateProviderConfigResult, c.InterpolateError 327 return c.InterpolateProviderConfigResult, c.InterpolateError
179} 328}
180 329
181func (c *MockEvalContext) Path() []string { 330func (c *MockEvalContext) Path() addrs.ModuleInstance {
182 c.PathCalled = true 331 c.PathCalled = true
183 return c.PathPath 332 return c.PathPath
184} 333}
185 334
186func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) { 335func (c *MockEvalContext) SetModuleCallArguments(n addrs.ModuleCallInstance, values map[string]cty.Value) {
187 c.SetVariablesCalled = true 336 c.SetModuleCallArgumentsCalled = true
188 c.SetVariablesModule = n 337 c.SetModuleCallArgumentsModule = n
189 c.SetVariablesVariables = vs 338 c.SetModuleCallArgumentsValues = values
190} 339}
191 340
192func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) { 341func (c *MockEvalContext) Changes() *plans.ChangesSync {
193 c.DiffCalled = true 342 c.ChangesCalled = true
194 return c.DiffDiff, c.DiffLock 343 return c.ChangesChanges
195} 344}
196 345
197func (c *MockEvalContext) State() (*State, *sync.RWMutex) { 346func (c *MockEvalContext) State() *states.SyncState {
198 c.StateCalled = true 347 c.StateCalled = true
199 return c.StateState, c.StateLock 348 return c.StateState
200} 349}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
index 2ae56a7..8083105 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
@@ -1,58 +1,120 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/tfdiags"
10 "github.com/zclconf/go-cty/cty"
11 "github.com/zclconf/go-cty/cty/gocty"
5) 12)
6 13
7// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state 14// evaluateResourceCountExpression is our standard mechanism for interpreting an
8// when there is a resource count with zero/one boundary, i.e. fixing 15// expression given for a "count" argument on a resource. This should be called
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. 16// from the DynamicExpand of a node representing a resource in order to
10type EvalCountFixZeroOneBoundary struct { 17// determine the final count value.
11 Resource *config.Resource 18//
19// If the result is zero or positive and no error diagnostics are returned, then
20// the result is the literal count value to use.
21//
22// If the result is -1, this indicates that the given expression is nil and so
23// the "count" behavior should not be enabled for this resource at all.
24//
25// If error diagnostics are returned then the result is always the meaningless
26// placeholder value -1.
27func evaluateResourceCountExpression(expr hcl.Expression, ctx EvalContext) (int, tfdiags.Diagnostics) {
28 count, known, diags := evaluateResourceCountExpressionKnown(expr, ctx)
29 if !known {
30 // Currently this is a rather bad outcome from a UX standpoint, since we have
31 // no real mechanism to deal with this situation and all we can do is produce
32 // an error message.
33 // FIXME: In future, implement a built-in mechanism for deferring changes that
34 // can't yet be predicted, and use it to guide the user through several
35 // plan/apply steps until the desired configuration is eventually reached.
36 diags = diags.Append(&hcl.Diagnostic{
37 Severity: hcl.DiagError,
38 Summary: "Invalid count argument",
39 Detail: `The "count" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the count depends on.`,
40 Subject: expr.Range().Ptr(),
41 })
42 }
43 return count, diags
12} 44}
13 45
14// TODO: test 46// evaluateResourceCountExpressionKnown is like evaluateResourceCountExpression
15func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) { 47// except that it handles an unknown result by returning count = 0 and
16 // Get the count, important for knowing whether we're supposed to 48// a known = false, rather than by reporting the unknown value as an error
17 // be adding the zero, or trimming it. 49// diagnostic.
18 count, err := n.Resource.Count() 50func evaluateResourceCountExpressionKnown(expr hcl.Expression, ctx EvalContext) (count int, known bool, diags tfdiags.Diagnostics) {
19 if err != nil { 51 if expr == nil {
20 return nil, err 52 return -1, true, nil
21 } 53 }
22 54
23 // Figure what to look for and what to replace it with 55 countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil)
24 hunt := n.Resource.Id() 56 diags = diags.Append(countDiags)
25 replace := hunt + ".0" 57 if diags.HasErrors() {
26 if count < 2 { 58 return -1, true, diags
27 hunt, replace = replace, hunt
28 } 59 }
29 60
30 state, lock := ctx.State() 61 switch {
31 62 case countVal.IsNull():
32 // Get a lock so we can access this instance and potentially make 63 diags = diags.Append(&hcl.Diagnostic{
33 // changes to it. 64 Severity: hcl.DiagError,
34 lock.Lock() 65 Summary: "Invalid count argument",
35 defer lock.Unlock() 66 Detail: `The given "count" argument value is null. An integer is required.`,
36 67 Subject: expr.Range().Ptr(),
37 // Look for the module state. If we don't have one, then it doesn't matter. 68 })
38 mod := state.ModuleByPath(ctx.Path()) 69 return -1, true, diags
39 if mod == nil { 70 case !countVal.IsKnown():
40 return nil, nil 71 return 0, false, diags
41 } 72 }
42 73
43 // Look for the resource state. If we don't have one, then it is okay. 74 err := gocty.FromCtyValue(countVal, &count)
44 rs, ok := mod.Resources[hunt] 75 if err != nil {
45 if !ok { 76 diags = diags.Append(&hcl.Diagnostic{
46 return nil, nil 77 Severity: hcl.DiagError,
78 Summary: "Invalid count argument",
79 Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
80 Subject: expr.Range().Ptr(),
81 })
82 return -1, true, diags
47 } 83 }
48 84 if count < 0 {
49 // If the replacement key exists, we just keep both 85 diags = diags.Append(&hcl.Diagnostic{
50 if _, ok := mod.Resources[replace]; ok { 86 Severity: hcl.DiagError,
51 return nil, nil 87 Summary: "Invalid count argument",
88 Detail: `The given "count" argument value is unsuitable: negative numbers are not supported.`,
89 Subject: expr.Range().Ptr(),
90 })
91 return -1, true, diags
52 } 92 }
53 93
54 mod.Resources[replace] = rs 94 return count, true, diags
55 delete(mod.Resources, hunt) 95}
56 96
57 return nil, nil 97// fixResourceCountSetTransition is a helper function to fix up the state when a
98// resource transitions its "count" from being set to unset or vice-versa,
99// treating a 0-key and a no-key instance as aliases for one another across
100// the transition.
101//
102// The correct time to call this function is in the DynamicExpand method for
103// a node representing a resource, just after evaluating the count with
104// evaluateResourceCountExpression, and before any other analysis of the
105// state such as orphan detection.
106//
107// This function calls methods on the given EvalContext to update the current
108// state in-place, if necessary. It is a no-op if there is no count transition
109// taking place.
110//
111// Since the state is modified in-place, this function must take a writer lock
112// on the state. The caller must therefore not also be holding a state lock,
113// or this function will block forever awaiting the lock.
114func fixResourceCountSetTransition(ctx EvalContext, addr addrs.AbsResource, countEnabled bool) {
115 state := ctx.State()
116 changed := state.MaybeFixUpResourceInstanceAddressForCount(addr, countEnabled)
117 if changed {
118 log.Printf("[TRACE] renamed first %s instance in transient state due to count argument change", addr)
119 }
58} 120}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
index 91e2b90..647c58d 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
@@ -1,7 +1,11 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt"
4 "log" 5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/configs"
5) 9)
6 10
7// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state 11// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
@@ -9,22 +13,34 @@ import (
9// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa. 13// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
10// 14//
11// This works on the global state. 15// This works on the global state.
12type EvalCountFixZeroOneBoundaryGlobal struct{} 16type EvalCountFixZeroOneBoundaryGlobal struct {
17 Config *configs.Config
18}
13 19
14// TODO: test 20// TODO: test
15func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) { 21func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
16 // Get the state and lock it since we'll potentially modify it 22 // We'll temporarily lock the state to grab the modules, then work on each
17 state, lock := ctx.State() 23 // one separately while taking a lock again for each separate resource.
18 lock.Lock() 24 // This means that if another caller concurrently adds a module here while
19 defer lock.Unlock() 25 // we're working then we won't update it, but that's no worse than the
20 26 // concurrent writer blocking for our entire fixup process and _then_
21 // Prune the state since we require a clean state to work 27 // adding a new module, and in practice the graph node associated with
22 state.prune() 28 // this eval depends on everything else in the graph anyway, so there
23 29 // should not be concurrent writers.
24 // Go through each modules since the boundaries are restricted to a 30 state := ctx.State().Lock()
25 // module scope. 31 moduleAddrs := make([]addrs.ModuleInstance, 0, len(state.Modules))
26 for _, m := range state.Modules { 32 for _, m := range state.Modules {
27 if err := n.fixModule(m); err != nil { 33 moduleAddrs = append(moduleAddrs, m.Addr)
34 }
35 ctx.State().Unlock()
36
37 for _, addr := range moduleAddrs {
38 cfg := n.Config.DescendentForInstance(addr)
39 if cfg == nil {
40 log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
41 continue
42 }
43 if err := n.fixModule(ctx, addr); err != nil {
28 return nil, err 44 return nil, err
29 } 45 }
30 } 46 }
@@ -32,46 +48,29 @@ func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{},
32 return nil, nil 48 return nil, nil
33} 49}
34 50
35func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error { 51func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(ctx EvalContext, moduleAddr addrs.ModuleInstance) error {
36 // Counts keeps track of keys and their counts 52 ms := ctx.State().Module(moduleAddr)
37 counts := make(map[string]int) 53 cfg := n.Config.DescendentForInstance(moduleAddr)
38 for k, _ := range m.Resources { 54 if ms == nil {
39 // Parse the key 55 // Theoretically possible for a concurrent writer to delete a module
40 key, err := ParseResourceStateKey(k) 56 // while we're running, but in practice the graph node that called us
41 if err != nil { 57 // depends on everything else in the graph and so there can never
42 return err 58 // be a concurrent writer.
43 } 59 return fmt.Errorf("[WARN] no state found for %s while trying to fix up EachModes", moduleAddr)
44 60 }
45 // Set the index to -1 so that we can keep count 61 if cfg == nil {
46 key.Index = -1 62 return fmt.Errorf("[WARN] no config found for %s while trying to fix up EachModes", moduleAddr)
47
48 // Increment
49 counts[key.String()]++
50 } 63 }
51 64
52 // Go through the counts and do the fixup for each resource 65 for _, r := range ms.Resources {
53 for raw, count := range counts { 66 addr := r.Addr.Absolute(moduleAddr)
54 // Search and replace this resource 67 rCfg := cfg.Module.ResourceByAddr(r.Addr)
55 search := raw 68 if rCfg == nil {
56 replace := raw + ".0" 69 log.Printf("[WARN] Not fixing up EachModes for %s because it has no config", addr)
57 if count < 2 {
58 search, replace = replace, search
59 }
60 log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
61
62 // Look for the resource state. If we don't have one, then it is okay.
63 rs, ok := m.Resources[search]
64 if !ok {
65 continue
66 }
67
68 // If the replacement key exists, we just keep both
69 if _, ok := m.Resources[replace]; ok {
70 continue 70 continue
71 } 71 }
72 72 hasCount := rCfg.Count != nil
73 m.Resources[replace] = rs 73 fixResourceCountSetTransition(ctx, addr, hasCount)
74 delete(m.Resources, search)
75 } 74 }
76 75
77 return nil 76 return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
index 26205ce..b7acfb0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
@@ -1,92 +1,114 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "bytes"
4 "fmt" 5 "fmt"
5 "log" 6 "log"
7 "reflect"
6 "strings" 8 "strings"
7 9
8 "github.com/hashicorp/terraform/config" 10 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/terraform/version" 11 "github.com/zclconf/go-cty/cty"
12
13 "github.com/hashicorp/terraform/addrs"
14 "github.com/hashicorp/terraform/configs"
15 "github.com/hashicorp/terraform/plans"
16 "github.com/hashicorp/terraform/plans/objchange"
17 "github.com/hashicorp/terraform/providers"
18 "github.com/hashicorp/terraform/states"
19 "github.com/hashicorp/terraform/tfdiags"
10) 20)
11 21
12// EvalCompareDiff is an EvalNode implementation that compares two diffs 22// EvalCheckPlannedChange is an EvalNode implementation that produces errors
13// and errors if the diffs are not equal. 23// if the _actual_ expected value is not compatible with what was recorded
14type EvalCompareDiff struct { 24// in the plan.
15 Info *InstanceInfo 25//
16 One, Two **InstanceDiff 26// Errors here are most often indicative of a bug in the provider, so our
27// error messages will report with that in mind. It's also possible that
28// there's a bug in Terraform's Core's own "proposed new value" code in
29// EvalDiff.
30type EvalCheckPlannedChange struct {
31 Addr addrs.ResourceInstance
32 ProviderAddr addrs.AbsProviderConfig
33 ProviderSchema **ProviderSchema
34
35 // We take ResourceInstanceChange objects here just because that's what's
36 // convenient to pass in from the evaltree implementation, but we really
37 // only look at the "After" value of each change.
38 Planned, Actual **plans.ResourceInstanceChange
17} 39}
18 40
19// TODO: test 41func (n *EvalCheckPlannedChange) Eval(ctx EvalContext) (interface{}, error) {
20func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) { 42 providerSchema := *n.ProviderSchema
21 one, two := *n.One, *n.Two 43 plannedChange := *n.Planned
22 44 actualChange := *n.Actual
23 // If either are nil, let them be empty 45
24 if one == nil { 46 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
25 one = new(InstanceDiff) 47 if schema == nil {
26 one.init() 48 // Should be caught during validation, so we don't bother with a pretty error here
27 } 49 return nil, fmt.Errorf("provider does not support %q", n.Addr.Resource.Type)
28 if two == nil { 50 }
29 two = new(InstanceDiff) 51
30 two.init() 52 var diags tfdiags.Diagnostics
31 } 53 absAddr := n.Addr.Absolute(ctx.Path())
32 oneId, _ := one.GetAttribute("id") 54
33 twoId, _ := two.GetAttribute("id") 55 log.Printf("[TRACE] EvalCheckPlannedChange: Verifying that actual change (action %s) matches planned change (action %s)", actualChange.Action, plannedChange.Action)
34 one.DelAttribute("id") 56
35 two.DelAttribute("id") 57 if plannedChange.Action != actualChange.Action {
36 defer func() { 58 switch {
37 if oneId != nil { 59 case plannedChange.Action == plans.Update && actualChange.Action == plans.NoOp:
38 one.SetAttribute("id", oneId) 60 // It's okay for an update to become a NoOp once we've filled in
39 } 61 // all of the unknown values, since the final values might actually
40 if twoId != nil { 62 // match what was there before after all.
41 two.SetAttribute("id", twoId) 63 log.Printf("[DEBUG] After incorporating new values learned so far during apply, %s change has become NoOp", absAddr)
42 } 64 default:
43 }() 65 diags = diags.Append(tfdiags.Sourceless(
44 66 tfdiags.Error,
45 if same, reason := one.Same(two); !same { 67 "Provider produced inconsistent final plan",
46 log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id) 68 fmt.Sprintf(
47 log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason) 69 "When expanding the plan for %s to include new values learned so far during apply, provider %q changed the planned action from %s to %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
48 log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one) 70 absAddr, n.ProviderAddr.ProviderConfig.Type,
49 log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two) 71 plannedChange.Action, actualChange.Action,
50 return nil, fmt.Errorf( 72 ),
51 "%s: diffs didn't match during apply. This is a bug with "+ 73 ))
52 "Terraform and should be reported as a GitHub Issue.\n"+ 74 }
53 "\n"+
54 "Please include the following information in your report:\n"+
55 "\n"+
56 " Terraform Version: %s\n"+
57 " Resource ID: %s\n"+
58 " Mismatch reason: %s\n"+
59 " Diff One (usually from plan): %#v\n"+
60 " Diff Two (usually from apply): %#v\n"+
61 "\n"+
62 "Also include as much context as you can about your config, state, "+
63 "and the steps you performed to trigger this error.\n",
64 n.Info.Id, version.Version, n.Info.Id, reason, one, two)
65 } 75 }
66 76
67 return nil, nil 77 errs := objchange.AssertObjectCompatible(schema, plannedChange.After, actualChange.After)
78 for _, err := range errs {
79 diags = diags.Append(tfdiags.Sourceless(
80 tfdiags.Error,
81 "Provider produced inconsistent final plan",
82 fmt.Sprintf(
83 "When expanding the plan for %s to include new values learned so far during apply, provider %q produced an invalid new value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
84 absAddr, n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatError(err),
85 ),
86 ))
87 }
88 return nil, diags.Err()
68} 89}
69 90
70// EvalDiff is an EvalNode implementation that does a refresh for 91// EvalDiff is an EvalNode implementation that detects changes for a given
71// a resource. 92// resource instance.
72type EvalDiff struct { 93type EvalDiff struct {
73 Name string 94 Addr addrs.ResourceInstance
74 Info *InstanceInfo 95 Config *configs.Resource
75 Config **ResourceConfig 96 Provider *providers.Interface
76 Provider *ResourceProvider 97 ProviderAddr addrs.AbsProviderConfig
77 Diff **InstanceDiff 98 ProviderSchema **ProviderSchema
78 State **InstanceState 99 State **states.ResourceInstanceObject
79 OutputDiff **InstanceDiff 100 PreviousDiff **plans.ResourceInstanceChange
80 OutputState **InstanceState 101
81 102 // CreateBeforeDestroy is set if either the resource's own config sets
82 // Resource is needed to fetch the ignore_changes list so we can 103 // create_before_destroy explicitly or if dependencies have forced the
83 // filter user-requested ignored attributes from the diff. 104 // resource to be handled as create_before_destroy in order to avoid
84 Resource *config.Resource 105 // a dependency cycle.
85 106 CreateBeforeDestroy bool
86 // Stub is used to flag the generated InstanceDiff as a stub. This is used to 107
87 // ensure that the node exists to perform interpolations and generate 108 OutputChange **plans.ResourceInstanceChange
88 // computed paths off of, but not as an actual diff where resouces should be 109 OutputValue *cty.Value
89 // counted, and not as a diff that should be acted on. 110 OutputState **states.ResourceInstanceObject
111
90 Stub bool 112 Stub bool
91} 113}
92 114
@@ -95,81 +117,303 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
95 state := *n.State 117 state := *n.State
96 config := *n.Config 118 config := *n.Config
97 provider := *n.Provider 119 provider := *n.Provider
120 providerSchema := *n.ProviderSchema
121
122 if providerSchema == nil {
123 return nil, fmt.Errorf("provider schema is unavailable for %s", n.Addr)
124 }
125 if n.ProviderAddr.ProviderConfig.Type == "" {
126 panic(fmt.Sprintf("EvalDiff for %s does not have ProviderAddr set", n.Addr.Absolute(ctx.Path())))
127 }
128
129 var diags tfdiags.Diagnostics
130
131 // Evaluate the configuration
132 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
133 if schema == nil {
134 // Should be caught during validation, so we don't bother with a pretty error here
135 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
136 }
137 keyData := EvalDataForInstanceKey(n.Addr.Key)
138 configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
139 diags = diags.Append(configDiags)
140 if configDiags.HasErrors() {
141 return nil, diags.Err()
142 }
143
144 absAddr := n.Addr.Absolute(ctx.Path())
145 var priorVal cty.Value
146 var priorValTainted cty.Value
147 var priorPrivate []byte
148 if state != nil {
149 if state.Status != states.ObjectTainted {
150 priorVal = state.Value
151 priorPrivate = state.Private
152 } else {
153 // If the prior state is tainted then we'll proceed below like
154 // we're creating an entirely new object, but then turn it into
155 // a synthetic "Replace" change at the end, creating the same
156 // result as if the provider had marked at least one argument
157 // change as "requires replacement".
158 priorValTainted = state.Value
159 priorVal = cty.NullVal(schema.ImpliedType())
160 }
161 } else {
162 priorVal = cty.NullVal(schema.ImpliedType())
163 }
164
165 proposedNewVal := objchange.ProposedNewObject(schema, priorVal, configVal)
98 166
99 // Call pre-diff hook 167 // Call pre-diff hook
100 if !n.Stub { 168 if !n.Stub {
101 err := ctx.Hook(func(h Hook) (HookAction, error) { 169 err := ctx.Hook(func(h Hook) (HookAction, error) {
102 return h.PreDiff(n.Info, state) 170 return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal)
103 }) 171 })
104 if err != nil { 172 if err != nil {
105 return nil, err 173 return nil, err
106 } 174 }
107 } 175 }
108 176
109 // The state for the diff must never be nil 177 // The provider gets an opportunity to customize the proposed new value,
110 diffState := state 178 // which in turn produces the _planned_ new value.
111 if diffState == nil { 179 resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{
112 diffState = new(InstanceState) 180 TypeName: n.Addr.Resource.Type,
181 Config: configVal,
182 PriorState: priorVal,
183 ProposedNewState: proposedNewVal,
184 PriorPrivate: priorPrivate,
185 })
186 diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config))
187 if diags.HasErrors() {
188 return nil, diags.Err()
189 }
190
191 plannedNewVal := resp.PlannedState
192 plannedPrivate := resp.PlannedPrivate
193
194 if plannedNewVal == cty.NilVal {
195 // Should never happen. Since real-world providers return via RPC a nil
196 // is always a bug in the client-side stub. This is more likely caused
197 // by an incompletely-configured mock provider in tests, though.
198 panic(fmt.Sprintf("PlanResourceChange of %s produced nil value", absAddr.String()))
199 }
200
201 // We allow the planned new value to disagree with configuration _values_
202 // here, since that allows the provider to do special logic like a
203 // DiffSuppressFunc, but we still require that the provider produces
204 // a value whose type conforms to the schema.
205 for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
206 diags = diags.Append(tfdiags.Sourceless(
207 tfdiags.Error,
208 "Provider produced invalid plan",
209 fmt.Sprintf(
210 "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
211 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
212 ),
213 ))
214 }
215 if diags.HasErrors() {
216 return nil, diags.Err()
217 }
218
219 if errs := objchange.AssertPlanValid(schema, priorVal, configVal, plannedNewVal); len(errs) > 0 {
220 if resp.LegacyTypeSystem {
221 // The shimming of the old type system in the legacy SDK is not precise
222 // enough to pass this consistency check, so we'll give it a pass here,
223 // but we will generate a warning about it so that we are more likely
224 // to notice in the logs if an inconsistency beyond the type system
225 // leads to a downstream provider failure.
226 var buf strings.Builder
227 fmt.Fprintf(&buf, "[WARN] Provider %q produced an invalid plan for %s, but we are tolerating it because it is using the legacy plugin SDK.\n The following problems may be the cause of any confusing errors from downstream operations:", n.ProviderAddr.ProviderConfig.Type, absAddr)
228 for _, err := range errs {
229 fmt.Fprintf(&buf, "\n - %s", tfdiags.FormatError(err))
230 }
231 log.Print(buf.String())
232 } else {
233 for _, err := range errs {
234 diags = diags.Append(tfdiags.Sourceless(
235 tfdiags.Error,
236 "Provider produced invalid plan",
237 fmt.Sprintf(
238 "Provider %q planned an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
239 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
240 ),
241 ))
242 }
243 return nil, diags.Err()
244 }
113 } 245 }
114 diffState.init()
115 246
116 // Diff! 247 {
117 diff, err := provider.Diff(n.Info, diffState, config) 248 var moreDiags tfdiags.Diagnostics
118 if err != nil { 249 plannedNewVal, moreDiags = n.processIgnoreChanges(priorVal, plannedNewVal)
119 return nil, err 250 diags = diags.Append(moreDiags)
120 } 251 if moreDiags.HasErrors() {
121 if diff == nil { 252 return nil, diags.Err()
122 diff = new(InstanceDiff) 253 }
123 } 254 }
124 255
125 // Set DestroyDeposed if we have deposed instances 256 // The provider produces a list of paths to attributes whose changes mean
126 _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) { 257 // that we must replace rather than update an existing remote object.
127 if len(rs.Deposed) > 0 { 258 // However, we only need to do that if the identified attributes _have_
128 diff.DestroyDeposed = true 259 // actually changed -- particularly after we may have undone some of the
129 } 260 // changes in processIgnoreChanges -- so now we'll filter that list to
261 // include only where changes are detected.
262 reqRep := cty.NewPathSet()
263 if len(resp.RequiresReplace) > 0 {
264 for _, path := range resp.RequiresReplace {
265 if priorVal.IsNull() {
266 // If prior is null then we don't expect any RequiresReplace at all,
267 // because this is a Create action.
268 continue
269 }
130 270
131 return nil, nil 271 priorChangedVal, priorPathDiags := hcl.ApplyPath(priorVal, path, nil)
132 }) 272 plannedChangedVal, plannedPathDiags := hcl.ApplyPath(plannedNewVal, path, nil)
133 if err != nil { 273 if plannedPathDiags.HasErrors() && priorPathDiags.HasErrors() {
134 return nil, err 274 // This means the path was invalid in both the prior and new
135 } 275 // values, which is an error with the provider itself.
276 diags = diags.Append(tfdiags.Sourceless(
277 tfdiags.Error,
278 "Provider produced invalid plan",
279 fmt.Sprintf(
280 "Provider %q has indicated \"requires replacement\" on %s for a non-existent attribute path %#v.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
281 n.ProviderAddr.ProviderConfig.Type, absAddr, path,
282 ),
283 ))
284 continue
285 }
136 286
137 // Preserve the DestroyTainted flag 287 // Make sure we have valid Values for both values.
138 if n.Diff != nil { 288 // Note: if the opposing value was of the type
139 diff.SetTainted((*n.Diff).GetDestroyTainted()) 289 // cty.DynamicPseudoType, the type assigned here may not exactly
140 } 290 // match the schema. This is fine here, since we're only going to
291 // check for equality, but if the NullVal is to be used, we need to
292 // check the schema for th true type.
293 switch {
294 case priorChangedVal == cty.NilVal && plannedChangedVal == cty.NilVal:
295 // this should never happen without ApplyPath errors above
296 panic("requires replace path returned 2 nil values")
297 case priorChangedVal == cty.NilVal:
298 priorChangedVal = cty.NullVal(plannedChangedVal.Type())
299 case plannedChangedVal == cty.NilVal:
300 plannedChangedVal = cty.NullVal(priorChangedVal.Type())
301 }
141 302
142 // Require a destroy if there is an ID and it requires new. 303 eqV := plannedChangedVal.Equals(priorChangedVal)
143 if diff.RequiresNew() && state != nil && state.ID != "" { 304 if !eqV.IsKnown() || eqV.False() {
144 diff.SetDestroy(true) 305 reqRep.Add(path)
306 }
307 }
308 if diags.HasErrors() {
309 return nil, diags.Err()
310 }
145 } 311 }
146 312
147 // If we're creating a new resource, compute its ID 313 eqV := plannedNewVal.Equals(priorVal)
148 if diff.RequiresNew() || state == nil || state.ID == "" { 314 eq := eqV.IsKnown() && eqV.True()
149 var oldID string 315
150 if state != nil { 316 var action plans.Action
151 oldID = state.Attributes["id"] 317 switch {
318 case priorVal.IsNull():
319 action = plans.Create
320 case eq:
321 action = plans.NoOp
322 case !reqRep.Empty():
323 // If there are any "requires replace" paths left _after our filtering
324 // above_ then this is a replace action.
325 if n.CreateBeforeDestroy {
326 action = plans.CreateThenDelete
327 } else {
328 action = plans.DeleteThenCreate
152 } 329 }
153 330 default:
154 // Add diff to compute new ID 331 action = plans.Update
155 diff.init() 332 // "Delete" is never chosen here, because deletion plans are always
156 diff.SetAttribute("id", &ResourceAttrDiff{ 333 // created more directly elsewhere, such as in "orphan" handling.
157 Old: oldID, 334 }
158 NewComputed: true, 335
159 RequiresNew: true, 336 if action.IsReplace() {
160 Type: DiffAttrOutput, 337 // In this strange situation we want to produce a change object that
338 // shows our real prior object but has a _new_ object that is built
339 // from a null prior object, since we're going to delete the one
340 // that has all the computed values on it.
341 //
342 // Therefore we'll ask the provider to plan again here, giving it
343 // a null object for the prior, and then we'll meld that with the
344 // _actual_ prior state to produce a correctly-shaped replace change.
345 // The resulting change should show any computed attributes changing
346 // from known prior values to unknown values, unless the provider is
347 // able to predict new values for any of these computed attributes.
348 nullPriorVal := cty.NullVal(schema.ImpliedType())
349
350 // create a new proposed value from the null state and the config
351 proposedNewVal = objchange.ProposedNewObject(schema, nullPriorVal, configVal)
352
353 resp = provider.PlanResourceChange(providers.PlanResourceChangeRequest{
354 TypeName: n.Addr.Resource.Type,
355 Config: configVal,
356 PriorState: nullPriorVal,
357 ProposedNewState: proposedNewVal,
358 PriorPrivate: plannedPrivate,
161 }) 359 })
360 // We need to tread carefully here, since if there are any warnings
361 // in here they probably also came out of our previous call to
362 // PlanResourceChange above, and so we don't want to repeat them.
363 // Consequently, we break from the usual pattern here and only
364 // append these new diagnostics if there's at least one error inside.
365 if resp.Diagnostics.HasErrors() {
366 diags = diags.Append(resp.Diagnostics.InConfigBody(config.Config))
367 return nil, diags.Err()
368 }
369 plannedNewVal = resp.PlannedState
370 plannedPrivate = resp.PlannedPrivate
371 for _, err := range plannedNewVal.Type().TestConformance(schema.ImpliedType()) {
372 diags = diags.Append(tfdiags.Sourceless(
373 tfdiags.Error,
374 "Provider produced invalid plan",
375 fmt.Sprintf(
376 "Provider %q planned an invalid value for %s%s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
377 n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err),
378 ),
379 ))
380 }
381 if diags.HasErrors() {
382 return nil, diags.Err()
383 }
162 } 384 }
163 385
164 // filter out ignored resources 386 // If our prior value was tainted then we actually want this to appear
165 if err := n.processIgnoreChanges(diff); err != nil { 387 // as a replace change, even though so far we've been treating it as a
166 return nil, err 388 // create.
389 if action == plans.Create && priorValTainted != cty.NilVal {
390 if n.CreateBeforeDestroy {
391 action = plans.CreateThenDelete
392 } else {
393 action = plans.DeleteThenCreate
394 }
395 priorVal = priorValTainted
396 }
397
398 // As a special case, if we have a previous diff (presumably from the plan
399 // phases, whereas we're now in the apply phase) and it was for a replace,
400 // we've already deleted the original object from state by the time we
401 // get here and so we would've ended up with a _create_ action this time,
402 // which we now need to paper over to get a result consistent with what
403 // we originally intended.
404 if n.PreviousDiff != nil {
405 prevChange := *n.PreviousDiff
406 if prevChange.Action.IsReplace() && action == plans.Create {
407 log.Printf("[TRACE] EvalDiff: %s treating Create change as %s change to match with earlier plan", absAddr, prevChange.Action)
408 action = prevChange.Action
409 priorVal = prevChange.Before
410 }
167 } 411 }
168 412
169 // Call post-refresh hook 413 // Call post-refresh hook
170 if !n.Stub { 414 if !n.Stub {
171 err = ctx.Hook(func(h Hook) (HookAction, error) { 415 err := ctx.Hook(func(h Hook) (HookAction, error) {
172 return h.PostDiff(n.Info, diff) 416 return h.PostDiff(absAddr, states.CurrentGen, action, priorVal, plannedNewVal)
173 }) 417 })
174 if err != nil { 418 if err != nil {
175 return nil, err 419 return nil, err
@@ -177,30 +421,135 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
177 } 421 }
178 422
179 // Update our output if we care 423 // Update our output if we care
180 if n.OutputDiff != nil { 424 if n.OutputChange != nil {
181 *n.OutputDiff = diff 425 *n.OutputChange = &plans.ResourceInstanceChange{
426 Addr: absAddr,
427 Private: plannedPrivate,
428 ProviderAddr: n.ProviderAddr,
429 Change: plans.Change{
430 Action: action,
431 Before: priorVal,
432 After: plannedNewVal,
433 },
434 RequiredReplace: reqRep,
435 }
436 }
437
438 if n.OutputValue != nil {
439 *n.OutputValue = configVal
182 } 440 }
183 441
184 // Update the state if we care 442 // Update the state if we care
185 if n.OutputState != nil { 443 if n.OutputState != nil {
186 *n.OutputState = state 444 *n.OutputState = &states.ResourceInstanceObject{
187 445 // We use the special "planned" status here to note that this
188 // Merge our state so that the state is updated with our plan 446 // object's value is not yet complete. Objects with this status
189 if !diff.Empty() && n.OutputState != nil { 447 // cannot be used during expression evaluation, so the caller
190 *n.OutputState = state.MergeDiff(diff) 448 // must _also_ record the returned change in the active plan,
449 // which the expression evaluator will use in preference to this
450 // incomplete value recorded in the state.
451 Status: states.ObjectPlanned,
452 Value: plannedNewVal,
191 } 453 }
192 } 454 }
193 455
194 return nil, nil 456 return nil, nil
195} 457}
196 458
197func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { 459func (n *EvalDiff) processIgnoreChanges(prior, proposed cty.Value) (cty.Value, tfdiags.Diagnostics) {
198 if diff == nil || n.Resource == nil || n.Resource.Id() == "" { 460 // ignore_changes only applies when an object already exists, since we
461 // can't ignore changes to a thing we've not created yet.
462 if prior.IsNull() {
463 return proposed, nil
464 }
465
466 ignoreChanges := n.Config.Managed.IgnoreChanges
467 ignoreAll := n.Config.Managed.IgnoreAllChanges
468
469 if len(ignoreChanges) == 0 && !ignoreAll {
470 return proposed, nil
471 }
472 if ignoreAll {
473 return prior, nil
474 }
475 if prior.IsNull() || proposed.IsNull() {
476 // Ignore changes doesn't apply when we're creating for the first time.
477 // Proposed should never be null here, but if it is then we'll just let it be.
478 return proposed, nil
479 }
480
481 return processIgnoreChangesIndividual(prior, proposed, ignoreChanges)
482}
483
484func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []hcl.Traversal) (cty.Value, tfdiags.Diagnostics) {
485 // When we walk below we will be using cty.Path values for comparison, so
486 // we'll convert our traversals here so we can compare more easily.
487 ignoreChangesPath := make([]cty.Path, len(ignoreChanges))
488 for i, traversal := range ignoreChanges {
489 path := make(cty.Path, len(traversal))
490 for si, step := range traversal {
491 switch ts := step.(type) {
492 case hcl.TraverseRoot:
493 path[si] = cty.GetAttrStep{
494 Name: ts.Name,
495 }
496 case hcl.TraverseAttr:
497 path[si] = cty.GetAttrStep{
498 Name: ts.Name,
499 }
500 case hcl.TraverseIndex:
501 path[si] = cty.IndexStep{
502 Key: ts.Key,
503 }
504 default:
505 panic(fmt.Sprintf("unsupported traversal step %#v", step))
506 }
507 }
508 ignoreChangesPath[i] = path
509 }
510
511 var diags tfdiags.Diagnostics
512 ret, _ := cty.Transform(proposed, func(path cty.Path, v cty.Value) (cty.Value, error) {
513 // First we must see if this is a path that's being ignored at all.
514 // We're looking for an exact match here because this walk will visit
515 // leaf values first and then their containers, and we want to do
516 // the "ignore" transform once we reach the point indicated, throwing
517 // away any deeper values we already produced at that point.
518 var ignoreTraversal hcl.Traversal
519 for i, candidate := range ignoreChangesPath {
520 if reflect.DeepEqual(path, candidate) {
521 ignoreTraversal = ignoreChanges[i]
522 }
523 }
524 if ignoreTraversal == nil {
525 return v, nil
526 }
527
528 // If we're able to follow the same path through the prior value,
529 // we'll take the value there instead, effectively undoing the
530 // change that was planned.
531 priorV, diags := hcl.ApplyPath(prior, path, nil)
532 if diags.HasErrors() {
533 // We just ignore the errors and move on here, since we assume it's
534 // just because the prior value was a slightly-different shape.
535 // It could potentially also be that the traversal doesn't match
536 // the schema, but we should've caught that during the validate
537 // walk if so.
538 return v, nil
539 }
540 return priorV, nil
541 })
542 return ret, diags
543}
544
545func (n *EvalDiff) processIgnoreChangesOld(diff *InstanceDiff) error {
546 if diff == nil || n.Config == nil || n.Config.Managed == nil {
199 return nil 547 return nil
200 } 548 }
201 ignoreChanges := n.Resource.Lifecycle.IgnoreChanges 549 ignoreChanges := n.Config.Managed.IgnoreChanges
550 ignoreAll := n.Config.Managed.IgnoreAllChanges
202 551
203 if len(ignoreChanges) == 0 { 552 if len(ignoreChanges) == 0 && !ignoreAll {
204 return nil 553 return nil
205 } 554 }
206 555
@@ -220,9 +569,14 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
220 569
221 // get the complete set of keys we want to ignore 570 // get the complete set of keys we want to ignore
222 ignorableAttrKeys := make(map[string]bool) 571 ignorableAttrKeys := make(map[string]bool)
223 for _, ignoredKey := range ignoreChanges { 572 for k := range attrs {
224 for k := range attrs { 573 if ignoreAll {
225 if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) { 574 ignorableAttrKeys[k] = true
575 continue
576 }
577 for _, ignoredTraversal := range ignoreChanges {
578 ignoredKey := legacyFlatmapKeyForTraversal(ignoredTraversal)
579 if k == ignoredKey || strings.HasPrefix(k, ignoredKey+".") {
226 ignorableAttrKeys[k] = true 580 ignorableAttrKeys[k] = true
227 } 581 }
228 } 582 }
@@ -285,14 +639,56 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
285 639
286 // If we didn't hit any of our early exit conditions, we can filter the diff. 640 // If we didn't hit any of our early exit conditions, we can filter the diff.
287 for k := range ignorableAttrKeys { 641 for k := range ignorableAttrKeys {
288 log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s", 642 log.Printf("[DEBUG] [EvalIgnoreChanges] %s: Ignoring diff attribute: %s", n.Addr.String(), k)
289 n.Resource.Id(), k)
290 diff.DelAttribute(k) 643 diff.DelAttribute(k)
291 } 644 }
292 645
293 return nil 646 return nil
294} 647}
295 648
649// legacyFlagmapKeyForTraversal constructs a key string compatible with what
650// the flatmap package would generate for an attribute addressable by the given
651// traversal.
652//
653// This is used only to shim references to attributes within the diff and
654// state structures, which have not (at the time of writing) yet been updated
655// to use the newer HCL-based representations.
656func legacyFlatmapKeyForTraversal(traversal hcl.Traversal) string {
657 var buf bytes.Buffer
658 first := true
659 for _, step := range traversal {
660 if !first {
661 buf.WriteByte('.')
662 }
663 switch ts := step.(type) {
664 case hcl.TraverseRoot:
665 buf.WriteString(ts.Name)
666 case hcl.TraverseAttr:
667 buf.WriteString(ts.Name)
668 case hcl.TraverseIndex:
669 val := ts.Key
670 switch val.Type() {
671 case cty.Number:
672 bf := val.AsBigFloat()
673 buf.WriteString(bf.String())
674 case cty.String:
675 s := val.AsString()
676 buf.WriteString(s)
677 default:
678 // should never happen, since no other types appear in
679 // traversals in practice.
680 buf.WriteByte('?')
681 }
682 default:
683 // should never happen, since we've covered all of the types
684 // that show up in parsed traversals in practice.
685 buf.WriteByte('?')
686 }
687 first = false
688 }
689 return buf.String()
690}
691
296// a group of key-*ResourceAttrDiff pairs from the same flatmapped container 692// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
297type flatAttrDiff map[string]*ResourceAttrDiff 693type flatAttrDiff map[string]*ResourceAttrDiff
298 694
@@ -343,159 +739,213 @@ func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
343// EvalDiffDestroy is an EvalNode implementation that returns a plain 739// EvalDiffDestroy is an EvalNode implementation that returns a plain
344// destroy diff. 740// destroy diff.
345type EvalDiffDestroy struct { 741type EvalDiffDestroy struct {
346 Info *InstanceInfo 742 Addr addrs.ResourceInstance
347 State **InstanceState 743 DeposedKey states.DeposedKey
348 Output **InstanceDiff 744 State **states.ResourceInstanceObject
745 ProviderAddr addrs.AbsProviderConfig
746
747 Output **plans.ResourceInstanceChange
748 OutputState **states.ResourceInstanceObject
349} 749}
350 750
351// TODO: test 751// TODO: test
352func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) { 752func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
753 absAddr := n.Addr.Absolute(ctx.Path())
353 state := *n.State 754 state := *n.State
354 755
355 // If there is no state or we don't have an ID, we're already destroyed 756 if n.ProviderAddr.ProviderConfig.Type == "" {
356 if state == nil || state.ID == "" { 757 if n.DeposedKey == "" {
758 panic(fmt.Sprintf("EvalDiffDestroy for %s does not have ProviderAddr set", absAddr))
759 } else {
760 panic(fmt.Sprintf("EvalDiffDestroy for %s (deposed %s) does not have ProviderAddr set", absAddr, n.DeposedKey))
761 }
762 }
763
764 // If there is no state or our attributes object is null then we're already
765 // destroyed.
766 if state == nil || state.Value.IsNull() {
357 return nil, nil 767 return nil, nil
358 } 768 }
359 769
360 // Call pre-diff hook 770 // Call pre-diff hook
361 err := ctx.Hook(func(h Hook) (HookAction, error) { 771 err := ctx.Hook(func(h Hook) (HookAction, error) {
362 return h.PreDiff(n.Info, state) 772 return h.PreDiff(
773 absAddr, n.DeposedKey.Generation(),
774 state.Value,
775 cty.NullVal(cty.DynamicPseudoType),
776 )
363 }) 777 })
364 if err != nil { 778 if err != nil {
365 return nil, err 779 return nil, err
366 } 780 }
367 781
368 // The diff 782 // Change is always the same for a destroy. We don't need the provider's
369 diff := &InstanceDiff{Destroy: true} 783 // help for this one.
784 // TODO: Should we give the provider an opportunity to veto this?
785 change := &plans.ResourceInstanceChange{
786 Addr: absAddr,
787 DeposedKey: n.DeposedKey,
788 Change: plans.Change{
789 Action: plans.Delete,
790 Before: state.Value,
791 After: cty.NullVal(cty.DynamicPseudoType),
792 },
793 ProviderAddr: n.ProviderAddr,
794 }
370 795
371 // Call post-diff hook 796 // Call post-diff hook
372 err = ctx.Hook(func(h Hook) (HookAction, error) { 797 err = ctx.Hook(func(h Hook) (HookAction, error) {
373 return h.PostDiff(n.Info, diff) 798 return h.PostDiff(
799 absAddr,
800 n.DeposedKey.Generation(),
801 change.Action,
802 change.Before,
803 change.After,
804 )
374 }) 805 })
375 if err != nil { 806 if err != nil {
376 return nil, err 807 return nil, err
377 } 808 }
378 809
379 // Update our output 810 // Update our output
380 *n.Output = diff 811 *n.Output = change
381
382 return nil, nil
383}
384
385// EvalDiffDestroyModule is an EvalNode implementation that writes the diff to
386// the full diff.
387type EvalDiffDestroyModule struct {
388 Path []string
389}
390
391// TODO: test
392func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
393 diff, lock := ctx.Diff()
394
395 // Acquire the lock so that we can do this safely concurrently
396 lock.Lock()
397 defer lock.Unlock()
398 812
399 // Write the diff 813 if n.OutputState != nil {
400 modDiff := diff.ModuleByPath(n.Path) 814 // Record our proposed new state, which is nil because we're destroying.
401 if modDiff == nil { 815 *n.OutputState = nil
402 modDiff = diff.AddModule(n.Path)
403 } 816 }
404 modDiff.Destroy = true
405 817
406 return nil, nil 818 return nil, nil
407} 819}
408 820
409// EvalFilterDiff is an EvalNode implementation that filters the diff 821// EvalReduceDiff is an EvalNode implementation that takes a planned resource
410// according to some filter. 822// instance change as might be produced by EvalDiff or EvalDiffDestroy and
411type EvalFilterDiff struct { 823// "simplifies" it to a single atomic action to be performed by a specific
412 // Input and output 824// graph node.
413 Diff **InstanceDiff 825//
414 Output **InstanceDiff 826// Callers must specify whether they are a destroy node or a regular apply
415 827// node. If the result is NoOp then the given change requires no action for
416 // Destroy, if true, will only include a destroy diff if it is set. 828// the specific graph node calling this and so evaluation of the that graph
417 Destroy bool 829// node should exit early and take no action.
830//
831// The object written to OutChange may either be identical to InChange or
832// a new change object derived from InChange. Because of the former case, the
833// caller must not mutate the object returned in OutChange.
834type EvalReduceDiff struct {
835 Addr addrs.ResourceInstance
836 InChange **plans.ResourceInstanceChange
837 Destroy bool
838 OutChange **plans.ResourceInstanceChange
418} 839}
419 840
420func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) { 841// TODO: test
421 if *n.Diff == nil { 842func (n *EvalReduceDiff) Eval(ctx EvalContext) (interface{}, error) {
422 return nil, nil 843 in := *n.InChange
423 } 844 out := in.Simplify(n.Destroy)
424 845 if n.OutChange != nil {
425 input := *n.Diff 846 *n.OutChange = out
426 result := new(InstanceDiff) 847 }
427 848 if out.Action != in.Action {
428 if n.Destroy { 849 if n.Destroy {
429 if input.GetDestroy() || input.RequiresNew() { 850 log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for destroy node", n.Addr, in.Action, out.Action)
430 result.SetDestroy(true) 851 } else {
852 log.Printf("[TRACE] EvalReduceDiff: %s change simplified from %s to %s for apply node", n.Addr, in.Action, out.Action)
431 } 853 }
432 } 854 }
433
434 if n.Output != nil {
435 *n.Output = result
436 }
437
438 return nil, nil 855 return nil, nil
439} 856}
440 857
441// EvalReadDiff is an EvalNode implementation that writes the diff to 858// EvalReadDiff is an EvalNode implementation that retrieves the planned
442// the full diff. 859// change for a particular resource instance object.
443type EvalReadDiff struct { 860type EvalReadDiff struct {
444 Name string 861 Addr addrs.ResourceInstance
445 Diff **InstanceDiff 862 DeposedKey states.DeposedKey
863 ProviderSchema **ProviderSchema
864 Change **plans.ResourceInstanceChange
446} 865}
447 866
448func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) { 867func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
449 diff, lock := ctx.Diff() 868 providerSchema := *n.ProviderSchema
869 changes := ctx.Changes()
870 addr := n.Addr.Absolute(ctx.Path())
450 871
451 // Acquire the lock so that we can do this safely concurrently 872 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
452 lock.Lock() 873 if schema == nil {
453 defer lock.Unlock() 874 // Should be caught during validation, so we don't bother with a pretty error here
875 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
876 }
454 877
455 // Write the diff 878 gen := states.CurrentGen
456 modDiff := diff.ModuleByPath(ctx.Path()) 879 if n.DeposedKey != states.NotDeposed {
457 if modDiff == nil { 880 gen = n.DeposedKey
881 }
882 csrc := changes.GetResourceInstanceChange(addr, gen)
883 if csrc == nil {
884 log.Printf("[TRACE] EvalReadDiff: No planned change recorded for %s", addr)
458 return nil, nil 885 return nil, nil
459 } 886 }
460 887
461 *n.Diff = modDiff.Resources[n.Name] 888 change, err := csrc.Decode(schema.ImpliedType())
889 if err != nil {
890 return nil, fmt.Errorf("failed to decode planned changes for %s: %s", addr, err)
891 }
892 if n.Change != nil {
893 *n.Change = change
894 }
895
896 log.Printf("[TRACE] EvalReadDiff: Read %s change from plan for %s", change.Action, addr)
462 897
463 return nil, nil 898 return nil, nil
464} 899}
465 900
466// EvalWriteDiff is an EvalNode implementation that writes the diff to 901// EvalWriteDiff is an EvalNode implementation that saves a planned change
467// the full diff. 902// for an instance object into the set of global planned changes.
468type EvalWriteDiff struct { 903type EvalWriteDiff struct {
469 Name string 904 Addr addrs.ResourceInstance
470 Diff **InstanceDiff 905 DeposedKey states.DeposedKey
906 ProviderSchema **ProviderSchema
907 Change **plans.ResourceInstanceChange
471} 908}
472 909
473// TODO: test 910// TODO: test
474func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) { 911func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
475 diff, lock := ctx.Diff() 912 changes := ctx.Changes()
476 913 addr := n.Addr.Absolute(ctx.Path())
477 // The diff to write, if its empty it should write nil 914 if n.Change == nil || *n.Change == nil {
478 var diffVal *InstanceDiff 915 // Caller sets nil to indicate that we need to remove a change from
479 if n.Diff != nil { 916 // the set of changes.
480 diffVal = *n.Diff 917 gen := states.CurrentGen
918 if n.DeposedKey != states.NotDeposed {
919 gen = n.DeposedKey
920 }
921 changes.RemoveResourceInstanceChange(addr, gen)
922 return nil, nil
481 } 923 }
482 if diffVal.Empty() { 924
483 diffVal = nil 925 providerSchema := *n.ProviderSchema
926 change := *n.Change
927
928 if change.Addr.String() != addr.String() || change.DeposedKey != n.DeposedKey {
929 // Should never happen, and indicates a bug in the caller.
930 panic("inconsistent address and/or deposed key in EvalWriteDiff")
484 } 931 }
485 932
486 // Acquire the lock so that we can do this safely concurrently 933 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
487 lock.Lock() 934 if schema == nil {
488 defer lock.Unlock() 935 // Should be caught during validation, so we don't bother with a pretty error here
936 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
937 }
489 938
490 // Write the diff 939 csrc, err := change.Encode(schema.ImpliedType())
491 modDiff := diff.ModuleByPath(ctx.Path()) 940 if err != nil {
492 if modDiff == nil { 941 return nil, fmt.Errorf("failed to encode planned changes for %s: %s", addr, err)
493 modDiff = diff.AddModule(ctx.Path())
494 } 942 }
495 if diffVal != nil { 943
496 modDiff.Resources[n.Name] = diffVal 944 changes.AppendResourceInstanceChange(csrc)
945 if n.DeposedKey == states.NotDeposed {
946 log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s", change.Action, addr)
497 } else { 947 } else {
498 delete(modDiff.Resources, n.Name) 948 log.Printf("[TRACE] EvalWriteDiff: recorded %s change for %s deposed object %s", change.Action, addr, n.DeposedKey)
499 } 949 }
500 950
501 return nil, nil 951 return nil, nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
index 62cc581..a60f4a0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
@@ -2,47 +2,63 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/providers"
9 "github.com/hashicorp/terraform/states"
10 "github.com/hashicorp/terraform/tfdiags"
5) 11)
6 12
7// EvalImportState is an EvalNode implementation that performs an 13// EvalImportState is an EvalNode implementation that performs an
8// ImportState operation on a provider. This will return the imported 14// ImportState operation on a provider. This will return the imported
9// states but won't modify any actual state. 15// states but won't modify any actual state.
10type EvalImportState struct { 16type EvalImportState struct {
11 Provider *ResourceProvider 17 Addr addrs.ResourceInstance
12 Info *InstanceInfo 18 Provider *providers.Interface
13 Id string 19 ID string
14 Output *[]*InstanceState 20 Output *[]providers.ImportedResource
15} 21}
16 22
17// TODO: test 23// TODO: test
18func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) { 24func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
25 absAddr := n.Addr.Absolute(ctx.Path())
19 provider := *n.Provider 26 provider := *n.Provider
27 var diags tfdiags.Diagnostics
20 28
21 { 29 {
22 // Call pre-import hook 30 // Call pre-import hook
23 err := ctx.Hook(func(h Hook) (HookAction, error) { 31 err := ctx.Hook(func(h Hook) (HookAction, error) {
24 return h.PreImportState(n.Info, n.Id) 32 return h.PreImportState(absAddr, n.ID)
25 }) 33 })
26 if err != nil { 34 if err != nil {
27 return nil, err 35 return nil, err
28 } 36 }
29 } 37 }
30 38
31 // Import! 39 resp := provider.ImportResourceState(providers.ImportResourceStateRequest{
32 state, err := provider.ImportState(n.Info, n.Id) 40 TypeName: n.Addr.Resource.Type,
33 if err != nil { 41 ID: n.ID,
34 return nil, fmt.Errorf( 42 })
35 "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err) 43 diags = diags.Append(resp.Diagnostics)
44 if diags.HasErrors() {
45 return nil, diags.Err()
46 }
47
48 imported := resp.ImportedResources
49
50 for _, obj := range imported {
51 log.Printf("[TRACE] EvalImportState: import %s %q produced instance object of type %s", absAddr.String(), n.ID, obj.TypeName)
36 } 52 }
37 53
38 if n.Output != nil { 54 if n.Output != nil {
39 *n.Output = state 55 *n.Output = imported
40 } 56 }
41 57
42 { 58 {
43 // Call post-import hook 59 // Call post-import hook
44 err := ctx.Hook(func(h Hook) (HookAction, error) { 60 err := ctx.Hook(func(h Hook) (HookAction, error) {
45 return h.PostImportState(n.Info, state) 61 return h.PostImportState(absAddr, imported)
46 }) 62 })
47 if err != nil { 63 if err != nil {
48 return nil, err 64 return nil, err
@@ -55,22 +71,25 @@ func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
55// EvalImportStateVerify verifies the state after ImportState and 71// EvalImportStateVerify verifies the state after ImportState and
56// after the refresh to make sure it is non-nil and valid. 72// after the refresh to make sure it is non-nil and valid.
57type EvalImportStateVerify struct { 73type EvalImportStateVerify struct {
58 Info *InstanceInfo 74 Addr addrs.ResourceInstance
59 Id string 75 State **states.ResourceInstanceObject
60 State **InstanceState
61} 76}
62 77
63// TODO: test 78// TODO: test
64func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) { 79func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
80 var diags tfdiags.Diagnostics
81
65 state := *n.State 82 state := *n.State
66 if state.Empty() { 83 if state.Value.IsNull() {
67 return nil, fmt.Errorf( 84 diags = diags.Append(tfdiags.Sourceless(
68 "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+ 85 tfdiags.Error,
69 "exist. Please verify the ID is correct. You cannot import non-existent\n"+ 86 "Cannot import non-existent remote object",
70 "resources using Terraform import.", 87 fmt.Sprintf(
71 n.Info.HumanId(), 88 "While attempting to import an existing object to %s, the provider detected that no object exists with the given id. Only pre-existing objects can be imported; check that the id is correct and that it is associated with the provider's configured region or endpoint, or use \"terraform apply\" to create a new remote object for this resource.",
72 n.Id) 89 n.Addr.String(),
90 ),
91 ))
73 } 92 }
74 93
75 return nil, nil 94 return nil, diags.ErrWithWarnings()
76} 95}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
deleted file mode 100644
index 6a78a6b..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
+++ /dev/null
@@ -1,56 +0,0 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/config"
7)
8
9// EvalInterpolate is an EvalNode implementation that takes a raw
10// configuration and interpolates it.
11type EvalInterpolate struct {
12 Config *config.RawConfig
13 Resource *Resource
14 Output **ResourceConfig
15 ContinueOnErr bool
16}
17
18func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
19 rc, err := ctx.Interpolate(n.Config, n.Resource)
20 if err != nil {
21 if n.ContinueOnErr {
22 log.Printf("[WARN] Interpolation %q failed: %s", n.Config.Key, err)
23 return nil, EvalEarlyExitError{}
24 }
25 return nil, err
26 }
27
28 if n.Output != nil {
29 *n.Output = rc
30 }
31
32 return nil, nil
33}
34
35// EvalInterpolateProvider is an EvalNode implementation that takes a
36// ProviderConfig and interpolates it. Provider configurations are the only
37// "inherited" type of configuration we have, and the original raw config may
38// have a different interpolation scope.
39type EvalInterpolateProvider struct {
40 Config *config.ProviderConfig
41 Resource *Resource
42 Output **ResourceConfig
43}
44
45func (n *EvalInterpolateProvider) Eval(ctx EvalContext) (interface{}, error) {
46 rc, err := ctx.InterpolateProvider(n.Config, n.Resource)
47 if err != nil {
48 return nil, err
49 }
50
51 if n.Output != nil {
52 *n.Output = rc
53 }
54
55 return nil, nil
56}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go
new file mode 100644
index 0000000..0c051f7
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_lang.go
@@ -0,0 +1,61 @@
1package terraform
2
3import (
4 "log"
5
6 "github.com/hashicorp/terraform/addrs"
7
8 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/terraform/configs/configschema"
10 "github.com/zclconf/go-cty/cty"
11)
12
13// EvalConfigBlock is an EvalNode implementation that takes a raw
14// configuration block and evaluates any expressions within it.
15//
16// ExpandedConfig is populated with the result of expanding any "dynamic"
17// blocks in the given body, which can be useful for extracting correct source
18// location information for specific attributes in the result.
19type EvalConfigBlock struct {
20 Config *hcl.Body
21 Schema *configschema.Block
22 SelfAddr addrs.Referenceable
23 Output *cty.Value
24 ExpandedConfig *hcl.Body
25 ContinueOnErr bool
26}
27
28func (n *EvalConfigBlock) Eval(ctx EvalContext) (interface{}, error) {
29 val, body, diags := ctx.EvaluateBlock(*n.Config, n.Schema, n.SelfAddr, EvalDataForNoInstanceKey)
30 if diags.HasErrors() && n.ContinueOnErr {
31 log.Printf("[WARN] Block evaluation failed: %s", diags.Err())
32 return nil, EvalEarlyExitError{}
33 }
34
35 if n.Output != nil {
36 *n.Output = val
37 }
38 if n.ExpandedConfig != nil {
39 *n.ExpandedConfig = body
40 }
41
42 return nil, diags.ErrWithWarnings()
43}
44
45// EvalConfigExpr is an EvalNode implementation that takes a raw configuration
46// expression and evaluates it.
47type EvalConfigExpr struct {
48 Expr hcl.Expression
49 SelfAddr addrs.Referenceable
50 Output *cty.Value
51}
52
53func (n *EvalConfigExpr) Eval(ctx EvalContext) (interface{}, error) {
54 val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, n.SelfAddr)
55
56 if n.Output != nil {
57 *n.Output = val
58 }
59
60 return nil, diags.ErrWithWarnings()
61}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
index a4b2a50..bad9ac5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_local.go
@@ -3,56 +3,55 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/zclconf/go-cty/cty"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/lang"
11 "github.com/hashicorp/terraform/tfdiags"
7) 12)
8 13
9// EvalLocal is an EvalNode implementation that evaluates the 14// EvalLocal is an EvalNode implementation that evaluates the
10// expression for a local value and writes it into a transient part of 15// expression for a local value and writes it into a transient part of
11// the state. 16// the state.
12type EvalLocal struct { 17type EvalLocal struct {
13 Name string 18 Addr addrs.LocalValue
14 Value *config.RawConfig 19 Expr hcl.Expression
15} 20}
16 21
17func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) { 22func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) {
18 cfg, err := ctx.Interpolate(n.Value, nil) 23 var diags tfdiags.Diagnostics
19 if err != nil { 24
20 return nil, fmt.Errorf("local.%s: %s", n.Name, err) 25 // We ignore diags here because any problems we might find will be found
26 // again in EvaluateExpr below.
27 refs, _ := lang.ReferencesInExpr(n.Expr)
28 for _, ref := range refs {
29 if ref.Subject == n.Addr {
30 diags = diags.Append(&hcl.Diagnostic{
31 Severity: hcl.DiagError,
32 Summary: "Self-referencing local value",
33 Detail: fmt.Sprintf("Local value %s cannot use its own result as part of its expression.", n.Addr),
34 Subject: ref.SourceRange.ToHCL().Ptr(),
35 Context: n.Expr.Range().Ptr(),
36 })
37 }
21 } 38 }
22 39 if diags.HasErrors() {
23 state, lock := ctx.State() 40 return nil, diags.Err()
24 if state == nil {
25 return nil, fmt.Errorf("cannot write local value to nil state")
26 } 41 }
27 42
28 // Get a write lock so we can access the state 43 val, moreDiags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil)
29 lock.Lock() 44 diags = diags.Append(moreDiags)
30 defer lock.Unlock() 45 if moreDiags.HasErrors() {
31 46 return nil, diags.Err()
32 // Look for the module state. If we don't have one, create it.
33 mod := state.ModuleByPath(ctx.Path())
34 if mod == nil {
35 mod = state.AddModule(ctx.Path())
36 } 47 }
37 48
38 // Get the value from the config 49 state := ctx.State()
39 var valueRaw interface{} = config.UnknownVariableValue 50 if state == nil {
40 if cfg != nil { 51 return nil, fmt.Errorf("cannot write local value to nil state")
41 var ok bool
42 valueRaw, ok = cfg.Get("value")
43 if !ok {
44 valueRaw = ""
45 }
46 if cfg.IsComputed("value") {
47 valueRaw = config.UnknownVariableValue
48 }
49 } 52 }
50 53
51 if mod.Locals == nil { 54 state.SetLocalValue(n.Addr.Absolute(ctx.Path()), val)
52 // initialize
53 mod.Locals = map[string]interface{}{}
54 }
55 mod.Locals[n.Name] = valueRaw
56 55
57 return nil, nil 56 return nil, nil
58} 57}
@@ -61,26 +60,15 @@ func (n *EvalLocal) Eval(ctx EvalContext) (interface{}, error) {
61// from the state. Locals aren't persisted, but we don't need to evaluate them 60// from the state. Locals aren't persisted, but we don't need to evaluate them
62// during destroy. 61// during destroy.
63type EvalDeleteLocal struct { 62type EvalDeleteLocal struct {
64 Name string 63 Addr addrs.LocalValue
65} 64}
66 65
67func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) { 66func (n *EvalDeleteLocal) Eval(ctx EvalContext) (interface{}, error) {
68 state, lock := ctx.State() 67 state := ctx.State()
69 if state == nil { 68 if state == nil {
70 return nil, nil 69 return nil, nil
71 } 70 }
72 71
73 // Get a write lock so we can access this instance 72 state.RemoveLocalValue(n.Addr.Absolute(ctx.Path()))
74 lock.Lock()
75 defer lock.Unlock()
76
77 // Look for the module state. If we don't have one, create it.
78 mod := state.ModuleByPath(ctx.Path())
79 if mod == nil {
80 return nil, nil
81 }
82
83 delete(mod.Locals, n.Name)
84
85 return nil, nil 73 return nil, nil
86} 74}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
index a834627..1057397 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
@@ -4,131 +4,132 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 6
7 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/plans"
12 "github.com/hashicorp/terraform/states"
8) 13)
9 14
10// EvalDeleteOutput is an EvalNode implementation that deletes an output 15// EvalDeleteOutput is an EvalNode implementation that deletes an output
11// from the state. 16// from the state.
12type EvalDeleteOutput struct { 17type EvalDeleteOutput struct {
13 Name string 18 Addr addrs.OutputValue
14} 19}
15 20
16// TODO: test 21// TODO: test
17func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) { 22func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
18 state, lock := ctx.State() 23 state := ctx.State()
19 if state == nil { 24 if state == nil {
20 return nil, nil 25 return nil, nil
21 } 26 }
22 27
23 // Get a write lock so we can access this instance 28 state.RemoveOutputValue(n.Addr.Absolute(ctx.Path()))
24 lock.Lock()
25 defer lock.Unlock()
26
27 // Look for the module state. If we don't have one, create it.
28 mod := state.ModuleByPath(ctx.Path())
29 if mod == nil {
30 return nil, nil
31 }
32
33 delete(mod.Outputs, n.Name)
34
35 return nil, nil 29 return nil, nil
36} 30}
37 31
38// EvalWriteOutput is an EvalNode implementation that writes the output 32// EvalWriteOutput is an EvalNode implementation that writes the output
39// for the given name to the current state. 33// for the given name to the current state.
40type EvalWriteOutput struct { 34type EvalWriteOutput struct {
41 Name string 35 Addr addrs.OutputValue
42 Sensitive bool 36 Sensitive bool
43 Value *config.RawConfig 37 Expr hcl.Expression
44 // ContinueOnErr allows interpolation to fail during Input 38 // ContinueOnErr allows interpolation to fail during Input
45 ContinueOnErr bool 39 ContinueOnErr bool
46} 40}
47 41
48// TODO: test 42// TODO: test
49func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) { 43func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
50 // This has to run before we have a state lock, since interpolation also 44 addr := n.Addr.Absolute(ctx.Path())
45
46 // This has to run before we have a state lock, since evaluation also
51 // reads the state 47 // reads the state
52 cfg, err := ctx.Interpolate(n.Value, nil) 48 val, diags := ctx.EvaluateExpr(n.Expr, cty.DynamicPseudoType, nil)
53 // handle the error after we have the module from the state 49 // We'll handle errors below, after we have loaded the module.
54 50
55 state, lock := ctx.State() 51 state := ctx.State()
56 if state == nil { 52 if state == nil {
57 return nil, fmt.Errorf("cannot write state to nil state") 53 return nil, nil
58 } 54 }
59 55
60 // Get a write lock so we can access this instance 56 changes := ctx.Changes() // may be nil, if we're not working on a changeset
61 lock.Lock()
62 defer lock.Unlock()
63 // Look for the module state. If we don't have one, create it.
64 mod := state.ModuleByPath(ctx.Path())
65 if mod == nil {
66 mod = state.AddModule(ctx.Path())
67 }
68 57
69 // handling the interpolation error 58 // handling the interpolation error
70 if err != nil { 59 if diags.HasErrors() {
71 if n.ContinueOnErr || flagWarnOutputErrors { 60 if n.ContinueOnErr || flagWarnOutputErrors {
72 log.Printf("[ERROR] Output interpolation %q failed: %s", n.Name, err) 61 log.Printf("[ERROR] Output interpolation %q failed: %s", n.Addr.Name, diags.Err())
73 // if we're continuing, make sure the output is included, and 62 // if we're continuing, make sure the output is included, and
74 // marked as unknown 63 // marked as unknown. If the evaluator was able to find a type
75 mod.Outputs[n.Name] = &OutputState{ 64 // for the value in spite of the error then we'll use it.
76 Type: "string", 65 n.setValue(addr, state, changes, cty.UnknownVal(val.Type()))
77 Value: config.UnknownVariableValue,
78 }
79 return nil, EvalEarlyExitError{} 66 return nil, EvalEarlyExitError{}
80 } 67 }
81 return nil, err 68 return nil, diags.Err()
82 } 69 }
83 70
84 // Get the value from the config 71 n.setValue(addr, state, changes, val)
85 var valueRaw interface{} = config.UnknownVariableValue 72
86 if cfg != nil { 73 return nil, nil
87 var ok bool 74}
88 valueRaw, ok = cfg.Get("value") 75
89 if !ok { 76func (n *EvalWriteOutput) setValue(addr addrs.AbsOutputValue, state *states.SyncState, changes *plans.ChangesSync, val cty.Value) {
90 valueRaw = "" 77 if val.IsKnown() && !val.IsNull() {
91 } 78 // The state itself doesn't represent unknown values, so we null them
92 if cfg.IsComputed("value") { 79 // out here and then we'll save the real unknown value in the planned
93 valueRaw = config.UnknownVariableValue 80 // changeset below, if we have one on this graph walk.
94 } 81 log.Printf("[TRACE] EvalWriteOutput: Saving value for %s in state", addr)
82 stateVal := cty.UnknownAsNull(val)
83 state.SetOutputValue(addr, stateVal, n.Sensitive)
84 } else {
85 log.Printf("[TRACE] EvalWriteOutput: Removing %s from state (it is now null)", addr)
86 state.RemoveOutputValue(addr)
95 } 87 }
96 88
97 switch valueTyped := valueRaw.(type) { 89 // If we also have an active changeset then we'll replicate the value in
98 case string: 90 // there. This is used in preference to the state where present, since it
99 mod.Outputs[n.Name] = &OutputState{ 91 // *is* able to represent unknowns, while the state cannot.
100 Type: "string", 92 if changes != nil {
101 Sensitive: n.Sensitive, 93 // For the moment we are not properly tracking changes to output
102 Value: valueTyped, 94 // values, and just marking them always as "Create" or "Destroy"
103 } 95 // actions. A future release will rework the output lifecycle so we
104 case []interface{}: 96 // can track their changes properly, in a similar way to how we work
105 mod.Outputs[n.Name] = &OutputState{ 97 // with resource instances.
106 Type: "list", 98
107 Sensitive: n.Sensitive, 99 var change *plans.OutputChange
108 Value: valueTyped, 100 if !val.IsNull() {
109 } 101 change = &plans.OutputChange{
110 case map[string]interface{}: 102 Addr: addr,
111 mod.Outputs[n.Name] = &OutputState{
112 Type: "map",
113 Sensitive: n.Sensitive,
114 Value: valueTyped,
115 }
116 case []map[string]interface{}:
117 // an HCL map is multi-valued, so if this was read out of a config the
118 // map may still be in a slice.
119 if len(valueTyped) == 1 {
120 mod.Outputs[n.Name] = &OutputState{
121 Type: "map",
122 Sensitive: n.Sensitive, 103 Sensitive: n.Sensitive,
123 Value: valueTyped[0], 104 Change: plans.Change{
105 Action: plans.Create,
106 Before: cty.NullVal(cty.DynamicPseudoType),
107 After: val,
108 },
109 }
110 } else {
111 change = &plans.OutputChange{
112 Addr: addr,
113 Sensitive: n.Sensitive,
114 Change: plans.Change{
115 // This is just a weird placeholder delete action since
116 // we don't have an actual prior value to indicate.
117 // FIXME: Generate real planned changes for output values
118 // that include the old values.
119 Action: plans.Delete,
120 Before: cty.NullVal(cty.DynamicPseudoType),
121 After: cty.NullVal(cty.DynamicPseudoType),
122 },
124 } 123 }
125 break
126 } 124 }
127 return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
128 n.Name, valueTyped, len(valueTyped))
129 default:
130 return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
131 }
132 125
133 return nil, nil 126 cs, err := change.Encode()
127 if err != nil {
128 // Should never happen, since we just constructed this right above
129 panic(fmt.Sprintf("planned change for %s could not be encoded: %s", addr, err))
130 }
131 log.Printf("[TRACE] EvalWriteOutput: Saving %s change for %s in changeset", change.Action, addr)
132 changes.RemoveOutputChange(addr) // remove any existing planned change, if present
133 changes.AppendOutputChange(cs) // add the new planned change
134 }
134} 135}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
index 61f6ff9..7df6584 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
@@ -2,50 +2,86 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
5 6
6 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/hcl2/hcl"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/providers"
12 "github.com/hashicorp/terraform/tfdiags"
7) 13)
8 14
9// EvalBuildProviderConfig outputs a *ResourceConfig that is properly 15func buildProviderConfig(ctx EvalContext, addr addrs.ProviderConfig, config *configs.Provider) hcl.Body {
10// merged with parents and inputs on top of what is configured in the file. 16 var configBody hcl.Body
11type EvalBuildProviderConfig struct { 17 if config != nil {
12 Provider string 18 configBody = config.Config
13 Config **ResourceConfig 19 }
14 Output **ResourceConfig
15}
16 20
17func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) { 21 var inputBody hcl.Body
18 cfg := *n.Config 22 inputConfig := ctx.ProviderInput(addr)
19 23 if len(inputConfig) > 0 {
20 // If we have an Input configuration set, then merge that in 24 inputBody = configs.SynthBody("<input-prompt>", inputConfig)
21 if input := ctx.ProviderInput(n.Provider); input != nil {
22 // "input" is a map of the subset of config values that were known
23 // during the input walk, set by EvalInputProvider. Note that
24 // in particular it does *not* include attributes that had
25 // computed values at input time; those appear *only* in
26 // "cfg" here.
27 rc, err := config.NewRawConfig(input)
28 if err != nil {
29 return nil, err
30 }
31
32 merged := rc.Merge(cfg.raw)
33 cfg = NewResourceConfig(merged)
34 } 25 }
35 26
36 *n.Output = cfg 27 switch {
37 return nil, nil 28 case configBody != nil && inputBody != nil:
29 log.Printf("[TRACE] buildProviderConfig for %s: merging explicit config and input", addr)
30 // Note that the inputBody is the _base_ here, because configs.MergeBodies
31 // expects the base have all of the required fields, while these are
32 // forced to be optional for the override. The input process should
33 // guarantee that we have a value for each of the required arguments and
34 // that in practice the sets of attributes in each body will be
35 // disjoint.
36 return configs.MergeBodies(inputBody, configBody)
37 case configBody != nil:
38 log.Printf("[TRACE] buildProviderConfig for %s: using explicit config only", addr)
39 return configBody
40 case inputBody != nil:
41 log.Printf("[TRACE] buildProviderConfig for %s: using input only", addr)
42 return inputBody
43 default:
44 log.Printf("[TRACE] buildProviderConfig for %s: no configuration at all", addr)
45 return hcl.EmptyBody()
46 }
38} 47}
39 48
40// EvalConfigProvider is an EvalNode implementation that configures 49// EvalConfigProvider is an EvalNode implementation that configures
41// a provider that is already initialized and retrieved. 50// a provider that is already initialized and retrieved.
42type EvalConfigProvider struct { 51type EvalConfigProvider struct {
43 Provider string 52 Addr addrs.ProviderConfig
44 Config **ResourceConfig 53 Provider *providers.Interface
54 Config *configs.Provider
45} 55}
46 56
47func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) { 57func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
48 return nil, ctx.ConfigureProvider(n.Provider, *n.Config) 58 if n.Provider == nil {
59 return nil, fmt.Errorf("EvalConfigProvider Provider is nil")
60 }
61
62 var diags tfdiags.Diagnostics
63 provider := *n.Provider
64 config := n.Config
65
66 configBody := buildProviderConfig(ctx, n.Addr, config)
67
68 resp := provider.GetSchema()
69 diags = diags.Append(resp.Diagnostics)
70 if diags.HasErrors() {
71 return nil, diags.NonFatalErr()
72 }
73
74 configSchema := resp.Provider.Block
75 configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey)
76 diags = diags.Append(evalDiags)
77 if evalDiags.HasErrors() {
78 return nil, diags.NonFatalErr()
79 }
80
81 configDiags := ctx.ConfigureProvider(n.Addr, configVal)
82 configDiags = configDiags.InConfigBody(configBody)
83
84 return nil, configDiags.ErrWithWarnings()
49} 85}
50 86
51// EvalInitProvider is an EvalNode implementation that initializes a provider 87// EvalInitProvider is an EvalNode implementation that initializes a provider
@@ -53,85 +89,59 @@ func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
53// EvalGetProvider node. 89// EvalGetProvider node.
54type EvalInitProvider struct { 90type EvalInitProvider struct {
55 TypeName string 91 TypeName string
56 Name string 92 Addr addrs.ProviderConfig
57} 93}
58 94
59func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) { 95func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
60 return ctx.InitProvider(n.TypeName, n.Name) 96 return ctx.InitProvider(n.TypeName, n.Addr)
61} 97}
62 98
63// EvalCloseProvider is an EvalNode implementation that closes provider 99// EvalCloseProvider is an EvalNode implementation that closes provider
64// connections that aren't needed anymore. 100// connections that aren't needed anymore.
65type EvalCloseProvider struct { 101type EvalCloseProvider struct {
66 Name string 102 Addr addrs.ProviderConfig
67} 103}
68 104
69func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) { 105func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
70 ctx.CloseProvider(n.Name) 106 ctx.CloseProvider(n.Addr)
71 return nil, nil 107 return nil, nil
72} 108}
73 109
74// EvalGetProvider is an EvalNode implementation that retrieves an already 110// EvalGetProvider is an EvalNode implementation that retrieves an already
75// initialized provider instance for the given name. 111// initialized provider instance for the given name.
112//
113// Unlike most eval nodes, this takes an _absolute_ provider configuration,
114// because providers can be passed into and inherited between modules.
115// Resource nodes must therefore know the absolute path of the provider they
116// will use, which is usually accomplished by implementing
117// interface GraphNodeProviderConsumer.
76type EvalGetProvider struct { 118type EvalGetProvider struct {
77 Name string 119 Addr addrs.AbsProviderConfig
78 Output *ResourceProvider 120 Output *providers.Interface
121
122 // If non-nil, Schema will be updated after eval to refer to the
123 // schema of the provider.
124 Schema **ProviderSchema
79} 125}
80 126
81func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) { 127func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
82 result := ctx.Provider(n.Name) 128 if n.Addr.ProviderConfig.Type == "" {
129 // Should never happen
130 panic("EvalGetProvider used with uninitialized provider configuration address")
131 }
132
133 result := ctx.Provider(n.Addr)
83 if result == nil { 134 if result == nil {
84 return nil, fmt.Errorf("provider %s not initialized", n.Name) 135 return nil, fmt.Errorf("provider %s not initialized", n.Addr)
85 } 136 }
86 137
87 if n.Output != nil { 138 if n.Output != nil {
88 *n.Output = result 139 *n.Output = result
89 } 140 }
90 141
91 return nil, nil 142 if n.Schema != nil {
92} 143 *n.Schema = ctx.ProviderSchema(n.Addr)
93
94// EvalInputProvider is an EvalNode implementation that asks for input
95// for the given provider configurations.
96type EvalInputProvider struct {
97 Name string
98 Provider *ResourceProvider
99 Config **ResourceConfig
100}
101
102func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
103 rc := *n.Config
104 orig := rc.DeepCopy()
105
106 // Wrap the input into a namespace
107 input := &PrefixUIInput{
108 IdPrefix: fmt.Sprintf("provider.%s", n.Name),
109 QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
110 UIInput: ctx.Input(),
111 }
112
113 // Go through each provider and capture the input necessary
114 // to satisfy it.
115 config, err := (*n.Provider).Input(input, rc)
116 if err != nil {
117 return nil, fmt.Errorf(
118 "Error configuring %s: %s", n.Name, err)
119 } 144 }
120 145
121 // We only store values that have changed through Input.
122 // The goal is to cache cache input responses, not to provide a complete
123 // config for other providers.
124 confMap := make(map[string]interface{})
125 if config != nil && len(config.Config) > 0 {
126 // any values that weren't in the original ResourcConfig will be cached
127 for k, v := range config.Config {
128 if _, ok := orig.Config[k]; !ok {
129 confMap[k] = v
130 }
131 }
132 }
133
134 ctx.SetProviderInput(n.Name, confMap)
135
136 return nil, nil 146 return nil, nil
137} 147}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
index 89579c0..bc6b5cc 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
@@ -2,6 +2,9 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/configs/configschema"
7 "github.com/hashicorp/terraform/provisioners"
5) 8)
6 9
7// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner 10// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
@@ -30,7 +33,8 @@ func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
30// initialized provisioner instance for the given name. 33// initialized provisioner instance for the given name.
31type EvalGetProvisioner struct { 34type EvalGetProvisioner struct {
32 Name string 35 Name string
33 Output *ResourceProvisioner 36 Output *provisioners.Interface
37 Schema **configschema.Block
34} 38}
35 39
36func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) { 40func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
@@ -43,5 +47,9 @@ func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
43 *n.Output = result 47 *n.Output = result
44 } 48 }
45 49
50 if n.Schema != nil {
51 *n.Schema = ctx.ProvisionerSchema(n.Name)
52 }
53
46 return result, nil 54 return result, nil
47} 55}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
index fb85a28..34f2d60 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
@@ -2,105 +2,320 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
6
7 "github.com/zclconf/go-cty/cty"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/plans"
12 "github.com/hashicorp/terraform/plans/objchange"
13 "github.com/hashicorp/terraform/providers"
14 "github.com/hashicorp/terraform/states"
15 "github.com/hashicorp/terraform/tfdiags"
5) 16)
6 17
7// EvalReadDataDiff is an EvalNode implementation that executes a data 18// EvalReadData is an EvalNode implementation that deals with the main part
8// resource's ReadDataDiff method to discover what attributes it exports. 19// of the data resource lifecycle: either actually reading from the data source
9type EvalReadDataDiff struct { 20// or generating a plan to do so.
10 Provider *ResourceProvider 21type EvalReadData struct {
11 Output **InstanceDiff 22 Addr addrs.ResourceInstance
12 OutputState **InstanceState 23 Config *configs.Resource
13 Config **ResourceConfig 24 Dependencies []addrs.Referenceable
14 Info *InstanceInfo 25 Provider *providers.Interface
15 26 ProviderAddr addrs.AbsProviderConfig
16 // Set Previous when re-evaluating diff during apply, to ensure that 27 ProviderSchema **ProviderSchema
17 // the "Destroy" flag is preserved. 28
18 Previous **InstanceDiff 29 // Planned is set when dealing with data resources that were deferred to
30 // the apply walk, to let us see what was planned. If this is set, the
31 // evaluation of the config is required to produce a wholly-known
32 // configuration which is consistent with the partial object included
33 // in this planned change.
34 Planned **plans.ResourceInstanceChange
35
36 // ForcePlanRead, if true, overrides the usual behavior of immediately
37 // reading from the data source where possible, instead forcing us to
38 // _always_ generate a plan. This is used during the plan walk, since we
39 // mustn't actually apply anything there. (The resulting state doesn't
40 // get persisted)
41 ForcePlanRead bool
42
43 // The result from this EvalNode has a few different possibilities
44 // depending on the input:
45 // - If Planned is nil then we assume we're aiming to _produce_ the plan,
46 // and so the following two outcomes are possible:
47 // - OutputChange.Action is plans.NoOp and OutputState is the complete
48 // result of reading from the data source. This is the easy path.
49 // - OutputChange.Action is plans.Read and OutputState is a planned
50 // object placeholder (states.ObjectPlanned). In this case, the
51 // returned change must be recorded in the overral changeset and
52 // eventually passed to another instance of this struct during the
53 // apply walk.
54 // - If Planned is non-nil then we assume we're aiming to complete a
55 // planned read from an earlier plan walk. In this case the only possible
56 // non-error outcome is to set Output.Action (if non-nil) to a plans.NoOp
57 // change and put the complete resulting state in OutputState, ready to
58 // be saved in the overall state and used for expression evaluation.
59 OutputChange **plans.ResourceInstanceChange
60 OutputValue *cty.Value
61 OutputConfigValue *cty.Value
62 OutputState **states.ResourceInstanceObject
19} 63}
20 64
21func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { 65func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) {
22 // TODO: test 66 absAddr := n.Addr.Absolute(ctx.Path())
67 log.Printf("[TRACE] EvalReadData: working on %s", absAddr)
23 68
24 err := ctx.Hook(func(h Hook) (HookAction, error) { 69 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
25 return h.PreDiff(n.Info, nil) 70 return nil, fmt.Errorf("provider schema not available for %s", n.Addr)
26 })
27 if err != nil {
28 return nil, err
29 } 71 }
30 72
31 var diff *InstanceDiff 73 var diags tfdiags.Diagnostics
74 var change *plans.ResourceInstanceChange
75 var configVal cty.Value
32 76
33 if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() { 77 // TODO: Do we need to handle Delete changes here? EvalReadDataDiff and
34 // If we're re-diffing for a diff that was already planning to 78 // EvalReadDataApply did, but it seems like we should handle that via a
35 // destroy, then we'll just continue with that plan. 79 // separate mechanism since it boils down to just deleting the object from
36 diff = &InstanceDiff{Destroy: true} 80 // the state... and we do that on every plan anyway, forcing the data
37 } else { 81 // resource to re-read.
38 provider := *n.Provider
39 config := *n.Config
40 82
41 var err error 83 config := *n.Config
42 diff, err = provider.ReadDataDiff(n.Info, config) 84 provider := *n.Provider
85 providerSchema := *n.ProviderSchema
86 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
87 if schema == nil {
88 // Should be caught during validation, so we don't bother with a pretty error here
89 return nil, fmt.Errorf("provider %q does not support data source %q", n.ProviderAddr.ProviderConfig.Type, n.Addr.Resource.Type)
90 }
91
92 // We'll always start by evaluating the configuration. What we do after
93 // that will depend on the evaluation result along with what other inputs
94 // we were given.
95 objTy := schema.ImpliedType()
96 priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time
97
98 keyData := EvalDataForInstanceKey(n.Addr.Key)
99
100 var configDiags tfdiags.Diagnostics
101 configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData)
102 diags = diags.Append(configDiags)
103 if configDiags.HasErrors() {
104 return nil, diags.Err()
105 }
106
107 proposedNewVal := objchange.PlannedDataResourceObject(schema, configVal)
108
109 // If our configuration contains any unknown values then we must defer the
110 // read to the apply phase by producing a "Read" change for this resource,
111 // and a placeholder value for it in the state.
112 if n.ForcePlanRead || !configVal.IsWhollyKnown() {
113 // If the configuration is still unknown when we're applying a planned
114 // change then that indicates a bug in Terraform, since we should have
115 // everything resolved by now.
116 if n.Planned != nil && *n.Planned != nil {
117 return nil, fmt.Errorf(
118 "configuration for %s still contains unknown values during apply (this is a bug in Terraform; please report it!)",
119 absAddr,
120 )
121 }
122 if n.ForcePlanRead {
123 log.Printf("[TRACE] EvalReadData: %s configuration is fully known, but we're forcing a read plan to be created", absAddr)
124 } else {
125 log.Printf("[TRACE] EvalReadData: %s configuration not fully known yet, so deferring to apply phase", absAddr)
126 }
127
128 err := ctx.Hook(func(h Hook) (HookAction, error) {
129 return h.PreDiff(absAddr, states.CurrentGen, priorVal, proposedNewVal)
130 })
43 if err != nil { 131 if err != nil {
44 return nil, err 132 return nil, err
45 } 133 }
46 if diff == nil { 134
47 diff = new(InstanceDiff) 135 change = &plans.ResourceInstanceChange{
136 Addr: absAddr,
137 ProviderAddr: n.ProviderAddr,
138 Change: plans.Change{
139 Action: plans.Read,
140 Before: priorVal,
141 After: proposedNewVal,
142 },
48 } 143 }
49 144
50 // if id isn't explicitly set then it's always computed, because we're 145 err = ctx.Hook(func(h Hook) (HookAction, error) {
51 // always "creating a new resource". 146 return h.PostDiff(absAddr, states.CurrentGen, change.Action, priorVal, proposedNewVal)
52 diff.init() 147 })
53 if _, ok := diff.Attributes["id"]; !ok { 148 if err != nil {
54 diff.SetAttribute("id", &ResourceAttrDiff{ 149 return nil, err
55 Old: "", 150 }
56 NewComputed: true, 151
57 RequiresNew: true, 152 if n.OutputChange != nil {
58 Type: DiffAttrOutput, 153 *n.OutputChange = change
59 }) 154 }
155 if n.OutputValue != nil {
156 *n.OutputValue = change.After
157 }
158 if n.OutputConfigValue != nil {
159 *n.OutputConfigValue = configVal
60 } 160 }
161 if n.OutputState != nil {
162 state := &states.ResourceInstanceObject{
163 Value: change.After,
164 Status: states.ObjectPlanned, // because the partial value in the plan must be used for now
165 Dependencies: n.Dependencies,
166 }
167 *n.OutputState = state
168 }
169
170 return nil, diags.ErrWithWarnings()
61 } 171 }
62 172
63 err = ctx.Hook(func(h Hook) (HookAction, error) { 173 if n.Planned != nil && *n.Planned != nil && (*n.Planned).Action != plans.Read {
64 return h.PostDiff(n.Info, diff) 174 // If any other action gets in here then that's always a bug; this
175 // EvalNode only deals with reading.
176 return nil, fmt.Errorf(
177 "invalid action %s for %s: only Read is supported (this is a bug in Terraform; please report it!)",
178 (*n.Planned).Action, absAddr,
179 )
180 }
181
182 // If we get down here then our configuration is complete and we're read
183 // to actually call the provider to read the data.
184 log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr)
185
186 err := ctx.Hook(func(h Hook) (HookAction, error) {
187 // We don't have a state yet, so we'll just give the hook an
188 // empty one to work with.
189 return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType))
65 }) 190 })
66 if err != nil { 191 if err != nil {
67 return nil, err 192 return nil, err
68 } 193 }
69 194
70 *n.Output = diff 195 resp := provider.ReadDataSource(providers.ReadDataSourceRequest{
196 TypeName: n.Addr.Resource.Type,
197 Config: configVal,
198 })
199 diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config))
200 if diags.HasErrors() {
201 return nil, diags.Err()
202 }
203 newVal := resp.State
204 if newVal == cty.NilVal {
205 // This can happen with incompletely-configured mocks. We'll allow it
206 // and treat it as an alias for a properly-typed null value.
207 newVal = cty.NullVal(schema.ImpliedType())
208 }
209
210 for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) {
211 diags = diags.Append(tfdiags.Sourceless(
212 tfdiags.Error,
213 "Provider produced invalid object",
214 fmt.Sprintf(
215 "Provider %q produced an invalid value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
216 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
217 ),
218 ))
219 }
220 if diags.HasErrors() {
221 return nil, diags.Err()
222 }
223
224 if newVal.IsNull() {
225 diags = diags.Append(tfdiags.Sourceless(
226 tfdiags.Error,
227 "Provider produced null object",
228 fmt.Sprintf(
229 "Provider %q produced a null value for %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
230 n.ProviderAddr.ProviderConfig.Type, absAddr,
231 ),
232 ))
233 }
234 if !newVal.IsWhollyKnown() {
235 diags = diags.Append(tfdiags.Sourceless(
236 tfdiags.Error,
237 "Provider produced invalid object",
238 fmt.Sprintf(
239 "Provider %q produced a value for %s that is not wholly known.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
240 n.ProviderAddr.ProviderConfig.Type, absAddr,
241 ),
242 ))
243
244 // We'll still save the object, but we need to eliminate any unknown
245 // values first because we can't serialize them in the state file.
246 // Note that this may cause set elements to be coalesced if they
247 // differed only by having unknown values, but we don't worry about
248 // that here because we're saving the value only for inspection
249 // purposes; the error we added above will halt the graph walk.
250 newVal = cty.UnknownAsNull(newVal)
251 }
252
253 // Since we've completed the read, we actually have no change to make, but
254 // we'll produce a NoOp one anyway to preserve the usual flow of the
255 // plan phase and allow it to produce a complete plan.
256 change = &plans.ResourceInstanceChange{
257 Addr: absAddr,
258 ProviderAddr: n.ProviderAddr,
259 Change: plans.Change{
260 Action: plans.NoOp,
261 Before: newVal,
262 After: newVal,
263 },
264 }
265 state := &states.ResourceInstanceObject{
266 Value: change.After,
267 Status: states.ObjectReady, // because we completed the read from the provider
268 Dependencies: n.Dependencies,
269 }
270
271 err = ctx.Hook(func(h Hook) (HookAction, error) {
272 return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal)
273 })
274 if err != nil {
275 return nil, err
276 }
71 277
278 if n.OutputChange != nil {
279 *n.OutputChange = change
280 }
281 if n.OutputValue != nil {
282 *n.OutputValue = change.After
283 }
284 if n.OutputConfigValue != nil {
285 *n.OutputConfigValue = configVal
286 }
72 if n.OutputState != nil { 287 if n.OutputState != nil {
73 state := &InstanceState{}
74 *n.OutputState = state 288 *n.OutputState = state
75
76 // Apply the diff to the returned state, so the state includes
77 // any attribute values that are not computed.
78 if !diff.Empty() && n.OutputState != nil {
79 *n.OutputState = state.MergeDiff(diff)
80 }
81 } 289 }
82 290
83 return nil, nil 291 return nil, diags.ErrWithWarnings()
84} 292}
85 293
86// EvalReadDataApply is an EvalNode implementation that executes a data 294// EvalReadDataApply is an EvalNode implementation that executes a data
87// resource's ReadDataApply method to read data from the data source. 295// resource's ReadDataApply method to read data from the data source.
88type EvalReadDataApply struct { 296type EvalReadDataApply struct {
89 Provider *ResourceProvider 297 Addr addrs.ResourceInstance
90 Output **InstanceState 298 Provider *providers.Interface
91 Diff **InstanceDiff 299 ProviderAddr addrs.AbsProviderConfig
92 Info *InstanceInfo 300 ProviderSchema **ProviderSchema
301 Output **states.ResourceInstanceObject
302 Config *configs.Resource
303 Change **plans.ResourceInstanceChange
304 StateReferences []addrs.Referenceable
93} 305}
94 306
95func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { 307func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
96 // TODO: test
97 provider := *n.Provider 308 provider := *n.Provider
98 diff := *n.Diff 309 change := *n.Change
310 providerSchema := *n.ProviderSchema
311 absAddr := n.Addr.Absolute(ctx.Path())
312
313 var diags tfdiags.Diagnostics
99 314
100 // If the diff is for *destroying* this resource then we'll 315 // If the diff is for *destroying* this resource then we'll
101 // just drop its state and move on, since data resources don't 316 // just drop its state and move on, since data resources don't
102 // support an actual "destroy" action. 317 // support an actual "destroy" action.
103 if diff != nil && diff.GetDestroy() { 318 if change != nil && change.Action == plans.Delete {
104 if n.Output != nil { 319 if n.Output != nil {
105 *n.Output = nil 320 *n.Output = nil
106 } 321 }
@@ -113,27 +328,56 @@ func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
113 err := ctx.Hook(func(h Hook) (HookAction, error) { 328 err := ctx.Hook(func(h Hook) (HookAction, error) {
114 // We don't have a state yet, so we'll just give the hook an 329 // We don't have a state yet, so we'll just give the hook an
115 // empty one to work with. 330 // empty one to work with.
116 return h.PreRefresh(n.Info, &InstanceState{}) 331 return h.PreRefresh(absAddr, states.CurrentGen, cty.NullVal(cty.DynamicPseudoType))
117 }) 332 })
118 if err != nil { 333 if err != nil {
119 return nil, err 334 return nil, err
120 } 335 }
121 336
122 state, err := provider.ReadDataApply(n.Info, diff) 337 resp := provider.ReadDataSource(providers.ReadDataSourceRequest{
123 if err != nil { 338 TypeName: n.Addr.Resource.Type,
124 return nil, fmt.Errorf("%s: %s", n.Info.Id, err) 339 Config: change.After,
340 })
341 diags = diags.Append(resp.Diagnostics.InConfigBody(n.Config.Config))
342 if diags.HasErrors() {
343 return nil, diags.Err()
344 }
345
346 schema, _ := providerSchema.SchemaForResourceAddr(n.Addr.ContainingResource())
347 if schema == nil {
348 // Should be caught during validation, so we don't bother with a pretty error here
349 return nil, fmt.Errorf("provider does not support data source %q", n.Addr.Resource.Type)
350 }
351
352 newVal := resp.State
353 for _, err := range newVal.Type().TestConformance(schema.ImpliedType()) {
354 diags = diags.Append(tfdiags.Sourceless(
355 tfdiags.Error,
356 "Provider produced invalid object",
357 fmt.Sprintf(
358 "Provider %q planned an invalid value for %s. The result could not be saved.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
359 n.ProviderAddr.ProviderConfig.Type, tfdiags.FormatErrorPrefixed(err, absAddr.String()),
360 ),
361 ))
362 }
363 if diags.HasErrors() {
364 return nil, diags.Err()
125 } 365 }
126 366
127 err = ctx.Hook(func(h Hook) (HookAction, error) { 367 err = ctx.Hook(func(h Hook) (HookAction, error) {
128 return h.PostRefresh(n.Info, state) 368 return h.PostRefresh(absAddr, states.CurrentGen, change.Before, newVal)
129 }) 369 })
130 if err != nil { 370 if err != nil {
131 return nil, err 371 return nil, err
132 } 372 }
133 373
134 if n.Output != nil { 374 if n.Output != nil {
135 *n.Output = state 375 *n.Output = &states.ResourceInstanceObject{
376 Value: newVal,
377 Status: states.ObjectReady,
378 Dependencies: n.StateReferences,
379 }
136 } 380 }
137 381
138 return nil, nil 382 return nil, diags.ErrWithWarnings()
139} 383}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
index fa2b812..03bc948 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
@@ -3,53 +3,102 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6
7 "github.com/zclconf/go-cty/cty"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/states"
12 "github.com/hashicorp/terraform/tfdiags"
6) 13)
7 14
8// EvalRefresh is an EvalNode implementation that does a refresh for 15// EvalRefresh is an EvalNode implementation that does a refresh for
9// a resource. 16// a resource.
10type EvalRefresh struct { 17type EvalRefresh struct {
11 Provider *ResourceProvider 18 Addr addrs.ResourceInstance
12 State **InstanceState 19 ProviderAddr addrs.AbsProviderConfig
13 Info *InstanceInfo 20 Provider *providers.Interface
14 Output **InstanceState 21 ProviderSchema **ProviderSchema
22 State **states.ResourceInstanceObject
23 Output **states.ResourceInstanceObject
15} 24}
16 25
17// TODO: test 26// TODO: test
18func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) { 27func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
19 provider := *n.Provider
20 state := *n.State 28 state := *n.State
29 absAddr := n.Addr.Absolute(ctx.Path())
30
31 var diags tfdiags.Diagnostics
21 32
22 // If we have no state, we don't do any refreshing 33 // If we have no state, we don't do any refreshing
23 if state == nil { 34 if state == nil {
24 log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id) 35 log.Printf("[DEBUG] refresh: %s: no state, so not refreshing", n.Addr.Absolute(ctx.Path()))
25 return nil, nil 36 return nil, diags.ErrWithWarnings()
37 }
38
39 schema, _ := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
40 if schema == nil {
41 // Should be caught during validation, so we don't bother with a pretty error here
42 return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
26 } 43 }
27 44
28 // Call pre-refresh hook 45 // Call pre-refresh hook
29 err := ctx.Hook(func(h Hook) (HookAction, error) { 46 err := ctx.Hook(func(h Hook) (HookAction, error) {
30 return h.PreRefresh(n.Info, state) 47 return h.PreRefresh(absAddr, states.CurrentGen, state.Value)
31 }) 48 })
32 if err != nil { 49 if err != nil {
33 return nil, err 50 return nil, diags.ErrWithWarnings()
34 } 51 }
35 52
36 // Refresh! 53 // Refresh!
37 state, err = provider.Refresh(n.Info, state) 54 priorVal := state.Value
38 if err != nil { 55 req := providers.ReadResourceRequest{
39 return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error()) 56 TypeName: n.Addr.Resource.Type,
57 PriorState: priorVal,
40 } 58 }
41 59
60 provider := *n.Provider
61 resp := provider.ReadResource(req)
62 diags = diags.Append(resp.Diagnostics)
63 if diags.HasErrors() {
64 return nil, diags.Err()
65 }
66
67 if resp.NewState == cty.NilVal {
68 // This ought not to happen in real cases since it's not possible to
69 // send NilVal over the plugin RPC channel, but it can come up in
70 // tests due to sloppy mocking.
71 panic("new state is cty.NilVal")
72 }
73
74 for _, err := range resp.NewState.Type().TestConformance(schema.ImpliedType()) {
75 diags = diags.Append(tfdiags.Sourceless(
76 tfdiags.Error,
77 "Provider produced invalid object",
78 fmt.Sprintf(
79 "Provider %q planned an invalid value for %s during refresh: %s.\n\nThis is a bug in the provider, which should be reported in the provider's own issue tracker.",
80 n.ProviderAddr.ProviderConfig.Type, absAddr, tfdiags.FormatError(err),
81 ),
82 ))
83 }
84 if diags.HasErrors() {
85 return nil, diags.Err()
86 }
87
88 newState := state.DeepCopy()
89 newState.Value = resp.NewState
90
42 // Call post-refresh hook 91 // Call post-refresh hook
43 err = ctx.Hook(func(h Hook) (HookAction, error) { 92 err = ctx.Hook(func(h Hook) (HookAction, error) {
44 return h.PostRefresh(n.Info, state) 93 return h.PostRefresh(absAddr, states.CurrentGen, priorVal, newState.Value)
45 }) 94 })
46 if err != nil { 95 if err != nil {
47 return nil, err 96 return nil, err
48 } 97 }
49 98
50 if n.Output != nil { 99 if n.Output != nil {
51 *n.Output = state 100 *n.Output = newState
52 } 101 }
53 102
54 return nil, nil 103 return nil, diags.ErrWithWarnings()
55} 104}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
deleted file mode 100644
index 5eca678..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
+++ /dev/null
@@ -1,13 +0,0 @@
1package terraform
2
3// EvalInstanceInfo is an EvalNode implementation that fills in the
4// InstanceInfo as much as it can.
5type EvalInstanceInfo struct {
6 Info *InstanceInfo
7}
8
9// TODO: test
10func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
11 n.Info.ModulePath = ctx.Path()
12 return nil, nil
13}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
index 82d8178..3485e4f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
@@ -1,22 +1,37 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/hashicorp/terraform/tfdiags"
5)
6
3// EvalSequence is an EvalNode that evaluates in sequence. 7// EvalSequence is an EvalNode that evaluates in sequence.
4type EvalSequence struct { 8type EvalSequence struct {
5 Nodes []EvalNode 9 Nodes []EvalNode
6} 10}
7 11
8func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) { 12func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
13 var diags tfdiags.Diagnostics
14
9 for _, n := range n.Nodes { 15 for _, n := range n.Nodes {
10 if n == nil { 16 if n == nil {
11 continue 17 continue
12 } 18 }
13 19
14 if _, err := EvalRaw(n, ctx); err != nil { 20 if _, err := EvalRaw(n, ctx); err != nil {
15 return nil, err 21 if _, isEarlyExit := err.(EvalEarlyExitError); isEarlyExit {
22 // In this path we abort early, losing any non-error
23 // diagnostics we saw earlier.
24 return nil, err
25 }
26 diags = diags.Append(err)
27 if diags.HasErrors() {
28 // Halt if we get some errors, but warnings are okay.
29 break
30 }
16 } 31 }
17 } 32 }
18 33
19 return nil, nil 34 return nil, diags.ErrWithWarnings()
20} 35}
21 36
22// EvalNodeFilterable impl. 37// EvalNodeFilterable impl.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
index 1182690..d506ce3 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
@@ -2,91 +2,149 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/configs"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/states"
11 "github.com/hashicorp/terraform/tfdiags"
5) 12)
6 13
7// EvalReadState is an EvalNode implementation that reads the 14// EvalReadState is an EvalNode implementation that reads the
8// primary InstanceState for a specific resource out of the state. 15// current object for a specific instance in the state.
9type EvalReadState struct { 16type EvalReadState struct {
10 Name string 17 // Addr is the address of the instance to read state for.
11 Output **InstanceState 18 Addr addrs.ResourceInstance
19
20 // ProviderSchema is the schema for the provider given in Provider.
21 ProviderSchema **ProviderSchema
22
23 // Provider is the provider that will subsequently perform actions on
24 // the the state object. This is used to perform any schema upgrades
25 // that might be required to prepare the stored data for use.
26 Provider *providers.Interface
27
28 // Output will be written with a pointer to the retrieved object.
29 Output **states.ResourceInstanceObject
12} 30}
13 31
14func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) { 32func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
15 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { 33 if n.Provider == nil || *n.Provider == nil {
16 return rs.Primary, nil 34 panic("EvalReadState used with no Provider object")
17 }) 35 }
36 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
37 panic("EvalReadState used with no ProviderSchema object")
38 }
39
40 absAddr := n.Addr.Absolute(ctx.Path())
41 log.Printf("[TRACE] EvalReadState: reading state for %s", absAddr)
42
43 src := ctx.State().ResourceInstanceObject(absAddr, states.CurrentGen)
44 if src == nil {
45 // Presumably we only have deposed objects, then.
46 log.Printf("[TRACE] EvalReadState: no state present for %s", absAddr)
47 return nil, nil
48 }
49
50 schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
51 if schema == nil {
52 // Shouldn't happen since we should've failed long ago if no schema is present
53 return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr)
54 }
55 var diags tfdiags.Diagnostics
56 src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion)
57 if diags.HasErrors() {
58 // Note that we don't have any channel to return warnings here. We'll
59 // accept that for now since warnings during a schema upgrade would
60 // be pretty weird anyway, since this operation is supposed to seem
61 // invisible to the user.
62 return nil, diags.Err()
63 }
64
65 obj, err := src.Decode(schema.ImpliedType())
66 if err != nil {
67 return nil, err
68 }
69
70 if n.Output != nil {
71 *n.Output = obj
72 }
73 return obj, nil
18} 74}
19 75
20// EvalReadStateDeposed is an EvalNode implementation that reads the 76// EvalReadStateDeposed is an EvalNode implementation that reads the
21// deposed InstanceState for a specific resource out of the state 77// deposed InstanceState for a specific resource out of the state
22type EvalReadStateDeposed struct { 78type EvalReadStateDeposed struct {
23 Name string 79 // Addr is the address of the instance to read state for.
24 Output **InstanceState 80 Addr addrs.ResourceInstance
25 // Index indicates which instance in the Deposed list to target, or -1 for 81
26 // the last item. 82 // Key identifies which deposed object we will read.
27 Index int 83 Key states.DeposedKey
84
85 // ProviderSchema is the schema for the provider given in Provider.
86 ProviderSchema **ProviderSchema
87
88 // Provider is the provider that will subsequently perform actions on
89 // the the state object. This is used to perform any schema upgrades
90 // that might be required to prepare the stored data for use.
91 Provider *providers.Interface
92
93 // Output will be written with a pointer to the retrieved object.
94 Output **states.ResourceInstanceObject
28} 95}
29 96
30func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) { 97func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
31 return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) { 98 if n.Provider == nil || *n.Provider == nil {
32 // Get the index. If it is negative, then we get the last one 99 panic("EvalReadStateDeposed used with no Provider object")
33 idx := n.Index 100 }
34 if idx < 0 { 101 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
35 idx = len(rs.Deposed) - 1 102 panic("EvalReadStateDeposed used with no ProviderSchema object")
36 } 103 }
37 if idx >= 0 && idx < len(rs.Deposed) {
38 return rs.Deposed[idx], nil
39 } else {
40 return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs)
41 }
42 })
43}
44 104
45// Does the bulk of the work for the various flavors of ReadState eval nodes. 105 key := n.Key
46// Each node just provides a reader function to get from the ResourceState to the 106 if key == states.NotDeposed {
47// InstanceState, and this takes care of all the plumbing. 107 return nil, fmt.Errorf("EvalReadStateDeposed used with no instance key; this is a bug in Terraform and should be reported")
48func readInstanceFromState(
49 ctx EvalContext,
50 resourceName string,
51 output **InstanceState,
52 readerFn func(*ResourceState) (*InstanceState, error),
53) (*InstanceState, error) {
54 state, lock := ctx.State()
55
56 // Get a read lock so we can access this instance
57 lock.RLock()
58 defer lock.RUnlock()
59
60 // Look for the module state. If we don't have one, then it doesn't matter.
61 mod := state.ModuleByPath(ctx.Path())
62 if mod == nil {
63 return nil, nil
64 } 108 }
109 absAddr := n.Addr.Absolute(ctx.Path())
110 log.Printf("[TRACE] EvalReadStateDeposed: reading state for %s deposed object %s", absAddr, n.Key)
65 111
66 // Look for the resource state. If we don't have one, then it is okay. 112 src := ctx.State().ResourceInstanceObject(absAddr, key)
67 rs := mod.Resources[resourceName] 113 if src == nil {
68 if rs == nil { 114 // Presumably we only have deposed objects, then.
115 log.Printf("[TRACE] EvalReadStateDeposed: no state present for %s deposed object %s", absAddr, n.Key)
69 return nil, nil 116 return nil, nil
70 } 117 }
71 118
72 // Use the delegate function to get the instance state from the resource state 119 schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
73 is, err := readerFn(rs) 120 if schema == nil {
121 // Shouldn't happen since we should've failed long ago if no schema is present
122 return nil, fmt.Errorf("no schema available for %s while reading state; this is a bug in Terraform and should be reported", absAddr)
123 }
124 var diags tfdiags.Diagnostics
125 src, diags = UpgradeResourceState(absAddr, *n.Provider, src, schema, currentVersion)
126 if diags.HasErrors() {
127 // Note that we don't have any channel to return warnings here. We'll
128 // accept that for now since warnings during a schema upgrade would
129 // be pretty weird anyway, since this operation is supposed to seem
130 // invisible to the user.
131 return nil, diags.Err()
132 }
133
134 obj, err := src.Decode(schema.ImpliedType())
74 if err != nil { 135 if err != nil {
75 return nil, err 136 return nil, err
76 } 137 }
77 138 if n.Output != nil {
78 // Write the result to the output pointer 139 *n.Output = obj
79 if output != nil {
80 *output = is
81 } 140 }
82 141 return obj, nil
83 return is, nil
84} 142}
85 143
86// EvalRequireState is an EvalNode implementation that early exits 144// EvalRequireState is an EvalNode implementation that exits early if the given
87// if the state doesn't have an ID. 145// object is null.
88type EvalRequireState struct { 146type EvalRequireState struct {
89 State **InstanceState 147 State **states.ResourceInstanceObject
90} 148}
91 149
92func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) { 150func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
@@ -95,7 +153,7 @@ func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
95 } 153 }
96 154
97 state := *n.State 155 state := *n.State
98 if state == nil || state.ID == "" { 156 if state == nil || state.Value.IsNull() {
99 return nil, EvalEarlyExitError{} 157 return nil, EvalEarlyExitError{}
100 } 158 }
101 159
@@ -107,12 +165,14 @@ func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
107type EvalUpdateStateHook struct{} 165type EvalUpdateStateHook struct{}
108 166
109func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) { 167func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
110 state, lock := ctx.State() 168 // In principle we could grab the lock here just long enough to take a
111 169 // deep copy and then pass that to our hooks below, but we'll instead
112 // Get a full lock. Even calling something like WriteState can modify 170 // hold the hook for the duration to avoid the potential confusing
113 // (prune) the state, so we need the full lock. 171 // situation of us racing to call PostStateUpdate concurrently with
114 lock.Lock() 172 // different state snapshots.
115 defer lock.Unlock() 173 stateSync := ctx.State()
174 state := stateSync.Lock().DeepCopy()
175 defer stateSync.Unlock()
116 176
117 // Call the hook 177 // Call the hook
118 err := ctx.Hook(func(h Hook) (HookAction, error) { 178 err := ctx.Hook(func(h Hook) (HookAction, error) {
@@ -125,171 +185,285 @@ func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
125 return nil, nil 185 return nil, nil
126} 186}
127 187
128// EvalWriteState is an EvalNode implementation that writes the 188// EvalWriteState is an EvalNode implementation that saves the given object
129// primary InstanceState for a specific resource into the state. 189// as the current object for the selected resource instance.
130type EvalWriteState struct { 190type EvalWriteState struct {
131 Name string 191 // Addr is the address of the instance to read state for.
132 ResourceType string 192 Addr addrs.ResourceInstance
133 Provider string 193
134 Dependencies []string 194 // State is the object state to save.
135 State **InstanceState 195 State **states.ResourceInstanceObject
196
197 // ProviderSchema is the schema for the provider given in ProviderAddr.
198 ProviderSchema **ProviderSchema
199
200 // ProviderAddr is the address of the provider configuration that
201 // produced the given object.
202 ProviderAddr addrs.AbsProviderConfig
136} 203}
137 204
138func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) { 205func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
139 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, 206 if n.State == nil {
140 func(rs *ResourceState) error { 207 // Note that a pointer _to_ nil is valid here, indicating the total
141 rs.Primary = *n.State 208 // absense of an object as we'd see during destroy.
142 return nil 209 panic("EvalWriteState used with no ResourceInstanceObject")
143 }, 210 }
144 ) 211
212 absAddr := n.Addr.Absolute(ctx.Path())
213 state := ctx.State()
214
215 if n.ProviderAddr.ProviderConfig.Type == "" {
216 return nil, fmt.Errorf("failed to write state for %s, missing provider type", absAddr)
217 }
218
219 obj := *n.State
220 if obj == nil || obj.Value.IsNull() {
221 // No need to encode anything: we'll just write it directly.
222 state.SetResourceInstanceCurrent(absAddr, nil, n.ProviderAddr)
223 log.Printf("[TRACE] EvalWriteState: removing state object for %s", absAddr)
224 return nil, nil
225 }
226 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
227 // Should never happen, unless our state object is nil
228 panic("EvalWriteState used with pointer to nil ProviderSchema object")
229 }
230
231 if obj != nil {
232 log.Printf("[TRACE] EvalWriteState: writing current state object for %s", absAddr)
233 } else {
234 log.Printf("[TRACE] EvalWriteState: removing current state object for %s", absAddr)
235 }
236
237 schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
238 if schema == nil {
239 // It shouldn't be possible to get this far in any real scenario
240 // without a schema, but we might end up here in contrived tests that
241 // fail to set up their world properly.
242 return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
243 }
244 src, err := obj.Encode(schema.ImpliedType(), currentVersion)
245 if err != nil {
246 return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err)
247 }
248
249 state.SetResourceInstanceCurrent(absAddr, src, n.ProviderAddr)
250 return nil, nil
145} 251}
146 252
147// EvalWriteStateDeposed is an EvalNode implementation that writes 253// EvalWriteStateDeposed is an EvalNode implementation that writes
148// an InstanceState out to the Deposed list of a resource in the state. 254// an InstanceState out to the Deposed list of a resource in the state.
149type EvalWriteStateDeposed struct { 255type EvalWriteStateDeposed struct {
150 Name string 256 // Addr is the address of the instance to read state for.
151 ResourceType string 257 Addr addrs.ResourceInstance
152 Provider string 258
153 Dependencies []string 259 // Key indicates which deposed object to write to.
154 State **InstanceState 260 Key states.DeposedKey
155 // Index indicates which instance in the Deposed list to target, or -1 to append. 261
156 Index int 262 // State is the object state to save.
263 State **states.ResourceInstanceObject
264
265 // ProviderSchema is the schema for the provider given in ProviderAddr.
266 ProviderSchema **ProviderSchema
267
268 // ProviderAddr is the address of the provider configuration that
269 // produced the given object.
270 ProviderAddr addrs.AbsProviderConfig
157} 271}
158 272
159func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) { 273func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
160 return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies, 274 if n.State == nil {
161 func(rs *ResourceState) error { 275 // Note that a pointer _to_ nil is valid here, indicating the total
162 if n.Index == -1 { 276 // absense of an object as we'd see during destroy.
163 rs.Deposed = append(rs.Deposed, *n.State) 277 panic("EvalWriteStateDeposed used with no ResourceInstanceObject")
164 } else { 278 }
165 rs.Deposed[n.Index] = *n.State
166 }
167 return nil
168 },
169 )
170}
171 279
172// Pulls together the common tasks of the EvalWriteState nodes. All the args 280 absAddr := n.Addr.Absolute(ctx.Path())
173// are passed directly down from the EvalNode along with a `writer` function 281 key := n.Key
174// which is yielded the *ResourceState and is responsible for writing an 282 state := ctx.State()
175// InstanceState to the proper field in the ResourceState. 283
176func writeInstanceToState( 284 if key == states.NotDeposed {
177 ctx EvalContext, 285 // should never happen
178 resourceName string, 286 return nil, fmt.Errorf("can't save deposed object for %s without a deposed key; this is a bug in Terraform that should be reported", absAddr)
179 resourceType string, 287 }
180 provider string, 288
181 dependencies []string, 289 obj := *n.State
182 writerFn func(*ResourceState) error, 290 if obj == nil {
183) (*InstanceState, error) { 291 // No need to encode anything: we'll just write it directly.
184 state, lock := ctx.State() 292 state.SetResourceInstanceDeposed(absAddr, key, nil, n.ProviderAddr)
185 if state == nil { 293 log.Printf("[TRACE] EvalWriteStateDeposed: removing state object for %s deposed %s", absAddr, key)
186 return nil, fmt.Errorf("cannot write state to nil state") 294 return nil, nil
187 } 295 }
188 296 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
189 // Get a write lock so we can access this instance 297 // Should never happen, unless our state object is nil
190 lock.Lock() 298 panic("EvalWriteStateDeposed used with no ProviderSchema object")
191 defer lock.Unlock() 299 }
192 300
193 // Look for the module state. If we don't have one, create it. 301 schema, currentVersion := (*n.ProviderSchema).SchemaForResourceAddr(n.Addr.ContainingResource())
194 mod := state.ModuleByPath(ctx.Path()) 302 if schema == nil {
195 if mod == nil { 303 // It shouldn't be possible to get this far in any real scenario
196 mod = state.AddModule(ctx.Path()) 304 // without a schema, but we might end up here in contrived tests that
197 } 305 // fail to set up their world properly.
198 306 return nil, fmt.Errorf("failed to encode %s in state: no resource type schema available", absAddr)
199 // Look for the resource state. 307 }
200 rs := mod.Resources[resourceName] 308 src, err := obj.Encode(schema.ImpliedType(), currentVersion)
201 if rs == nil { 309 if err != nil {
202 rs = &ResourceState{} 310 return nil, fmt.Errorf("failed to encode %s in state: %s", absAddr, err)
203 rs.init()
204 mod.Resources[resourceName] = rs
205 }
206 rs.Type = resourceType
207 rs.Dependencies = dependencies
208 rs.Provider = provider
209
210 if err := writerFn(rs); err != nil {
211 return nil, err
212 } 311 }
213 312
313 log.Printf("[TRACE] EvalWriteStateDeposed: writing state object for %s deposed %s", absAddr, key)
314 state.SetResourceInstanceDeposed(absAddr, key, src, n.ProviderAddr)
214 return nil, nil 315 return nil, nil
215} 316}
216 317
217// EvalDeposeState is an EvalNode implementation that takes the primary 318// EvalDeposeState is an EvalNode implementation that moves the current object
218// out of a state and makes it Deposed. This is done at the beginning of 319// for the given instance to instead be a deposed object, leaving the instance
219// create-before-destroy calls so that the create can create while preserving 320// with no current object.
220// the old state of the to-be-destroyed resource. 321// This is used at the beginning of a create-before-destroy replace action so
322// that the create can create while preserving the old state of the
323// to-be-destroyed object.
221type EvalDeposeState struct { 324type EvalDeposeState struct {
222 Name string 325 Addr addrs.ResourceInstance
326
327 // ForceKey, if a value other than states.NotDeposed, will be used as the
328 // key for the newly-created deposed object that results from this action.
329 // If set to states.NotDeposed (the zero value), a new unique key will be
330 // allocated.
331 ForceKey states.DeposedKey
332
333 // OutputKey, if non-nil, will be written with the deposed object key that
334 // was generated for the object. This can then be passed to
335 // EvalUndeposeState.Key so it knows which deposed instance to forget.
336 OutputKey *states.DeposedKey
223} 337}
224 338
225// TODO: test 339// TODO: test
226func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) { 340func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
227 state, lock := ctx.State() 341 absAddr := n.Addr.Absolute(ctx.Path())
228 342 state := ctx.State()
229 // Get a read lock so we can access this instance 343
230 lock.RLock() 344 var key states.DeposedKey
231 defer lock.RUnlock() 345 if n.ForceKey == states.NotDeposed {
232 346 key = state.DeposeResourceInstanceObject(absAddr)
233 // Look for the module state. If we don't have one, then it doesn't matter. 347 } else {
234 mod := state.ModuleByPath(ctx.Path()) 348 key = n.ForceKey
235 if mod == nil { 349 state.DeposeResourceInstanceObjectForceKey(absAddr, key)
236 return nil, nil
237 }
238
239 // Look for the resource state. If we don't have one, then it is okay.
240 rs := mod.Resources[n.Name]
241 if rs == nil {
242 return nil, nil
243 } 350 }
351 log.Printf("[TRACE] EvalDeposeState: prior object for %s now deposed with key %s", absAddr, key)
244 352
245 // If we don't have a primary, we have nothing to depose 353 if n.OutputKey != nil {
246 if rs.Primary == nil { 354 *n.OutputKey = key
247 return nil, nil
248 } 355 }
249 356
250 // Depose
251 rs.Deposed = append(rs.Deposed, rs.Primary)
252 rs.Primary = nil
253
254 return nil, nil 357 return nil, nil
255} 358}
256 359
257// EvalUndeposeState is an EvalNode implementation that reads the 360// EvalMaybeRestoreDeposedObject is an EvalNode implementation that will
258// InstanceState for a specific resource out of the state. 361// restore a particular deposed object of the specified resource instance
259type EvalUndeposeState struct { 362// to be the "current" object if and only if the instance doesn't currently
260 Name string 363// have a current object.
261 State **InstanceState 364//
365// This is intended for use when the create leg of a create before destroy
366// fails with no partial new object: if we didn't take any action, the user
367// would be left in the unfortunate situation of having no current object
368// and the previously-workign object now deposed. This EvalNode causes a
369// better outcome by restoring things to how they were before the replace
370// operation began.
371//
372// The create operation may have produced a partial result even though it
373// failed and it's important that we don't "forget" that state, so in that
374// situation the prior object remains deposed and the partial new object
375// remains the current object, allowing the situation to hopefully be
376// improved in a subsequent run.
377type EvalMaybeRestoreDeposedObject struct {
378 Addr addrs.ResourceInstance
379
380 // Key is a pointer to the deposed object key that should be forgotten
381 // from the state, which must be non-nil.
382 Key *states.DeposedKey
262} 383}
263 384
264// TODO: test 385// TODO: test
265func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) { 386func (n *EvalMaybeRestoreDeposedObject) Eval(ctx EvalContext) (interface{}, error) {
266 state, lock := ctx.State() 387 absAddr := n.Addr.Absolute(ctx.Path())
388 dk := *n.Key
389 state := ctx.State()
390
391 restored := state.MaybeRestoreResourceInstanceDeposed(absAddr, dk)
392 if restored {
393 log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s was restored as the current object", absAddr, dk)
394 } else {
395 log.Printf("[TRACE] EvalMaybeRestoreDeposedObject: %s deposed object %s remains deposed", absAddr, dk)
396 }
267 397
268 // Get a read lock so we can access this instance 398 return nil, nil
269 lock.RLock() 399}
270 defer lock.RUnlock()
271 400
272 // Look for the module state. If we don't have one, then it doesn't matter. 401// EvalWriteResourceState is an EvalNode implementation that ensures that
273 mod := state.ModuleByPath(ctx.Path()) 402// a suitable resource-level state record is present in the state, if that's
274 if mod == nil { 403// required for the "each mode" of that resource.
275 return nil, nil 404//
276 } 405// This is important primarily for the situation where count = 0, since this
406// eval is the only change we get to set the resource "each mode" to list
407// in that case, allowing expression evaluation to see it as a zero-element
408// list rather than as not set at all.
409type EvalWriteResourceState struct {
410 Addr addrs.Resource
411 Config *configs.Resource
412 ProviderAddr addrs.AbsProviderConfig
413}
277 414
278 // Look for the resource state. If we don't have one, then it is okay. 415// TODO: test
279 rs := mod.Resources[n.Name] 416func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) {
280 if rs == nil { 417 var diags tfdiags.Diagnostics
281 return nil, nil 418 absAddr := n.Addr.Absolute(ctx.Path())
419 state := ctx.State()
420
421 count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
422 diags = diags.Append(countDiags)
423 if countDiags.HasErrors() {
424 return nil, diags.Err()
282 } 425 }
283 426
284 // If we don't have any desposed resource, then we don't have anything to do 427 // Currently we ony support NoEach and EachList, because for_each support
285 if len(rs.Deposed) == 0 { 428 // is not fully wired up across Terraform. Once for_each support is added,
286 return nil, nil 429 // we'll need to handle that here too, setting states.EachMap if the
430 // assigned expression is a map.
431 eachMode := states.NoEach
432 if count >= 0 { // -1 signals "count not set"
433 eachMode = states.EachList
287 } 434 }
288 435
289 // Undepose 436 // This method takes care of all of the business logic of updating this
290 idx := len(rs.Deposed) - 1 437 // while ensuring that any existing instances are preserved, etc.
291 rs.Primary = rs.Deposed[idx] 438 state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr)
292 rs.Deposed[idx] = *n.State 439
440 return nil, nil
441}
442
443// EvalForgetResourceState is an EvalNode implementation that prunes out an
444// empty resource-level state for a given resource address, or produces an
445// error if it isn't empty after all.
446//
447// This should be the last action taken for a resource that has been removed
448// from the configuration altogether, to clean up the leftover husk of the
449// resource in the state after other EvalNodes have destroyed and removed
450// all of the instances and instance objects beneath it.
451type EvalForgetResourceState struct {
452 Addr addrs.Resource
453}
454
455func (n *EvalForgetResourceState) Eval(ctx EvalContext) (interface{}, error) {
456 absAddr := n.Addr.Absolute(ctx.Path())
457 state := ctx.State()
458
459 pruned := state.RemoveResourceIfEmpty(absAddr)
460 if !pruned {
461 // If this produces an error, it indicates a bug elsewhere in Terraform
462 // -- probably missing graph nodes, graph edges, or
463 // incorrectly-implemented evaluation steps.
464 return nil, fmt.Errorf("orphan resource %s still has a non-empty state after apply; this is a bug in Terraform", absAddr)
465 }
466 log.Printf("[TRACE] EvalForgetResourceState: Pruned husk of %s from state", absAddr)
293 467
294 return nil, nil 468 return nil, nil
295} 469}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go
new file mode 100644
index 0000000..e194000
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_state_upgrade.go
@@ -0,0 +1,106 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/configs/configschema"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/states"
11 "github.com/hashicorp/terraform/tfdiags"
12)
13
14// UpgradeResourceState will, if necessary, run the provider-defined upgrade
15// logic against the given state object to make it compliant with the
16// current schema version. This is a no-op if the given state object is
17// already at the latest version.
18//
19// If any errors occur during upgrade, error diagnostics are returned. In that
20// case it is not safe to proceed with using the original state object.
21func UpgradeResourceState(addr addrs.AbsResourceInstance, provider providers.Interface, src *states.ResourceInstanceObjectSrc, currentSchema *configschema.Block, currentVersion uint64) (*states.ResourceInstanceObjectSrc, tfdiags.Diagnostics) {
22 if addr.Resource.Resource.Mode != addrs.ManagedResourceMode {
23 // We only do state upgrading for managed resources.
24 return src, nil
25 }
26
27 stateIsFlatmap := len(src.AttrsJSON) == 0
28
29 providerType := addr.Resource.Resource.DefaultProviderConfig().Type
30 if src.SchemaVersion > currentVersion {
31 log.Printf("[TRACE] UpgradeResourceState: can't downgrade state for %s from version %d to %d", addr, src.SchemaVersion, currentVersion)
32 var diags tfdiags.Diagnostics
33 diags = diags.Append(tfdiags.Sourceless(
34 tfdiags.Error,
35 "Resource instance managed by newer provider version",
36 // This is not a very good error message, but we don't retain enough
37 // information in state to give good feedback on what provider
38 // version might be required here. :(
39 fmt.Sprintf("The current state of %s was created by a newer provider version than is currently selected. Upgrade the %s provider to work with this state.", addr, providerType),
40 ))
41 return nil, diags
42 }
43
44 // If we get down here then we need to upgrade the state, with the
45 // provider's help.
46 // If this state was originally created by a version of Terraform prior to
47 // v0.12, this also includes translating from legacy flatmap to new-style
48 // representation, since only the provider has enough information to
49 // understand a flatmap built against an older schema.
50 if src.SchemaVersion != currentVersion {
51 log.Printf("[TRACE] UpgradeResourceState: upgrading state for %s from version %d to %d using provider %q", addr, src.SchemaVersion, currentVersion, providerType)
52 } else {
53 log.Printf("[TRACE] UpgradeResourceState: schema version of %s is still %d; calling provider %q for any other minor fixups", addr, currentVersion, providerType)
54 }
55
56 req := providers.UpgradeResourceStateRequest{
57 TypeName: addr.Resource.Resource.Type,
58
59 // TODO: The internal schema version representations are all using
60 // uint64 instead of int64, but unsigned integers aren't friendly
61 // to all protobuf target languages so in practice we use int64
62 // on the wire. In future we will change all of our internal
63 // representations to int64 too.
64 Version: int64(src.SchemaVersion),
65 }
66
67 if stateIsFlatmap {
68 req.RawStateFlatmap = src.AttrsFlat
69 } else {
70 req.RawStateJSON = src.AttrsJSON
71 }
72
73 resp := provider.UpgradeResourceState(req)
74 diags := resp.Diagnostics
75 if diags.HasErrors() {
76 return nil, diags
77 }
78
79 // After upgrading, the new value must conform to the current schema. When
80 // going over RPC this is actually already ensured by the
81 // marshaling/unmarshaling of the new value, but we'll check it here
82 // anyway for robustness, e.g. for in-process providers.
83 newValue := resp.UpgradedState
84 if errs := newValue.Type().TestConformance(currentSchema.ImpliedType()); len(errs) > 0 {
85 for _, err := range errs {
86 diags = diags.Append(tfdiags.Sourceless(
87 tfdiags.Error,
88 "Invalid resource state upgrade",
89 fmt.Sprintf("The %s provider upgraded the state for %s from a previous version, but produced an invalid result: %s.", providerType, addr, tfdiags.FormatError(err)),
90 ))
91 }
92 return nil, diags
93 }
94
95 new, err := src.CompleteUpgrade(newValue, currentSchema.ImpliedType(), uint64(currentVersion))
96 if err != nil {
97 // We already checked for type conformance above, so getting into this
98 // codepath should be rare and is probably a bug somewhere under CompleteUpgrade.
99 diags = diags.Append(tfdiags.Sourceless(
100 tfdiags.Error,
101 "Failed to encode result of resource state upgrade",
102 fmt.Sprintf("Failed to encode state for %s after resource schema upgrade: %s.", addr, tfdiags.FormatError(err)),
103 ))
104 }
105 return new, diags
106}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
index 3e5a84c..0033e01 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
@@ -2,126 +2,163 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 5 "log"
6 "github.com/hashicorp/terraform/config" 6
7 "github.com/mitchellh/mapstructure" 7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/configs"
10 "github.com/hashicorp/terraform/configs/configschema"
11 "github.com/hashicorp/terraform/providers"
12 "github.com/hashicorp/terraform/provisioners"
13 "github.com/hashicorp/terraform/tfdiags"
14 "github.com/zclconf/go-cty/cty"
15 "github.com/zclconf/go-cty/cty/convert"
16 "github.com/zclconf/go-cty/cty/gocty"
8) 17)
9 18
10// EvalValidateError is the error structure returned if there were
11// validation errors.
12type EvalValidateError struct {
13 Warnings []string
14 Errors []error
15}
16
17func (e *EvalValidateError) Error() string {
18 return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
19}
20
21// EvalValidateCount is an EvalNode implementation that validates 19// EvalValidateCount is an EvalNode implementation that validates
22// the count of a resource. 20// the count of a resource.
23type EvalValidateCount struct { 21type EvalValidateCount struct {
24 Resource *config.Resource 22 Resource *configs.Resource
25} 23}
26 24
27// TODO: test 25// TODO: test
28func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) { 26func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
27 var diags tfdiags.Diagnostics
29 var count int 28 var count int
30 var errs []error
31 var err error 29 var err error
32 if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil { 30
33 errs = append(errs, fmt.Errorf( 31 val, valDiags := ctx.EvaluateExpr(n.Resource.Count, cty.Number, nil)
34 "Failed to interpolate count: %s", err)) 32 diags = diags.Append(valDiags)
33 if valDiags.HasErrors() {
35 goto RETURN 34 goto RETURN
36 } 35 }
37 36 if val.IsNull() || !val.IsKnown() {
38 count, err = n.Resource.Count() 37 goto RETURN
39 if err != nil {
40 // If we can't get the count during validation, then
41 // just replace it with the number 1.
42 c := n.Resource.RawCount.Config()
43 c[n.Resource.RawCount.Key] = "1"
44 count = 1
45 } 38 }
46 err = nil
47 39
48 if count < 0 { 40 err = gocty.FromCtyValue(val, &count)
49 errs = append(errs, fmt.Errorf( 41 if err != nil {
50 "Count is less than zero: %d", count)) 42 // The EvaluateExpr call above already guaranteed us a number value,
43 // so if we end up here then we have something that is out of range
44 // for an int, and the error message will include a description of
45 // the valid range.
46 rawVal := val.AsBigFloat()
47 diags = diags.Append(&hcl.Diagnostic{
48 Severity: hcl.DiagError,
49 Summary: "Invalid count value",
50 Detail: fmt.Sprintf("The number %s is not a valid count value: %s.", rawVal, err),
51 Subject: n.Resource.Count.Range().Ptr(),
52 })
53 } else if count < 0 {
54 rawVal := val.AsBigFloat()
55 diags = diags.Append(&hcl.Diagnostic{
56 Severity: hcl.DiagError,
57 Summary: "Invalid count value",
58 Detail: fmt.Sprintf("The number %s is not a valid count value: count must not be negative.", rawVal),
59 Subject: n.Resource.Count.Range().Ptr(),
60 })
51 } 61 }
52 62
53RETURN: 63RETURN:
54 if len(errs) != 0 { 64 return nil, diags.NonFatalErr()
55 err = &EvalValidateError{
56 Errors: errs,
57 }
58 }
59 return nil, err
60} 65}
61 66
62// EvalValidateProvider is an EvalNode implementation that validates 67// EvalValidateProvider is an EvalNode implementation that validates
63// the configuration of a resource. 68// a provider configuration.
64type EvalValidateProvider struct { 69type EvalValidateProvider struct {
65 Provider *ResourceProvider 70 Addr addrs.ProviderConfig
66 Config **ResourceConfig 71 Provider *providers.Interface
72 Config *configs.Provider
67} 73}
68 74
69func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) { 75func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
76 var diags tfdiags.Diagnostics
70 provider := *n.Provider 77 provider := *n.Provider
71 config := *n.Config
72 78
73 warns, errs := provider.Validate(config) 79 configBody := buildProviderConfig(ctx, n.Addr, n.Config)
74 if len(warns) == 0 && len(errs) == 0 { 80
75 return nil, nil 81 resp := provider.GetSchema()
82 diags = diags.Append(resp.Diagnostics)
83 if diags.HasErrors() {
84 return nil, diags.NonFatalErr()
76 } 85 }
77 86
78 return nil, &EvalValidateError{ 87 configSchema := resp.Provider.Block
79 Warnings: warns, 88 if configSchema == nil {
80 Errors: errs, 89 // Should never happen in real code, but often comes up in tests where
90 // mock schemas are being used that tend to be incomplete.
91 log.Printf("[WARN] EvalValidateProvider: no config schema is available for %s, so using empty schema", n.Addr)
92 configSchema = &configschema.Block{}
81 } 93 }
94
95 configVal, configBody, evalDiags := ctx.EvaluateBlock(configBody, configSchema, nil, EvalDataForNoInstanceKey)
96 diags = diags.Append(evalDiags)
97 if evalDiags.HasErrors() {
98 return nil, diags.NonFatalErr()
99 }
100
101 req := providers.PrepareProviderConfigRequest{
102 Config: configVal,
103 }
104
105 validateResp := provider.PrepareProviderConfig(req)
106 diags = diags.Append(validateResp.Diagnostics)
107
108 return nil, diags.NonFatalErr()
82} 109}
83 110
84// EvalValidateProvisioner is an EvalNode implementation that validates 111// EvalValidateProvisioner is an EvalNode implementation that validates
85// the configuration of a resource. 112// the configuration of a provisioner belonging to a resource. The provisioner
113// config is expected to contain the merged connection configurations.
86type EvalValidateProvisioner struct { 114type EvalValidateProvisioner struct {
87 Provisioner *ResourceProvisioner 115 ResourceAddr addrs.Resource
88 Config **ResourceConfig 116 Provisioner *provisioners.Interface
89 ConnConfig **ResourceConfig 117 Schema **configschema.Block
118 Config *configs.Provisioner
119 ResourceHasCount bool
90} 120}
91 121
92func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) { 122func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
93 provisioner := *n.Provisioner 123 provisioner := *n.Provisioner
94 config := *n.Config 124 config := *n.Config
95 var warns []string 125 schema := *n.Schema
96 var errs []error 126
127 var diags tfdiags.Diagnostics
97 128
98 { 129 {
99 // Validate the provisioner's own config first 130 // Validate the provisioner's own config first
100 w, e := provisioner.Validate(config)
101 warns = append(warns, w...)
102 errs = append(errs, e...)
103 }
104 131
105 { 132 configVal, _, configDiags := n.evaluateBlock(ctx, config.Config, schema)
106 // Now validate the connection config, which might either be from 133 diags = diags.Append(configDiags)
107 // the provisioner block itself or inherited from the resource's 134 if configDiags.HasErrors() {
108 // shared connection info. 135 return nil, diags.Err()
109 w, e := n.validateConnConfig(*n.ConnConfig) 136 }
110 warns = append(warns, w...)
111 errs = append(errs, e...)
112 }
113 137
114 if len(warns) == 0 && len(errs) == 0 { 138 if configVal == cty.NilVal {
115 return nil, nil 139 // Should never happen for a well-behaved EvaluateBlock implementation
140 return nil, fmt.Errorf("EvaluateBlock returned nil value")
141 }
142
143 req := provisioners.ValidateProvisionerConfigRequest{
144 Config: configVal,
145 }
146
147 resp := provisioner.ValidateProvisionerConfig(req)
148 diags = diags.Append(resp.Diagnostics)
116 } 149 }
117 150
118 return nil, &EvalValidateError{ 151 {
119 Warnings: warns, 152 // Now validate the connection config, which contains the merged bodies
120 Errors: errs, 153 // of the resource and provisioner connection blocks.
154 connDiags := n.validateConnConfig(ctx, config.Connection, n.ResourceAddr)
155 diags = diags.Append(connDiags)
121 } 156 }
157
158 return nil, diags.NonFatalErr()
122} 159}
123 160
124func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) { 161func (n *EvalValidateProvisioner) validateConnConfig(ctx EvalContext, config *configs.Connection, self addrs.Referenceable) tfdiags.Diagnostics {
125 // We can't comprehensively validate the connection config since its 162 // We can't comprehensively validate the connection config since its
126 // final structure is decided by the communicator and we can't instantiate 163 // final structure is decided by the communicator and we can't instantiate
127 // that until we have a complete instance state. However, we *can* catch 164 // that until we have a complete instance state. However, we *can* catch
@@ -129,103 +166,379 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig)
129 // typos early rather than waiting until we actually try to run one of 166 // typos early rather than waiting until we actually try to run one of
130 // the resource's provisioners. 167 // the resource's provisioners.
131 168
132 type connConfigSuperset struct { 169 var diags tfdiags.Diagnostics
133 // All attribute types are interface{} here because at this point we
134 // may still have unresolved interpolation expressions, which will
135 // appear as strings regardless of the final goal type.
136 170
137 Type interface{} `mapstructure:"type"` 171 if config == nil || config.Config == nil {
138 User interface{} `mapstructure:"user"` 172 // No block to validate
139 Password interface{} `mapstructure:"password"` 173 return diags
140 Host interface{} `mapstructure:"host"` 174 }
141 Port interface{} `mapstructure:"port"`
142 Timeout interface{} `mapstructure:"timeout"`
143 ScriptPath interface{} `mapstructure:"script_path"`
144 175
145 // For type=ssh only (enforced in ssh communicator) 176 // We evaluate here just by evaluating the block and returning any
146 PrivateKey interface{} `mapstructure:"private_key"` 177 // diagnostics we get, since evaluation alone is enough to check for
147 HostKey interface{} `mapstructure:"host_key"` 178 // extraneous arguments and incorrectly-typed arguments.
148 Agent interface{} `mapstructure:"agent"` 179 _, _, configDiags := n.evaluateBlock(ctx, config.Config, connectionBlockSupersetSchema)
149 BastionHost interface{} `mapstructure:"bastion_host"` 180 diags = diags.Append(configDiags)
150 BastionHostKey interface{} `mapstructure:"bastion_host_key"`
151 BastionPort interface{} `mapstructure:"bastion_port"`
152 BastionUser interface{} `mapstructure:"bastion_user"`
153 BastionPassword interface{} `mapstructure:"bastion_password"`
154 BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
155 AgentIdentity interface{} `mapstructure:"agent_identity"`
156 181
157 // For type=winrm only (enforced in winrm communicator) 182 return diags
158 HTTPS interface{} `mapstructure:"https"` 183}
159 Insecure interface{} `mapstructure:"insecure"`
160 NTLM interface{} `mapstructure:"use_ntlm"`
161 CACert interface{} `mapstructure:"cacert"`
162 }
163 184
164 var metadata mapstructure.Metadata 185func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body, schema *configschema.Block) (cty.Value, hcl.Body, tfdiags.Diagnostics) {
165 decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ 186 keyData := EvalDataForNoInstanceKey
166 Metadata: &metadata, 187 selfAddr := n.ResourceAddr.Instance(addrs.NoKey)
167 Result: &connConfigSuperset{}, // result is disregarded; we only care about unused keys
168 })
169 if err != nil {
170 // should never happen
171 errs = append(errs, err)
172 return
173 }
174 188
175 if err := decoder.Decode(connConfig.Config); err != nil { 189 if n.ResourceHasCount {
176 errs = append(errs, err) 190 // For a resource that has count, we allow count.index but don't
177 return 191 // know at this stage what it will return.
178 } 192 keyData = InstanceKeyEvalData{
193 CountIndex: cty.UnknownVal(cty.Number),
194 }
179 195
180 for _, attrName := range metadata.Unused { 196 // "self" can't point to an unknown key, but we'll force it to be
181 errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName)) 197 // key 0 here, which should return an unknown value of the
198 // expected type since none of these elements are known at this
199 // point anyway.
200 selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0))
182 } 201 }
183 return 202
203 return ctx.EvaluateBlock(body, schema, selfAddr, keyData)
204}
205
206// connectionBlockSupersetSchema is a schema representing the superset of all
207// possible arguments for "connection" blocks across all supported connection
208// types.
209//
210// This currently lives here because we've not yet updated our communicator
211// subsystem to be aware of schema itself. Once that is done, we can remove
212// this and use a type-specific schema from the communicator to validate
213// exactly what is expected for a given connection type.
214var connectionBlockSupersetSchema = &configschema.Block{
215 Attributes: map[string]*configschema.Attribute{
216 // NOTE: "type" is not included here because it's treated special
217 // by the config loader and stored away in a separate field.
218
219 // Common attributes for both connection types
220 "host": {
221 Type: cty.String,
222 Required: true,
223 },
224 "type": {
225 Type: cty.String,
226 Optional: true,
227 },
228 "user": {
229 Type: cty.String,
230 Optional: true,
231 },
232 "password": {
233 Type: cty.String,
234 Optional: true,
235 },
236 "port": {
237 Type: cty.String,
238 Optional: true,
239 },
240 "timeout": {
241 Type: cty.String,
242 Optional: true,
243 },
244 "script_path": {
245 Type: cty.String,
246 Optional: true,
247 },
248
249 // For type=ssh only (enforced in ssh communicator)
250 "private_key": {
251 Type: cty.String,
252 Optional: true,
253 },
254 "certificate": {
255 Type: cty.String,
256 Optional: true,
257 },
258 "host_key": {
259 Type: cty.String,
260 Optional: true,
261 },
262 "agent": {
263 Type: cty.Bool,
264 Optional: true,
265 },
266 "agent_identity": {
267 Type: cty.String,
268 Optional: true,
269 },
270 "bastion_host": {
271 Type: cty.String,
272 Optional: true,
273 },
274 "bastion_host_key": {
275 Type: cty.String,
276 Optional: true,
277 },
278 "bastion_port": {
279 Type: cty.Number,
280 Optional: true,
281 },
282 "bastion_user": {
283 Type: cty.String,
284 Optional: true,
285 },
286 "bastion_password": {
287 Type: cty.String,
288 Optional: true,
289 },
290 "bastion_private_key": {
291 Type: cty.String,
292 Optional: true,
293 },
294
295 // For type=winrm only (enforced in winrm communicator)
296 "https": {
297 Type: cty.Bool,
298 Optional: true,
299 },
300 "insecure": {
301 Type: cty.Bool,
302 Optional: true,
303 },
304 "cacert": {
305 Type: cty.String,
306 Optional: true,
307 },
308 "use_ntlm": {
309 Type: cty.Bool,
310 Optional: true,
311 },
312 },
313}
314
315// connectionBlockSupersetSchema is a schema representing the superset of all
316// possible arguments for "connection" blocks across all supported connection
317// types.
318//
319// This currently lives here because we've not yet updated our communicator
320// subsystem to be aware of schema itself. It's exported only for use in the
321// configs/configupgrade package and should not be used from anywhere else.
322// The caller may not modify any part of the returned schema data structure.
323func ConnectionBlockSupersetSchema() *configschema.Block {
324 return connectionBlockSupersetSchema
184} 325}
185 326
186// EvalValidateResource is an EvalNode implementation that validates 327// EvalValidateResource is an EvalNode implementation that validates
187// the configuration of a resource. 328// the configuration of a resource.
188type EvalValidateResource struct { 329type EvalValidateResource struct {
189 Provider *ResourceProvider 330 Addr addrs.Resource
190 Config **ResourceConfig 331 Provider *providers.Interface
191 ResourceName string 332 ProviderSchema **ProviderSchema
192 ResourceType string 333 Config *configs.Resource
193 ResourceMode config.ResourceMode
194 334
195 // IgnoreWarnings means that warnings will not be passed through. This allows 335 // IgnoreWarnings means that warnings will not be passed through. This allows
196 // "just-in-time" passes of validation to continue execution through warnings. 336 // "just-in-time" passes of validation to continue execution through warnings.
197 IgnoreWarnings bool 337 IgnoreWarnings bool
338
339 // ConfigVal, if non-nil, will be updated with the value resulting from
340 // evaluating the given configuration body. Since validation is performed
341 // very early, this value is likely to contain lots of unknown values,
342 // but its type will conform to the schema of the resource type associated
343 // with the resource instance being validated.
344 ConfigVal *cty.Value
198} 345}
199 346
200func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) { 347func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
348 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
349 return nil, fmt.Errorf("EvalValidateResource has nil schema for %s", n.Addr)
350 }
351
352 var diags tfdiags.Diagnostics
201 provider := *n.Provider 353 provider := *n.Provider
202 cfg := *n.Config 354 cfg := *n.Config
203 var warns []string 355 schema := *n.ProviderSchema
204 var errs []error 356 mode := cfg.Mode
357
358 keyData := EvalDataForNoInstanceKey
359 if n.Config.Count != nil {
360 // If the config block has count, we'll evaluate with an unknown
361 // number as count.index so we can still type check even though
362 // we won't expand count until the plan phase.
363 keyData = InstanceKeyEvalData{
364 CountIndex: cty.UnknownVal(cty.Number),
365 }
366
367 // Basic type-checking of the count argument. More complete validation
368 // of this will happen when we DynamicExpand during the plan walk.
369 countDiags := n.validateCount(ctx, n.Config.Count)
370 diags = diags.Append(countDiags)
371 }
372
373 for _, traversal := range n.Config.DependsOn {
374 ref, refDiags := addrs.ParseRef(traversal)
375 diags = diags.Append(refDiags)
376 if len(ref.Remaining) != 0 {
377 diags = diags.Append(&hcl.Diagnostic{
378 Severity: hcl.DiagError,
379 Summary: "Invalid depends_on reference",
380 Detail: "References in depends_on must be to a whole object (resource, etc), not to an attribute of an object.",
381 Subject: ref.Remaining.SourceRange().Ptr(),
382 })
383 }
384
385 // The ref must also refer to something that exists. To test that,
386 // we'll just eval it and count on the fact that our evaluator will
387 // detect references to non-existent objects.
388 if !diags.HasErrors() {
389 scope := ctx.EvaluationScope(nil, EvalDataForNoInstanceKey)
390 if scope != nil { // sometimes nil in tests, due to incomplete mocks
391 _, refDiags = scope.EvalReference(ref, cty.DynamicPseudoType)
392 diags = diags.Append(refDiags)
393 }
394 }
395 }
396
205 // Provider entry point varies depending on resource mode, because 397 // Provider entry point varies depending on resource mode, because
206 // managed resources and data resources are two distinct concepts 398 // managed resources and data resources are two distinct concepts
207 // in the provider abstraction. 399 // in the provider abstraction.
208 switch n.ResourceMode { 400 switch mode {
209 case config.ManagedResourceMode: 401 case addrs.ManagedResourceMode:
210 warns, errs = provider.ValidateResource(n.ResourceType, cfg) 402 schema, _ := schema.SchemaForResourceType(mode, cfg.Type)
211 case config.DataResourceMode: 403 if schema == nil {
212 warns, errs = provider.ValidateDataSource(n.ResourceType, cfg) 404 diags = diags.Append(&hcl.Diagnostic{
213 } 405 Severity: hcl.DiagError,
406 Summary: "Invalid resource type",
407 Detail: fmt.Sprintf("The provider %s does not support resource type %q.", cfg.ProviderConfigAddr(), cfg.Type),
408 Subject: &cfg.TypeRange,
409 })
410 return nil, diags.Err()
411 }
412
413 configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData)
414 diags = diags.Append(valDiags)
415 if valDiags.HasErrors() {
416 return nil, diags.Err()
417 }
418
419 if cfg.Managed != nil { // can be nil only in tests with poorly-configured mocks
420 for _, traversal := range cfg.Managed.IgnoreChanges {
421 moreDiags := schema.StaticValidateTraversal(traversal)
422 diags = diags.Append(moreDiags)
423 }
424 }
425
426 req := providers.ValidateResourceTypeConfigRequest{
427 TypeName: cfg.Type,
428 Config: configVal,
429 }
430
431 resp := provider.ValidateResourceTypeConfig(req)
432 diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config))
433
434 if n.ConfigVal != nil {
435 *n.ConfigVal = configVal
436 }
437
438 case addrs.DataResourceMode:
439 schema, _ := schema.SchemaForResourceType(mode, cfg.Type)
440 if schema == nil {
441 diags = diags.Append(&hcl.Diagnostic{
442 Severity: hcl.DiagError,
443 Summary: "Invalid data source",
444 Detail: fmt.Sprintf("The provider %s does not support data source %q.", cfg.ProviderConfigAddr(), cfg.Type),
445 Subject: &cfg.TypeRange,
446 })
447 return nil, diags.Err()
448 }
449
450 configVal, _, valDiags := ctx.EvaluateBlock(cfg.Config, schema, nil, keyData)
451 diags = diags.Append(valDiags)
452 if valDiags.HasErrors() {
453 return nil, diags.Err()
454 }
214 455
215 // If the resource name doesn't match the name regular 456 req := providers.ValidateDataSourceConfigRequest{
216 // expression, show an error. 457 TypeName: cfg.Type,
217 if !config.NameRegexp.Match([]byte(n.ResourceName)) { 458 Config: configVal,
218 errs = append(errs, fmt.Errorf( 459 }
219 "%s: resource name can only contain letters, numbers, "+ 460
220 "dashes, and underscores.", n.ResourceName)) 461 resp := provider.ValidateDataSourceConfig(req)
462 diags = diags.Append(resp.Diagnostics.InConfigBody(cfg.Config))
221 } 463 }
222 464
223 if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 { 465 if n.IgnoreWarnings {
466 // If we _only_ have warnings then we'll return nil.
467 if diags.HasErrors() {
468 return nil, diags.NonFatalErr()
469 }
224 return nil, nil 470 return nil, nil
471 } else {
472 // We'll return an error if there are any diagnostics at all, even if
473 // some of them are warnings.
474 return nil, diags.NonFatalErr()
475 }
476}
477
478func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expression) tfdiags.Diagnostics {
479 if expr == nil {
480 return nil
481 }
482
483 var diags tfdiags.Diagnostics
484
485 countVal, countDiags := ctx.EvaluateExpr(expr, cty.Number, nil)
486 diags = diags.Append(countDiags)
487 if diags.HasErrors() {
488 return diags
489 }
490
491 if countVal.IsNull() {
492 diags = diags.Append(&hcl.Diagnostic{
493 Severity: hcl.DiagError,
494 Summary: "Invalid count argument",
495 Detail: `The given "count" argument value is null. An integer is required.`,
496 Subject: expr.Range().Ptr(),
497 })
498 return diags
225 } 499 }
226 500
227 return nil, &EvalValidateError{ 501 var err error
228 Warnings: warns, 502 countVal, err = convert.Convert(countVal, cty.Number)
229 Errors: errs, 503 if err != nil {
504 diags = diags.Append(&hcl.Diagnostic{
505 Severity: hcl.DiagError,
506 Summary: "Invalid count argument",
507 Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
508 Subject: expr.Range().Ptr(),
509 })
510 return diags
230 } 511 }
512
513 // If the value isn't known then that's the best we can do for now, but
514 // we'll check more thoroughly during the plan walk.
515 if !countVal.IsKnown() {
516 return diags
517 }
518
519 // If we _do_ know the value, then we can do a few more checks here.
520 var count int
521 err = gocty.FromCtyValue(countVal, &count)
522 if err != nil {
523 // Isn't a whole number, etc.
524 diags = diags.Append(&hcl.Diagnostic{
525 Severity: hcl.DiagError,
526 Summary: "Invalid count argument",
527 Detail: fmt.Sprintf(`The given "count" argument value is unsuitable: %s.`, err),
528 Subject: expr.Range().Ptr(),
529 })
530 return diags
531 }
532
533 if count < 0 {
534 diags = diags.Append(&hcl.Diagnostic{
535 Severity: hcl.DiagError,
536 Summary: "Invalid count argument",
537 Detail: `The given "count" argument value is unsuitable: count cannot be negative.`,
538 Subject: expr.Range().Ptr(),
539 })
540 return diags
541 }
542
543 return diags
231} 544}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
index ae4436a..edd604f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
@@ -3,72 +3,65 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/hcl2/hcl"
7
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/configs/configschema"
10 "github.com/hashicorp/terraform/lang"
11 "github.com/hashicorp/terraform/tfdiags"
7) 12)
8 13
9// EvalValidateResourceSelfRef is an EvalNode implementation that validates that 14// EvalValidateSelfRef is an EvalNode implementation that checks to ensure that
10// a configuration doesn't contain a reference to the resource itself. 15// expressions within a particular referencable block do not reference that
11// 16// same block.
12// This must be done prior to interpolating configuration in order to avoid 17type EvalValidateSelfRef struct {
13// any infinite loop scenarios. 18 Addr addrs.Referenceable
14type EvalValidateResourceSelfRef struct { 19 Config hcl.Body
15 Addr **ResourceAddress 20 ProviderSchema **ProviderSchema
16 Config **config.RawConfig
17} 21}
18 22
19func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) { 23func (n *EvalValidateSelfRef) Eval(ctx EvalContext) (interface{}, error) {
20 addr := *n.Addr 24 var diags tfdiags.Diagnostics
21 conf := *n.Config 25 addr := n.Addr
22
23 // Go through the variables and find self references
24 var errs []error
25 for k, raw := range conf.Variables {
26 rv, ok := raw.(*config.ResourceVariable)
27 if !ok {
28 continue
29 }
30
31 // Build an address from the variable
32 varAddr := &ResourceAddress{
33 Path: addr.Path,
34 Mode: rv.Mode,
35 Type: rv.Type,
36 Name: rv.Name,
37 Index: rv.Index,
38 InstanceType: TypePrimary,
39 }
40 26
41 // If the variable access is a multi-access (*), then we just 27 addrStrs := make([]string, 0, 1)
42 // match the index so that we'll match our own addr if everything 28 addrStrs = append(addrStrs, addr.String())
43 // else matches. 29 switch tAddr := addr.(type) {
44 if rv.Multi && rv.Index == -1 { 30 case addrs.ResourceInstance:
45 varAddr.Index = addr.Index 31 // A resource instance may not refer to its containing resource either.
46 } 32 addrStrs = append(addrStrs, tAddr.ContainingResource().String())
33 }
47 34
48 // This is a weird thing where ResourceAddres has index "-1" when 35 if n.ProviderSchema == nil || *n.ProviderSchema == nil {
49 // index isn't set at all. This means index "0" for resource access. 36 return nil, fmt.Errorf("provider schema unavailable while validating %s for self-references; this is a bug in Terraform and should be reported", addr)
50 // So, if we have this scenario, just set our varAddr to -1 so it 37 }
51 // matches.
52 if addr.Index == -1 && varAddr.Index == 0 {
53 varAddr.Index = -1
54 }
55 38
56 // If the addresses match, then this is a self reference 39 providerSchema := *n.ProviderSchema
57 if varAddr.Equals(addr) && varAddr.Index == addr.Index { 40 var schema *configschema.Block
58 errs = append(errs, fmt.Errorf( 41 switch tAddr := addr.(type) {
59 "%s: self reference not allowed: %q", 42 case addrs.Resource:
60 addr, k)) 43 schema, _ = providerSchema.SchemaForResourceAddr(tAddr)
61 } 44 case addrs.ResourceInstance:
45 schema, _ = providerSchema.SchemaForResourceAddr(tAddr.ContainingResource())
62 } 46 }
63 47
64 // If no errors, no errors! 48 if schema == nil {
65 if len(errs) == 0 { 49 return nil, fmt.Errorf("no schema available for %s to validate for self-references; this is a bug in Terraform and should be reported", addr)
66 return nil, nil
67 } 50 }
68 51
69 // Wrap the errors in the proper wrapper so we can handle validation 52 refs, _ := lang.ReferencesInBlock(n.Config, schema)
70 // formatting properly upstream. 53 for _, ref := range refs {
71 return nil, &EvalValidateError{ 54 for _, addrStr := range addrStrs {
72 Errors: errs, 55 if ref.Subject.String() == addrStr {
56 diags = diags.Append(&hcl.Diagnostic{
57 Severity: hcl.DiagError,
58 Summary: "Self-referential block",
59 Detail: fmt.Sprintf("Configuration for %s may not refer to itself.", addrStr),
60 Subject: ref.SourceRange.ToHCL().Ptr(),
61 })
62 }
63 }
73 } 64 }
65
66 return nil, diags.NonFatalErr()
74} 67}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
index e39a33c..68adf76 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
@@ -4,12 +4,17 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "reflect" 6 "reflect"
7 "strconv"
8 "strings" 7 "strings"
9 8
9 "github.com/hashicorp/hcl2/hcl"
10 "github.com/hashicorp/terraform/configs"
11
12 "github.com/hashicorp/terraform/addrs"
13
10 "github.com/hashicorp/terraform/config" 14 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/config/module" 15 "github.com/hashicorp/terraform/config/module"
12 "github.com/hashicorp/terraform/helper/hilmapstructure" 16 "github.com/zclconf/go-cty/cty"
17 "github.com/zclconf/go-cty/cty/convert"
13) 18)
14 19
15// EvalTypeCheckVariable is an EvalNode which ensures that the variable 20// EvalTypeCheckVariable is an EvalNode which ensures that the variable
@@ -93,166 +98,88 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
93 return nil, nil 98 return nil, nil
94} 99}
95 100
96// EvalSetVariables is an EvalNode implementation that sets the variables 101// EvalSetModuleCallArguments is an EvalNode implementation that sets values
97// explicitly for interpolation later. 102// for arguments of a child module call, for later retrieval during
98type EvalSetVariables struct { 103// expression evaluation.
99 Module *string 104type EvalSetModuleCallArguments struct {
100 Variables map[string]interface{} 105 Module addrs.ModuleCallInstance
106 Values map[string]cty.Value
101} 107}
102 108
103// TODO: test 109// TODO: test
104func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) { 110func (n *EvalSetModuleCallArguments) Eval(ctx EvalContext) (interface{}, error) {
105 ctx.SetVariables(*n.Module, n.Variables) 111 ctx.SetModuleCallArguments(n.Module, n.Values)
106 return nil, nil 112 return nil, nil
107} 113}
108 114
109// EvalVariableBlock is an EvalNode implementation that evaluates the 115// EvalModuleCallArgument is an EvalNode implementation that produces the value
110// given configuration, and uses the final values as a way to set the 116// for a particular variable as will be used by a child module instance.
111// mapping. 117//
112type EvalVariableBlock struct { 118// The result is written into the map given in Values, with its key
113 Config **ResourceConfig 119// set to the local name of the variable, disregarding the module instance
114 VariableValues map[string]interface{} 120// address. Any existing values in that map are deleted first. This weird
121// interface is a result of trying to be convenient for use with
122// EvalContext.SetModuleCallArguments, which expects a map to merge in with
123// any existing arguments.
124type EvalModuleCallArgument struct {
125 Addr addrs.InputVariable
126 Config *configs.Variable
127 Expr hcl.Expression
128
129 // If this flag is set, any diagnostics are discarded and this operation
130 // will always succeed, though may produce an unknown value in the
131 // event of an error.
132 IgnoreDiagnostics bool
133
134 Values map[string]cty.Value
115} 135}
116 136
117func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { 137func (n *EvalModuleCallArgument) Eval(ctx EvalContext) (interface{}, error) {
118 // Clear out the existing mapping 138 // Clear out the existing mapping
119 for k, _ := range n.VariableValues { 139 for k := range n.Values {
120 delete(n.VariableValues, k) 140 delete(n.Values, k)
121 }
122
123 // Get our configuration
124 rc := *n.Config
125 for k, v := range rc.Config {
126 vKind := reflect.ValueOf(v).Type().Kind()
127
128 switch vKind {
129 case reflect.Slice:
130 var vSlice []interface{}
131 if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
132 n.VariableValues[k] = vSlice
133 continue
134 }
135 case reflect.Map:
136 var vMap map[string]interface{}
137 if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
138 n.VariableValues[k] = vMap
139 continue
140 }
141 default:
142 var vString string
143 if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
144 n.VariableValues[k] = vString
145 continue
146 }
147 }
148
149 return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
150 }
151
152 for _, path := range rc.ComputedKeys {
153 log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
154 err := n.setUnknownVariableValueForPath(path)
155 if err != nil {
156 return nil, err
157 }
158 } 141 }
159 142
160 return nil, nil 143 wantType := n.Config.Type
161} 144 name := n.Addr.Name
162 145 expr := n.Expr
163func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error { 146
164 pathComponents := strings.Split(path, ".") 147 if expr == nil {
165 148 // Should never happen, but we'll bail out early here rather than
166 if len(pathComponents) < 1 { 149 // crash in case it does. We set no value at all in this case,
167 return fmt.Errorf("No path comoponents in %s", path) 150 // making a subsequent call to EvalContext.SetModuleCallArguments
151 // a no-op.
152 log.Printf("[ERROR] attempt to evaluate %s with nil expression", n.Addr.String())
153 return nil, nil
168 } 154 }
169 155
170 if len(pathComponents) == 1 { 156 val, diags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil)
171 // Special case the "top level" since we know the type 157
172 if _, ok := n.VariableValues[pathComponents[0]]; !ok { 158 // We intentionally passed DynamicPseudoType to EvaluateExpr above because
173 n.VariableValues[pathComponents[0]] = config.UnknownVariableValue 159 // now we can do our own local type conversion and produce an error message
174 } 160 // with better context if it fails.
175 return nil 161 var convErr error
162 val, convErr = convert.Convert(val, wantType)
163 if convErr != nil {
164 diags = diags.Append(&hcl.Diagnostic{
165 Severity: hcl.DiagError,
166 Summary: "Invalid value for module argument",
167 Detail: fmt.Sprintf(
168 "The given value is not suitable for child module variable %q defined at %s: %s.",
169 name, n.Config.DeclRange.String(), convErr,
170 ),
171 Subject: expr.Range().Ptr(),
172 })
173 // We'll return a placeholder unknown value to avoid producing
174 // redundant downstream errors.
175 val = cty.UnknownVal(wantType)
176 } 176 }
177 177
178 // Otherwise find the correct point in the tree and then set to unknown 178 n.Values[name] = val
179 var current interface{} = n.VariableValues[pathComponents[0]] 179 if n.IgnoreDiagnostics {
180 for i := 1; i < len(pathComponents); i++ { 180 return nil, nil
181 switch tCurrent := current.(type) {
182 case []interface{}:
183 index, err := strconv.Atoi(pathComponents[i])
184 if err != nil {
185 return fmt.Errorf("Cannot convert %s to slice index in path %s",
186 pathComponents[i], path)
187 }
188 current = tCurrent[index]
189 case []map[string]interface{}:
190 index, err := strconv.Atoi(pathComponents[i])
191 if err != nil {
192 return fmt.Errorf("Cannot convert %s to slice index in path %s",
193 pathComponents[i], path)
194 }
195 current = tCurrent[index]
196 case map[string]interface{}:
197 if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
198 current = val
199 continue
200 }
201
202 tCurrent[pathComponents[i]] = config.UnknownVariableValue
203 break
204 }
205 } 181 }
206 182 return nil, diags.ErrWithWarnings()
207 return nil
208}
209
210// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
211// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
212// bare map literal is indistinguishable from a list of maps w/ one element.
213//
214// We take all the same inputs as EvalTypeCheckVariable above, since we need
215// both the target type and the proposed value in order to properly coerce.
216type EvalCoerceMapVariable struct {
217 Variables map[string]interface{}
218 ModulePath []string
219 ModuleTree *module.Tree
220}
221
222// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
223// details.
224func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
225 currentTree := n.ModuleTree
226 for _, pathComponent := range n.ModulePath[1:] {
227 currentTree = currentTree.Children()[pathComponent]
228 }
229 targetConfig := currentTree.Config()
230
231 prototypes := make(map[string]config.VariableType)
232 for _, variable := range targetConfig.Variables {
233 prototypes[variable.Name] = variable.Type()
234 }
235
236 for name, declaredType := range prototypes {
237 if declaredType != config.VariableTypeMap {
238 continue
239 }
240
241 proposedValue, ok := n.Variables[name]
242 if !ok {
243 continue
244 }
245
246 if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
247 if m, ok := list[0].(map[string]interface{}); ok {
248 log.Printf("[DEBUG] EvalCoerceMapVariable: "+
249 "Coercing single element list into map: %#v", m)
250 n.Variables[name] = m
251 }
252 }
253 }
254
255 return nil, nil
256} 183}
257 184
258// hclTypeName returns the name of the type that would represent this value in 185// hclTypeName returns the name of the type that would represent this value in
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
index 0c3da48..6b4df67 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
@@ -1,48 +1,34 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "strings" 4 "github.com/hashicorp/terraform/addrs"
5 5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/providers"
7) 7)
8 8
9// ProviderEvalTree returns the evaluation tree for initializing and 9// ProviderEvalTree returns the evaluation tree for initializing and
10// configuring providers. 10// configuring providers.
11func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) EvalNode { 11func ProviderEvalTree(n *NodeApplyableProvider, config *configs.Provider) EvalNode {
12 var provider ResourceProvider 12 var provider providers.Interface
13 var resourceConfig *ResourceConfig
14 13
15 typeName := strings.SplitN(n.NameValue, ".", 2)[0] 14 addr := n.Addr
15 relAddr := addr.ProviderConfig
16 16
17 seq := make([]EvalNode, 0, 5) 17 seq := make([]EvalNode, 0, 5)
18 seq = append(seq, &EvalInitProvider{ 18 seq = append(seq, &EvalInitProvider{
19 TypeName: typeName, 19 TypeName: relAddr.Type,
20 Name: n.Name(), 20 Addr: addr.ProviderConfig,
21 }) 21 })
22 22
23 // Input stuff 23 // Input stuff
24 seq = append(seq, &EvalOpFilter{ 24 seq = append(seq, &EvalOpFilter{
25 Ops: []walkOperation{walkInput, walkImport}, 25 Ops: []walkOperation{walkImport},
26 Node: &EvalSequence{ 26 Node: &EvalSequence{
27 Nodes: []EvalNode{ 27 Nodes: []EvalNode{
28 &EvalGetProvider{ 28 &EvalGetProvider{
29 Name: n.Name(), 29 Addr: addr,
30 Output: &provider, 30 Output: &provider,
31 }, 31 },
32 &EvalInterpolateProvider{
33 Config: config,
34 Output: &resourceConfig,
35 },
36 &EvalBuildProviderConfig{
37 Provider: n.NameValue,
38 Config: &resourceConfig,
39 Output: &resourceConfig,
40 },
41 &EvalInputProvider{
42 Name: n.NameValue,
43 Provider: &provider,
44 Config: &resourceConfig,
45 },
46 }, 32 },
47 }, 33 },
48 }) 34 })
@@ -52,21 +38,13 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
52 Node: &EvalSequence{ 38 Node: &EvalSequence{
53 Nodes: []EvalNode{ 39 Nodes: []EvalNode{
54 &EvalGetProvider{ 40 &EvalGetProvider{
55 Name: n.Name(), 41 Addr: addr,
56 Output: &provider, 42 Output: &provider,
57 }, 43 },
58 &EvalInterpolateProvider{
59 Config: config,
60 Output: &resourceConfig,
61 },
62 &EvalBuildProviderConfig{
63 Provider: n.NameValue,
64 Config: &resourceConfig,
65 Output: &resourceConfig,
66 },
67 &EvalValidateProvider{ 44 &EvalValidateProvider{
45 Addr: relAddr,
68 Provider: &provider, 46 Provider: &provider,
69 Config: &resourceConfig, 47 Config: config,
70 }, 48 },
71 }, 49 },
72 }, 50 },
@@ -78,18 +56,9 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
78 Node: &EvalSequence{ 56 Node: &EvalSequence{
79 Nodes: []EvalNode{ 57 Nodes: []EvalNode{
80 &EvalGetProvider{ 58 &EvalGetProvider{
81 Name: n.Name(), 59 Addr: addr,
82 Output: &provider, 60 Output: &provider,
83 }, 61 },
84 &EvalInterpolateProvider{
85 Config: config,
86 Output: &resourceConfig,
87 },
88 &EvalBuildProviderConfig{
89 Provider: n.NameValue,
90 Config: &resourceConfig,
91 Output: &resourceConfig,
92 },
93 }, 62 },
94 }, 63 },
95 }) 64 })
@@ -101,8 +70,9 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
101 Node: &EvalSequence{ 70 Node: &EvalSequence{
102 Nodes: []EvalNode{ 71 Nodes: []EvalNode{
103 &EvalConfigProvider{ 72 &EvalConfigProvider{
104 Provider: n.Name(), 73 Addr: relAddr,
105 Config: &resourceConfig, 74 Provider: &provider,
75 Config: config,
106 }, 76 },
107 }, 77 },
108 }, 78 },
@@ -113,6 +83,6 @@ func ProviderEvalTree(n *NodeApplyableProvider, config *config.ProviderConfig) E
113 83
114// CloseProviderEvalTree returns the evaluation tree for closing 84// CloseProviderEvalTree returns the evaluation tree for closing
115// provider connections that aren't needed anymore. 85// provider connections that aren't needed anymore.
116func CloseProviderEvalTree(n string) EvalNode { 86func CloseProviderEvalTree(addr addrs.AbsProviderConfig) EvalNode {
117 return &EvalCloseProvider{Name: n} 87 return &EvalCloseProvider{Addr: addr.ProviderConfig}
118} 88}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go
new file mode 100644
index 0000000..ab65d47
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaluate.go
@@ -0,0 +1,933 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6 "os"
7 "path/filepath"
8 "strconv"
9 "sync"
10
11 "github.com/agext/levenshtein"
12 "github.com/hashicorp/hcl2/hcl"
13 "github.com/zclconf/go-cty/cty"
14 "github.com/zclconf/go-cty/cty/convert"
15
16 "github.com/hashicorp/terraform/addrs"
17 "github.com/hashicorp/terraform/configs"
18 "github.com/hashicorp/terraform/configs/configschema"
19 "github.com/hashicorp/terraform/lang"
20 "github.com/hashicorp/terraform/plans"
21 "github.com/hashicorp/terraform/states"
22 "github.com/hashicorp/terraform/tfdiags"
23)
24
// Evaluator provides the necessary contextual data for evaluating expressions
// for a particular walk operation.
type Evaluator struct {
	// Operation defines what type of operation this evaluator is being used
	// for.
	Operation walkOperation

	// Meta is contextual metadata about the current operation.
	Meta *ContextMeta

	// Config is the root node in the configuration tree.
	Config *configs.Config

	// VariableValues is a map from variable names to their associated values,
	// within the module indicated by ModulePath. VariableValues is modified
	// concurrently, and so it must be accessed only while holding
	// VariableValuesLock.
	//
	// The first map level is string representations of addr.ModuleInstance
	// values, while the second level is variable names.
	VariableValues map[string]map[string]cty.Value
	// VariableValuesLock guards all reads and writes of VariableValues.
	VariableValuesLock *sync.Mutex

	// Schemas is a repository of all of the schemas we should need to
	// evaluate expressions. This must be constructed by the caller to
	// include schemas for all of the providers, resource types, data sources
	// and provisioners used by the given configuration and state.
	//
	// This must not be mutated during evaluation.
	Schemas *Schemas

	// State is the current state, embedded in a wrapper that ensures that
	// it can be safely accessed and modified concurrently.
	State *states.SyncState

	// Changes is the set of proposed changes, embedded in a wrapper that
	// ensures they can be safely accessed and modified concurrently.
	Changes *plans.ChangesSync
}
64
65// Scope creates an evaluation scope for the given module path and optional
66// resource.
67//
68// If the "self" argument is nil then the "self" object is not available
69// in evaluated expressions. Otherwise, it behaves as an alias for the given
70// address.
71func (e *Evaluator) Scope(data lang.Data, self addrs.Referenceable) *lang.Scope {
72 return &lang.Scope{
73 Data: data,
74 SelfAddr: self,
75 PureOnly: e.Operation != walkApply && e.Operation != walkDestroy,
76 BaseDir: ".", // Always current working directory for now.
77 }
78}
79
// evaluationStateData is an implementation of lang.Data that resolves
// references primarily (but not exclusively) using information from a State.
type evaluationStateData struct {
	// Evaluator supplies access to the configuration, state, planned
	// changes, schemas and variable values used during resolution.
	Evaluator *Evaluator

	// ModulePath is the path through the dynamic module tree to the module
	// that references will be resolved relative to.
	ModulePath addrs.ModuleInstance

	// InstanceKeyData describes the values, if any, that are accessible due
	// to repetition of a containing object using "count" or "for_each"
	// arguments. (It is _not_ used for the for_each inside "dynamic" blocks,
	// since the user specifies in that case which variable name to locally
	// shadow.)
	InstanceKeyData InstanceKeyEvalData

	// Operation records the type of walk the evaluationStateData is being used
	// for.
	Operation walkOperation
}
100
// InstanceKeyEvalData is used during evaluation to specify which values,
// if any, should be produced for count.index, each.key, and each.value.
type InstanceKeyEvalData struct {
	// CountIndex is the value for count.index, or cty.NilVal if evaluating
	// in a context where the "count" argument is not active.
	//
	// For correct operation, this should always be of type cty.Number if not
	// nil.
	CountIndex cty.Value

	// EachKey and EachValue are the values for each.key and each.value
	// respectively, or cty.NilVal if evaluating in a context where the
	// "for_each" argument is not active. These must either both be set
	// or neither set.
	//
	// For correct operation, EachKey must always be either of type cty.String
	// or cty.Number if not nil.
	EachKey, EachValue cty.Value
}
120
121// EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for
122// evaluating in a context that has the given instance key.
123func EvalDataForInstanceKey(key addrs.InstanceKey) InstanceKeyEvalData {
124 // At the moment we don't actually implement for_each, so we only
125 // ever populate CountIndex.
126 // (When we implement for_each later we may need to reorganize this some,
127 // so that we can resolve the ambiguity that an int key may either be
128 // a count.index or an each.key where for_each is over a list.)
129
130 var countIdx cty.Value
131 if intKey, ok := key.(addrs.IntKey); ok {
132 countIdx = cty.NumberIntVal(int64(intKey))
133 }
134
135 return InstanceKeyEvalData{
136 CountIndex: countIdx,
137 }
138}
139
// EvalDataForNoInstanceKey is a value of InstanceKeyData that sets no instance
// key values at all, suitable for use in contexts where no keyed instance
// is relevant.
var EvalDataForNoInstanceKey = InstanceKeyEvalData{}

// Compile-time check: evaluationStateData must implement lang.Data.
var _ lang.Data = (*evaluationStateData)(nil)
147
148func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
149 var diags tfdiags.Diagnostics
150 switch addr.Name {
151
152 case "index":
153 idxVal := d.InstanceKeyData.CountIndex
154 if idxVal == cty.NilVal {
155 diags = diags.Append(&hcl.Diagnostic{
156 Severity: hcl.DiagError,
157 Summary: `Reference to "count" in non-counted context`,
158 Detail: fmt.Sprintf(`The "count" object can be used only in "resource" and "data" blocks, and only when the "count" argument is set.`),
159 Subject: rng.ToHCL().Ptr(),
160 })
161 return cty.UnknownVal(cty.Number), diags
162 }
163 return idxVal, diags
164
165 default:
166 diags = diags.Append(&hcl.Diagnostic{
167 Severity: hcl.DiagError,
168 Summary: `Invalid "count" attribute`,
169 Detail: fmt.Sprintf(`The "count" object does not have an attribute named %q. The only supported attribute is count.index, which is the index of each instance of a resource block that has the "count" argument set.`, addr.Name),
170 Subject: rng.ToHCL().Ptr(),
171 })
172 return cty.DynamicVal, diags
173 }
174}
175
// GetInputVariable returns the value of the named input variable in the
// current module, producing error diagnostics if it is not declared and
// coercing the stored value to the variable's declared type.
func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// First we'll make sure the requested value is declared in configuration,
	// so we can produce a nice message if not.
	moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
	if moduleConfig == nil {
		// should never happen, since we can't be evaluating in a module
		// that wasn't mentioned in configuration.
		panic(fmt.Sprintf("input variable read from %s, which has no configuration", d.ModulePath))
	}

	config := moduleConfig.Module.Variables[addr.Name]
	if config == nil {
		// Offer a "did you mean" hint based on the declared variable names,
		// or explain how to declare the variable if nothing is close.
		var suggestions []string
		for k := range moduleConfig.Module.Variables {
			suggestions = append(suggestions, k)
		}
		suggestion := nameSuggestion(addr.Name, suggestions)
		if suggestion != "" {
			suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
		} else {
			suggestion = fmt.Sprintf(" This variable can be declared with a variable %q {} block.", addr.Name)
		}

		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared input variable`,
			Detail:   fmt.Sprintf(`An input variable with the name %q has not been declared.%s`, addr.Name, suggestion),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	// When the declaration has no type constraint, accept any value type.
	wantType := cty.DynamicPseudoType
	if config.Type != cty.NilType {
		wantType = config.Type
	}

	// VariableValues is mutated concurrently by other goroutines during a
	// walk, so the lock must be held for the remainder of this function.
	d.Evaluator.VariableValuesLock.Lock()
	defer d.Evaluator.VariableValuesLock.Unlock()

	// During the validate walk, input variables are always unknown so
	// that we are validating the configuration for all possible input values
	// rather than for a specific set. Checking against a specific set of
	// input values then happens during the plan walk.
	//
	// This is important because otherwise the validation walk will tend to be
	// overly strict, requiring expressions throughout the configuration to
	// be complicated to accommodate all possible inputs, whereas returning
	// known here allows for simpler patterns like using input values as
	// guards to broadly enable/disable resources, avoid processing things
	// that are disabled, etc. Terraform's static validation leans towards
	// being liberal in what it accepts because the subsequent plan walk has
	// more information available and so can be more conservative.
	if d.Operation == walkValidate {
		return cty.UnknownVal(wantType), diags
	}

	moduleAddrStr := d.ModulePath.String()
	vals := d.Evaluator.VariableValues[moduleAddrStr]
	if vals == nil {
		// No values recorded for this module instance yet.
		return cty.UnknownVal(wantType), diags
	}

	val, isSet := vals[addr.Name]
	if !isSet {
		// Fall back to the declared default, if any; otherwise unknown.
		if config.Default != cty.NilVal {
			return config.Default, diags
		}
		return cty.UnknownVal(wantType), diags
	}

	var err error
	val, err = convert.Convert(val, wantType)
	if err != nil {
		// We should never get here because this problem should've been caught
		// during earlier validation, but we'll do something reasonable anyway.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Incorrect variable type`,
			Detail:   fmt.Sprintf(`The resolved value of variable %q is not appropriate: %s.`, addr.Name, err),
			Subject:  &config.DeclRange,
		})
		// Stub out our return value so that the semantic checker doesn't
		// produce redundant downstream errors.
		val = cty.UnknownVal(wantType)
	}

	return val, diags
}
267
268func (d *evaluationStateData) GetLocalValue(addr addrs.LocalValue, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
269 var diags tfdiags.Diagnostics
270
271 // First we'll make sure the requested value is declared in configuration,
272 // so we can produce a nice message if not.
273 moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
274 if moduleConfig == nil {
275 // should never happen, since we can't be evaluating in a module
276 // that wasn't mentioned in configuration.
277 panic(fmt.Sprintf("local value read from %s, which has no configuration", d.ModulePath))
278 }
279
280 config := moduleConfig.Module.Locals[addr.Name]
281 if config == nil {
282 var suggestions []string
283 for k := range moduleConfig.Module.Locals {
284 suggestions = append(suggestions, k)
285 }
286 suggestion := nameSuggestion(addr.Name, suggestions)
287 if suggestion != "" {
288 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
289 }
290
291 diags = diags.Append(&hcl.Diagnostic{
292 Severity: hcl.DiagError,
293 Summary: `Reference to undeclared local value`,
294 Detail: fmt.Sprintf(`A local value with the name %q has not been declared.%s`, addr.Name, suggestion),
295 Subject: rng.ToHCL().Ptr(),
296 })
297 return cty.DynamicVal, diags
298 }
299
300 val := d.Evaluator.State.LocalValue(addr.Absolute(d.ModulePath))
301 if val == cty.NilVal {
302 // Not evaluated yet?
303 val = cty.DynamicVal
304 }
305
306 return val, diags
307}
308
309func (d *evaluationStateData) GetModuleInstance(addr addrs.ModuleCallInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
310 var diags tfdiags.Diagnostics
311
312 // Output results live in the module that declares them, which is one of
313 // the child module instances of our current module path.
314 moduleAddr := addr.ModuleInstance(d.ModulePath)
315
316 // We'll consult the configuration to see what output names we are
317 // expecting, so we can ensure the resulting object is of the expected
318 // type even if our data is incomplete for some reason.
319 moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr)
320 if moduleConfig == nil {
321 // should never happen, since this should've been caught during
322 // static validation.
323 panic(fmt.Sprintf("output value read from %s, which has no configuration", moduleAddr))
324 }
325 outputConfigs := moduleConfig.Module.Outputs
326
327 vals := map[string]cty.Value{}
328 for n := range outputConfigs {
329 addr := addrs.OutputValue{Name: n}.Absolute(moduleAddr)
330
331 // If a pending change is present in our current changeset then its value
332 // takes priority over what's in state. (It will usually be the same but
333 // will differ if the new value is unknown during planning.)
334 if changeSrc := d.Evaluator.Changes.GetOutputChange(addr); changeSrc != nil {
335 change, err := changeSrc.Decode()
336 if err != nil {
337 // This should happen only if someone has tampered with a plan
338 // file, so we won't bother with a pretty error for it.
339 diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", addr, err))
340 vals[n] = cty.DynamicVal
341 continue
342 }
343 // We care only about the "after" value, which is the value this output
344 // will take on after the plan is applied.
345 vals[n] = change.After
346 } else {
347 os := d.Evaluator.State.OutputValue(addr)
348 if os == nil {
349 // Not evaluated yet?
350 vals[n] = cty.DynamicVal
351 continue
352 }
353 vals[n] = os.Value
354 }
355 }
356 return cty.ObjectVal(vals), diags
357}
358
// GetModuleInstanceOutput returns the value of a single output of the given
// child module instance, preferring a pending planned value over state, and
// producing error diagnostics if the module or output is not declared.
func (d *evaluationStateData) GetModuleInstanceOutput(addr addrs.ModuleCallOutput, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics

	// Output results live in the module that declares them, which is one of
	// the child module instances of our current module path.
	absAddr := addr.AbsOutputValue(d.ModulePath)
	moduleAddr := absAddr.Module

	// First we'll consult the configuration to see if an output of this
	// name is declared at all.
	moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr)
	if moduleConfig == nil {
		// this doesn't happen in normal circumstances due to our validation
		// pass, but it can turn up in some unusual situations, like in the
		// "terraform console" repl where arbitrary expressions can be
		// evaluated.
		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared module`,
			Detail:   fmt.Sprintf(`The configuration contains no %s.`, moduleAddr),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	config := moduleConfig.Module.Outputs[addr.Name]
	if config == nil {
		// Undeclared output: offer a "did you mean" hint if a declared
		// output name is close enough.
		var suggestions []string
		for k := range moduleConfig.Module.Outputs {
			suggestions = append(suggestions, k)
		}
		suggestion := nameSuggestion(addr.Name, suggestions)
		if suggestion != "" {
			suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
		}

		diags = diags.Append(&hcl.Diagnostic{
			Severity: hcl.DiagError,
			Summary:  `Reference to undeclared output value`,
			Detail:   fmt.Sprintf(`An output value with the name %q has not been declared in %s.%s`, addr.Name, moduleDisplayAddr(moduleAddr), suggestion),
			Subject:  rng.ToHCL().Ptr(),
		})
		return cty.DynamicVal, diags
	}

	// If a pending change is present in our current changeset then its value
	// takes priority over what's in state. (It will usually be the same but
	// will differ if the new value is unknown during planning.)
	if changeSrc := d.Evaluator.Changes.GetOutputChange(absAddr); changeSrc != nil {
		change, err := changeSrc.Decode()
		if err != nil {
			// This should happen only if someone has tampered with a plan
			// file, so we won't bother with a pretty error for it.
			diags = diags.Append(fmt.Errorf("planned change for %s could not be decoded: %s", absAddr, err))
			return cty.DynamicVal, diags
		}
		// We care only about the "after" value, which is the value this output
		// will take on after the plan is applied.
		return change.After, diags
	}

	os := d.Evaluator.State.OutputValue(absAddr)
	if os == nil {
		// Not evaluated yet?
		return cty.DynamicVal, diags
	}

	return os.Value, diags
}
428
429func (d *evaluationStateData) GetPathAttr(addr addrs.PathAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
430 var diags tfdiags.Diagnostics
431 switch addr.Name {
432
433 case "cwd":
434 wd, err := os.Getwd()
435 if err != nil {
436 diags = diags.Append(&hcl.Diagnostic{
437 Severity: hcl.DiagError,
438 Summary: `Failed to get working directory`,
439 Detail: fmt.Sprintf(`The value for path.cwd cannot be determined due to a system error: %s`, err),
440 Subject: rng.ToHCL().Ptr(),
441 })
442 return cty.DynamicVal, diags
443 }
444 return cty.StringVal(filepath.ToSlash(wd)), diags
445
446 case "module":
447 moduleConfig := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
448 if moduleConfig == nil {
449 // should never happen, since we can't be evaluating in a module
450 // that wasn't mentioned in configuration.
451 panic(fmt.Sprintf("module.path read from module %s, which has no configuration", d.ModulePath))
452 }
453 sourceDir := moduleConfig.Module.SourceDir
454 return cty.StringVal(filepath.ToSlash(sourceDir)), diags
455
456 case "root":
457 sourceDir := d.Evaluator.Config.Module.SourceDir
458 return cty.StringVal(filepath.ToSlash(sourceDir)), diags
459
460 default:
461 suggestion := nameSuggestion(addr.Name, []string{"cwd", "module", "root"})
462 if suggestion != "" {
463 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
464 }
465 diags = diags.Append(&hcl.Diagnostic{
466 Severity: hcl.DiagError,
467 Summary: `Invalid "path" attribute`,
468 Detail: fmt.Sprintf(`The "path" object does not have an attribute named %q.%s`, addr.Name, suggestion),
469 Subject: rng.ToHCL().Ptr(),
470 })
471 return cty.DynamicVal, diags
472 }
473}
474
475func (d *evaluationStateData) GetResourceInstance(addr addrs.ResourceInstance, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
476 var diags tfdiags.Diagnostics
477
478 // Although we are giving a ResourceInstance address here, if it has
479 // a key of addrs.NoKey then it might actually be a request for all of
480 // the instances of a particular resource. The reference resolver can't
481 // resolve the ambiguity itself, so we must do it in here.
482
483 // First we'll consult the configuration to see if an resource of this
484 // name is declared at all.
485 moduleAddr := d.ModulePath
486 moduleConfig := d.Evaluator.Config.DescendentForInstance(moduleAddr)
487 if moduleConfig == nil {
488 // should never happen, since we can't be evaluating in a module
489 // that wasn't mentioned in configuration.
490 panic(fmt.Sprintf("resource value read from %s, which has no configuration", moduleAddr))
491 }
492
493 config := moduleConfig.Module.ResourceByAddr(addr.ContainingResource())
494 if config == nil {
495 diags = diags.Append(&hcl.Diagnostic{
496 Severity: hcl.DiagError,
497 Summary: `Reference to undeclared resource`,
498 Detail: fmt.Sprintf(`A resource %q %q has not been declared in %s`, addr.Resource.Type, addr.Resource.Name, moduleDisplayAddr(moduleAddr)),
499 Subject: rng.ToHCL().Ptr(),
500 })
501 return cty.DynamicVal, diags
502 }
503
504 // First we'll find the state for the resource as a whole, and decide
505 // from there whether we're going to interpret the given address as a
506 // resource or a resource instance address.
507 rs := d.Evaluator.State.Resource(addr.ContainingResource().Absolute(d.ModulePath))
508
509 if rs == nil {
510 schema := d.getResourceSchema(addr.ContainingResource(), config.ProviderConfigAddr().Absolute(d.ModulePath))
511
512 // If it doesn't exist at all then we can't reliably determine whether
513 // single-instance or whole-resource interpretation was intended, but
514 // we can decide this partially...
515 if addr.Key != addrs.NoKey {
516 // If there's an instance key then the user must be intending
517 // single-instance interpretation, and so we can return a
518 // properly-typed unknown value to help with type checking.
519 return cty.UnknownVal(schema.ImpliedType()), diags
520 }
521
522 // otherwise we must return DynamicVal so that both interpretations
523 // can proceed without generating errors, and we'll deal with this
524 // in a later step where more information is gathered.
525 // (In practice we should only end up here during the validate walk,
526 // since later walks should have at least partial states populated
527 // for all resources in the configuration.)
528 return cty.DynamicVal, diags
529 }
530
531 // Break out early during validation, because resource may not be expanded
532 // yet and indexed references may show up as invalid.
533 if d.Operation == walkValidate {
534 return cty.DynamicVal, diags
535 }
536
537 schema := d.getResourceSchema(addr.ContainingResource(), rs.ProviderConfig)
538
539 // If we are able to automatically convert to the "right" type of instance
540 // key for this each mode then we'll do so, to match with how we generally
541 // treat values elsewhere in the language. This allows code below to
542 // assume that any possible conversions have already been dealt with and
543 // just worry about validation.
544 key := d.coerceInstanceKey(addr.Key, rs.EachMode)
545
546 multi := false
547
548 switch rs.EachMode {
549 case states.NoEach:
550 if key != addrs.NoKey {
551 diags = diags.Append(&hcl.Diagnostic{
552 Severity: hcl.DiagError,
553 Summary: "Invalid resource index",
554 Detail: fmt.Sprintf("Resource %s does not have either \"count\" or \"for_each\" set, so it cannot be indexed.", addr.ContainingResource()),
555 Subject: rng.ToHCL().Ptr(),
556 })
557 return cty.DynamicVal, diags
558 }
559 case states.EachList:
560 multi = key == addrs.NoKey
561 if _, ok := addr.Key.(addrs.IntKey); !multi && !ok {
562 diags = diags.Append(&hcl.Diagnostic{
563 Severity: hcl.DiagError,
564 Summary: "Invalid resource index",
565 Detail: fmt.Sprintf("Resource %s must be indexed with a number value.", addr.ContainingResource()),
566 Subject: rng.ToHCL().Ptr(),
567 })
568 return cty.DynamicVal, diags
569 }
570 case states.EachMap:
571 multi = key == addrs.NoKey
572 if _, ok := addr.Key.(addrs.IntKey); !multi && !ok {
573 diags = diags.Append(&hcl.Diagnostic{
574 Severity: hcl.DiagError,
575 Summary: "Invalid resource index",
576 Detail: fmt.Sprintf("Resource %s must be indexed with a string value.", addr.ContainingResource()),
577 Subject: rng.ToHCL().Ptr(),
578 })
579 return cty.DynamicVal, diags
580 }
581 }
582
583 if !multi {
584 log.Printf("[TRACE] GetResourceInstance: %s is a single instance", addr)
585 is := rs.Instance(key)
586 if is == nil {
587 return cty.UnknownVal(schema.ImpliedType()), diags
588 }
589 return d.getResourceInstanceSingle(addr, rng, is, config, rs.ProviderConfig)
590 }
591
592 log.Printf("[TRACE] GetResourceInstance: %s has multiple keyed instances", addr)
593 return d.getResourceInstancesAll(addr.ContainingResource(), rng, config, rs, rs.ProviderConfig)
594}
595
596func (d *evaluationStateData) getResourceInstanceSingle(addr addrs.ResourceInstance, rng tfdiags.SourceRange, is *states.ResourceInstance, config *configs.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) {
597 var diags tfdiags.Diagnostics
598
599 schema := d.getResourceSchema(addr.ContainingResource(), providerAddr)
600 if schema == nil {
601 // This shouldn't happen, since validation before we get here should've
602 // taken care of it, but we'll show a reasonable error message anyway.
603 diags = diags.Append(&hcl.Diagnostic{
604 Severity: hcl.DiagError,
605 Summary: `Missing resource type schema`,
606 Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr),
607 Subject: rng.ToHCL().Ptr(),
608 })
609 return cty.DynamicVal, diags
610 }
611
612 ty := schema.ImpliedType()
613 if is == nil || is.Current == nil {
614 // Assume we're dealing with an instance that hasn't been created yet.
615 return cty.UnknownVal(ty), diags
616 }
617
618 if is.Current.Status == states.ObjectPlanned {
619 // If there's a pending change for this instance in our plan, we'll prefer
620 // that. This is important because the state can't represent unknown values
621 // and so its data is inaccurate when changes are pending.
622 if change := d.Evaluator.Changes.GetResourceInstanceChange(addr.Absolute(d.ModulePath), states.CurrentGen); change != nil {
623 val, err := change.After.Decode(ty)
624 if err != nil {
625 diags = diags.Append(&hcl.Diagnostic{
626 Severity: hcl.DiagError,
627 Summary: "Invalid resource instance data in plan",
628 Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", addr.Absolute(d.ModulePath), err),
629 Subject: &config.DeclRange,
630 })
631 return cty.UnknownVal(ty), diags
632 }
633 return val, diags
634 } else {
635 // If the object is in planned status then we should not
636 // get here, since we should've found a pending value
637 // in the plan above instead.
638 diags = diags.Append(&hcl.Diagnostic{
639 Severity: hcl.DiagError,
640 Summary: "Missing pending object in plan",
641 Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", addr),
642 Subject: &config.DeclRange,
643 })
644 return cty.UnknownVal(ty), diags
645 }
646 }
647
648 ios, err := is.Current.Decode(ty)
649 if err != nil {
650 // This shouldn't happen, since by the time we get here
651 // we should've upgraded the state data already.
652 diags = diags.Append(&hcl.Diagnostic{
653 Severity: hcl.DiagError,
654 Summary: "Invalid resource instance data in state",
655 Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", addr.Absolute(d.ModulePath), err),
656 Subject: &config.DeclRange,
657 })
658 return cty.UnknownVal(ty), diags
659 }
660
661 return ios.Value, diags
662}
663
664func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng tfdiags.SourceRange, config *configs.Resource, rs *states.Resource, providerAddr addrs.AbsProviderConfig) (cty.Value, tfdiags.Diagnostics) {
665 var diags tfdiags.Diagnostics
666
667 schema := d.getResourceSchema(addr, providerAddr)
668 if schema == nil {
669 // This shouldn't happen, since validation before we get here should've
670 // taken care of it, but we'll show a reasonable error message anyway.
671 diags = diags.Append(&hcl.Diagnostic{
672 Severity: hcl.DiagError,
673 Summary: `Missing resource type schema`,
674 Detail: fmt.Sprintf("No schema is available for %s in %s. This is a bug in Terraform and should be reported.", addr, providerAddr),
675 Subject: rng.ToHCL().Ptr(),
676 })
677 return cty.DynamicVal, diags
678 }
679
680 switch rs.EachMode {
681
682 case states.EachList:
683 // We need to infer the length of our resulting tuple by searching
684 // for the max IntKey in our instances map.
685 length := 0
686 for k := range rs.Instances {
687 if ik, ok := k.(addrs.IntKey); ok {
688 if int(ik) >= length {
689 length = int(ik) + 1
690 }
691 }
692 }
693
694 vals := make([]cty.Value, length)
695 for i := 0; i < length; i++ {
696 ty := schema.ImpliedType()
697 key := addrs.IntKey(i)
698 is, exists := rs.Instances[key]
699 if exists {
700 instAddr := addr.Instance(key).Absolute(d.ModulePath)
701
702 // Prefer pending value in plan if present. See getResourceInstanceSingle
703 // comment for the rationale.
704 if is.Current.Status == states.ObjectPlanned {
705 if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil {
706 val, err := change.After.Decode(ty)
707 if err != nil {
708 diags = diags.Append(&hcl.Diagnostic{
709 Severity: hcl.DiagError,
710 Summary: "Invalid resource instance data in plan",
711 Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err),
712 Subject: &config.DeclRange,
713 })
714 continue
715 }
716 vals[i] = val
717 continue
718 } else {
719 // If the object is in planned status then we should not
720 // get here, since we should've found a pending value
721 // in the plan above instead.
722 diags = diags.Append(&hcl.Diagnostic{
723 Severity: hcl.DiagError,
724 Summary: "Missing pending object in plan",
725 Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr),
726 Subject: &config.DeclRange,
727 })
728 continue
729 }
730 }
731
732 ios, err := is.Current.Decode(ty)
733 if err != nil {
734 // This shouldn't happen, since by the time we get here
735 // we should've upgraded the state data already.
736 diags = diags.Append(&hcl.Diagnostic{
737 Severity: hcl.DiagError,
738 Summary: "Invalid resource instance data in state",
739 Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err),
740 Subject: &config.DeclRange,
741 })
742 continue
743 }
744 vals[i] = ios.Value
745 } else {
746 // There shouldn't normally be "gaps" in our list but we'll
747 // allow it under the assumption that we're in a weird situation
748 // where e.g. someone has run "terraform state mv" to reorder
749 // a list and left a hole behind.
750 vals[i] = cty.UnknownVal(schema.ImpliedType())
751 }
752 }
753
754 // We use a tuple rather than a list here because resource schemas may
755 // include dynamically-typed attributes, which will then cause each
756 // instance to potentially have a different runtime type even though
757 // they all conform to the static schema.
758 return cty.TupleVal(vals), diags
759
760 case states.EachMap:
761 ty := schema.ImpliedType()
762 vals := make(map[string]cty.Value, len(rs.Instances))
763 for k, is := range rs.Instances {
764 if sk, ok := k.(addrs.StringKey); ok {
765 instAddr := addr.Instance(k).Absolute(d.ModulePath)
766
767 // Prefer pending value in plan if present. See getResourceInstanceSingle
768 // comment for the rationale.
771 if is.Current.Status == states.ObjectPlanned {
772 if change := d.Evaluator.Changes.GetResourceInstanceChange(instAddr, states.CurrentGen); change != nil {
773 val, err := change.After.Decode(ty)
774 if err != nil {
775 diags = diags.Append(&hcl.Diagnostic{
776 Severity: hcl.DiagError,
777 Summary: "Invalid resource instance data in plan",
778 Detail: fmt.Sprintf("Instance %s data could not be decoded from the plan: %s.", instAddr, err),
779 Subject: &config.DeclRange,
780 })
781 continue
782 }
783 vals[string(sk)] = val
784 continue
785 } else {
786 // If the object is in planned status then we should not
787 // get here, since we should've found a pending value
788 // in the plan above instead.
789 diags = diags.Append(&hcl.Diagnostic{
790 Severity: hcl.DiagError,
791 Summary: "Missing pending object in plan",
792 Detail: fmt.Sprintf("Instance %s is marked as having a change pending but that change is not recorded in the plan. This is a bug in Terraform; please report it.", instAddr),
793 Subject: &config.DeclRange,
794 })
795 continue
796 }
797 }
798
799 ios, err := is.Current.Decode(ty)
800 if err != nil {
801 // This shouldn't happen, since by the time we get here
802 // we should've upgraded the state data already.
803 diags = diags.Append(&hcl.Diagnostic{
804 Severity: hcl.DiagError,
805 Summary: "Invalid resource instance data in state",
806 Detail: fmt.Sprintf("Instance %s data could not be decoded from the state: %s.", instAddr, err),
807 Subject: &config.DeclRange,
808 })
809 continue
810 }
811 vals[string(sk)] = ios.Value
812 }
813 }
814
815 // We use an object rather than a map here because resource schemas may
816 // include dynamically-typed attributes, which will then cause each
817 // instance to potentially have a different runtime type even though
818 // they all conform to the static schema.
819 return cty.ObjectVal(vals), diags
820
821 default:
822 // Should never happen since caller should deal with other modes
823 panic(fmt.Sprintf("unsupported EachMode %s", rs.EachMode))
824 }
825}
826
827func (d *evaluationStateData) getResourceSchema(addr addrs.Resource, providerAddr addrs.AbsProviderConfig) *configschema.Block {
828 providerType := providerAddr.ProviderConfig.Type
829 schemas := d.Evaluator.Schemas
830 schema, _ := schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type)
831 return schema
832}
833
834// coerceInstanceKey attempts to convert the given key to the type expected
835// for the given EachMode.
836//
837// If the key is already of the correct type or if it cannot be converted then
838// it is returned verbatim. If conversion is required and possible, the
839// converted value is returned. Callers should not try to determine if
840// conversion was possible, should instead just check if the result is of
841// the expected type.
842func (d *evaluationStateData) coerceInstanceKey(key addrs.InstanceKey, mode states.EachMode) addrs.InstanceKey {
843 if key == addrs.NoKey {
844 // An absent key can't be converted
845 return key
846 }
847
848 switch mode {
849 case states.NoEach:
850 // No conversions possible at all
851 return key
852 case states.EachMap:
853 if intKey, isInt := key.(addrs.IntKey); isInt {
854 return addrs.StringKey(strconv.Itoa(int(intKey)))
855 }
856 return key
857 case states.EachList:
858 if strKey, isStr := key.(addrs.StringKey); isStr {
859 i, err := strconv.Atoi(string(strKey))
860 if err != nil {
861 return key
862 }
863 return addrs.IntKey(i)
864 }
865 return key
866 default:
867 return key
868 }
869}
870
871func (d *evaluationStateData) GetTerraformAttr(addr addrs.TerraformAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
872 var diags tfdiags.Diagnostics
873 switch addr.Name {
874
875 case "workspace":
876 workspaceName := d.Evaluator.Meta.Env
877 return cty.StringVal(workspaceName), diags
878
879 case "env":
880 // Prior to Terraform 0.12 there was an attribute "env", which was
881 // an alias name for "workspace". This was deprecated and is now
882 // removed.
883 diags = diags.Append(&hcl.Diagnostic{
884 Severity: hcl.DiagError,
885 Summary: `Invalid "terraform" attribute`,
886 Detail: `The terraform.env attribute was deprecated in v0.10 and removed in v0.12. The "state environment" concept was rename to "workspace" in v0.12, and so the workspace name can now be accessed using the terraform.workspace attribute.`,
887 Subject: rng.ToHCL().Ptr(),
888 })
889 return cty.DynamicVal, diags
890
891 default:
892 diags = diags.Append(&hcl.Diagnostic{
893 Severity: hcl.DiagError,
894 Summary: `Invalid "terraform" attribute`,
895 Detail: fmt.Sprintf(`The "terraform" object does not have an attribute named %q. The only supported attribute is terraform.workspace, the name of the currently-selected workspace.`, addr.Name),
896 Subject: rng.ToHCL().Ptr(),
897 })
898 return cty.DynamicVal, diags
899 }
900}
901
902// nameSuggestion tries to find a name from the given slice of suggested names
903// that is close to the given name and returns it if found. If no suggestion
904// is close enough, returns the empty string.
905//
906// The suggestions are tried in order, so earlier suggestions take precedence
907// if the given string is similar to two or more suggestions.
908//
909// This function is intended to be used with a relatively-small number of
910// suggestions. It's not optimized for hundreds or thousands of them.
911func nameSuggestion(given string, suggestions []string) string {
912 for _, suggestion := range suggestions {
913 dist := levenshtein.Distance(given, suggestion, nil)
914 if dist < 3 { // threshold determined experimentally
915 return suggestion
916 }
917 }
918 return ""
919}
920
921// moduleDisplayAddr returns a string describing the given module instance
922// address that is appropriate for returning to users in situations where the
923// root module is possible. Specifically, it returns "the root module" if the
924// root module instance is given, or a string representation of the module
925// address otherwise.
926func moduleDisplayAddr(addr addrs.ModuleInstance) string {
927 switch {
928 case addr.IsRoot():
929 return "the root module"
930 default:
931 return addr.String()
932 }
933}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go
new file mode 100644
index 0000000..4255102
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/evaluate_valid.go
@@ -0,0 +1,299 @@
1package terraform
2
3import (
4 "fmt"
5 "sort"
6
7 "github.com/hashicorp/hcl2/hcl"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/helper/didyoumean"
12 "github.com/hashicorp/terraform/tfdiags"
13)
14
15// StaticValidateReferences checks the given references against schemas and
16// other statically-checkable rules, producing error diagnostics if any
17// problems are found.
18//
19// If this method returns errors for a particular reference then evaluating
20// that reference is likely to generate a very similar error, so callers should
21// not run this method and then also evaluate the source expression(s) and
22// merge the two sets of diagnostics together, since this will result in
23// confusing redundant errors.
24//
25// This method can find more errors than can be found by evaluating an
26// expression with a partially-populated scope, since it checks the referenced
27// names directly against the schema rather than relying on evaluation errors.
28//
29// The result may include warning diagnostics if, for example, deprecated
30// features are referenced.
31func (d *evaluationStateData) StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics {
32 var diags tfdiags.Diagnostics
33 for _, ref := range refs {
34 moreDiags := d.staticValidateReference(ref, self)
35 diags = diags.Append(moreDiags)
36 }
37 return diags
38}
39
40func (d *evaluationStateData) staticValidateReference(ref *addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics {
41 modCfg := d.Evaluator.Config.DescendentForInstance(d.ModulePath)
42 if modCfg == nil {
43 // This is a bug in the caller rather than a problem with the
44 // reference, but rather than crashing out here in an unhelpful way
45 // we'll just ignore it and trust a different layer to catch it.
46 return nil
47 }
48
49 if ref.Subject == addrs.Self {
50 // The "self" address is a special alias for the address given as
51 // our self parameter here, if present.
52 if self == nil {
53 var diags tfdiags.Diagnostics
54 diags = diags.Append(&hcl.Diagnostic{
55 Severity: hcl.DiagError,
56 Summary: `Invalid "self" reference`,
57 // This detail message mentions some current practice that
58 // this codepath doesn't really "know about". If the "self"
59 // object starts being supported in more contexts later then
60 // we'll need to adjust this message.
61 Detail: `The "self" object is not available in this context. This object can be used only in resource provisioner and connection blocks.`,
62 Subject: ref.SourceRange.ToHCL().Ptr(),
63 })
64 return diags
65 }
66
67 synthRef := *ref // shallow copy
68 synthRef.Subject = self
69 ref = &synthRef
70 }
71
72 switch addr := ref.Subject.(type) {
73
74 // For static validation we validate both resource and resource instance references the same way.
75 // We mostly disregard the index, though we do some simple validation of
76 // its _presence_ in staticValidateSingleResourceReference and
77 // staticValidateMultiResourceReference respectively.
78 case addrs.Resource:
79 var diags tfdiags.Diagnostics
80 diags = diags.Append(d.staticValidateSingleResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange))
81 diags = diags.Append(d.staticValidateResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange))
82 return diags
83 case addrs.ResourceInstance:
84 var diags tfdiags.Diagnostics
85 diags = diags.Append(d.staticValidateMultiResourceReference(modCfg, addr, ref.Remaining, ref.SourceRange))
86 diags = diags.Append(d.staticValidateResourceReference(modCfg, addr.ContainingResource(), ref.Remaining, ref.SourceRange))
87 return diags
88
89 // We also handle all module call references the same way, disregarding index.
90 case addrs.ModuleCall:
91 return d.staticValidateModuleCallReference(modCfg, addr, ref.Remaining, ref.SourceRange)
92 case addrs.ModuleCallInstance:
93 return d.staticValidateModuleCallReference(modCfg, addr.Call, ref.Remaining, ref.SourceRange)
94 case addrs.ModuleCallOutput:
95 // This one is a funny one because we will take the output name referenced
96 // and use it to fake up a "remaining" that would make sense for the
97 // module call itself, rather than for the specific output, and then
98 // we can just re-use our static module call validation logic.
99 remain := make(hcl.Traversal, len(ref.Remaining)+1)
100 copy(remain[1:], ref.Remaining)
101 remain[0] = hcl.TraverseAttr{
102 Name: addr.Name,
103
104 // Using the whole reference as the source range here doesn't exactly
105 // match how HCL would normally generate an attribute traversal,
106 // but is close enough for our purposes.
107 SrcRange: ref.SourceRange.ToHCL(),
108 }
109 return d.staticValidateModuleCallReference(modCfg, addr.Call.Call, remain, ref.SourceRange)
110
111 default:
112 // Anything else we'll just permit through without any static validation
113 // and let it be caught during dynamic evaluation, in evaluate.go .
114 return nil
115 }
116}
117
118func (d *evaluationStateData) staticValidateSingleResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics {
119 // If we have at least one step in "remain" and this resource has
120 // "count" set then we know for sure this in invalid because we have
121 // something like:
122 // aws_instance.foo.bar
123 // ...when we really need
124 // aws_instance.foo[count.index].bar
125
126 // It is _not_ safe to do this check when remain is empty, because that
127 // would also match aws_instance.foo[count.index].bar due to `count.index`
128 // not being statically-resolvable as part of a reference, and match
129 // direct references to the whole aws_instance.foo tuple.
130 if len(remain) == 0 {
131 return nil
132 }
133
134 var diags tfdiags.Diagnostics
135
136 cfg := modCfg.Module.ResourceByAddr(addr)
137 if cfg == nil {
138 // We'll just bail out here and catch this in our subsequent call to
139 // staticValidateResourceReference, then.
140 return diags
141 }
142
143 if cfg.Count != nil {
144 diags = diags.Append(&hcl.Diagnostic{
145 Severity: hcl.DiagError,
146 Summary: `Missing resource instance key`,
147 Detail: fmt.Sprintf("Because %s has \"count\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[count.index]", addr, addr),
148 Subject: rng.ToHCL().Ptr(),
149 })
150 }
151 if cfg.ForEach != nil {
152 diags = diags.Append(&hcl.Diagnostic{
153 Severity: hcl.DiagError,
154 Summary: `Missing resource instance key`,
155 Detail: fmt.Sprintf("Because %s has \"for_each\" set, its attributes must be accessed on specific instances.\n\nFor example, to correlate with indices of a referring resource, use:\n %s[each.key]", addr, addr),
156 Subject: rng.ToHCL().Ptr(),
157 })
158 }
159
160 return diags
161}
162
163func (d *evaluationStateData) staticValidateMultiResourceReference(modCfg *configs.Config, addr addrs.ResourceInstance, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics {
164 var diags tfdiags.Diagnostics
165
166 cfg := modCfg.Module.ResourceByAddr(addr.ContainingResource())
167 if cfg == nil {
168 // We'll just bail out here and catch this in our subsequent call to
169 // staticValidateResourceReference, then.
170 return diags
171 }
172
173 if addr.Key == addrs.NoKey {
174 // This is a different path into staticValidateSingleResourceReference
175 return d.staticValidateSingleResourceReference(modCfg, addr.ContainingResource(), remain, rng)
176 } else {
177 if cfg.Count == nil && cfg.ForEach == nil {
178 diags = diags.Append(&hcl.Diagnostic{
179 Severity: hcl.DiagError,
180 Summary: `Unexpected resource instance key`,
181 Detail: fmt.Sprintf(`Because %s does not have "count" or "for_each" set, references to it must not include an index key. Remove the bracketed index to refer to the single instance of this resource.`, addr.ContainingResource()),
182 Subject: rng.ToHCL().Ptr(),
183 })
184 }
185 }
186
187 return diags
188}
189
190func (d *evaluationStateData) staticValidateResourceReference(modCfg *configs.Config, addr addrs.Resource, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics {
191 var diags tfdiags.Diagnostics
192
193 var modeAdjective string
194 switch addr.Mode {
195 case addrs.ManagedResourceMode:
196 modeAdjective = "managed"
197 case addrs.DataResourceMode:
198 modeAdjective = "data"
199 default:
200 // should never happen
201 modeAdjective = "<invalid-mode>"
202 }
203
204 cfg := modCfg.Module.ResourceByAddr(addr)
205 if cfg == nil {
206 diags = diags.Append(&hcl.Diagnostic{
207 Severity: hcl.DiagError,
208 Summary: `Reference to undeclared resource`,
209 Detail: fmt.Sprintf(`A %s resource %q %q has not been declared in %s.`, modeAdjective, addr.Type, addr.Name, moduleConfigDisplayAddr(modCfg.Path)),
210 Subject: rng.ToHCL().Ptr(),
211 })
212 return diags
213 }
214
215 // Normally accessing this directly is wrong because it doesn't take into
216 // account provider inheritance, etc but it's okay here because we're only
217 // paying attention to the type anyway.
218 providerType := cfg.ProviderConfigAddr().Type
219 schema, _ := d.Evaluator.Schemas.ResourceTypeConfig(providerType, addr.Mode, addr.Type)
220
221 if schema == nil {
222 // Prior validation should've taken care of a resource block with an
223 // unsupported type, so we should never get here but we'll handle it
224 // here anyway for robustness.
225 diags = diags.Append(&hcl.Diagnostic{
226 Severity: hcl.DiagError,
227 Summary: `Invalid resource type`,
228 Detail: fmt.Sprintf(`A %s resource type %q is not supported by provider %q.`, modeAdjective, addr.Type, providerType),
229 Subject: rng.ToHCL().Ptr(),
230 })
231 return diags
232 }
233
234 // As a special case we'll detect attempts to access an attribute called
235 // "count" and produce a special error for it, since versions of Terraform
236 // prior to v0.12 offered this as a weird special case that we can no
237 // longer support.
238 if len(remain) > 0 {
239 if step, ok := remain[0].(hcl.TraverseAttr); ok && step.Name == "count" {
240 diags = diags.Append(&hcl.Diagnostic{
241 Severity: hcl.DiagError,
242 Summary: `Invalid resource count attribute`,
243 Detail: fmt.Sprintf(`The special "count" attribute is no longer supported after Terraform v0.12. Instead, use length(%s) to count resource instances.`, addr),
244 Subject: rng.ToHCL().Ptr(),
245 })
246 return diags
247 }
248 }
249
250 // If we got this far then we'll try to validate the remaining traversal
251 // steps against our schema.
252 moreDiags := schema.StaticValidateTraversal(remain)
253 diags = diags.Append(moreDiags)
254
255 return diags
256}
257
258func (d *evaluationStateData) staticValidateModuleCallReference(modCfg *configs.Config, addr addrs.ModuleCall, remain hcl.Traversal, rng tfdiags.SourceRange) tfdiags.Diagnostics {
259 var diags tfdiags.Diagnostics
260
261 // For now, our focus here is just in testing that the referenced module
262 // call exists. All other validation is deferred until evaluation time.
263 _, exists := modCfg.Module.ModuleCalls[addr.Name]
264 if !exists {
265 var suggestions []string
266 for name := range modCfg.Module.ModuleCalls {
267 suggestions = append(suggestions, name)
268 }
269 sort.Strings(suggestions)
270 suggestion := didyoumean.NameSuggestion(addr.Name, suggestions)
271 if suggestion != "" {
272 suggestion = fmt.Sprintf(" Did you mean %q?", suggestion)
273 }
274
275 diags = diags.Append(&hcl.Diagnostic{
276 Severity: hcl.DiagError,
277 Summary: `Reference to undeclared module`,
278 Detail: fmt.Sprintf(`No module call named %q is declared in %s.%s`, addr.Name, moduleConfigDisplayAddr(modCfg.Path), suggestion),
279 Subject: rng.ToHCL().Ptr(),
280 })
281 return diags
282 }
283
284 return diags
285}
286
287// moduleConfigDisplayAddr returns a string describing the given module
288// address that is appropriate for returning to users in situations where the
289// root module is possible. Specifically, it returns "the root module" if the
290// root module instance is given, or a string representation of the module
291// address otherwise.
292func moduleConfigDisplayAddr(addr addrs.Module) string {
293 switch {
294 case addr.IsRoot():
295 return "the root module"
296 default:
297 return addr.String()
298 }
299}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
index 735ec4e..58d45a7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph.go
@@ -3,17 +3,13 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "runtime/debug"
7 "strings"
8 6
9 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/tfdiags"
10)
11 8
12// RootModuleName is the name given to the root module implicitly. 9 "github.com/hashicorp/terraform/addrs"
13const RootModuleName = "root"
14 10
15// RootModulePath is the path for the root module. 11 "github.com/hashicorp/terraform/dag"
16var RootModulePath = []string{RootModuleName} 12)
17 13
18// Graph represents the graph that Terraform uses to represent resources 14// Graph represents the graph that Terraform uses to represent resources
19// and their dependencies. 15// and their dependencies.
@@ -23,9 +19,7 @@ type Graph struct {
23 dag.AcyclicGraph 19 dag.AcyclicGraph
24 20
25 // Path is the path in the module tree that this Graph represents. 21 // Path is the path in the module tree that this Graph represents.
26 // The root is represented by a single element list containing 22 Path addrs.ModuleInstance
27 // RootModuleName
28 Path []string
29 23
30 // debugName is a name for reference in the debug output. This is usually 24 // debugName is a name for reference in the debug output. This is usually
31 // to indicate what topmost builder was, and if this graph is a shadow or 25 // to indicate what topmost builder was, and if this graph is a shadow or
@@ -40,71 +34,42 @@ func (g *Graph) DirectedGraph() dag.Grapher {
40// Walk walks the graph with the given walker for callbacks. The graph 34// Walk walks the graph with the given walker for callbacks. The graph
41// will be walked with full parallelism, so the walker should expect 35// will be walked with full parallelism, so the walker should expect
42// to be called in concurrently. 36// to be called in concurrently.
43func (g *Graph) Walk(walker GraphWalker) error { 37func (g *Graph) Walk(walker GraphWalker) tfdiags.Diagnostics {
44 return g.walk(walker) 38 return g.walk(walker)
45} 39}
46 40
47func (g *Graph) walk(walker GraphWalker) error { 41func (g *Graph) walk(walker GraphWalker) tfdiags.Diagnostics {
48 // The callbacks for enter/exiting a graph 42 // The callbacks for enter/exiting a graph
49 ctx := walker.EnterPath(g.Path) 43 ctx := walker.EnterPath(g.Path)
50 defer walker.ExitPath(g.Path) 44 defer walker.ExitPath(g.Path)
51 45
52 // Get the path for logs 46 // Get the path for logs
53 path := strings.Join(ctx.Path(), ".") 47 path := ctx.Path().String()
54
55 // Determine if our walker is a panic wrapper
56 panicwrap, ok := walker.(GraphWalkerPanicwrapper)
57 if !ok {
58 panicwrap = nil // just to be sure
59 }
60 48
61 debugName := "walk-graph.json" 49 debugName := "walk-graph.json"
62 if g.debugName != "" { 50 if g.debugName != "" {
63 debugName = g.debugName + "-" + debugName 51 debugName = g.debugName + "-" + debugName
64 } 52 }
65 53
66 debugBuf := dbug.NewFileWriter(debugName)
67 g.SetDebugWriter(debugBuf)
68 defer debugBuf.Close()
69
70 // Walk the graph. 54 // Walk the graph.
71 var walkFn dag.WalkFunc 55 var walkFn dag.WalkFunc
72 walkFn = func(v dag.Vertex) (rerr error) { 56 walkFn = func(v dag.Vertex) (diags tfdiags.Diagnostics) {
73 log.Printf("[TRACE] vertex '%s.%s': walking", path, dag.VertexName(v)) 57 log.Printf("[TRACE] vertex %q: starting visit (%T)", dag.VertexName(v), v)
74 g.DebugVisitInfo(v, g.debugName) 58 g.DebugVisitInfo(v, g.debugName)
75 59
76 // If we have a panic wrap GraphWalker and a panic occurs, recover
77 // and call that. We ensure the return value is an error, however,
78 // so that future nodes are not called.
79 defer func() { 60 defer func() {
80 // If no panicwrap, do nothing 61 log.Printf("[TRACE] vertex %q: visit complete", dag.VertexName(v))
81 if panicwrap == nil {
82 return
83 }
84
85 // If no panic, do nothing
86 err := recover()
87 if err == nil {
88 return
89 }
90
91 // Modify the return value to show the error
92 rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
93 dag.VertexName(v), err, debug.Stack())
94
95 // Call the panic wrapper
96 panicwrap.Panic(v, err)
97 }() 62 }()
98 63
99 walker.EnterVertex(v) 64 walker.EnterVertex(v)
100 defer walker.ExitVertex(v, rerr) 65 defer walker.ExitVertex(v, diags)
101 66
102 // vertexCtx is the context that we use when evaluating. This 67 // vertexCtx is the context that we use when evaluating. This
103 // is normally the context of our graph but can be overridden 68 // is normally the context of our graph but can be overridden
104 // with a GraphNodeSubPath impl. 69 // with a GraphNodeSubPath impl.
105 vertexCtx := ctx 70 vertexCtx := ctx
106 if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 { 71 if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
107 vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path())) 72 vertexCtx = walker.EnterPath(pn.Path())
108 defer walker.ExitPath(pn.Path()) 73 defer walker.ExitPath(pn.Path())
109 } 74 }
110 75
@@ -112,60 +77,64 @@ func (g *Graph) walk(walker GraphWalker) error {
112 if ev, ok := v.(GraphNodeEvalable); ok { 77 if ev, ok := v.(GraphNodeEvalable); ok {
113 tree := ev.EvalTree() 78 tree := ev.EvalTree()
114 if tree == nil { 79 if tree == nil {
115 panic(fmt.Sprintf( 80 panic(fmt.Sprintf("%q (%T): nil eval tree", dag.VertexName(v), v))
116 "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
117 } 81 }
118 82
119 // Allow the walker to change our tree if needed. Eval, 83 // Allow the walker to change our tree if needed. Eval,
120 // then callback with the output. 84 // then callback with the output.
121 log.Printf("[TRACE] vertex '%s.%s': evaluating", path, dag.VertexName(v)) 85 log.Printf("[TRACE] vertex %q: evaluating", dag.VertexName(v))
122 86
123 g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path)) 87 g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
124 88
125 tree = walker.EnterEvalTree(v, tree) 89 tree = walker.EnterEvalTree(v, tree)
126 output, err := Eval(tree, vertexCtx) 90 output, err := Eval(tree, vertexCtx)
127 if rerr = walker.ExitEvalTree(v, output, err); rerr != nil { 91 diags = diags.Append(walker.ExitEvalTree(v, output, err))
92 if diags.HasErrors() {
128 return 93 return
129 } 94 }
130 } 95 }
131 96
132 // If the node is dynamically expanded, then expand it 97 // If the node is dynamically expanded, then expand it
133 if ev, ok := v.(GraphNodeDynamicExpandable); ok { 98 if ev, ok := v.(GraphNodeDynamicExpandable); ok {
134 log.Printf( 99 log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v))
135 "[TRACE] vertex '%s.%s': expanding/walking dynamic subgraph",
136 path,
137 dag.VertexName(v))
138 100
139 g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path)) 101 g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
140 102
141 g, err := ev.DynamicExpand(vertexCtx) 103 g, err := ev.DynamicExpand(vertexCtx)
142 if err != nil { 104 if err != nil {
143 rerr = err 105 diags = diags.Append(err)
144 return 106 return
145 } 107 }
146 if g != nil { 108 if g != nil {
147 // Walk the subgraph 109 // Walk the subgraph
148 if rerr = g.walk(walker); rerr != nil { 110 log.Printf("[TRACE] vertex %q: entering dynamic subgraph", dag.VertexName(v))
111 subDiags := g.walk(walker)
112 diags = diags.Append(subDiags)
113 if subDiags.HasErrors() {
114 log.Printf("[TRACE] vertex %q: dynamic subgraph encountered errors", dag.VertexName(v))
149 return 115 return
150 } 116 }
117 log.Printf("[TRACE] vertex %q: dynamic subgraph completed successfully", dag.VertexName(v))
118 } else {
119 log.Printf("[TRACE] vertex %q: produced no dynamic subgraph", dag.VertexName(v))
151 } 120 }
152 } 121 }
153 122
154 // If the node has a subgraph, then walk the subgraph 123 // If the node has a subgraph, then walk the subgraph
155 if sn, ok := v.(GraphNodeSubgraph); ok { 124 if sn, ok := v.(GraphNodeSubgraph); ok {
156 log.Printf( 125 log.Printf("[TRACE] vertex %q: entering static subgraph", dag.VertexName(v))
157 "[TRACE] vertex '%s.%s': walking subgraph",
158 path,
159 dag.VertexName(v))
160 126
161 g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path)) 127 g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
162 128
163 if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil { 129 subDiags := sn.Subgraph().(*Graph).walk(walker)
130 if subDiags.HasErrors() {
131 log.Printf("[TRACE] vertex %q: static subgraph encountered errors", dag.VertexName(v))
164 return 132 return
165 } 133 }
134 log.Printf("[TRACE] vertex %q: static subgraph completed successfully", dag.VertexName(v))
166 } 135 }
167 136
168 return nil 137 return
169 } 138 }
170 139
171 return g.AcyclicGraph.Walk(walkFn) 140 return g.AcyclicGraph.Walk(walkFn)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
index 6374bb9..66b21f3 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
@@ -4,6 +4,10 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "strings" 6 "strings"
7
8 "github.com/hashicorp/terraform/tfdiags"
9
10 "github.com/hashicorp/terraform/addrs"
7) 11)
8 12
9// GraphBuilder is an interface that can be implemented and used with 13// GraphBuilder is an interface that can be implemented and used with
@@ -12,7 +16,7 @@ type GraphBuilder interface {
12 // Build builds the graph for the given module path. It is up to 16 // Build builds the graph for the given module path. It is up to
13 // the interface implementation whether this build should expand 17 // the interface implementation whether this build should expand
14 // the graph or not. 18 // the graph or not.
15 Build(path []string) (*Graph, error) 19 Build(addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics)
16} 20}
17 21
18// BasicGraphBuilder is a GraphBuilder that builds a graph out of a 22// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
@@ -25,21 +29,16 @@ type BasicGraphBuilder struct {
25 Name string 29 Name string
26} 30}
27 31
28func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) { 32func (b *BasicGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
33 var diags tfdiags.Diagnostics
29 g := &Graph{Path: path} 34 g := &Graph{Path: path}
30 35
31 debugName := "graph.json" 36 var lastStepStr string
32 if b.Name != "" {
33 debugName = b.Name + "-" + debugName
34 }
35 debugBuf := dbug.NewFileWriter(debugName)
36 g.SetDebugWriter(debugBuf)
37 defer debugBuf.Close()
38
39 for _, step := range b.Steps { 37 for _, step := range b.Steps {
40 if step == nil { 38 if step == nil {
41 continue 39 continue
42 } 40 }
41 log.Printf("[TRACE] Executing graph transform %T", step)
43 42
44 stepName := fmt.Sprintf("%T", step) 43 stepName := fmt.Sprintf("%T", step)
45 dot := strings.LastIndex(stepName, ".") 44 dot := strings.LastIndex(stepName, ".")
@@ -56,12 +55,20 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
56 } 55 }
57 debugOp.End(errMsg) 56 debugOp.End(errMsg)
58 57
59 log.Printf( 58 if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr {
60 "[TRACE] Graph after step %T:\n\n%s", 59 log.Printf("[TRACE] Completed graph transform %T with new graph:\n%s------", step, thisStepStr)
61 step, g.StringWithNodeTypes()) 60 lastStepStr = thisStepStr
61 } else {
62 log.Printf("[TRACE] Completed graph transform %T (no changes)", step)
63 }
62 64
63 if err != nil { 65 if err != nil {
64 return g, err 66 if nf, isNF := err.(tfdiags.NonFatalError); isNF {
67 diags = diags.Append(nf.Diagnostics)
68 } else {
69 diags = diags.Append(err)
70 return g, diags
71 }
65 } 72 }
66 } 73 }
67 74
@@ -69,9 +76,10 @@ func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
69 if b.Validate { 76 if b.Validate {
70 if err := g.Validate(); err != nil { 77 if err := g.Validate(); err != nil {
71 log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String()) 78 log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
72 return nil, err 79 diags = diags.Append(err)
80 return nil, diags
73 } 81 }
74 } 82 }
75 83
76 return g, nil 84 return g, diags
77} 85}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
index 0c2b233..7182dd7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
@@ -1,8 +1,12 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/plans"
8 "github.com/hashicorp/terraform/states"
9 "github.com/hashicorp/terraform/tfdiags"
6) 10)
7 11
8// ApplyGraphBuilder implements GraphBuilder and is responsible for building 12// ApplyGraphBuilder implements GraphBuilder and is responsible for building
@@ -13,26 +17,28 @@ import (
13// that aren't explicitly in the diff. There are other scenarios where the 17// that aren't explicitly in the diff. There are other scenarios where the
14// diff can be deviated, so this is just one layer of protection. 18// diff can be deviated, so this is just one layer of protection.
15type ApplyGraphBuilder struct { 19type ApplyGraphBuilder struct {
16 // Module is the root module for the graph to build. 20 // Config is the configuration tree that the diff was built from.
17 Module *module.Tree 21 Config *configs.Config
18 22
19 // Diff is the diff to apply. 23 // Changes describes the changes that we need apply.
20 Diff *Diff 24 Changes *plans.Changes
21 25
22 // State is the current state 26 // State is the current state
23 State *State 27 State *states.State
24 28
25 // Providers is the list of providers supported. 29 // Components is a factory for the plug-in components (providers and
26 Providers []string 30 // provisioners) available for use.
31 Components contextComponentFactory
27 32
28 // Provisioners is the list of provisioners supported. 33 // Schemas is the repository of schemas we will draw from to analyse
29 Provisioners []string 34 // the configuration.
35 Schemas *Schemas
30 36
31 // Targets are resources to target. This is only required to make sure 37 // Targets are resources to target. This is only required to make sure
32 // unnecessary outputs aren't included in the apply graph. The plan 38 // unnecessary outputs aren't included in the apply graph. The plan
33 // builder successfully handles targeting resources. In the future, 39 // builder successfully handles targeting resources. In the future,
34 // outputs should go into the diff so that this is unnecessary. 40 // outputs should go into the diff so that this is unnecessary.
35 Targets []string 41 Targets []addrs.Targetable
36 42
37 // DisableReduce, if true, will not reduce the graph. Great for testing. 43 // DisableReduce, if true, will not reduce the graph. Great for testing.
38 DisableReduce bool 44 DisableReduce bool
@@ -45,7 +51,7 @@ type ApplyGraphBuilder struct {
45} 51}
46 52
47// See GraphBuilder 53// See GraphBuilder
48func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) { 54func (b *ApplyGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
49 return (&BasicGraphBuilder{ 55 return (&BasicGraphBuilder{
50 Steps: b.Steps(), 56 Steps: b.Steps(),
51 Validate: b.Validate, 57 Validate: b.Validate,
@@ -68,53 +74,99 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
68 } 74 }
69 } 75 }
70 76
77 concreteOrphanResource := func(a *NodeAbstractResource) dag.Vertex {
78 return &NodeDestroyResource{
79 NodeAbstractResource: a,
80 }
81 }
82
83 concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
84 return &NodeApplyableResourceInstance{
85 NodeAbstractResourceInstance: a,
86 }
87 }
88
71 steps := []GraphTransformer{ 89 steps := []GraphTransformer{
72 // Creates all the nodes represented in the diff. 90 // Creates all the resources represented in the config. During apply,
73 &DiffTransformer{ 91 // we use this just to ensure that the whole-resource metadata is
92 // updated to reflect things such as whether the count argument is
93 // set in config, or which provider configuration manages each resource.
94 &ConfigTransformer{
74 Concrete: concreteResource, 95 Concrete: concreteResource,
96 Config: b.Config,
97 },
75 98
76 Diff: b.Diff, 99 // Creates all the resource instances represented in the diff, along
77 Module: b.Module, 100 // with dependency edges against the whole-resource nodes added by
78 State: b.State, 101 // ConfigTransformer above.
102 &DiffTransformer{
103 Concrete: concreteResourceInstance,
104 State: b.State,
105 Changes: b.Changes,
106 },
107
108 // Creates extra cleanup nodes for any entire resources that are
109 // no longer present in config, so we can make sure we clean up the
110 // leftover empty resource states after the instances have been
111 // destroyed.
112 // (We don't track this particular type of change in the plan because
113 // it's just cleanup of our own state object, and so doesn't effect
114 // any real remote objects or consumable outputs.)
115 &OrphanResourceTransformer{
116 Concrete: concreteOrphanResource,
117 Config: b.Config,
118 State: b.State,
79 }, 119 },
80 120
81 // Create orphan output nodes 121 // Create orphan output nodes
82 &OrphanOutputTransformer{Module: b.Module, State: b.State}, 122 &OrphanOutputTransformer{Config: b.Config, State: b.State},
83 123
84 // Attach the configuration to any resources 124 // Attach the configuration to any resources
85 &AttachResourceConfigTransformer{Module: b.Module}, 125 &AttachResourceConfigTransformer{Config: b.Config},
86 126
87 // Attach the state 127 // Attach the state
88 &AttachStateTransformer{State: b.State}, 128 &AttachStateTransformer{State: b.State},
89 129
90 // add providers
91 TransformProviders(b.Providers, concreteProvider, b.Module),
92
93 // Destruction ordering 130 // Destruction ordering
94 &DestroyEdgeTransformer{Module: b.Module, State: b.State}, 131 &DestroyEdgeTransformer{
132 Config: b.Config,
133 State: b.State,
134 Schemas: b.Schemas,
135 },
95 GraphTransformIf( 136 GraphTransformIf(
96 func() bool { return !b.Destroy }, 137 func() bool { return !b.Destroy },
97 &CBDEdgeTransformer{Module: b.Module, State: b.State}, 138 &CBDEdgeTransformer{
139 Config: b.Config,
140 State: b.State,
141 Schemas: b.Schemas,
142 },
98 ), 143 ),
99 144
100 // Provisioner-related transformations 145 // Provisioner-related transformations
101 &MissingProvisionerTransformer{Provisioners: b.Provisioners}, 146 &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()},
102 &ProvisionerTransformer{}, 147 &ProvisionerTransformer{},
103 148
104 // Add root variables 149 // Add root variables
105 &RootVariableTransformer{Module: b.Module}, 150 &RootVariableTransformer{Config: b.Config},
106 151
107 // Add the local values 152 // Add the local values
108 &LocalTransformer{Module: b.Module}, 153 &LocalTransformer{Config: b.Config},
109 154
110 // Add the outputs 155 // Add the outputs
111 &OutputTransformer{Module: b.Module}, 156 &OutputTransformer{Config: b.Config},
112 157
113 // Add module variables 158 // Add module variables
114 &ModuleVariableTransformer{Module: b.Module}, 159 &ModuleVariableTransformer{Config: b.Config},
160
161 // add providers
162 TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
115 163
116 // Remove modules no longer present in the config 164 // Remove modules no longer present in the config
117 &RemovedModuleTransformer{Module: b.Module, State: b.State}, 165 &RemovedModuleTransformer{Config: b.Config, State: b.State},
166
167 // Must attach schemas before ReferenceTransformer so that we can
168 // analyze the configuration to find references.
169 &AttachSchemaTransformer{Schemas: b.Schemas},
118 170
119 // Connect references so ordering is correct 171 // Connect references so ordering is correct
120 &ReferenceTransformer{}, 172 &ReferenceTransformer{},
@@ -135,7 +187,9 @@ func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
135 ), 187 ),
136 188
137 // Add the node to fix the state count boundaries 189 // Add the node to fix the state count boundaries
138 &CountBoundaryTransformer{}, 190 &CountBoundaryTransformer{
191 Config: b.Config,
192 },
139 193
140 // Target 194 // Target
141 &TargetsTransformer{Targets: b.Targets}, 195 &TargetsTransformer{Targets: b.Targets},
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
index 014b348..a6047a9 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
@@ -1,8 +1,11 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/states"
8 "github.com/hashicorp/terraform/tfdiags"
6) 9)
7 10
8// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for 11// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
@@ -11,21 +14,29 @@ import (
11// Planning a pure destroy operation is simple because we can ignore most 14// Planning a pure destroy operation is simple because we can ignore most
12// ordering configuration and simply reverse the state. 15// ordering configuration and simply reverse the state.
13type DestroyPlanGraphBuilder struct { 16type DestroyPlanGraphBuilder struct {
14 // Module is the root module for the graph to build. 17 // Config is the configuration tree to build the plan from.
15 Module *module.Tree 18 Config *configs.Config
16 19
17 // State is the current state 20 // State is the current state
18 State *State 21 State *states.State
22
23 // Components is a factory for the plug-in components (providers and
24 // provisioners) available for use.
25 Components contextComponentFactory
26
27 // Schemas is the repository of schemas we will draw from to analyse
28 // the configuration.
29 Schemas *Schemas
19 30
20 // Targets are resources to target 31 // Targets are resources to target
21 Targets []string 32 Targets []addrs.Targetable
22 33
23 // Validate will do structural validation of the graph. 34 // Validate will do structural validation of the graph.
24 Validate bool 35 Validate bool
25} 36}
26 37
27// See GraphBuilder 38// See GraphBuilder
28func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) { 39func (b *DestroyPlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
29 return (&BasicGraphBuilder{ 40 return (&BasicGraphBuilder{
30 Steps: b.Steps(), 41 Steps: b.Steps(),
31 Validate: b.Validate, 42 Validate: b.Validate,
@@ -35,25 +46,44 @@ func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
35 46
36// See GraphBuilder 47// See GraphBuilder
37func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer { 48func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
38 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 49 concreteResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
39 return &NodePlanDestroyableResource{ 50 return &NodePlanDestroyableResourceInstance{
40 NodeAbstractResource: a, 51 NodeAbstractResourceInstance: a,
52 }
53 }
54 concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex {
55 return &NodePlanDeposedResourceInstanceObject{
56 NodeAbstractResourceInstance: a,
57 DeposedKey: key,
58 }
59 }
60
61 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
62 return &NodeApplyableProvider{
63 NodeAbstractProvider: a,
41 } 64 }
42 } 65 }
43 66
44 steps := []GraphTransformer{ 67 steps := []GraphTransformer{
45 // Creates all the nodes represented in the state. 68 // Creates nodes for the resource instances tracked in the state.
46 &StateTransformer{ 69 &StateTransformer{
47 Concrete: concreteResource, 70 ConcreteCurrent: concreteResourceInstance,
48 State: b.State, 71 ConcreteDeposed: concreteResourceInstanceDeposed,
72 State: b.State,
49 }, 73 },
50 74
51 // Attach the configuration to any resources 75 // Attach the configuration to any resources
52 &AttachResourceConfigTransformer{Module: b.Module}, 76 &AttachResourceConfigTransformer{Config: b.Config},
77
78 TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
53 79
54 // Destruction ordering. We require this only so that 80 // Destruction ordering. We require this only so that
55 // targeting below will prune the correct things. 81 // targeting below will prune the correct things.
56 &DestroyEdgeTransformer{Module: b.Module, State: b.State}, 82 &DestroyEdgeTransformer{
83 Config: b.Config,
84 State: b.State,
85 Schemas: b.Schemas,
86 },
57 87
58 // Target. Note we don't set "Destroy: true" here since we already 88 // Target. Note we don't set "Destroy: true" here since we already
59 // created proper destroy ordering. 89 // created proper destroy ordering.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go
new file mode 100644
index 0000000..eb6c897
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_eval.go
@@ -0,0 +1,108 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/states"
8 "github.com/hashicorp/terraform/tfdiags"
9)
10
11// EvalGraphBuilder implements GraphBuilder and constructs a graph suitable
12// for evaluating in-memory values (input variables, local values, output
13// values) in the state without any other side-effects.
14//
15// This graph is used only in weird cases, such as the "terraform console"
16// CLI command, where we need to evaluate expressions against the state
17// without taking any other actions.
18//
19// The generated graph will include nodes for providers, resources, etc
20// just to allow indirect dependencies to be resolved, but these nodes will
21// not take any actions themselves since we assume that their parts of the
22// state, if any, are already complete.
23//
24// Although the providers are never configured, they must still be available
25// in order to obtain schema information used for type checking, etc.
26type EvalGraphBuilder struct {
27 // Config is the configuration tree.
28 Config *configs.Config
29
30 // State is the current state
31 State *states.State
32
33 // Components is a factory for the plug-in components (providers and
34 // provisioners) available for use.
35 Components contextComponentFactory
36
37 // Schemas is the repository of schemas we will draw from to analyse
38 // the configuration.
39 Schemas *Schemas
40}
41
42// See GraphBuilder
43func (b *EvalGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
44 return (&BasicGraphBuilder{
45 Steps: b.Steps(),
46 Validate: true,
47 Name: "EvalGraphBuilder",
48 }).Build(path)
49}
50
51// See GraphBuilder
52func (b *EvalGraphBuilder) Steps() []GraphTransformer {
53 concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
54 return &NodeEvalableProvider{
55 NodeAbstractProvider: a,
56 }
57 }
58
59 steps := []GraphTransformer{
60 // Creates all the data resources that aren't in the state. This will also
61 // add any orphans from scaling in as destroy nodes.
62 &ConfigTransformer{
63 Concrete: nil, // just use the abstract type
64 Config: b.Config,
65 Unique: true,
66 },
67
68 // Attach the state
69 &AttachStateTransformer{State: b.State},
70
71 // Attach the configuration to any resources
72 &AttachResourceConfigTransformer{Config: b.Config},
73
74 // Add root variables
75 &RootVariableTransformer{Config: b.Config},
76
77 // Add the local values
78 &LocalTransformer{Config: b.Config},
79
80 // Add the outputs
81 &OutputTransformer{Config: b.Config},
82
83 // Add module variables
84 &ModuleVariableTransformer{Config: b.Config},
85
86 TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
87
88 // Must attach schemas before ReferenceTransformer so that we can
89 // analyze the configuration to find references.
90 &AttachSchemaTransformer{Schemas: b.Schemas},
91
92 // Connect so that the references are ready for targeting. We'll
93 // have to connect again later for providers and so on.
94 &ReferenceTransformer{},
95
96 // Although we don't configure providers, we do still start them up
97 // to get their schemas, and so we must shut them down again here.
98 &CloseProviderTransformer{},
99
100 // Single root
101 &RootTransformer{},
102
103 // Remove redundant edges to simplify the graph.
104 &TransitiveReductionTransformer{},
105 }
106
107 return steps
108}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
index 07a1eaf..7b0e39f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
@@ -1,8 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/tfdiags"
6) 8)
7 9
8// ImportGraphBuilder implements GraphBuilder and is responsible for building 10// ImportGraphBuilder implements GraphBuilder and is responsible for building
@@ -12,15 +14,19 @@ type ImportGraphBuilder struct {
12 // ImportTargets are the list of resources to import. 14 // ImportTargets are the list of resources to import.
13 ImportTargets []*ImportTarget 15 ImportTargets []*ImportTarget
14 16
15 // Module is the module to add to the graph. See ImportOpts.Module. 17 // Module is a configuration to build the graph from. See ImportOpts.Config.
16 Module *module.Tree 18 Config *configs.Config
17 19
18 // Providers is the list of providers supported. 20 // Components is the factory for our available plugin components.
19 Providers []string 21 Components contextComponentFactory
22
23 // Schemas is the repository of schemas we will draw from to analyse
24 // the configuration.
25 Schemas *Schemas
20} 26}
21 27
22// Build builds the graph according to the steps returned by Steps. 28// Build builds the graph according to the steps returned by Steps.
23func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) { 29func (b *ImportGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
24 return (&BasicGraphBuilder{ 30 return (&BasicGraphBuilder{
25 Steps: b.Steps(), 31 Steps: b.Steps(),
26 Validate: true, 32 Validate: true,
@@ -33,9 +39,9 @@ func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
33func (b *ImportGraphBuilder) Steps() []GraphTransformer { 39func (b *ImportGraphBuilder) Steps() []GraphTransformer {
34 // Get the module. If we don't have one, we just use an empty tree 40 // Get the module. If we don't have one, we just use an empty tree
35 // so that the transform still works but does nothing. 41 // so that the transform still works but does nothing.
36 mod := b.Module 42 config := b.Config
37 if mod == nil { 43 if config == nil {
38 mod = module.NewEmptyTree() 44 config = configs.NewEmptyConfig()
39 } 45 }
40 46
41 // Custom factory for creating providers. 47 // Custom factory for creating providers.
@@ -47,16 +53,36 @@ func (b *ImportGraphBuilder) Steps() []GraphTransformer {
47 53
48 steps := []GraphTransformer{ 54 steps := []GraphTransformer{
49 // Create all our resources from the configuration and state 55 // Create all our resources from the configuration and state
50 &ConfigTransformer{Module: mod}, 56 &ConfigTransformer{Config: config},
51 57
52 // Add the import steps 58 // Add the import steps
53 &ImportStateTransformer{Targets: b.ImportTargets}, 59 &ImportStateTransformer{Targets: b.ImportTargets},
54 60
55 TransformProviders(b.Providers, concreteProvider, mod), 61 // Add root variables
62 &RootVariableTransformer{Config: b.Config},
63
64 TransformProviders(b.Components.ResourceProviders(), concreteProvider, config),
56 65
57 // This validates that the providers only depend on variables 66 // This validates that the providers only depend on variables
58 &ImportProviderValidateTransformer{}, 67 &ImportProviderValidateTransformer{},
59 68
69 // Add the local values
70 &LocalTransformer{Config: b.Config},
71
72 // Add the outputs
73 &OutputTransformer{Config: b.Config},
74
75 // Add module variables
76 &ModuleVariableTransformer{Config: b.Config},
77
78 // Must attach schemas before ReferenceTransformer so that we can
79 // analyze the configuration to find references.
80 &AttachSchemaTransformer{Schemas: b.Schemas},
81
82 // Connect so that the references are ready for targeting. We'll
83 // have to connect again later for providers and so on.
84 &ReferenceTransformer{},
85
60 // Close opened plugin connections 86 // Close opened plugin connections
61 &CloseProviderTransformer{}, 87 &CloseProviderTransformer{},
62 88
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
deleted file mode 100644
index 0df48cd..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
+++ /dev/null
@@ -1,27 +0,0 @@
1package terraform
2
3import (
4 "github.com/hashicorp/terraform/dag"
5)
6
7// InputGraphBuilder creates the graph for the input operation.
8//
9// Unlike other graph builders, this is a function since it currently modifies
10// and is based on the PlanGraphBuilder. The PlanGraphBuilder passed in will be
11// modified and should not be used for any other operations.
12func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
13 // We're going to customize the concrete functions
14 p.CustomConcrete = true
15
16 // Set the provider to the normal provider. This will ask for input.
17 p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
18 return &NodeApplyableProvider{
19 NodeAbstractProvider: a,
20 }
21 }
22
23 // We purposely don't set any more concrete fields since the remainder
24 // should be no-ops.
25
26 return p
27}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
index f8dd0fc..17adfd2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
@@ -3,8 +3,11 @@ package terraform
3import ( 3import (
4 "sync" 4 "sync"
5 5
6 "github.com/hashicorp/terraform/config/module" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/dag" 8 "github.com/hashicorp/terraform/dag"
9 "github.com/hashicorp/terraform/states"
10 "github.com/hashicorp/terraform/tfdiags"
8) 11)
9 12
10// PlanGraphBuilder implements GraphBuilder and is responsible for building 13// PlanGraphBuilder implements GraphBuilder and is responsible for building
@@ -19,20 +22,22 @@ import (
19// create-before-destroy can be completely ignored. 22// create-before-destroy can be completely ignored.
20// 23//
21type PlanGraphBuilder struct { 24type PlanGraphBuilder struct {
22 // Module is the root module for the graph to build. 25 // Config is the configuration tree to build a plan from.
23 Module *module.Tree 26 Config *configs.Config
24 27
25 // State is the current state 28 // State is the current state
26 State *State 29 State *states.State
27 30
28 // Providers is the list of providers supported. 31 // Components is a factory for the plug-in components (providers and
29 Providers []string 32 // provisioners) available for use.
33 Components contextComponentFactory
30 34
31 // Provisioners is the list of provisioners supported. 35 // Schemas is the repository of schemas we will draw from to analyse
32 Provisioners []string 36 // the configuration.
37 Schemas *Schemas
33 38
34 // Targets are resources to target 39 // Targets are resources to target
35 Targets []string 40 Targets []addrs.Targetable
36 41
37 // DisableReduce, if true, will not reduce the graph. Great for testing. 42 // DisableReduce, if true, will not reduce the graph. Great for testing.
38 DisableReduce bool 43 DisableReduce bool
@@ -46,13 +51,13 @@ type PlanGraphBuilder struct {
46 CustomConcrete bool 51 CustomConcrete bool
47 ConcreteProvider ConcreteProviderNodeFunc 52 ConcreteProvider ConcreteProviderNodeFunc
48 ConcreteResource ConcreteResourceNodeFunc 53 ConcreteResource ConcreteResourceNodeFunc
49 ConcreteResourceOrphan ConcreteResourceNodeFunc 54 ConcreteResourceOrphan ConcreteResourceInstanceNodeFunc
50 55
51 once sync.Once 56 once sync.Once
52} 57}
53 58
54// See GraphBuilder 59// See GraphBuilder
55func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) { 60func (b *PlanGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
56 return (&BasicGraphBuilder{ 61 return (&BasicGraphBuilder{
57 Steps: b.Steps(), 62 Steps: b.Steps(),
58 Validate: b.Validate, 63 Validate: b.Validate,
@@ -64,66 +69,82 @@ func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
64func (b *PlanGraphBuilder) Steps() []GraphTransformer { 69func (b *PlanGraphBuilder) Steps() []GraphTransformer {
65 b.once.Do(b.init) 70 b.once.Do(b.init)
66 71
72 concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex {
73 return &NodePlanDeposedResourceInstanceObject{
74 NodeAbstractResourceInstance: a,
75 DeposedKey: key,
76 }
77 }
78
67 steps := []GraphTransformer{ 79 steps := []GraphTransformer{
68 // Creates all the resources represented in the config 80 // Creates all the resources represented in the config
69 &ConfigTransformer{ 81 &ConfigTransformer{
70 Concrete: b.ConcreteResource, 82 Concrete: b.ConcreteResource,
71 Module: b.Module, 83 Config: b.Config,
72 }, 84 },
73 85
74 // Add the local values 86 // Add the local values
75 &LocalTransformer{Module: b.Module}, 87 &LocalTransformer{Config: b.Config},
76 88
77 // Add the outputs 89 // Add the outputs
78 &OutputTransformer{Module: b.Module}, 90 &OutputTransformer{Config: b.Config},
79 91
80 // Add orphan resources 92 // Add orphan resources
81 &OrphanResourceTransformer{ 93 &OrphanResourceInstanceTransformer{
82 Concrete: b.ConcreteResourceOrphan, 94 Concrete: b.ConcreteResourceOrphan,
83 State: b.State, 95 State: b.State,
84 Module: b.Module, 96 Config: b.Config,
97 },
98
99 // We also need nodes for any deposed instance objects present in the
100 // state, so we can plan to destroy them. (This intentionally
101 // skips creating nodes for _current_ objects, since ConfigTransformer
102 // created nodes that will do that during DynamicExpand.)
103 &StateTransformer{
104 ConcreteDeposed: concreteResourceInstanceDeposed,
105 State: b.State,
85 }, 106 },
86 107
87 // Create orphan output nodes 108 // Create orphan output nodes
88 &OrphanOutputTransformer{ 109 &OrphanOutputTransformer{
89 Module: b.Module, 110 Config: b.Config,
90 State: b.State, 111 State: b.State,
91 }, 112 },
92 113
93 // Attach the configuration to any resources 114 // Attach the configuration to any resources
94 &AttachResourceConfigTransformer{Module: b.Module}, 115 &AttachResourceConfigTransformer{Config: b.Config},
95 116
96 // Attach the state 117 // Attach the state
97 &AttachStateTransformer{State: b.State}, 118 &AttachStateTransformer{State: b.State},
98 119
99 // Add root variables 120 // Add root variables
100 &RootVariableTransformer{Module: b.Module}, 121 &RootVariableTransformer{Config: b.Config},
101
102 TransformProviders(b.Providers, b.ConcreteProvider, b.Module),
103 122
104 // Provisioner-related transformations. Only add these if requested. 123 &MissingProvisionerTransformer{Provisioners: b.Components.ResourceProvisioners()},
105 GraphTransformIf( 124 &ProvisionerTransformer{},
106 func() bool { return b.Provisioners != nil },
107 GraphTransformMulti(
108 &MissingProvisionerTransformer{Provisioners: b.Provisioners},
109 &ProvisionerTransformer{},
110 ),
111 ),
112 125
113 // Add module variables 126 // Add module variables
114 &ModuleVariableTransformer{ 127 &ModuleVariableTransformer{
115 Module: b.Module, 128 Config: b.Config,
116 }, 129 },
117 130
131 TransformProviders(b.Components.ResourceProviders(), b.ConcreteProvider, b.Config),
132
118 // Remove modules no longer present in the config 133 // Remove modules no longer present in the config
119 &RemovedModuleTransformer{Module: b.Module, State: b.State}, 134 &RemovedModuleTransformer{Config: b.Config, State: b.State},
135
136 // Must attach schemas before ReferenceTransformer so that we can
137 // analyze the configuration to find references.
138 &AttachSchemaTransformer{Schemas: b.Schemas},
120 139
121 // Connect so that the references are ready for targeting. We'll 140 // Connect so that the references are ready for targeting. We'll
122 // have to connect again later for providers and so on. 141 // have to connect again later for providers and so on.
123 &ReferenceTransformer{}, 142 &ReferenceTransformer{},
124 143
125 // Add the node to fix the state count boundaries 144 // Add the node to fix the state count boundaries
126 &CountBoundaryTransformer{}, 145 &CountBoundaryTransformer{
146 Config: b.Config,
147 },
127 148
128 // Target 149 // Target
129 &TargetsTransformer{ 150 &TargetsTransformer{
@@ -136,6 +157,10 @@ func (b *PlanGraphBuilder) Steps() []GraphTransformer {
136 IgnoreIndices: true, 157 IgnoreIndices: true,
137 }, 158 },
138 159
160 // Detect when create_before_destroy must be forced on for a particular
161 // node due to dependency edges, to avoid graph cycles during apply.
162 &ForcedCBDTransformer{},
163
139 // Close opened plugin connections 164 // Close opened plugin connections
140 &CloseProviderTransformer{}, 165 &CloseProviderTransformer{},
141 &CloseProvisionerTransformer{}, 166 &CloseProvisionerTransformer{},
@@ -167,15 +192,13 @@ func (b *PlanGraphBuilder) init() {
167 192
168 b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { 193 b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
169 return &NodePlannableResource{ 194 return &NodePlannableResource{
170 NodeAbstractCountResource: &NodeAbstractCountResource{ 195 NodeAbstractResource: a,
171 NodeAbstractResource: a,
172 },
173 } 196 }
174 } 197 }
175 198
176 b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex { 199 b.ConcreteResourceOrphan = func(a *NodeAbstractResourceInstance) dag.Vertex {
177 return &NodePlannableResourceOrphan{ 200 return &NodePlannableResourceInstanceOrphan{
178 NodeAbstractResource: a, 201 NodeAbstractResourceInstance: a,
179 } 202 }
180 } 203 }
181} 204}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
index 9638d4c..0342cdb 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
@@ -3,8 +3,11 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/states"
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/tfdiags"
8
9 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/dag" 11 "github.com/hashicorp/terraform/dag"
9) 12)
10 13
@@ -21,17 +24,22 @@ import (
21// create-before-destroy can be completely ignored. 24// create-before-destroy can be completely ignored.
22// 25//
23type RefreshGraphBuilder struct { 26type RefreshGraphBuilder struct {
24 // Module is the root module for the graph to build. 27 // Config is the configuration tree.
25 Module *module.Tree 28 Config *configs.Config
29
30 // State is the prior state
31 State *states.State
26 32
27 // State is the current state 33 // Components is a factory for the plug-in components (providers and
28 State *State 34 // provisioners) available for use.
35 Components contextComponentFactory
29 36
30 // Providers is the list of providers supported. 37 // Schemas is the repository of schemas we will draw from to analyse
31 Providers []string 38 // the configuration.
39 Schemas *Schemas
32 40
33 // Targets are resources to target 41 // Targets are resources to target
34 Targets []string 42 Targets []addrs.Targetable
35 43
36 // DisableReduce, if true, will not reduce the graph. Great for testing. 44 // DisableReduce, if true, will not reduce the graph. Great for testing.
37 DisableReduce bool 45 DisableReduce bool
@@ -41,7 +49,7 @@ type RefreshGraphBuilder struct {
41} 49}
42 50
43// See GraphBuilder 51// See GraphBuilder
44func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) { 52func (b *RefreshGraphBuilder) Build(path addrs.ModuleInstance) (*Graph, tfdiags.Diagnostics) {
45 return (&BasicGraphBuilder{ 53 return (&BasicGraphBuilder{
46 Steps: b.Steps(), 54 Steps: b.Steps(),
47 Validate: b.Validate, 55 Validate: b.Validate,
@@ -60,23 +68,27 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
60 68
61 concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex { 69 concreteManagedResource := func(a *NodeAbstractResource) dag.Vertex {
62 return &NodeRefreshableManagedResource{ 70 return &NodeRefreshableManagedResource{
63 NodeAbstractCountResource: &NodeAbstractCountResource{ 71 NodeAbstractResource: a,
64 NodeAbstractResource: a,
65 },
66 } 72 }
67 } 73 }
68 74
69 concreteManagedResourceInstance := func(a *NodeAbstractResource) dag.Vertex { 75 concreteManagedResourceInstance := func(a *NodeAbstractResourceInstance) dag.Vertex {
70 return &NodeRefreshableManagedResourceInstance{ 76 return &NodeRefreshableManagedResourceInstance{
71 NodeAbstractResource: a, 77 NodeAbstractResourceInstance: a,
78 }
79 }
80
81 concreteResourceInstanceDeposed := func(a *NodeAbstractResourceInstance, key states.DeposedKey) dag.Vertex {
82 // The "Plan" node type also handles refreshing behavior.
83 return &NodePlanDeposedResourceInstanceObject{
84 NodeAbstractResourceInstance: a,
85 DeposedKey: key,
72 } 86 }
73 } 87 }
74 88
75 concreteDataResource := func(a *NodeAbstractResource) dag.Vertex { 89 concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
76 return &NodeRefreshableDataResource{ 90 return &NodeRefreshableDataResource{
77 NodeAbstractCountResource: &NodeAbstractCountResource{ 91 NodeAbstractResource: a,
78 NodeAbstractResource: a,
79 },
80 } 92 }
81 } 93 }
82 94
@@ -88,13 +100,13 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
88 if b.State.HasResources() { 100 if b.State.HasResources() {
89 return &ConfigTransformer{ 101 return &ConfigTransformer{
90 Concrete: concreteManagedResource, 102 Concrete: concreteManagedResource,
91 Module: b.Module, 103 Config: b.Config,
92 Unique: true, 104 Unique: true,
93 ModeFilter: true, 105 ModeFilter: true,
94 Mode: config.ManagedResourceMode, 106 Mode: addrs.ManagedResourceMode,
95 } 107 }
96 } 108 }
97 log.Println("[TRACE] No managed resources in state during refresh, skipping managed resource transformer") 109 log.Println("[TRACE] No managed resources in state during refresh; skipping managed resource transformer")
98 return nil 110 return nil
99 }(), 111 }(),
100 112
@@ -102,40 +114,53 @@ func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
102 // add any orphans from scaling in as destroy nodes. 114 // add any orphans from scaling in as destroy nodes.
103 &ConfigTransformer{ 115 &ConfigTransformer{
104 Concrete: concreteDataResource, 116 Concrete: concreteDataResource,
105 Module: b.Module, 117 Config: b.Config,
106 Unique: true, 118 Unique: true,
107 ModeFilter: true, 119 ModeFilter: true,
108 Mode: config.DataResourceMode, 120 Mode: addrs.DataResourceMode,
109 }, 121 },
110 122
111 // Add any fully-orphaned resources from config (ones that have been 123 // Add any fully-orphaned resources from config (ones that have been
112 // removed completely, not ones that are just orphaned due to a scaled-in 124 // removed completely, not ones that are just orphaned due to a scaled-in
113 // count. 125 // count.
114 &OrphanResourceTransformer{ 126 &OrphanResourceInstanceTransformer{
115 Concrete: concreteManagedResourceInstance, 127 Concrete: concreteManagedResourceInstance,
116 State: b.State, 128 State: b.State,
117 Module: b.Module, 129 Config: b.Config,
130 },
131
132 // We also need nodes for any deposed instance objects present in the
133 // state, so we can check if they still exist. (This intentionally
134 // skips creating nodes for _current_ objects, since ConfigTransformer
135 // created nodes that will do that during DynamicExpand.)
136 &StateTransformer{
137 ConcreteDeposed: concreteResourceInstanceDeposed,
138 State: b.State,
118 }, 139 },
119 140
120 // Attach the state 141 // Attach the state
121 &AttachStateTransformer{State: b.State}, 142 &AttachStateTransformer{State: b.State},
122 143
123 // Attach the configuration to any resources 144 // Attach the configuration to any resources
124 &AttachResourceConfigTransformer{Module: b.Module}, 145 &AttachResourceConfigTransformer{Config: b.Config},
125 146
126 // Add root variables 147 // Add root variables
127 &RootVariableTransformer{Module: b.Module}, 148 &RootVariableTransformer{Config: b.Config},
128
129 TransformProviders(b.Providers, concreteProvider, b.Module),
130 149
131 // Add the local values 150 // Add the local values
132 &LocalTransformer{Module: b.Module}, 151 &LocalTransformer{Config: b.Config},
133 152
134 // Add the outputs 153 // Add the outputs
135 &OutputTransformer{Module: b.Module}, 154 &OutputTransformer{Config: b.Config},
136 155
137 // Add module variables 156 // Add module variables
138 &ModuleVariableTransformer{Module: b.Module}, 157 &ModuleVariableTransformer{Config: b.Config},
158
159 TransformProviders(b.Components.ResourceProviders(), concreteProvider, b.Config),
160
161 // Must attach schemas before ReferenceTransformer so that we can
162 // analyze the configuration to find references.
163 &AttachSchemaTransformer{Schemas: b.Schemas},
139 164
140 // Connect so that the references are ready for targeting. We'll 165 // Connect so that the references are ready for targeting. We'll
141 // have to connect again later for providers and so on. 166 // have to connect again later for providers and so on.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
index 645ec7b..1881f95 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
@@ -23,9 +23,7 @@ func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
23 23
24 p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex { 24 p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
25 return &NodeValidatableResource{ 25 return &NodeValidatableResource{
26 NodeAbstractCountResource: &NodeAbstractCountResource{ 26 NodeAbstractResource: a,
27 NodeAbstractResource: a,
28 },
29 } 27 }
30 } 28 }
31 29
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
index 2897eb5..768590f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
@@ -1,7 +1,11 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/hashicorp/terraform/addrs"
5)
6
3// GraphNodeSubPath says that a node is part of a graph with a 7// GraphNodeSubPath says that a node is part of a graph with a
4// different path, and the context should be adjusted accordingly. 8// different path, and the context should be adjusted accordingly.
5type GraphNodeSubPath interface { 9type GraphNodeSubPath interface {
6 Path() []string 10 Path() addrs.ModuleInstance
7} 11}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
index 34ce6f6..e980e0c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
@@ -1,60 +1,32 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/addrs"
4 "github.com/hashicorp/terraform/dag" 5 "github.com/hashicorp/terraform/dag"
6 "github.com/hashicorp/terraform/tfdiags"
5) 7)
6 8
7// GraphWalker is an interface that can be implemented that when used 9// GraphWalker is an interface that can be implemented that when used
8// with Graph.Walk will invoke the given callbacks under certain events. 10// with Graph.Walk will invoke the given callbacks under certain events.
9type GraphWalker interface { 11type GraphWalker interface {
10 EnterPath([]string) EvalContext 12 EnterPath(addrs.ModuleInstance) EvalContext
11 ExitPath([]string) 13 ExitPath(addrs.ModuleInstance)
12 EnterVertex(dag.Vertex) 14 EnterVertex(dag.Vertex)
13 ExitVertex(dag.Vertex, error) 15 ExitVertex(dag.Vertex, tfdiags.Diagnostics)
14 EnterEvalTree(dag.Vertex, EvalNode) EvalNode 16 EnterEvalTree(dag.Vertex, EvalNode) EvalNode
15 ExitEvalTree(dag.Vertex, interface{}, error) error 17 ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics
16} 18}
17 19
18// GrpahWalkerPanicwrapper can be optionally implemented to catch panics
19// that occur while walking the graph. This is not generally recommended
20// since panics should crash Terraform and result in a bug report. However,
21// this is particularly useful for situations like the shadow graph where
22// you don't ever want to cause a panic.
23type GraphWalkerPanicwrapper interface {
24 GraphWalker
25
26 // Panic is called when a panic occurs. This will halt the panic from
27 // propogating so if the walker wants it to crash still it should panic
28 // again. This is called from within a defer so runtime/debug.Stack can
29 // be used to get the stack trace of the panic.
30 Panic(dag.Vertex, interface{})
31}
32
33// GraphWalkerPanicwrap wraps an existing Graphwalker to wrap and swallow
34// the panics. This doesn't lose the panics since the panics are still
35// returned as errors as part of a graph walk.
36func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
37 return &graphWalkerPanicwrapper{
38 GraphWalker: w,
39 }
40}
41
42type graphWalkerPanicwrapper struct {
43 GraphWalker
44}
45
46func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
47
48// NullGraphWalker is a GraphWalker implementation that does nothing. 20// NullGraphWalker is a GraphWalker implementation that does nothing.
49// This can be embedded within other GraphWalker implementations for easily 21// This can be embedded within other GraphWalker implementations for easily
50// implementing all the required functions. 22// implementing all the required functions.
51type NullGraphWalker struct{} 23type NullGraphWalker struct{}
52 24
53func (NullGraphWalker) EnterPath([]string) EvalContext { return new(MockEvalContext) } 25func (NullGraphWalker) EnterPath(addrs.ModuleInstance) EvalContext { return new(MockEvalContext) }
54func (NullGraphWalker) ExitPath([]string) {} 26func (NullGraphWalker) ExitPath(addrs.ModuleInstance) {}
55func (NullGraphWalker) EnterVertex(dag.Vertex) {} 27func (NullGraphWalker) EnterVertex(dag.Vertex) {}
56func (NullGraphWalker) ExitVertex(dag.Vertex, error) {} 28func (NullGraphWalker) ExitVertex(dag.Vertex, tfdiags.Diagnostics) {}
57func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n } 29func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
58func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error { 30func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) tfdiags.Diagnostics {
59 return nil 31 return nil
60} 32}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
index 89f376e..03c192a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
@@ -2,12 +2,19 @@ package terraform
2 2
3import ( 3import (
4 "context" 4 "context"
5 "fmt"
6 "log" 5 "log"
7 "sync" 6 "sync"
8 7
9 "github.com/hashicorp/errwrap" 8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/configs/configschema"
10 "github.com/hashicorp/terraform/dag" 12 "github.com/hashicorp/terraform/dag"
13 "github.com/hashicorp/terraform/plans"
14 "github.com/hashicorp/terraform/providers"
15 "github.com/hashicorp/terraform/provisioners"
16 "github.com/hashicorp/terraform/states"
17 "github.com/hashicorp/terraform/tfdiags"
11) 18)
12 19
13// ContextGraphWalker is the GraphWalker implementation used with the 20// ContextGraphWalker is the GraphWalker implementation used with the
@@ -16,54 +23,56 @@ type ContextGraphWalker struct {
16 NullGraphWalker 23 NullGraphWalker
17 24
18 // Configurable values 25 // Configurable values
19 Context *Context 26 Context *Context
20 Operation walkOperation 27 State *states.SyncState // Used for safe concurrent access to state
21 StopContext context.Context 28 Changes *plans.ChangesSync // Used for safe concurrent writes to changes
22 29 Operation walkOperation
23 // Outputs, do not set these. Do not read these while the graph 30 StopContext context.Context
24 // is being walked. 31 RootVariableValues InputValues
25 ValidationWarnings []string 32
26 ValidationErrors []error 33 // This is an output. Do not set this, nor read it while a graph walk
27 34 // is in progress.
28 errorLock sync.Mutex 35 NonFatalDiagnostics tfdiags.Diagnostics
29 once sync.Once 36
30 contexts map[string]*BuiltinEvalContext 37 errorLock sync.Mutex
31 contextLock sync.Mutex 38 once sync.Once
32 interpolaterVars map[string]map[string]interface{} 39 contexts map[string]*BuiltinEvalContext
33 interpolaterVarLock sync.Mutex 40 contextLock sync.Mutex
34 providerCache map[string]ResourceProvider 41 variableValues map[string]map[string]cty.Value
35 providerLock sync.Mutex 42 variableValuesLock sync.Mutex
36 provisionerCache map[string]ResourceProvisioner 43 providerCache map[string]providers.Interface
37 provisionerLock sync.Mutex 44 providerSchemas map[string]*ProviderSchema
45 providerLock sync.Mutex
46 provisionerCache map[string]provisioners.Interface
47 provisionerSchemas map[string]*configschema.Block
48 provisionerLock sync.Mutex
38} 49}
39 50
40func (w *ContextGraphWalker) EnterPath(path []string) EvalContext { 51func (w *ContextGraphWalker) EnterPath(path addrs.ModuleInstance) EvalContext {
41 w.once.Do(w.init) 52 w.once.Do(w.init)
42 53
43 w.contextLock.Lock() 54 w.contextLock.Lock()
44 defer w.contextLock.Unlock() 55 defer w.contextLock.Unlock()
45 56
46 // If we already have a context for this path cached, use that 57 // If we already have a context for this path cached, use that
47 key := PathCacheKey(path) 58 key := path.String()
48 if ctx, ok := w.contexts[key]; ok { 59 if ctx, ok := w.contexts[key]; ok {
49 return ctx 60 return ctx
50 } 61 }
51 62
52 // Setup the variables for this interpolater 63 // Our evaluator shares some locks with the main context and the walker
53 variables := make(map[string]interface{}) 64 // so that we can safely run multiple evaluations at once across
54 if len(path) <= 1 { 65 // different modules.
55 for k, v := range w.Context.variables { 66 evaluator := &Evaluator{
56 variables[k] = v 67 Meta: w.Context.meta,
57 } 68 Config: w.Context.config,
58 } 69 Operation: w.Operation,
59 w.interpolaterVarLock.Lock() 70 State: w.State,
60 if m, ok := w.interpolaterVars[key]; ok { 71 Changes: w.Changes,
61 for k, v := range m { 72 Schemas: w.Context.schemas,
62 variables[k] = v 73 VariableValues: w.variableValues,
63 } 74 VariableValuesLock: &w.variableValuesLock,
64 } 75 }
65 w.interpolaterVars[key] = variables
66 w.interpolaterVarLock.Unlock()
67 76
68 ctx := &BuiltinEvalContext{ 77 ctx := &BuiltinEvalContext{
69 StopContext: w.StopContext, 78 StopContext: w.StopContext,
@@ -71,26 +80,17 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
71 Hooks: w.Context.hooks, 80 Hooks: w.Context.hooks,
72 InputValue: w.Context.uiInput, 81 InputValue: w.Context.uiInput,
73 Components: w.Context.components, 82 Components: w.Context.components,
83 Schemas: w.Context.schemas,
74 ProviderCache: w.providerCache, 84 ProviderCache: w.providerCache,
75 ProviderInputConfig: w.Context.providerInputConfig, 85 ProviderInputConfig: w.Context.providerInputConfig,
76 ProviderLock: &w.providerLock, 86 ProviderLock: &w.providerLock,
77 ProvisionerCache: w.provisionerCache, 87 ProvisionerCache: w.provisionerCache,
78 ProvisionerLock: &w.provisionerLock, 88 ProvisionerLock: &w.provisionerLock,
79 DiffValue: w.Context.diff, 89 ChangesValue: w.Changes,
80 DiffLock: &w.Context.diffLock, 90 StateValue: w.State,
81 StateValue: w.Context.state, 91 Evaluator: evaluator,
82 StateLock: &w.Context.stateLock, 92 VariableValues: w.variableValues,
83 Interpolater: &Interpolater{ 93 VariableValuesLock: &w.variableValuesLock,
84 Operation: w.Operation,
85 Meta: w.Context.meta,
86 Module: w.Context.module,
87 State: w.Context.state,
88 StateLock: &w.Context.stateLock,
89 VariableValues: variables,
90 VariableValuesLock: &w.interpolaterVarLock,
91 },
92 InterpolaterVars: w.interpolaterVars,
93 InterpolaterVarLock: &w.interpolaterVarLock,
94 } 94 }
95 95
96 w.contexts[key] = ctx 96 w.contexts[key] = ctx
@@ -98,8 +98,7 @@ func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
98} 98}
99 99
100func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { 100func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
101 log.Printf("[TRACE] [%s] Entering eval tree: %s", 101 log.Printf("[TRACE] [%s] Entering eval tree: %s", w.Operation, dag.VertexName(v))
102 w.Operation, dag.VertexName(v))
103 102
104 // Acquire a lock on the semaphore 103 // Acquire a lock on the semaphore
105 w.Context.parallelSem.Acquire() 104 w.Context.parallelSem.Acquire()
@@ -109,10 +108,8 @@ func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
109 return EvalFilter(n, EvalNodeFilterOp(w.Operation)) 108 return EvalFilter(n, EvalNodeFilterOp(w.Operation))
110} 109}
111 110
112func (w *ContextGraphWalker) ExitEvalTree( 111func (w *ContextGraphWalker) ExitEvalTree(v dag.Vertex, output interface{}, err error) tfdiags.Diagnostics {
113 v dag.Vertex, output interface{}, err error) error { 112 log.Printf("[TRACE] [%s] Exiting eval tree: %s", w.Operation, dag.VertexName(v))
114 log.Printf("[TRACE] [%s] Exiting eval tree: %s",
115 w.Operation, dag.VertexName(v))
116 113
117 // Release the semaphore 114 // Release the semaphore
118 w.Context.parallelSem.Release() 115 w.Context.parallelSem.Release()
@@ -125,30 +122,36 @@ func (w *ContextGraphWalker) ExitEvalTree(
125 w.errorLock.Lock() 122 w.errorLock.Lock()
126 defer w.errorLock.Unlock() 123 defer w.errorLock.Unlock()
127 124
128 // Try to get a validation error out of it. If its not a validation 125 // If the error is non-fatal then we'll accumulate its diagnostics in our
129 // error, then just record the normal error. 126 // non-fatal list, rather than returning it directly, so that the graph
130 verr, ok := err.(*EvalValidateError) 127 // walk can continue.
131 if !ok { 128 if nferr, ok := err.(tfdiags.NonFatalError); ok {
132 return err 129 log.Printf("[WARN] %s: %s", dag.VertexName(v), nferr)
133 } 130 w.NonFatalDiagnostics = w.NonFatalDiagnostics.Append(nferr.Diagnostics)
134 131 return nil
135 for _, msg := range verr.Warnings {
136 w.ValidationWarnings = append(
137 w.ValidationWarnings,
138 fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
139 }
140 for _, e := range verr.Errors {
141 w.ValidationErrors = append(
142 w.ValidationErrors,
143 errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
144 } 132 }
145 133
146 return nil 134 // Otherwise, we'll let our usual diagnostics machinery figure out how to
135 // unpack this as one or more diagnostic messages and return that. If we
136 // get down here then the returned diagnostics will contain at least one
137 // error, causing the graph walk to halt.
138 var diags tfdiags.Diagnostics
139 diags = diags.Append(err)
140 return diags
147} 141}
148 142
149func (w *ContextGraphWalker) init() { 143func (w *ContextGraphWalker) init() {
150 w.contexts = make(map[string]*BuiltinEvalContext, 5) 144 w.contexts = make(map[string]*BuiltinEvalContext)
151 w.providerCache = make(map[string]ResourceProvider, 5) 145 w.providerCache = make(map[string]providers.Interface)
152 w.provisionerCache = make(map[string]ResourceProvisioner, 5) 146 w.providerSchemas = make(map[string]*ProviderSchema)
153 w.interpolaterVars = make(map[string]map[string]interface{}, 5) 147 w.provisionerCache = make(map[string]provisioners.Interface)
148 w.provisionerSchemas = make(map[string]*configschema.Block)
149 w.variableValues = make(map[string]map[string]cty.Value)
150
151 // Populate root module variable values. Other modules will be populated
152 // during the graph walk.
153 w.variableValues[""] = make(map[string]cty.Value)
154 for k, iv := range w.RootVariableValues {
155 w.variableValues[""][k] = iv.Value
156 }
154} 157}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
index 3fb3748..a3756e7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
@@ -7,7 +7,6 @@ type walkOperation byte
7 7
8const ( 8const (
9 walkInvalid walkOperation = iota 9 walkInvalid walkOperation = iota
10 walkInput
11 walkApply 10 walkApply
12 walkPlan 11 walkPlan
13 walkPlanDestroy 12 walkPlanDestroy
@@ -15,4 +14,5 @@ const (
15 walkValidate 14 walkValidate
16 walkDestroy 15 walkDestroy
17 walkImport 16 walkImport
17 walkEval // used just to prepare EvalContext for expression evaluation, with no other actions
18) 18)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
index 95ef4e9..b51e1a2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
@@ -4,9 +4,23 @@ package terraform
4 4
5import "strconv" 5import "strconv"
6 6
7const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" 7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[GraphTypeInvalid-0]
12 _ = x[GraphTypeLegacy-1]
13 _ = x[GraphTypeRefresh-2]
14 _ = x[GraphTypePlan-3]
15 _ = x[GraphTypePlanDestroy-4]
16 _ = x[GraphTypeApply-5]
17 _ = x[GraphTypeValidate-6]
18 _ = x[GraphTypeEval-7]
19}
20
21const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeValidateGraphTypeEval"
8 22
9var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} 23var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 111, 124}
10 24
11func (i GraphType) String() string { 25func (i GraphType) String() string {
12 if i >= GraphType(len(_GraphType_index)-1) { 26 if i >= GraphType(len(_GraphType_index)-1) {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
index ab11e8e..c0bb23a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/hook.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook.go
@@ -1,5 +1,14 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/zclconf/go-cty/cty"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/plans"
8 "github.com/hashicorp/terraform/providers"
9 "github.com/hashicorp/terraform/states"
10)
11
3// HookAction is an enum of actions that can be taken as a result of a hook 12// HookAction is an enum of actions that can be taken as a result of a hook
4// callback. This allows you to modify the behavior of Terraform at runtime. 13// callback. This allows you to modify the behavior of Terraform at runtime.
5type HookAction byte 14type HookAction byte
@@ -21,42 +30,56 @@ const (
21// NilHook into your struct, which implements all of the interface but does 30// NilHook into your struct, which implements all of the interface but does
22// nothing. Then, override only the functions you want to implement. 31// nothing. Then, override only the functions you want to implement.
23type Hook interface { 32type Hook interface {
24 // PreApply and PostApply are called before and after a single 33 // PreApply and PostApply are called before and after an action for a
25 // resource is applied. The error argument in PostApply is the 34 // single instance is applied. The error argument in PostApply is the
26 // error, if any, that was returned from the provider Apply call itself. 35 // error, if any, that was returned from the provider Apply call itself.
27 PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) 36 PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
28 PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) 37 PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error)
29 38
30 // PreDiff and PostDiff are called before and after a single resource 39 // PreDiff and PostDiff are called before and after a provider is given
31 // resource is diffed. 40 // the opportunity to customize the proposed new state to produce the
32 PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) 41 // planned new state.
33 PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) 42 PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error)
34 43 PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error)
35 // Provisioning hooks 44
45 // The provisioning hooks signal both the overall start end end of
46 // provisioning for a particular instance and of each of the individual
47 // configured provisioners for each instance. The sequence of these
48 // for a given instance might look something like this:
36 // 49 //
37 // All should be self-explanatory. ProvisionOutput is called with 50 // PreProvisionInstance(aws_instance.foo[1], ...)
38 // output sent back by the provisioners. This will be called multiple 51 // PreProvisionInstanceStep(aws_instance.foo[1], "file")
39 // times as output comes in, but each call should represent a line of 52 // PostProvisionInstanceStep(aws_instance.foo[1], "file", nil)
40 // output. The ProvisionOutput method cannot control whether the 53 // PreProvisionInstanceStep(aws_instance.foo[1], "remote-exec")
41 // hook continues running. 54 // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Installing foo...")
42 PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) 55 // ProvisionOutput(aws_instance.foo[1], "remote-exec", "Configuring bar...")
43 PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) 56 // PostProvisionInstanceStep(aws_instance.foo[1], "remote-exec", nil)
44 PreProvision(*InstanceInfo, string) (HookAction, error) 57 // PostProvisionInstance(aws_instance.foo[1], ...)
45 PostProvision(*InstanceInfo, string, error) (HookAction, error) 58 //
46 ProvisionOutput(*InstanceInfo, string, string) 59 // ProvisionOutput is called with output sent back by the provisioners.
60 // This will be called multiple times as output comes in, with each call
61 // representing one line of output. It cannot control whether the
62 // provisioner continues running.
63 PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error)
64 PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error)
65 PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error)
66 PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error)
67 ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string)
47 68
48 // PreRefresh and PostRefresh are called before and after a single 69 // PreRefresh and PostRefresh are called before and after a single
49 // resource state is refreshed, respectively. 70 // resource state is refreshed, respectively.
50 PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) 71 PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error)
51 PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) 72 PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error)
52
53 // PostStateUpdate is called after the state is updated.
54 PostStateUpdate(*State) (HookAction, error)
55 73
56 // PreImportState and PostImportState are called before and after 74 // PreImportState and PostImportState are called before and after
57 // a single resource's state is being improted. 75 // (respectively) each state import operation for a given resource address.
58 PreImportState(*InstanceInfo, string) (HookAction, error) 76 PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error)
59 PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) 77 PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error)
78
79 // PostStateUpdate is called each time the state is updated. It receives
80 // a deep copy of the state, which it may therefore access freely without
81 // any need for locks to protect from concurrent writes from the caller.
82 PostStateUpdate(new *states.State) (HookAction, error)
60} 83}
61 84
62// NilHook is a Hook implementation that does nothing. It exists only to 85// NilHook is a Hook implementation that does nothing. It exists only to
@@ -64,59 +87,60 @@ type Hook interface {
64// and only implement the functions you are interested in. 87// and only implement the functions you are interested in.
65type NilHook struct{} 88type NilHook struct{}
66 89
67func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { 90var _ Hook = (*NilHook)(nil)
91
92func (*NilHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
68 return HookActionContinue, nil 93 return HookActionContinue, nil
69} 94}
70 95
71func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { 96func (*NilHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) {
72 return HookActionContinue, nil 97 return HookActionContinue, nil
73} 98}
74 99
75func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { 100func (*NilHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) {
76 return HookActionContinue, nil 101 return HookActionContinue, nil
77} 102}
78 103
79func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { 104func (*NilHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
80 return HookActionContinue, nil 105 return HookActionContinue, nil
81} 106}
82 107
83func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { 108func (*NilHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
84 return HookActionContinue, nil 109 return HookActionContinue, nil
85} 110}
86 111
87func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { 112func (*NilHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
88 return HookActionContinue, nil 113 return HookActionContinue, nil
89} 114}
90 115
91func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) { 116func (*NilHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) {
92 return HookActionContinue, nil 117 return HookActionContinue, nil
93} 118}
94 119
95func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { 120func (*NilHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) {
96 return HookActionContinue, nil 121 return HookActionContinue, nil
97} 122}
98 123
99func (*NilHook) ProvisionOutput( 124func (*NilHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) {
100 *InstanceInfo, string, string) {
101} 125}
102 126
103func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { 127func (*NilHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) {
104 return HookActionContinue, nil 128 return HookActionContinue, nil
105} 129}
106 130
107func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { 131func (*NilHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) {
108 return HookActionContinue, nil 132 return HookActionContinue, nil
109} 133}
110 134
111func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) { 135func (*NilHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) {
112 return HookActionContinue, nil 136 return HookActionContinue, nil
113} 137}
114 138
115func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { 139func (*NilHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
116 return HookActionContinue, nil 140 return HookActionContinue, nil
117} 141}
118 142
119func (*NilHook) PostStateUpdate(*State) (HookAction, error) { 143func (*NilHook) PostStateUpdate(new *states.State) (HookAction, error) {
120 return HookActionContinue, nil 144 return HookActionContinue, nil
121} 145}
122 146
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
index 0e46400..6efa319 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
@@ -1,245 +1,274 @@
1package terraform 1package terraform
2 2
3import "sync" 3import (
4 "sync"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/plans"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/states"
12)
4 13
5// MockHook is an implementation of Hook that can be used for tests. 14// MockHook is an implementation of Hook that can be used for tests.
6// It records all of its function calls. 15// It records all of its function calls.
7type MockHook struct { 16type MockHook struct {
8 sync.Mutex 17 sync.Mutex
9 18
10 PreApplyCalled bool 19 PreApplyCalled bool
11 PreApplyInfo *InstanceInfo 20 PreApplyAddr addrs.AbsResourceInstance
12 PreApplyDiff *InstanceDiff 21 PreApplyGen states.Generation
13 PreApplyState *InstanceState 22 PreApplyAction plans.Action
14 PreApplyReturn HookAction 23 PreApplyPriorState cty.Value
15 PreApplyError error 24 PreApplyPlannedState cty.Value
25 PreApplyReturn HookAction
26 PreApplyError error
16 27
17 PostApplyCalled bool 28 PostApplyCalled bool
18 PostApplyInfo *InstanceInfo 29 PostApplyAddr addrs.AbsResourceInstance
19 PostApplyState *InstanceState 30 PostApplyGen states.Generation
31 PostApplyNewState cty.Value
20 PostApplyError error 32 PostApplyError error
21 PostApplyReturn HookAction 33 PostApplyReturn HookAction
22 PostApplyReturnError error 34 PostApplyReturnError error
23 PostApplyFn func(*InstanceInfo, *InstanceState, error) (HookAction, error) 35 PostApplyFn func(addrs.AbsResourceInstance, states.Generation, cty.Value, error) (HookAction, error)
24 36
25 PreDiffCalled bool 37 PreDiffCalled bool
26 PreDiffInfo *InstanceInfo 38 PreDiffAddr addrs.AbsResourceInstance
27 PreDiffState *InstanceState 39 PreDiffGen states.Generation
28 PreDiffReturn HookAction 40 PreDiffPriorState cty.Value
29 PreDiffError error 41 PreDiffProposedState cty.Value
30 42 PreDiffReturn HookAction
31 PostDiffCalled bool 43 PreDiffError error
32 PostDiffInfo *InstanceInfo 44
33 PostDiffDiff *InstanceDiff 45 PostDiffCalled bool
34 PostDiffReturn HookAction 46 PostDiffAddr addrs.AbsResourceInstance
35 PostDiffError error 47 PostDiffGen states.Generation
36 48 PostDiffAction plans.Action
37 PreProvisionResourceCalled bool 49 PostDiffPriorState cty.Value
38 PreProvisionResourceInfo *InstanceInfo 50 PostDiffPlannedState cty.Value
39 PreProvisionInstanceState *InstanceState 51 PostDiffReturn HookAction
40 PreProvisionResourceReturn HookAction 52 PostDiffError error
41 PreProvisionResourceError error 53
42 54 PreProvisionInstanceCalled bool
43 PostProvisionResourceCalled bool 55 PreProvisionInstanceAddr addrs.AbsResourceInstance
44 PostProvisionResourceInfo *InstanceInfo 56 PreProvisionInstanceState cty.Value
45 PostProvisionInstanceState *InstanceState 57 PreProvisionInstanceReturn HookAction
46 PostProvisionResourceReturn HookAction 58 PreProvisionInstanceError error
47 PostProvisionResourceError error 59
48 60 PostProvisionInstanceCalled bool
49 PreProvisionCalled bool 61 PostProvisionInstanceAddr addrs.AbsResourceInstance
50 PreProvisionInfo *InstanceInfo 62 PostProvisionInstanceState cty.Value
51 PreProvisionProvisionerId string 63 PostProvisionInstanceReturn HookAction
52 PreProvisionReturn HookAction 64 PostProvisionInstanceError error
53 PreProvisionError error 65
54 66 PreProvisionInstanceStepCalled bool
55 PostProvisionCalled bool 67 PreProvisionInstanceStepAddr addrs.AbsResourceInstance
56 PostProvisionInfo *InstanceInfo 68 PreProvisionInstanceStepProvisionerType string
57 PostProvisionProvisionerId string 69 PreProvisionInstanceStepReturn HookAction
58 PostProvisionErrorArg error 70 PreProvisionInstanceStepError error
59 PostProvisionReturn HookAction 71
60 PostProvisionError error 72 PostProvisionInstanceStepCalled bool
61 73 PostProvisionInstanceStepAddr addrs.AbsResourceInstance
62 ProvisionOutputCalled bool 74 PostProvisionInstanceStepProvisionerType string
63 ProvisionOutputInfo *InstanceInfo 75 PostProvisionInstanceStepErrorArg error
64 ProvisionOutputProvisionerId string 76 PostProvisionInstanceStepReturn HookAction
65 ProvisionOutputMessage string 77 PostProvisionInstanceStepError error
66 78
67 PostRefreshCalled bool 79 ProvisionOutputCalled bool
68 PostRefreshInfo *InstanceInfo 80 ProvisionOutputAddr addrs.AbsResourceInstance
69 PostRefreshState *InstanceState 81 ProvisionOutputProvisionerType string
70 PostRefreshReturn HookAction 82 ProvisionOutputMessage string
71 PostRefreshError error 83
72 84 PreRefreshCalled bool
73 PreRefreshCalled bool 85 PreRefreshAddr addrs.AbsResourceInstance
74 PreRefreshInfo *InstanceInfo 86 PreRefreshGen states.Generation
75 PreRefreshState *InstanceState 87 PreRefreshPriorState cty.Value
76 PreRefreshReturn HookAction 88 PreRefreshReturn HookAction
77 PreRefreshError error 89 PreRefreshError error
90
91 PostRefreshCalled bool
92 PostRefreshAddr addrs.AbsResourceInstance
93 PostRefreshGen states.Generation
94 PostRefreshPriorState cty.Value
95 PostRefreshNewState cty.Value
96 PostRefreshReturn HookAction
97 PostRefreshError error
78 98
79 PreImportStateCalled bool 99 PreImportStateCalled bool
80 PreImportStateInfo *InstanceInfo 100 PreImportStateAddr addrs.AbsResourceInstance
81 PreImportStateId string 101 PreImportStateID string
82 PreImportStateReturn HookAction 102 PreImportStateReturn HookAction
83 PreImportStateError error 103 PreImportStateError error
84 104
85 PostImportStateCalled bool 105 PostImportStateCalled bool
86 PostImportStateInfo *InstanceInfo 106 PostImportStateAddr addrs.AbsResourceInstance
87 PostImportStateState []*InstanceState 107 PostImportStateNewStates []providers.ImportedResource
88 PostImportStateReturn HookAction 108 PostImportStateReturn HookAction
89 PostImportStateError error 109 PostImportStateError error
90 110
91 PostStateUpdateCalled bool 111 PostStateUpdateCalled bool
92 PostStateUpdateState *State 112 PostStateUpdateState *states.State
93 PostStateUpdateReturn HookAction 113 PostStateUpdateReturn HookAction
94 PostStateUpdateError error 114 PostStateUpdateError error
95} 115}
96 116
97func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) { 117var _ Hook = (*MockHook)(nil)
118
119func (h *MockHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
98 h.Lock() 120 h.Lock()
99 defer h.Unlock() 121 defer h.Unlock()
100 122
101 h.PreApplyCalled = true 123 h.PreApplyCalled = true
102 h.PreApplyInfo = n 124 h.PreApplyAddr = addr
103 h.PreApplyDiff = d 125 h.PreApplyGen = gen
104 h.PreApplyState = s 126 h.PreApplyAction = action
127 h.PreApplyPriorState = priorState
128 h.PreApplyPlannedState = plannedNewState
105 return h.PreApplyReturn, h.PreApplyError 129 return h.PreApplyReturn, h.PreApplyError
106} 130}
107 131
108func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) { 132func (h *MockHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) {
109 h.Lock() 133 h.Lock()
110 defer h.Unlock() 134 defer h.Unlock()
111 135
112 h.PostApplyCalled = true 136 h.PostApplyCalled = true
113 h.PostApplyInfo = n 137 h.PostApplyAddr = addr
114 h.PostApplyState = s 138 h.PostApplyGen = gen
115 h.PostApplyError = e 139 h.PostApplyNewState = newState
140 h.PostApplyError = err
116 141
117 if h.PostApplyFn != nil { 142 if h.PostApplyFn != nil {
118 return h.PostApplyFn(n, s, e) 143 return h.PostApplyFn(addr, gen, newState, err)
119 } 144 }
120 145
121 return h.PostApplyReturn, h.PostApplyReturnError 146 return h.PostApplyReturn, h.PostApplyReturnError
122} 147}
123 148
124func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) { 149func (h *MockHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) {
125 h.Lock() 150 h.Lock()
126 defer h.Unlock() 151 defer h.Unlock()
127 152
128 h.PreDiffCalled = true 153 h.PreDiffCalled = true
129 h.PreDiffInfo = n 154 h.PreDiffAddr = addr
130 h.PreDiffState = s 155 h.PreDiffGen = gen
156 h.PreDiffPriorState = priorState
157 h.PreDiffProposedState = proposedNewState
131 return h.PreDiffReturn, h.PreDiffError 158 return h.PreDiffReturn, h.PreDiffError
132} 159}
133 160
134func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) { 161func (h *MockHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
135 h.Lock() 162 h.Lock()
136 defer h.Unlock() 163 defer h.Unlock()
137 164
138 h.PostDiffCalled = true 165 h.PostDiffCalled = true
139 h.PostDiffInfo = n 166 h.PostDiffAddr = addr
140 h.PostDiffDiff = d 167 h.PostDiffGen = gen
168 h.PostDiffAction = action
169 h.PostDiffPriorState = priorState
170 h.PostDiffPlannedState = plannedNewState
141 return h.PostDiffReturn, h.PostDiffError 171 return h.PostDiffReturn, h.PostDiffError
142} 172}
143 173
144func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { 174func (h *MockHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
145 h.Lock() 175 h.Lock()
146 defer h.Unlock() 176 defer h.Unlock()
147 177
148 h.PreProvisionResourceCalled = true 178 h.PreProvisionInstanceCalled = true
149 h.PreProvisionResourceInfo = n 179 h.PreProvisionInstanceAddr = addr
150 h.PreProvisionInstanceState = s 180 h.PreProvisionInstanceState = state
151 return h.PreProvisionResourceReturn, h.PreProvisionResourceError 181 return h.PreProvisionInstanceReturn, h.PreProvisionInstanceError
152} 182}
153 183
154func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { 184func (h *MockHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
155 h.Lock() 185 h.Lock()
156 defer h.Unlock() 186 defer h.Unlock()
157 187
158 h.PostProvisionResourceCalled = true 188 h.PostProvisionInstanceCalled = true
159 h.PostProvisionResourceInfo = n 189 h.PostProvisionInstanceAddr = addr
160 h.PostProvisionInstanceState = s 190 h.PostProvisionInstanceState = state
161 return h.PostProvisionResourceReturn, h.PostProvisionResourceError 191 return h.PostProvisionInstanceReturn, h.PostProvisionInstanceError
162} 192}
163 193
164func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) { 194func (h *MockHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) {
165 h.Lock() 195 h.Lock()
166 defer h.Unlock() 196 defer h.Unlock()
167 197
168 h.PreProvisionCalled = true 198 h.PreProvisionInstanceStepCalled = true
169 h.PreProvisionInfo = n 199 h.PreProvisionInstanceStepAddr = addr
170 h.PreProvisionProvisionerId = provId 200 h.PreProvisionInstanceStepProvisionerType = typeName
171 return h.PreProvisionReturn, h.PreProvisionError 201 return h.PreProvisionInstanceStepReturn, h.PreProvisionInstanceStepError
172} 202}
173 203
174func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) { 204func (h *MockHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) {
175 h.Lock() 205 h.Lock()
176 defer h.Unlock() 206 defer h.Unlock()
177 207
178 h.PostProvisionCalled = true 208 h.PostProvisionInstanceStepCalled = true
179 h.PostProvisionInfo = n 209 h.PostProvisionInstanceStepAddr = addr
180 h.PostProvisionProvisionerId = provId 210 h.PostProvisionInstanceStepProvisionerType = typeName
181 h.PostProvisionErrorArg = err 211 h.PostProvisionInstanceStepErrorArg = err
182 return h.PostProvisionReturn, h.PostProvisionError 212 return h.PostProvisionInstanceStepReturn, h.PostProvisionInstanceStepError
183} 213}
184 214
185func (h *MockHook) ProvisionOutput( 215func (h *MockHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) {
186 n *InstanceInfo,
187 provId string,
188 msg string) {
189 h.Lock() 216 h.Lock()
190 defer h.Unlock() 217 defer h.Unlock()
191 218
192 h.ProvisionOutputCalled = true 219 h.ProvisionOutputCalled = true
193 h.ProvisionOutputInfo = n 220 h.ProvisionOutputAddr = addr
194 h.ProvisionOutputProvisionerId = provId 221 h.ProvisionOutputProvisionerType = typeName
195 h.ProvisionOutputMessage = msg 222 h.ProvisionOutputMessage = line
196} 223}
197 224
198func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { 225func (h *MockHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) {
199 h.Lock() 226 h.Lock()
200 defer h.Unlock() 227 defer h.Unlock()
201 228
202 h.PreRefreshCalled = true 229 h.PreRefreshCalled = true
203 h.PreRefreshInfo = n 230 h.PreRefreshAddr = addr
204 h.PreRefreshState = s 231 h.PreRefreshGen = gen
232 h.PreRefreshPriorState = priorState
205 return h.PreRefreshReturn, h.PreRefreshError 233 return h.PreRefreshReturn, h.PreRefreshError
206} 234}
207 235
208func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { 236func (h *MockHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) {
209 h.Lock() 237 h.Lock()
210 defer h.Unlock() 238 defer h.Unlock()
211 239
212 h.PostRefreshCalled = true 240 h.PostRefreshCalled = true
213 h.PostRefreshInfo = n 241 h.PostRefreshAddr = addr
214 h.PostRefreshState = s 242 h.PostRefreshPriorState = priorState
243 h.PostRefreshNewState = newState
215 return h.PostRefreshReturn, h.PostRefreshError 244 return h.PostRefreshReturn, h.PostRefreshError
216} 245}
217 246
218func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) { 247func (h *MockHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) {
219 h.Lock() 248 h.Lock()
220 defer h.Unlock() 249 defer h.Unlock()
221 250
222 h.PreImportStateCalled = true 251 h.PreImportStateCalled = true
223 h.PreImportStateInfo = info 252 h.PreImportStateAddr = addr
224 h.PreImportStateId = id 253 h.PreImportStateID = importID
225 return h.PreImportStateReturn, h.PreImportStateError 254 return h.PreImportStateReturn, h.PreImportStateError
226} 255}
227 256
228func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) { 257func (h *MockHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
229 h.Lock() 258 h.Lock()
230 defer h.Unlock() 259 defer h.Unlock()
231 260
232 h.PostImportStateCalled = true 261 h.PostImportStateCalled = true
233 h.PostImportStateInfo = info 262 h.PostImportStateAddr = addr
234 h.PostImportStateState = s 263 h.PostImportStateNewStates = imported
235 return h.PostImportStateReturn, h.PostImportStateError 264 return h.PostImportStateReturn, h.PostImportStateError
236} 265}
237 266
238func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) { 267func (h *MockHook) PostStateUpdate(new *states.State) (HookAction, error) {
239 h.Lock() 268 h.Lock()
240 defer h.Unlock() 269 defer h.Unlock()
241 270
242 h.PostStateUpdateCalled = true 271 h.PostStateUpdateCalled = true
243 h.PostStateUpdateState = s 272 h.PostStateUpdateState = new
244 return h.PostStateUpdateReturn, h.PostStateUpdateError 273 return h.PostStateUpdateReturn, h.PostStateUpdateError
245} 274}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
index 104d009..811fb33 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
@@ -2,6 +2,13 @@ package terraform
2 2
3import ( 3import (
4 "sync/atomic" 4 "sync/atomic"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/plans"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/states"
5) 12)
6 13
7// stopHook is a private Hook implementation that Terraform uses to 14// stopHook is a private Hook implementation that Terraform uses to
@@ -10,63 +17,69 @@ type stopHook struct {
10 stop uint32 17 stop uint32
11} 18}
12 19
13func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) { 20var _ Hook = (*stopHook)(nil)
21
22func (h *stopHook) PreApply(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
14 return h.hook() 23 return h.hook()
15} 24}
16 25
17func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) { 26func (h *stopHook) PostApply(addr addrs.AbsResourceInstance, gen states.Generation, newState cty.Value, err error) (HookAction, error) {
18 return h.hook() 27 return h.hook()
19} 28}
20 29
21func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) { 30func (h *stopHook) PreDiff(addr addrs.AbsResourceInstance, gen states.Generation, priorState, proposedNewState cty.Value) (HookAction, error) {
22 return h.hook() 31 return h.hook()
23} 32}
24 33
25func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) { 34func (h *stopHook) PostDiff(addr addrs.AbsResourceInstance, gen states.Generation, action plans.Action, priorState, plannedNewState cty.Value) (HookAction, error) {
26 return h.hook() 35 return h.hook()
27} 36}
28 37
29func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { 38func (h *stopHook) PreProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
30 return h.hook() 39 return h.hook()
31} 40}
32 41
33func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) { 42func (h *stopHook) PostProvisionInstance(addr addrs.AbsResourceInstance, state cty.Value) (HookAction, error) {
34 return h.hook() 43 return h.hook()
35} 44}
36 45
37func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) { 46func (h *stopHook) PreProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string) (HookAction, error) {
38 return h.hook() 47 return h.hook()
39} 48}
40 49
41func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) { 50func (h *stopHook) PostProvisionInstanceStep(addr addrs.AbsResourceInstance, typeName string, err error) (HookAction, error) {
42 return h.hook() 51 return h.hook()
43} 52}
44 53
45func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) { 54func (h *stopHook) ProvisionOutput(addr addrs.AbsResourceInstance, typeName string, line string) {
46} 55}
47 56
48func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { 57func (h *stopHook) PreRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value) (HookAction, error) {
49 return h.hook() 58 return h.hook()
50} 59}
51 60
52func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) { 61func (h *stopHook) PostRefresh(addr addrs.AbsResourceInstance, gen states.Generation, priorState cty.Value, newState cty.Value) (HookAction, error) {
53 return h.hook() 62 return h.hook()
54} 63}
55 64
56func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) { 65func (h *stopHook) PreImportState(addr addrs.AbsResourceInstance, importID string) (HookAction, error) {
57 return h.hook() 66 return h.hook()
58} 67}
59 68
60func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) { 69func (h *stopHook) PostImportState(addr addrs.AbsResourceInstance, imported []providers.ImportedResource) (HookAction, error) {
61 return h.hook() 70 return h.hook()
62} 71}
63 72
64func (h *stopHook) PostStateUpdate(*State) (HookAction, error) { 73func (h *stopHook) PostStateUpdate(new *states.State) (HookAction, error) {
65 return h.hook() 74 return h.hook()
66} 75}
67 76
68func (h *stopHook) hook() (HookAction, error) { 77func (h *stopHook) hook() (HookAction, error) {
69 if h.Stopped() { 78 if h.Stopped() {
79 // FIXME: This should really return an error since stopping partway
80 // through is not a successful run-to-completion, but we'll need to
81 // introduce that cautiously since existing automation solutions may
82 // be depending on this behavior.
70 return HookActionHalt, nil 83 return HookActionHalt, nil
71 } 84 }
72 85
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
index b8e7d1f..95b7a98 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
@@ -4,6 +4,16 @@ package terraform
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[TypeInvalid-0]
12 _ = x[TypePrimary-1]
13 _ = x[TypeTainted-2]
14 _ = x[TypeDeposed-3]
15}
16
7const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" 17const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
8 18
9var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} 19var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
index 4f4e178..26c1857 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
@@ -45,65 +45,7 @@ type InterpolationScope struct {
45func (i *Interpolater) Values( 45func (i *Interpolater) Values(
46 scope *InterpolationScope, 46 scope *InterpolationScope,
47 vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) { 47 vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
48 if scope == nil { 48 return nil, fmt.Errorf("type Interpolator is no longer supported; use the evaluator API instead")
49 scope = &InterpolationScope{}
50 }
51
52 result := make(map[string]ast.Variable, len(vars))
53
54 // Copy the default variables
55 if i.Module != nil && scope != nil {
56 mod := i.Module
57 if len(scope.Path) > 1 {
58 mod = i.Module.Child(scope.Path[1:])
59 }
60 for _, v := range mod.Config().Variables {
61 // Set default variables
62 if v.Default == nil {
63 continue
64 }
65
66 n := fmt.Sprintf("var.%s", v.Name)
67 variable, err := hil.InterfaceToVariable(v.Default)
68 if err != nil {
69 return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
70 }
71
72 result[n] = variable
73 }
74 }
75
76 for n, rawV := range vars {
77 var err error
78 switch v := rawV.(type) {
79 case *config.CountVariable:
80 err = i.valueCountVar(scope, n, v, result)
81 case *config.ModuleVariable:
82 err = i.valueModuleVar(scope, n, v, result)
83 case *config.PathVariable:
84 err = i.valuePathVar(scope, n, v, result)
85 case *config.ResourceVariable:
86 err = i.valueResourceVar(scope, n, v, result)
87 case *config.SelfVariable:
88 err = i.valueSelfVar(scope, n, v, result)
89 case *config.SimpleVariable:
90 err = i.valueSimpleVar(scope, n, v, result)
91 case *config.TerraformVariable:
92 err = i.valueTerraformVar(scope, n, v, result)
93 case *config.LocalVariable:
94 err = i.valueLocalVar(scope, n, v, result)
95 case *config.UserVariable:
96 err = i.valueUserVar(scope, n, v, result)
97 default:
98 err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
99 }
100
101 if err != nil {
102 return nil, err
103 }
104 }
105
106 return result, nil
107} 49}
108 50
109func (i *Interpolater) valueCountVar( 51func (i *Interpolater) valueCountVar(
@@ -153,7 +95,7 @@ func (i *Interpolater) valueModuleVar(
153 defer i.StateLock.RUnlock() 95 defer i.StateLock.RUnlock()
154 96
155 // Get the module where we're looking for the value 97 // Get the module where we're looking for the value
156 mod := i.State.ModuleByPath(path) 98 mod := i.State.ModuleByPath(normalizeModulePath(path))
157 if mod == nil { 99 if mod == nil {
158 // If the module doesn't exist, then we can return an empty string. 100 // If the module doesn't exist, then we can return an empty string.
159 // This happens usually only in Refresh() when we haven't populated 101 // This happens usually only in Refresh() when we haven't populated
@@ -257,13 +199,13 @@ func (i *Interpolater) valueResourceVar(
257 } 199 }
258 200
259 if variable == nil { 201 if variable == nil {
260 // During the input walk we tolerate missing variables because 202 // During the refresh walk we tolerate missing variables because
261 // we haven't yet had a chance to refresh state, so dynamic data may 203 // we haven't yet had a chance to refresh state, so dynamic data may
262 // not yet be complete. 204 // not yet be complete.
263 // If it truly is missing, we'll catch it on a later walk. 205 // If it truly is missing, we'll catch it on a later walk.
264 // This applies only to graph nodes that interpolate during the 206 // This applies only to graph nodes that interpolate during the
265 // config walk, e.g. providers. 207 // refresh walk, e.g. providers.
266 if i.Operation == walkInput || i.Operation == walkRefresh { 208 if i.Operation == walkRefresh {
267 result[n] = unknownVariable() 209 result[n] = unknownVariable()
268 return nil 210 return nil
269 } 211 }
@@ -365,7 +307,7 @@ func (i *Interpolater) valueLocalVar(
365 } 307 }
366 308
367 // Get the relevant module 309 // Get the relevant module
368 module := i.State.ModuleByPath(scope.Path) 310 module := i.State.ModuleByPath(normalizeModulePath(scope.Path))
369 if module == nil { 311 if module == nil {
370 result[n] = unknownVariable() 312 result[n] = unknownVariable()
371 return nil 313 return nil
@@ -584,10 +526,7 @@ MISSING:
584 // 526 //
585 // For a Destroy, we're also fine with computed values, since our goal is 527 // For a Destroy, we're also fine with computed values, since our goal is
586 // only to get destroy nodes for existing resources. 528 // only to get destroy nodes for existing resources.
587 // 529 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy {
588 // For an input walk, computed values are okay to return because we're only
589 // looking for missing variables to prompt the user for.
590 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
591 return &unknownVariable, nil 530 return &unknownVariable, nil
592 } 531 }
593 532
@@ -607,13 +546,6 @@ func (i *Interpolater) computeResourceMultiVariable(
607 546
608 unknownVariable := unknownVariable() 547 unknownVariable := unknownVariable()
609 548
610 // If we're only looking for input, we don't need to expand a
611 // multi-variable. This prevents us from encountering things that should be
612 // known but aren't because the state has yet to be refreshed.
613 if i.Operation == walkInput {
614 return &unknownVariable, nil
615 }
616
617 // Get the information about this resource variable, and verify 549 // Get the information about this resource variable, and verify
618 // that it exists and such. 550 // that it exists and such.
619 module, cr, err := i.resourceVariableInfo(scope, v) 551 module, cr, err := i.resourceVariableInfo(scope, v)
@@ -695,7 +627,7 @@ func (i *Interpolater) computeResourceMultiVariable(
695 // 627 //
696 // For an input walk, computed values are okay to return because we're only 628 // For an input walk, computed values are okay to return because we're only
697 // looking for missing variables to prompt the user for. 629 // looking for missing variables to prompt the user for.
698 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput { 630 if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy {
699 return &unknownVariable, nil 631 return &unknownVariable, nil
700 } 632 }
701 633
@@ -776,7 +708,7 @@ func (i *Interpolater) resourceVariableInfo(
776 } 708 }
777 709
778 // Get the relevant module 710 // Get the relevant module
779 module := i.State.ModuleByPath(scope.Path) 711 module := i.State.ModuleByPath(normalizeModulePath(scope.Path))
780 return module, cr, nil 712 return module, cr, nil
781} 713}
782 714
diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
index 4594cb6..66a68c7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go
@@ -1,84 +1,135 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config" 4 version "github.com/hashicorp/go-version"
5 "github.com/hashicorp/terraform/config/module" 5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/moduledeps" 8 "github.com/hashicorp/terraform/moduledeps"
7 "github.com/hashicorp/terraform/plugin/discovery" 9 "github.com/hashicorp/terraform/plugin/discovery"
10 "github.com/hashicorp/terraform/states"
8) 11)
9 12
10// ModuleTreeDependencies returns the dependencies of the tree of modules 13// ConfigTreeDependencies returns the dependencies of the tree of modules
11// described by the given configuration tree and state. 14// described by the given configuration and state.
12// 15//
13// Both configuration and state are required because there can be resources 16// Both configuration and state are required because there can be resources
14// implied by instances in the state that no longer exist in config. 17// implied by instances in the state that no longer exist in config.
15// 18func ConfigTreeDependencies(root *configs.Config, state *states.State) *moduledeps.Module {
16// This function will panic if any invalid version constraint strings are
17// present in the configuration. This is guaranteed not to happen for any
18// configuration that has passed a call to Config.Validate().
19func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module {
20 // First we walk the configuration tree to build the overall structure 19 // First we walk the configuration tree to build the overall structure
21 // and capture the explicit/implicit/inherited provider dependencies. 20 // and capture the explicit/implicit/inherited provider dependencies.
22 deps := moduleTreeConfigDependencies(root, nil) 21 deps := configTreeConfigDependencies(root, nil)
23 22
24 // Next we walk over the resources in the state to catch any additional 23 // Next we walk over the resources in the state to catch any additional
25 // dependencies created by existing resources that are no longer in config. 24 // dependencies created by existing resources that are no longer in config.
26 // Most things we find in state will already be present in 'deps', but 25 // Most things we find in state will already be present in 'deps', but
27 // we're interested in the rare thing that isn't. 26 // we're interested in the rare thing that isn't.
28 moduleTreeMergeStateDependencies(deps, state) 27 configTreeMergeStateDependencies(deps, state)
29 28
30 return deps 29 return deps
31} 30}
32 31
33func moduleTreeConfigDependencies(root *module.Tree, inheritProviders map[string]*config.ProviderConfig) *moduledeps.Module { 32func configTreeConfigDependencies(root *configs.Config, inheritProviders map[string]*configs.Provider) *moduledeps.Module {
34 if root == nil { 33 if root == nil {
35 // If no config is provided, we'll make a synthetic root. 34 // If no config is provided, we'll make a synthetic root.
36 // This isn't necessarily correct if we're called with a nil that 35 // This isn't necessarily correct if we're called with a nil that
37 // *isn't* at the root, but in practice that can never happen. 36 // *isn't* at the root, but in practice that can never happen.
38 return &moduledeps.Module{ 37 return &moduledeps.Module{
39 Name: "root", 38 Name: "root",
39 Providers: make(moduledeps.Providers),
40 } 40 }
41 } 41 }
42 42
43 name := "root"
44 if len(root.Path) != 0 {
45 name = root.Path[len(root.Path)-1]
46 }
47
43 ret := &moduledeps.Module{ 48 ret := &moduledeps.Module{
44 Name: root.Name(), 49 Name: name,
45 } 50 }
46 51
47 cfg := root.Config() 52 module := root.Module
48 providerConfigs := cfg.ProviderConfigsByFullName()
49 53
50 // Provider dependencies 54 // Provider dependencies
51 { 55 {
52 providers := make(moduledeps.Providers, len(providerConfigs)) 56 providers := make(moduledeps.Providers)
53 57
54 // Any providerConfigs elements are *explicit* provider dependencies, 58 // The main way to declare a provider dependency is explicitly inside
55 // which is the only situation where the user might provide an actual 59 // the "terraform" block, which allows declaring a requirement without
56 // version constraint. We'll take care of these first. 60 // also creating a configuration.
57 for fullName, pCfg := range providerConfigs { 61 for fullName, constraints := range module.ProviderRequirements {
58 inst := moduledeps.ProviderInstance(fullName) 62 inst := moduledeps.ProviderInstance(fullName)
59 versionSet := discovery.AllVersions 63
60 if pCfg.Version != "" { 64 // The handling here is a bit fiddly because the moduledeps package
61 versionSet = discovery.ConstraintStr(pCfg.Version).MustParse() 65 // was designed around the legacy (pre-0.12) configuration model
66 // and hasn't yet been revised to handle the new model. As a result,
67 // we need to do some translation here.
68 // FIXME: Eventually we should adjust the underlying model so we
69 // can also retain the source location of each constraint, for
70 // more informative output from the "terraform providers" command.
71 var rawConstraints version.Constraints
72 for _, constraint := range constraints {
73 rawConstraints = append(rawConstraints, constraint.Required...)
62 } 74 }
75 discoConstraints := discovery.NewConstraints(rawConstraints)
76
63 providers[inst] = moduledeps.ProviderDependency{ 77 providers[inst] = moduledeps.ProviderDependency{
64 Constraints: versionSet, 78 Constraints: discoConstraints,
65 Reason: moduledeps.ProviderDependencyExplicit, 79 Reason: moduledeps.ProviderDependencyExplicit,
66 } 80 }
67 } 81 }
68 82
83 // Provider configurations can also include version constraints,
84 // allowing for more terse declaration in situations where both a
85 // configuration and a constraint are defined in the same module.
86 for fullName, pCfg := range module.ProviderConfigs {
87 inst := moduledeps.ProviderInstance(fullName)
88 discoConstraints := discovery.AllVersions
89 if pCfg.Version.Required != nil {
90 discoConstraints = discovery.NewConstraints(pCfg.Version.Required)
91 }
92 if existing, exists := providers[inst]; exists {
93 existing.Constraints = existing.Constraints.Append(discoConstraints)
94 } else {
95 providers[inst] = moduledeps.ProviderDependency{
96 Constraints: discoConstraints,
97 Reason: moduledeps.ProviderDependencyExplicit,
98 }
99 }
100 }
101
69 // Each resource in the configuration creates an *implicit* provider 102 // Each resource in the configuration creates an *implicit* provider
70 // dependency, though we'll only record it if there isn't already 103 // dependency, though we'll only record it if there isn't already
71 // an explicit dependency on the same provider. 104 // an explicit dependency on the same provider.
72 for _, rc := range cfg.Resources { 105 for _, rc := range module.ManagedResources {
73 fullName := rc.ProviderFullName() 106 addr := rc.ProviderConfigAddr()
74 inst := moduledeps.ProviderInstance(fullName) 107 inst := moduledeps.ProviderInstance(addr.StringCompact())
108 if _, exists := providers[inst]; exists {
109 // Explicit dependency already present
110 continue
111 }
112
113 reason := moduledeps.ProviderDependencyImplicit
114 if _, inherited := inheritProviders[addr.StringCompact()]; inherited {
115 reason = moduledeps.ProviderDependencyInherited
116 }
117
118 providers[inst] = moduledeps.ProviderDependency{
119 Constraints: discovery.AllVersions,
120 Reason: reason,
121 }
122 }
123 for _, rc := range module.DataResources {
124 addr := rc.ProviderConfigAddr()
125 inst := moduledeps.ProviderInstance(addr.StringCompact())
75 if _, exists := providers[inst]; exists { 126 if _, exists := providers[inst]; exists {
76 // Explicit dependency already present 127 // Explicit dependency already present
77 continue 128 continue
78 } 129 }
79 130
80 reason := moduledeps.ProviderDependencyImplicit 131 reason := moduledeps.ProviderDependencyImplicit
81 if _, inherited := inheritProviders[fullName]; inherited { 132 if _, inherited := inheritProviders[addr.String()]; inherited {
82 reason = moduledeps.ProviderDependencyInherited 133 reason = moduledeps.ProviderDependencyInherited
83 } 134 }
84 135
@@ -91,31 +142,31 @@ func moduleTreeConfigDependencies(root *module.Tree, inheritProviders map[string
91 ret.Providers = providers 142 ret.Providers = providers
92 } 143 }
93 144
94 childInherit := make(map[string]*config.ProviderConfig) 145 childInherit := make(map[string]*configs.Provider)
95 for k, v := range inheritProviders { 146 for k, v := range inheritProviders {
96 childInherit[k] = v 147 childInherit[k] = v
97 } 148 }
98 for k, v := range providerConfigs { 149 for k, v := range module.ProviderConfigs {
99 childInherit[k] = v 150 childInherit[k] = v
100 } 151 }
101 for _, c := range root.Children() { 152 for _, c := range root.Children {
102 ret.Children = append(ret.Children, moduleTreeConfigDependencies(c, childInherit)) 153 ret.Children = append(ret.Children, configTreeConfigDependencies(c, childInherit))
103 } 154 }
104 155
105 return ret 156 return ret
106} 157}
107 158
108func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) { 159func configTreeMergeStateDependencies(root *moduledeps.Module, state *states.State) {
109 if state == nil { 160 if state == nil {
110 return 161 return
111 } 162 }
112 163
113 findModule := func(path []string) *moduledeps.Module { 164 findModule := func(path addrs.ModuleInstance) *moduledeps.Module {
114 module := root 165 module := root
115 for _, name := range path[1:] { // skip initial "root" 166 for _, step := range path {
116 var next *moduledeps.Module 167 var next *moduledeps.Module
117 for _, cm := range module.Children { 168 for _, cm := range module.Children {
118 if cm.Name == name { 169 if cm.Name == step.Name {
119 next = cm 170 next = cm
120 break 171 break
121 } 172 }
@@ -124,7 +175,8 @@ func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
124 if next == nil { 175 if next == nil {
125 // If we didn't find a next node, we'll need to make one 176 // If we didn't find a next node, we'll need to make one
126 next = &moduledeps.Module{ 177 next = &moduledeps.Module{
127 Name: name, 178 Name: step.Name,
179 Providers: make(moduledeps.Providers),
128 } 180 }
129 module.Children = append(module.Children, next) 181 module.Children = append(module.Children, next)
130 } 182 }
@@ -135,15 +187,11 @@ func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
135 } 187 }
136 188
137 for _, ms := range state.Modules { 189 for _, ms := range state.Modules {
138 module := findModule(ms.Path) 190 module := findModule(ms.Addr)
139 191
140 for _, is := range ms.Resources { 192 for _, rs := range ms.Resources {
141 fullName := config.ResourceProviderFullName(is.Type, is.Provider) 193 inst := moduledeps.ProviderInstance(rs.ProviderConfig.ProviderConfig.StringCompact())
142 inst := moduledeps.ProviderInstance(fullName)
143 if _, exists := module.Providers[inst]; !exists { 194 if _, exists := module.Providers[inst]; !exists {
144 if module.Providers == nil {
145 module.Providers = make(moduledeps.Providers)
146 }
147 module.Providers[inst] = moduledeps.ProviderDependency{ 195 module.Providers[inst] = moduledeps.ProviderDependency{
148 Constraints: discovery.AllVersions, 196 Constraints: discovery.AllVersions,
149 Reason: moduledeps.ProviderDependencyFromState, 197 Reason: moduledeps.ProviderDependencyFromState,
@@ -151,5 +199,4 @@ func moduleTreeMergeStateDependencies(root *moduledeps.Module, state *State) {
151 } 199 }
152 } 200 }
153 } 201 }
154
155} 202}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
index bd32c79..e495203 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
@@ -1,14 +1,22 @@
1package terraform 1package terraform
2 2
3// NodeCountBoundary fixes any "count boundarie" in the state: resources 3import (
4// that are named "foo.0" when they should be named "foo" 4 "github.com/hashicorp/terraform/configs"
5type NodeCountBoundary struct{} 5)
6
7// NodeCountBoundary fixes up any transitions between "each modes" in objects
8// saved in state, such as switching from NoEach to EachInt.
9type NodeCountBoundary struct {
10 Config *configs.Config
11}
6 12
7func (n *NodeCountBoundary) Name() string { 13func (n *NodeCountBoundary) Name() string {
8 return "meta.count-boundary (count boundary fixup)" 14 return "meta.count-boundary (EachMode fixup)"
9} 15}
10 16
11// GraphNodeEvalable 17// GraphNodeEvalable
12func (n *NodeCountBoundary) EvalTree() EvalNode { 18func (n *NodeCountBoundary) EvalTree() EvalNode {
13 return &EvalCountFixZeroOneBoundaryGlobal{} 19 return &EvalCountFixZeroOneBoundaryGlobal{
20 Config: n.Config,
21 }
14} 22}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
index e32cea8..6ba3990 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
@@ -1,22 +1,40 @@
1package terraform 1package terraform
2 2
3// NodeDestroyableDataResource represents a resource that is "plannable": 3import (
4// it is ready to be planned in order to create a diff. 4 "github.com/hashicorp/terraform/providers"
5type NodeDestroyableDataResource struct { 5 "github.com/hashicorp/terraform/states"
6 *NodeAbstractResource 6)
7
8// NodeDestroyableDataResourceInstance represents a resource that is "destroyable":
9// it is ready to be destroyed.
10type NodeDestroyableDataResourceInstance struct {
11 *NodeAbstractResourceInstance
7} 12}
8 13
9// GraphNodeEvalable 14// GraphNodeEvalable
10func (n *NodeDestroyableDataResource) EvalTree() EvalNode { 15func (n *NodeDestroyableDataResourceInstance) EvalTree() EvalNode {
11 addr := n.NodeAbstractResource.Addr 16 addr := n.ResourceInstanceAddr()
12 17
13 // stateId is the ID to put into the state 18 var providerSchema *ProviderSchema
14 stateId := addr.stateId() 19 // We don't need the provider, but we're calling EvalGetProvider to load the
20 // schema.
21 var provider providers.Interface
15 22
16 // Just destroy it. 23 // Just destroy it.
17 var state *InstanceState 24 var state *states.ResourceInstanceObject
18 return &EvalWriteState{ 25 return &EvalSequence{
19 Name: stateId, 26 Nodes: []EvalNode{
20 State: &state, // state is nil here 27 &EvalGetProvider{
28 Addr: n.ResolvedProvider,
29 Output: &provider,
30 Schema: &providerSchema,
31 },
32 &EvalWriteState{
33 Addr: addr.Resource,
34 State: &state,
35 ProviderAddr: n.ResolvedProvider,
36 ProviderSchema: &providerSchema,
37 },
38 },
21 } 39 }
22} 40}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
index d5ca641..ab82163 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
@@ -2,46 +2,71 @@ package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/dag" 4 "github.com/hashicorp/terraform/dag"
5 "github.com/hashicorp/terraform/plans"
6 "github.com/hashicorp/terraform/providers"
7 "github.com/hashicorp/terraform/states"
8 "github.com/hashicorp/terraform/tfdiags"
9 "github.com/zclconf/go-cty/cty"
5) 10)
6 11
7// NodeRefreshableDataResource represents a resource that is "plannable": 12// NodeRefreshableDataResource represents a resource that is "refreshable".
8// it is ready to be planned in order to create a diff.
9type NodeRefreshableDataResource struct { 13type NodeRefreshableDataResource struct {
10 *NodeAbstractCountResource 14 *NodeAbstractResource
11} 15}
12 16
17var (
18 _ GraphNodeSubPath = (*NodeRefreshableDataResource)(nil)
19 _ GraphNodeDynamicExpandable = (*NodeRefreshableDataResource)(nil)
20 _ GraphNodeReferenceable = (*NodeRefreshableDataResource)(nil)
21 _ GraphNodeReferencer = (*NodeRefreshableDataResource)(nil)
22 _ GraphNodeResource = (*NodeRefreshableDataResource)(nil)
23 _ GraphNodeAttachResourceConfig = (*NodeRefreshableDataResource)(nil)
24)
25
13// GraphNodeDynamicExpandable 26// GraphNodeDynamicExpandable
14func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) { 27func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
15 // Grab the state which we read 28 var diags tfdiags.Diagnostics
16 state, lock := ctx.State() 29
17 lock.RLock() 30 count, countKnown, countDiags := evaluateResourceCountExpressionKnown(n.Config.Count, ctx)
18 defer lock.RUnlock() 31 diags = diags.Append(countDiags)
19 32 if countDiags.HasErrors() {
20 // Expand the resource count which must be available by now from EvalTree 33 return nil, diags.Err()
21 count, err := n.Config.Count() 34 }
22 if err != nil { 35 if !countKnown {
23 return nil, err 36 // If the count isn't known yet, we'll skip refreshing and try expansion
37 // again during the plan walk.
38 return nil, nil
24 } 39 }
25 40
41 // Next we need to potentially rename an instance address in the state
42 // if we're transitioning whether "count" is set at all.
43 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
44
45 // Our graph transformers require access to the full state, so we'll
46 // temporarily lock it while we work on this.
47 state := ctx.State().Lock()
48 defer ctx.State().Unlock()
49
26 // The concrete resource factory we'll use 50 // The concrete resource factory we'll use
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 51 concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
28 // Add the config and state since we don't do that via transforms 52 // Add the config and state since we don't do that via transforms
29 a.Config = n.Config 53 a.Config = n.Config
30 a.ResolvedProvider = n.ResolvedProvider 54 a.ResolvedProvider = n.ResolvedProvider
31 55
32 return &NodeRefreshableDataResourceInstance{ 56 return &NodeRefreshableDataResourceInstance{
33 NodeAbstractResource: a, 57 NodeAbstractResourceInstance: a,
34 } 58 }
35 } 59 }
36 60
37 // We also need a destroyable resource for orphans that are a result of a 61 // We also need a destroyable resource for orphans that are a result of a
38 // scaled-in count. 62 // scaled-in count.
39 concreteResourceDestroyable := func(a *NodeAbstractResource) dag.Vertex { 63 concreteResourceDestroyable := func(a *NodeAbstractResourceInstance) dag.Vertex {
40 // Add the config since we don't do that via transforms 64 // Add the config and provider since we don't do that via transforms
41 a.Config = n.Config 65 a.Config = n.Config
66 a.ResolvedProvider = n.ResolvedProvider
42 67
43 return &NodeDestroyableDataResource{ 68 return &NodeDestroyableDataResourceInstance{
44 NodeAbstractResource: a, 69 NodeAbstractResourceInstance: a,
45 } 70 }
46 } 71 }
47 72
@@ -50,6 +75,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
50 // Expand the count. 75 // Expand the count.
51 &ResourceCountTransformer{ 76 &ResourceCountTransformer{
52 Concrete: concreteResource, 77 Concrete: concreteResource,
78 Schema: n.Schema,
53 Count: count, 79 Count: count,
54 Addr: n.ResourceAddr(), 80 Addr: n.ResourceAddr(),
55 }, 81 },
@@ -67,7 +93,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
67 &AttachStateTransformer{State: state}, 93 &AttachStateTransformer{State: state},
68 94
69 // Targeting 95 // Targeting
70 &TargetsTransformer{ParsedTargets: n.Targets}, 96 &TargetsTransformer{Targets: n.Targets},
71 97
72 // Connect references so ordering is correct 98 // Connect references so ordering is correct
73 &ReferenceTransformer{}, 99 &ReferenceTransformer{},
@@ -83,139 +109,118 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
83 Name: "NodeRefreshableDataResource", 109 Name: "NodeRefreshableDataResource",
84 } 110 }
85 111
86 return b.Build(ctx.Path()) 112 graph, diags := b.Build(ctx.Path())
113 return graph, diags.ErrWithWarnings()
87} 114}
88 115
89// NodeRefreshableDataResourceInstance represents a _single_ resource instance 116// NodeRefreshableDataResourceInstance represents a single resource instance
90// that is refreshable. 117// that is refreshable.
91type NodeRefreshableDataResourceInstance struct { 118type NodeRefreshableDataResourceInstance struct {
92 *NodeAbstractResource 119 *NodeAbstractResourceInstance
93} 120}
94 121
95// GraphNodeEvalable 122// GraphNodeEvalable
96func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode { 123func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
97 addr := n.NodeAbstractResource.Addr 124 addr := n.ResourceInstanceAddr()
98
99 // stateId is the ID to put into the state
100 stateId := addr.stateId()
101
102 // Build the instance info. More of this will be populated during eval
103 info := &InstanceInfo{
104 Id: stateId,
105 Type: addr.Type,
106 }
107
108 // Get the state if we have it, if not we build it
109 rs := n.ResourceState
110 if rs == nil {
111 rs = &ResourceState{
112 Provider: n.ResolvedProvider,
113 }
114 }
115 125
116 // If the config isn't empty we update the state 126 // These variables are the state for the eval sequence below, and are
117 if n.Config != nil { 127 // updated through pointers.
118 rs = &ResourceState{ 128 var provider providers.Interface
119 Type: n.Config.Type, 129 var providerSchema *ProviderSchema
120 Provider: n.Config.Provider, 130 var change *plans.ResourceInstanceChange
121 Dependencies: n.StateReferences(), 131 var state *states.ResourceInstanceObject
122 } 132 var configVal cty.Value
123 }
124
125 // Build the resource for eval
126 resource := &Resource{
127 Name: addr.Name,
128 Type: addr.Type,
129 CountIndex: addr.Index,
130 }
131 if resource.CountIndex < 0 {
132 resource.CountIndex = 0
133 }
134
135 // Declare a bunch of variables that are used for state during
136 // evaluation. Most of this are written to by-address below.
137 var config *ResourceConfig
138 var diff *InstanceDiff
139 var provider ResourceProvider
140 var state *InstanceState
141 133
142 return &EvalSequence{ 134 return &EvalSequence{
143 Nodes: []EvalNode{ 135 Nodes: []EvalNode{
136 &EvalGetProvider{
137 Addr: n.ResolvedProvider,
138 Output: &provider,
139 Schema: &providerSchema,
140 },
141
144 // Always destroy the existing state first, since we must 142 // Always destroy the existing state first, since we must
145 // make sure that values from a previous read will not 143 // make sure that values from a previous read will not
146 // get interpolated if we end up needing to defer our 144 // get interpolated if we end up needing to defer our
147 // loading until apply time. 145 // loading until apply time.
148 &EvalWriteState{ 146 &EvalWriteState{
149 Name: stateId, 147 Addr: addr.Resource,
150 ResourceType: rs.Type, 148 ProviderAddr: n.ResolvedProvider,
151 Provider: n.ResolvedProvider, 149 State: &state, // a pointer to nil, here
152 Dependencies: rs.Dependencies, 150 ProviderSchema: &providerSchema,
153 State: &state, // state is nil here
154 }, 151 },
155 152
156 &EvalInterpolate{
157 Config: n.Config.RawConfig.Copy(),
158 Resource: resource,
159 Output: &config,
160 },
161
162 // The rest of this pass can proceed only if there are no
163 // computed values in our config.
164 // (If there are, we'll deal with this during the plan and
165 // apply phases.)
166 &EvalIf{ 153 &EvalIf{
167 If: func(ctx EvalContext) (bool, error) { 154 If: func(ctx EvalContext) (bool, error) {
168 if config.ComputedKeys != nil && len(config.ComputedKeys) > 0 {
169 return true, EvalEarlyExitError{}
170 }
171
172 // If the config explicitly has a depends_on for this 155 // If the config explicitly has a depends_on for this
173 // data source, assume the intention is to prevent 156 // data source, assume the intention is to prevent
174 // refreshing ahead of that dependency. 157 // refreshing ahead of that dependency, and therefore
158 // we need to deal with this resource during the apply
159 // phase..
175 if len(n.Config.DependsOn) > 0 { 160 if len(n.Config.DependsOn) > 0 {
176 return true, EvalEarlyExitError{} 161 return true, EvalEarlyExitError{}
177 } 162 }
178 163
179 return true, nil 164 return true, nil
180 }, 165 },
181
182 Then: EvalNoop{}, 166 Then: EvalNoop{},
183 }, 167 },
184 168
185 // The remainder of this pass is the same as running 169 // EvalReadData will _attempt_ to read the data source, but may
186 // a "plan" pass immediately followed by an "apply" pass, 170 // generate an incomplete planned object if the configuration
187 // populating the state early so it'll be available to 171 // includes values that won't be known until apply.
188 // provider configurations that need this data during 172 &EvalReadData{
189 // refresh/plan. 173 Addr: addr.Resource,
190 &EvalGetProvider{ 174 Config: n.Config,
191 Name: n.ResolvedProvider, 175 Dependencies: n.StateReferences(),
192 Output: &provider, 176 Provider: &provider,
193 }, 177 ProviderAddr: n.ResolvedProvider,
194 178 ProviderSchema: &providerSchema,
195 &EvalReadDataDiff{ 179 OutputChange: &change,
196 Info: info, 180 OutputConfigValue: &configVal,
197 Config: &config, 181 OutputState: &state,
198 Provider: &provider,
199 Output: &diff,
200 OutputState: &state,
201 },
202
203 &EvalReadDataApply{
204 Info: info,
205 Diff: &diff,
206 Provider: &provider,
207 Output: &state,
208 }, 182 },
209 183
210 &EvalWriteState{ 184 &EvalIf{
211 Name: stateId, 185 If: func(ctx EvalContext) (bool, error) {
212 ResourceType: rs.Type, 186 return (*state).Status != states.ObjectPlanned, nil
213 Provider: n.ResolvedProvider, 187 },
214 Dependencies: rs.Dependencies, 188 Then: &EvalSequence{
215 State: &state, 189 Nodes: []EvalNode{
190 &EvalWriteState{
191 Addr: addr.Resource,
192 ProviderAddr: n.ResolvedProvider,
193 State: &state,
194 ProviderSchema: &providerSchema,
195 },
196 &EvalUpdateStateHook{},
197 },
198 },
199 Else: &EvalSequence{
200 // We can't deal with this yet, so we'll repeat this step
201 // during the plan walk to produce a planned change to read
202 // this during the apply walk. However, we do still need to
203 // save the generated change and partial state so that
204 // results from it can be included in other data resources
205 // or provider configurations during the refresh walk.
206 // (The planned object we save in the state here will be
207 // pruned out at the end of the refresh walk, returning
208 // it back to being unset again for subsequent walks.)
209 Nodes: []EvalNode{
210 &EvalWriteDiff{
211 Addr: addr.Resource,
212 Change: &change,
213 ProviderSchema: &providerSchema,
214 },
215 &EvalWriteState{
216 Addr: addr.Resource,
217 ProviderAddr: n.ResolvedProvider,
218 State: &state,
219 ProviderSchema: &providerSchema,
220 },
221 },
222 },
216 }, 223 },
217
218 &EvalUpdateStateHook{},
219 }, 224 },
220 } 225 }
221} 226}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_local.go b/vendor/github.com/hashicorp/terraform/terraform/node_local.go
index d387222..591eb30 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_local.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_local.go
@@ -1,10 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/addrs"
5 "strings" 5 "github.com/hashicorp/terraform/configs"
6 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/lang"
8) 8)
9 9
10// NodeLocal represents a named local value in a particular module. 10// NodeLocal represents a named local value in a particular module.
@@ -12,22 +12,26 @@ import (
12// Local value nodes only have one operation, common to all walk types: 12// Local value nodes only have one operation, common to all walk types:
13// evaluate the result and place it in state. 13// evaluate the result and place it in state.
14type NodeLocal struct { 14type NodeLocal struct {
15 PathValue []string 15 Addr addrs.AbsLocalValue
16 Config *config.Local 16 Config *configs.Local
17} 17}
18 18
19func (n *NodeLocal) Name() string { 19var (
20 result := fmt.Sprintf("local.%s", n.Config.Name) 20 _ GraphNodeSubPath = (*NodeLocal)(nil)
21 if len(n.PathValue) > 1 { 21 _ RemovableIfNotTargeted = (*NodeLocal)(nil)
22 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 22 _ GraphNodeReferenceable = (*NodeLocal)(nil)
23 } 23 _ GraphNodeReferencer = (*NodeLocal)(nil)
24 _ GraphNodeEvalable = (*NodeLocal)(nil)
25 _ dag.GraphNodeDotter = (*NodeLocal)(nil)
26)
24 27
25 return result 28func (n *NodeLocal) Name() string {
29 return n.Addr.String()
26} 30}
27 31
28// GraphNodeSubPath 32// GraphNodeSubPath
29func (n *NodeLocal) Path() []string { 33func (n *NodeLocal) Path() addrs.ModuleInstance {
30 return n.PathValue 34 return n.Addr.Module
31} 35}
32 36
33// RemovableIfNotTargeted 37// RemovableIfNotTargeted
@@ -36,31 +40,31 @@ func (n *NodeLocal) RemoveIfNotTargeted() bool {
36} 40}
37 41
38// GraphNodeReferenceable 42// GraphNodeReferenceable
39func (n *NodeLocal) ReferenceableName() []string { 43func (n *NodeLocal) ReferenceableAddrs() []addrs.Referenceable {
40 name := fmt.Sprintf("local.%s", n.Config.Name) 44 return []addrs.Referenceable{n.Addr.LocalValue}
41 return []string{name}
42} 45}
43 46
44// GraphNodeReferencer 47// GraphNodeReferencer
45func (n *NodeLocal) References() []string { 48func (n *NodeLocal) References() []*addrs.Reference {
46 var result []string 49 refs, _ := lang.ReferencesInExpr(n.Config.Expr)
47 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) 50 return appendResourceDestroyReferences(refs)
48 for _, v := range result {
49 split := strings.Split(v, "/")
50 for i, s := range split {
51 split[i] = s + ".destroy"
52 }
53
54 result = append(result, strings.Join(split, "/"))
55 }
56
57 return result
58} 51}
59 52
60// GraphNodeEvalable 53// GraphNodeEvalable
61func (n *NodeLocal) EvalTree() EvalNode { 54func (n *NodeLocal) EvalTree() EvalNode {
62 return &EvalLocal{ 55 return &EvalLocal{
63 Name: n.Config.Name, 56 Addr: n.Addr.LocalValue,
64 Value: n.Config.RawConfig, 57 Expr: n.Config.Expr,
58 }
59}
60
61// dag.GraphNodeDotter impl.
62func (n *NodeLocal) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
63 return &dag.DotNode{
64 Name: name,
65 Attrs: map[string]string{
66 "label": n.Name(),
67 "shape": "note",
68 },
65 } 69 }
66} 70}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go
index bb3e5ee..cb55a1a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_removed.go
@@ -2,76 +2,80 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5
6 "reflect" 6 "github.com/hashicorp/terraform/addrs"
7) 7)
8 8
9// NodeModuleRemoved represents a module that is no longer in the 9// NodeModuleRemoved represents a module that is no longer in the
10// config. 10// config.
11type NodeModuleRemoved struct { 11type NodeModuleRemoved struct {
12 PathValue []string 12 Addr addrs.ModuleInstance
13} 13}
14 14
15var (
16 _ GraphNodeSubPath = (*NodeModuleRemoved)(nil)
17 _ GraphNodeEvalable = (*NodeModuleRemoved)(nil)
18 _ GraphNodeReferencer = (*NodeModuleRemoved)(nil)
19 _ GraphNodeReferenceOutside = (*NodeModuleRemoved)(nil)
20)
21
15func (n *NodeModuleRemoved) Name() string { 22func (n *NodeModuleRemoved) Name() string {
16 return fmt.Sprintf("%s (removed)", modulePrefixStr(n.PathValue)) 23 return fmt.Sprintf("%s (removed)", n.Addr.String())
17} 24}
18 25
19// GraphNodeSubPath 26// GraphNodeSubPath
20func (n *NodeModuleRemoved) Path() []string { 27func (n *NodeModuleRemoved) Path() addrs.ModuleInstance {
21 return n.PathValue 28 return n.Addr
22} 29}
23 30
24// GraphNodeEvalable 31// GraphNodeEvalable
25func (n *NodeModuleRemoved) EvalTree() EvalNode { 32func (n *NodeModuleRemoved) EvalTree() EvalNode {
26 return &EvalOpFilter{ 33 return &EvalOpFilter{
27 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, 34 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
28 Node: &EvalDeleteModule{ 35 Node: &EvalCheckModuleRemoved{
29 PathValue: n.PathValue, 36 Addr: n.Addr,
30 }, 37 },
31 } 38 }
32} 39}
33 40
34func (n *NodeModuleRemoved) ReferenceGlobal() bool { 41func (n *NodeModuleRemoved) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
35 return true 42 // Our "References" implementation indicates that this node depends on
43 // the call to the module it represents, which implicitly depends on
44 // everything inside the module. That reference must therefore be
45 // interpreted in terms of our parent module.
46 return n.Addr, n.Addr.Parent()
36} 47}
37 48
38func (n *NodeModuleRemoved) References() []string { 49func (n *NodeModuleRemoved) References() []*addrs.Reference {
39 return []string{modulePrefixStr(n.PathValue)} 50 // We depend on the call to the module we represent, because that
40} 51 // implicitly then depends on everything inside that module.
52 // Our ReferenceOutside implementation causes this to be interpreted
53 // within the parent module.
41 54
42// EvalDeleteModule is an EvalNode implementation that removes an empty module 55 _, call := n.Addr.CallInstance()
43// entry from the state. 56 return []*addrs.Reference{
44type EvalDeleteModule struct { 57 {
45 PathValue []string 58 Subject: call,
46}
47 59
48func (n *EvalDeleteModule) Eval(ctx EvalContext) (interface{}, error) { 60 // No source range here, because there's nothing reasonable for
49 state, lock := ctx.State() 61 // us to return.
50 if state == nil { 62 },
51 return nil, nil
52 } 63 }
64}
53 65
54 // Get a write lock so we can access this instance 66// EvalCheckModuleRemoved is an EvalNode implementation that verifies that
55 lock.Lock() 67// a module has been removed from the state as expected.
56 defer lock.Unlock() 68type EvalCheckModuleRemoved struct {
57 69 Addr addrs.ModuleInstance
58 // Make sure we have a clean state 70}
59 // Destroyed resources aren't deleted, they're written with an ID of "".
60 state.prune()
61 71
62 // find the module and delete it 72func (n *EvalCheckModuleRemoved) Eval(ctx EvalContext) (interface{}, error) {
63 for i, m := range state.Modules { 73 mod := ctx.State().Module(n.Addr)
64 if reflect.DeepEqual(m.Path, n.PathValue) { 74 if mod != nil {
65 if !m.Empty() { 75 // If we get here then that indicates a bug either in the states
66 // a targeted apply may leave module resources even without a config, 76 // module or in an earlier step of the graph walk, since we should've
67 // so just log this and return. 77 // pruned out the module when the last resource was removed from it.
68 log.Printf("[DEBUG] cannot remove module %s, not empty", modulePrefixStr(n.PathValue)) 78 return nil, fmt.Errorf("leftover module %s in state that should have been removed; this is a bug in Terraform and should be reported", n.Addr)
69 break
70 }
71 state.Modules = append(state.Modules[:i], state.Modules[i+1:]...)
72 break
73 }
74 } 79 }
75
76 return nil, nil 80 return nil, nil
77} 81}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
index 66ff7d5..aca5a6a 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
@@ -1,40 +1,43 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/hcl2/hcl"
5 5 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/lang"
9 "github.com/zclconf/go-cty/cty"
8) 10)
9 11
10// NodeApplyableModuleVariable represents a module variable input during 12// NodeApplyableModuleVariable represents a module variable input during
11// the apply step. 13// the apply step.
12type NodeApplyableModuleVariable struct { 14type NodeApplyableModuleVariable struct {
13 PathValue []string 15 Addr addrs.AbsInputVariableInstance
14 Config *config.Variable // Config is the var in the config 16 Config *configs.Variable // Config is the var in the config
15 Value *config.RawConfig // Value is the value that is set 17 Expr hcl.Expression // Expr is the value expression given in the call
16
17 Module *module.Tree // Antiquated, want to remove
18} 18}
19 19
20func (n *NodeApplyableModuleVariable) Name() string { 20// Ensure that we are implementing all of the interfaces we think we are
21 result := fmt.Sprintf("var.%s", n.Config.Name) 21// implementing.
22 if len(n.PathValue) > 1 { 22var (
23 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 23 _ GraphNodeSubPath = (*NodeApplyableModuleVariable)(nil)
24 } 24 _ RemovableIfNotTargeted = (*NodeApplyableModuleVariable)(nil)
25 _ GraphNodeReferenceOutside = (*NodeApplyableModuleVariable)(nil)
26 _ GraphNodeReferenceable = (*NodeApplyableModuleVariable)(nil)
27 _ GraphNodeReferencer = (*NodeApplyableModuleVariable)(nil)
28 _ GraphNodeEvalable = (*NodeApplyableModuleVariable)(nil)
29 _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil)
30)
25 31
26 return result 32func (n *NodeApplyableModuleVariable) Name() string {
33 return n.Addr.String()
27} 34}
28 35
29// GraphNodeSubPath 36// GraphNodeSubPath
30func (n *NodeApplyableModuleVariable) Path() []string { 37func (n *NodeApplyableModuleVariable) Path() addrs.ModuleInstance {
31 // We execute in the parent scope (above our own module) so that 38 // We execute in the parent scope (above our own module) because
32 // we can access the proper interpolations. 39 // expressions in our value are resolved in that context.
33 if len(n.PathValue) > 2 { 40 return n.Addr.Module.Parent()
34 return n.PathValue[:len(n.PathValue)-1]
35 }
36
37 return rootModulePath
38} 41}
39 42
40// RemovableIfNotTargeted 43// RemovableIfNotTargeted
@@ -44,95 +47,96 @@ func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
44 return true 47 return true
45} 48}
46 49
47// GraphNodeReferenceGlobal 50// GraphNodeReferenceOutside implementation
48func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool { 51func (n *NodeApplyableModuleVariable) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
49 // We have to create fully qualified references because we cross 52
50 // boundaries here: our ReferenceableName is in one path and our 53 // Module input variables have their value expressions defined in the
51 // References are from another path. 54 // context of their calling (parent) module, and so references from
52 return true 55 // a node of this type should be resolved in the parent module instance.
56 referencePath = n.Addr.Module.Parent()
57
58 // Input variables are _referenced_ from their own module, though.
59 selfPath = n.Addr.Module
60
61 return // uses named return values
53} 62}
54 63
55// GraphNodeReferenceable 64// GraphNodeReferenceable
56func (n *NodeApplyableModuleVariable) ReferenceableName() []string { 65func (n *NodeApplyableModuleVariable) ReferenceableAddrs() []addrs.Referenceable {
57 return []string{n.Name()} 66 return []addrs.Referenceable{n.Addr.Variable}
58} 67}
59 68
60// GraphNodeReferencer 69// GraphNodeReferencer
61func (n *NodeApplyableModuleVariable) References() []string { 70func (n *NodeApplyableModuleVariable) References() []*addrs.Reference {
62 // If we have no value set, we depend on nothing
63 if n.Value == nil {
64 return nil
65 }
66 71
67 // Can't depend on anything if we're in the root 72 // If we have no value expression, we cannot depend on anything.
68 if len(n.PathValue) < 2 { 73 if n.Expr == nil {
69 return nil 74 return nil
70 } 75 }
71 76
72 // Otherwise, we depend on anything that is in our value, but 77 // Variables in the root don't depend on anything, because their values
73 // specifically in the namespace of the parent path. 78 // are gathered prior to the graph walk and recorded in the context.
74 // Create the prefix based on the path 79 if len(n.Addr.Module) == 0 {
75 var prefix string 80 return nil
76 if p := n.Path(); len(p) > 0 {
77 prefix = modulePrefixStr(p)
78 } 81 }
79 82
80 result := ReferencesFromConfig(n.Value) 83 // Otherwise, we depend on anything referenced by our value expression.
81 return modulePrefixList(result, prefix) 84 // We ignore diagnostics here under the assumption that we'll re-eval
85 // all these things later and catch them then; for our purposes here,
86 // we only care about valid references.
87 //
88 // Due to our GraphNodeReferenceOutside implementation, the addresses
89 // returned by this function are interpreted in the _parent_ module from
90 // where our associated variable was declared, which is correct because
91 // our value expression is assigned within a "module" block in the parent
92 // module.
93 refs, _ := lang.ReferencesInExpr(n.Expr)
94 return refs
82} 95}
83 96
84// GraphNodeEvalable 97// GraphNodeEvalable
85func (n *NodeApplyableModuleVariable) EvalTree() EvalNode { 98func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
86 // If we have no value, do nothing 99 // If we have no value, do nothing
87 if n.Value == nil { 100 if n.Expr == nil {
88 return &EvalNoop{} 101 return &EvalNoop{}
89 } 102 }
90 103
91 // Otherwise, interpolate the value of this variable and set it 104 // Otherwise, interpolate the value of this variable and set it
92 // within the variables mapping. 105 // within the variables mapping.
93 var config *ResourceConfig 106 vals := make(map[string]cty.Value)
94 variables := make(map[string]interface{}) 107
108 _, call := n.Addr.Module.CallInstance()
95 109
96 return &EvalSequence{ 110 return &EvalSequence{
97 Nodes: []EvalNode{ 111 Nodes: []EvalNode{
98 &EvalOpFilter{ 112 &EvalOpFilter{
99 Ops: []walkOperation{walkInput},
100 Node: &EvalInterpolate{
101 Config: n.Value,
102 Output: &config,
103 ContinueOnErr: true,
104 },
105 },
106 &EvalOpFilter{
107 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, 113 Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
108 walkDestroy, walkValidate}, 114 walkDestroy, walkValidate},
109 Node: &EvalInterpolate{ 115 Node: &EvalModuleCallArgument{
110 Config: n.Value, 116 Addr: n.Addr.Variable,
111 Output: &config, 117 Config: n.Config,
112 }, 118 Expr: n.Expr,
113 }, 119 Values: vals,
114 120
115 &EvalVariableBlock{ 121 IgnoreDiagnostics: false,
116 Config: &config, 122 },
117 VariableValues: variables,
118 },
119
120 &EvalCoerceMapVariable{
121 Variables: variables,
122 ModulePath: n.PathValue,
123 ModuleTree: n.Module,
124 }, 123 },
125 124
126 &EvalTypeCheckVariable{ 125 &EvalSetModuleCallArguments{
127 Variables: variables, 126 Module: call,
128 ModulePath: n.PathValue, 127 Values: vals,
129 ModuleTree: n.Module,
130 }, 128 },
129 },
130 }
131}
131 132
132 &EvalSetVariables{ 133// dag.GraphNodeDotter impl.
133 Module: &n.PathValue[len(n.PathValue)-1], 134func (n *NodeApplyableModuleVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
134 Variables: variables, 135 return &dag.DotNode{
135 }, 136 Name: name,
137 Attrs: map[string]string{
138 "label": n.Name(),
139 "shape": "note",
136 }, 140 },
137 } 141 }
138} 142}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
index 83e9925..bb3d065 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
@@ -2,31 +2,38 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "strings"
6 5
7 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/dag" 8 "github.com/hashicorp/terraform/dag"
9 "github.com/hashicorp/terraform/lang"
9) 10)
10 11
11// NodeApplyableOutput represents an output that is "applyable": 12// NodeApplyableOutput represents an output that is "applyable":
12// it is ready to be applied. 13// it is ready to be applied.
13type NodeApplyableOutput struct { 14type NodeApplyableOutput struct {
14 PathValue []string 15 Addr addrs.AbsOutputValue
15 Config *config.Output // Config is the output in the config 16 Config *configs.Output // Config is the output in the config
16} 17}
17 18
18func (n *NodeApplyableOutput) Name() string { 19var (
19 result := fmt.Sprintf("output.%s", n.Config.Name) 20 _ GraphNodeSubPath = (*NodeApplyableOutput)(nil)
20 if len(n.PathValue) > 1 { 21 _ RemovableIfNotTargeted = (*NodeApplyableOutput)(nil)
21 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 22 _ GraphNodeTargetDownstream = (*NodeApplyableOutput)(nil)
22 } 23 _ GraphNodeReferenceable = (*NodeApplyableOutput)(nil)
24 _ GraphNodeReferencer = (*NodeApplyableOutput)(nil)
25 _ GraphNodeReferenceOutside = (*NodeApplyableOutput)(nil)
26 _ GraphNodeEvalable = (*NodeApplyableOutput)(nil)
27 _ dag.GraphNodeDotter = (*NodeApplyableOutput)(nil)
28)
23 29
24 return result 30func (n *NodeApplyableOutput) Name() string {
31 return n.Addr.String()
25} 32}
26 33
27// GraphNodeSubPath 34// GraphNodeSubPath
28func (n *NodeApplyableOutput) Path() []string { 35func (n *NodeApplyableOutput) Path() addrs.ModuleInstance {
29 return n.PathValue 36 return n.Addr.Module
30} 37}
31 38
32// RemovableIfNotTargeted 39// RemovableIfNotTargeted
@@ -44,27 +51,64 @@ func (n *NodeApplyableOutput) TargetDownstream(targetedDeps, untargetedDeps *dag
44 return true 51 return true
45} 52}
46 53
54func referenceOutsideForOutput(addr addrs.AbsOutputValue) (selfPath, referencePath addrs.ModuleInstance) {
55
56 // Output values have their expressions resolved in the context of the
57 // module where they are defined.
58 referencePath = addr.Module
59
60 // ...but they are referenced in the context of their calling module.
61 selfPath = addr.Module.Parent()
62
63 return // uses named return values
64
65}
66
67// GraphNodeReferenceOutside implementation
68func (n *NodeApplyableOutput) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
69 return referenceOutsideForOutput(n.Addr)
70}
71
72func referenceableAddrsForOutput(addr addrs.AbsOutputValue) []addrs.Referenceable {
73 // An output in the root module can't be referenced at all.
74 if addr.Module.IsRoot() {
75 return nil
76 }
77
78 // Otherwise, we can be referenced via a reference to our output name
79 // on the parent module's call, or via a reference to the entire call.
80 // e.g. module.foo.bar or just module.foo .
81 // Note that our ReferenceOutside method causes these addresses to be
82 // relative to the calling module, not the module where the output
83 // was declared.
84 _, outp := addr.ModuleCallOutput()
85 _, call := addr.Module.CallInstance()
86 return []addrs.Referenceable{outp, call}
87
88}
89
47// GraphNodeReferenceable 90// GraphNodeReferenceable
48func (n *NodeApplyableOutput) ReferenceableName() []string { 91func (n *NodeApplyableOutput) ReferenceableAddrs() []addrs.Referenceable {
49 name := fmt.Sprintf("output.%s", n.Config.Name) 92 return referenceableAddrsForOutput(n.Addr)
50 return []string{name}
51} 93}
52 94
53// GraphNodeReferencer 95func referencesForOutput(c *configs.Output) []*addrs.Reference {
54func (n *NodeApplyableOutput) References() []string { 96 impRefs, _ := lang.ReferencesInExpr(c.Expr)
55 var result []string 97 expRefs, _ := lang.References(c.DependsOn)
56 result = append(result, n.Config.DependsOn...) 98 l := len(impRefs) + len(expRefs)
57 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...) 99 if l == 0 {
58 for _, v := range result { 100 return nil
59 split := strings.Split(v, "/")
60 for i, s := range split {
61 split[i] = s + ".destroy"
62 }
63
64 result = append(result, strings.Join(split, "/"))
65 } 101 }
102 refs := make([]*addrs.Reference, 0, l)
103 refs = append(refs, impRefs...)
104 refs = append(refs, expRefs...)
105 return refs
66 106
67 return result 107}
108
109// GraphNodeReferencer
110func (n *NodeApplyableOutput) References() []*addrs.Reference {
111 return appendResourceDestroyReferences(referencesForOutput(n.Config))
68} 112}
69 113
70// GraphNodeEvalable 114// GraphNodeEvalable
@@ -72,47 +116,51 @@ func (n *NodeApplyableOutput) EvalTree() EvalNode {
72 return &EvalSequence{ 116 return &EvalSequence{
73 Nodes: []EvalNode{ 117 Nodes: []EvalNode{
74 &EvalOpFilter{ 118 &EvalOpFilter{
75 // Don't let interpolation errors stop Input, since it happens
76 // before Refresh.
77 Ops: []walkOperation{walkInput},
78 Node: &EvalWriteOutput{
79 Name: n.Config.Name,
80 Sensitive: n.Config.Sensitive,
81 Value: n.Config.RawConfig,
82 ContinueOnErr: true,
83 },
84 },
85 &EvalOpFilter{
86 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy}, 119 Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkValidate, walkDestroy, walkPlanDestroy},
87 Node: &EvalWriteOutput{ 120 Node: &EvalWriteOutput{
88 Name: n.Config.Name, 121 Addr: n.Addr.OutputValue,
89 Sensitive: n.Config.Sensitive, 122 Sensitive: n.Config.Sensitive,
90 Value: n.Config.RawConfig, 123 Expr: n.Config.Expr,
91 }, 124 },
92 }, 125 },
93 }, 126 },
94 } 127 }
95} 128}
96 129
130// dag.GraphNodeDotter impl.
131func (n *NodeApplyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
132 return &dag.DotNode{
133 Name: name,
134 Attrs: map[string]string{
135 "label": n.Name(),
136 "shape": "note",
137 },
138 }
139}
140
97// NodeDestroyableOutput represents an output that is "destroybale": 141// NodeDestroyableOutput represents an output that is "destroybale":
98// its application will remove the output from the state. 142// its application will remove the output from the state.
99type NodeDestroyableOutput struct { 143type NodeDestroyableOutput struct {
100 PathValue []string 144 Addr addrs.AbsOutputValue
101 Config *config.Output // Config is the output in the config 145 Config *configs.Output // Config is the output in the config
102} 146}
103 147
104func (n *NodeDestroyableOutput) Name() string { 148var (
105 result := fmt.Sprintf("output.%s (destroy)", n.Config.Name) 149 _ GraphNodeSubPath = (*NodeDestroyableOutput)(nil)
106 if len(n.PathValue) > 1 { 150 _ RemovableIfNotTargeted = (*NodeDestroyableOutput)(nil)
107 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 151 _ GraphNodeTargetDownstream = (*NodeDestroyableOutput)(nil)
108 } 152 _ GraphNodeReferencer = (*NodeDestroyableOutput)(nil)
153 _ GraphNodeEvalable = (*NodeDestroyableOutput)(nil)
154 _ dag.GraphNodeDotter = (*NodeDestroyableOutput)(nil)
155)
109 156
110 return result 157func (n *NodeDestroyableOutput) Name() string {
158 return fmt.Sprintf("%s (destroy)", n.Addr.String())
111} 159}
112 160
113// GraphNodeSubPath 161// GraphNodeSubPath
114func (n *NodeDestroyableOutput) Path() []string { 162func (n *NodeDestroyableOutput) Path() addrs.ModuleInstance {
115 return n.PathValue 163 return n.Addr.Module
116} 164}
117 165
118// RemovableIfNotTargeted 166// RemovableIfNotTargeted
@@ -129,25 +177,24 @@ func (n *NodeDestroyableOutput) TargetDownstream(targetedDeps, untargetedDeps *d
129} 177}
130 178
131// GraphNodeReferencer 179// GraphNodeReferencer
132func (n *NodeDestroyableOutput) References() []string { 180func (n *NodeDestroyableOutput) References() []*addrs.Reference {
133 var result []string 181 return referencesForOutput(n.Config)
134 result = append(result, n.Config.DependsOn...)
135 result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
136 for _, v := range result {
137 split := strings.Split(v, "/")
138 for i, s := range split {
139 split[i] = s + ".destroy"
140 }
141
142 result = append(result, strings.Join(split, "/"))
143 }
144
145 return result
146} 182}
147 183
148// GraphNodeEvalable 184// GraphNodeEvalable
149func (n *NodeDestroyableOutput) EvalTree() EvalNode { 185func (n *NodeDestroyableOutput) EvalTree() EvalNode {
150 return &EvalDeleteOutput{ 186 return &EvalDeleteOutput{
151 Name: n.Config.Name, 187 Addr: n.Addr.OutputValue,
188 }
189}
190
191// dag.GraphNodeDotter impl.
192func (n *NodeDestroyableOutput) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
193 return &dag.DotNode{
194 Name: name,
195 Attrs: map[string]string{
196 "label": n.Name(),
197 "shape": "note",
198 },
152 } 199 }
153} 200}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
index 0fd1554..518b8aa 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
@@ -2,31 +2,39 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
5) 7)
6 8
7// NodeOutputOrphan represents an output that is an orphan. 9// NodeOutputOrphan represents an output that is an orphan.
8type NodeOutputOrphan struct { 10type NodeOutputOrphan struct {
9 OutputName string 11 Addr addrs.AbsOutputValue
10 PathValue []string
11} 12}
12 13
14var (
15 _ GraphNodeSubPath = (*NodeOutputOrphan)(nil)
16 _ GraphNodeReferenceable = (*NodeOutputOrphan)(nil)
17 _ GraphNodeReferenceOutside = (*NodeOutputOrphan)(nil)
18 _ GraphNodeEvalable = (*NodeOutputOrphan)(nil)
19)
20
13func (n *NodeOutputOrphan) Name() string { 21func (n *NodeOutputOrphan) Name() string {
14 result := fmt.Sprintf("output.%s (orphan)", n.OutputName) 22 return fmt.Sprintf("%s (orphan)", n.Addr.String())
15 if len(n.PathValue) > 1 { 23}
16 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
17 }
18 24
19 return result 25// GraphNodeReferenceOutside implementation
26func (n *NodeOutputOrphan) ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance) {
27 return referenceOutsideForOutput(n.Addr)
20} 28}
21 29
22// GraphNodeReferenceable 30// GraphNodeReferenceable
23func (n *NodeOutputOrphan) ReferenceableName() []string { 31func (n *NodeOutputOrphan) ReferenceableAddrs() []addrs.Referenceable {
24 return []string{"output." + n.OutputName} 32 return referenceableAddrsForOutput(n.Addr)
25} 33}
26 34
27// GraphNodeSubPath 35// GraphNodeSubPath
28func (n *NodeOutputOrphan) Path() []string { 36func (n *NodeOutputOrphan) Path() addrs.ModuleInstance {
29 return n.PathValue 37 return n.Addr.Module
30} 38}
31 39
32// GraphNodeEvalable 40// GraphNodeEvalable
@@ -34,7 +42,7 @@ func (n *NodeOutputOrphan) EvalTree() EvalNode {
34 return &EvalOpFilter{ 42 return &EvalOpFilter{
35 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy}, 43 Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
36 Node: &EvalDeleteOutput{ 44 Node: &EvalDeleteOutput{
37 Name: n.OutputName, 45 Addr: n.Addr.OutputValue,
38 }, 46 },
39 } 47 }
40} 48}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
index 9e490f7..a0cdcfe 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
@@ -1,10 +1,10 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/addrs"
5 "strings" 5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/configs/configschema"
6 7
7 "github.com/hashicorp/terraform/config"
8 "github.com/hashicorp/terraform/dag" 8 "github.com/hashicorp/terraform/dag"
9) 9)
10 10
@@ -15,37 +15,33 @@ type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex
15// NodeAbstractProvider represents a provider that has no associated operations. 15// NodeAbstractProvider represents a provider that has no associated operations.
16// It registers all the common interfaces across operations for providers. 16// It registers all the common interfaces across operations for providers.
17type NodeAbstractProvider struct { 17type NodeAbstractProvider struct {
18 NameValue string 18 Addr addrs.AbsProviderConfig
19 PathValue []string
20 19
21 // The fields below will be automatically set using the Attach 20 // The fields below will be automatically set using the Attach
22 // interfaces if you're running those transforms, but also be explicitly 21 // interfaces if you're running those transforms, but also be explicitly
23 // set if you already have that information. 22 // set if you already have that information.
24 23
25 Config *config.ProviderConfig 24 Config *configs.Provider
25 Schema *configschema.Block
26} 26}
27 27
28func ResolveProviderName(name string, path []string) string { 28var (
29 if strings.Contains(name, "provider.") { 29 _ GraphNodeSubPath = (*NodeAbstractProvider)(nil)
30 // already resolved 30 _ RemovableIfNotTargeted = (*NodeAbstractProvider)(nil)
31 return name 31 _ GraphNodeReferencer = (*NodeAbstractProvider)(nil)
32 } 32 _ GraphNodeProvider = (*NodeAbstractProvider)(nil)
33 33 _ GraphNodeAttachProvider = (*NodeAbstractProvider)(nil)
34 name = fmt.Sprintf("provider.%s", name) 34 _ GraphNodeAttachProviderConfigSchema = (*NodeAbstractProvider)(nil)
35 if len(path) >= 1 { 35 _ dag.GraphNodeDotter = (*NodeAbstractProvider)(nil)
36 name = fmt.Sprintf("%s.%s", modulePrefixStr(path), name) 36)
37 }
38
39 return name
40}
41 37
42func (n *NodeAbstractProvider) Name() string { 38func (n *NodeAbstractProvider) Name() string {
43 return ResolveProviderName(n.NameValue, n.PathValue) 39 return n.Addr.String()
44} 40}
45 41
46// GraphNodeSubPath 42// GraphNodeSubPath
47func (n *NodeAbstractProvider) Path() []string { 43func (n *NodeAbstractProvider) Path() addrs.ModuleInstance {
48 return n.PathValue 44 return n.Addr.Module
49} 45}
50 46
51// RemovableIfNotTargeted 47// RemovableIfNotTargeted
@@ -56,21 +52,21 @@ func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
56} 52}
57 53
58// GraphNodeReferencer 54// GraphNodeReferencer
59func (n *NodeAbstractProvider) References() []string { 55func (n *NodeAbstractProvider) References() []*addrs.Reference {
60 if n.Config == nil { 56 if n.Config == nil || n.Schema == nil {
61 return nil 57 return nil
62 } 58 }
63 59
64 return ReferencesFromConfig(n.Config.RawConfig) 60 return ReferencesFromConfig(n.Config.Config, n.Schema)
65} 61}
66 62
67// GraphNodeProvider 63// GraphNodeProvider
68func (n *NodeAbstractProvider) ProviderName() string { 64func (n *NodeAbstractProvider) ProviderAddr() addrs.AbsProviderConfig {
69 return n.NameValue 65 return n.Addr
70} 66}
71 67
72// GraphNodeProvider 68// GraphNodeProvider
73func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig { 69func (n *NodeAbstractProvider) ProviderConfig() *configs.Provider {
74 if n.Config == nil { 70 if n.Config == nil {
75 return nil 71 return nil
76 } 72 }
@@ -79,10 +75,15 @@ func (n *NodeAbstractProvider) ProviderConfig() *config.ProviderConfig {
79} 75}
80 76
81// GraphNodeAttachProvider 77// GraphNodeAttachProvider
82func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) { 78func (n *NodeAbstractProvider) AttachProvider(c *configs.Provider) {
83 n.Config = c 79 n.Config = c
84} 80}
85 81
82// GraphNodeAttachProviderConfigSchema impl.
83func (n *NodeAbstractProvider) AttachProviderConfigSchema(schema *configschema.Block) {
84 n.Schema = schema
85}
86
86// GraphNodeDotter impl. 87// GraphNodeDotter impl.
87func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { 88func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
88 return &dag.DotNode{ 89 return &dag.DotNode{
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
index a00bc46..30d8813 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
@@ -2,6 +2,8 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/dag"
5) 7)
6 8
7// NodeDisabledProvider represents a provider that is disabled. A disabled 9// NodeDisabledProvider represents a provider that is disabled. A disabled
@@ -11,24 +13,15 @@ type NodeDisabledProvider struct {
11 *NodeAbstractProvider 13 *NodeAbstractProvider
12} 14}
13 15
16var (
17 _ GraphNodeSubPath = (*NodeDisabledProvider)(nil)
18 _ RemovableIfNotTargeted = (*NodeDisabledProvider)(nil)
19 _ GraphNodeReferencer = (*NodeDisabledProvider)(nil)
20 _ GraphNodeProvider = (*NodeDisabledProvider)(nil)
21 _ GraphNodeAttachProvider = (*NodeDisabledProvider)(nil)
22 _ dag.GraphNodeDotter = (*NodeDisabledProvider)(nil)
23)
24
14func (n *NodeDisabledProvider) Name() string { 25func (n *NodeDisabledProvider) Name() string {
15 return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name()) 26 return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
16} 27}
17
18// GraphNodeEvalable
19func (n *NodeDisabledProvider) EvalTree() EvalNode {
20 var resourceConfig *ResourceConfig
21 return &EvalSequence{
22 Nodes: []EvalNode{
23 &EvalInterpolateProvider{
24 Config: n.ProviderConfig(),
25 Output: &resourceConfig,
26 },
27 &EvalBuildProviderConfig{
28 Provider: n.ProviderName(),
29 Config: &resourceConfig,
30 Output: &resourceConfig,
31 },
32 },
33 }
34}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go
new file mode 100644
index 0000000..580e60c
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provider_eval.go
@@ -0,0 +1,20 @@
1package terraform
2
3// NodeEvalableProvider represents a provider during an "eval" walk.
4// This special provider node type just initializes a provider and
5// fetches its schema, without configuring it or otherwise interacting
6// with it.
7type NodeEvalableProvider struct {
8 *NodeAbstractProvider
9}
10
11// GraphNodeEvalable
12func (n *NodeEvalableProvider) EvalTree() EvalNode {
13 addr := n.Addr
14 relAddr := addr.ProviderConfig
15
16 return &EvalInitProvider{
17 TypeName: relAddr.Type,
18 Addr: addr.ProviderConfig,
19 }
20}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
index bb117c1..31ed1a8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
@@ -3,6 +3,7 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/config"
7) 8)
8 9
@@ -10,7 +11,7 @@ import (
10// It registers all the common interfaces across operations for providers. 11// It registers all the common interfaces across operations for providers.
11type NodeProvisioner struct { 12type NodeProvisioner struct {
12 NameValue string 13 NameValue string
13 PathValue []string 14 PathValue addrs.ModuleInstance
14 15
15 // The fields below will be automatically set using the Attach 16 // The fields below will be automatically set using the Attach
16 // interfaces if you're running those transforms, but also be explicitly 17 // interfaces if you're running those transforms, but also be explicitly
@@ -19,17 +20,23 @@ type NodeProvisioner struct {
19 Config *config.ProviderConfig 20 Config *config.ProviderConfig
20} 21}
21 22
23var (
24 _ GraphNodeSubPath = (*NodeProvisioner)(nil)
25 _ GraphNodeProvisioner = (*NodeProvisioner)(nil)
26 _ GraphNodeEvalable = (*NodeProvisioner)(nil)
27)
28
22func (n *NodeProvisioner) Name() string { 29func (n *NodeProvisioner) Name() string {
23 result := fmt.Sprintf("provisioner.%s", n.NameValue) 30 result := fmt.Sprintf("provisioner.%s", n.NameValue)
24 if len(n.PathValue) > 1 { 31 if len(n.PathValue) > 0 {
25 result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result) 32 result = fmt.Sprintf("%s.%s", n.PathValue.String(), result)
26 } 33 }
27 34
28 return result 35 return result
29} 36}
30 37
31// GraphNodeSubPath 38// GraphNodeSubPath
32func (n *NodeProvisioner) Path() []string { 39func (n *NodeProvisioner) Path() addrs.ModuleInstance {
33 return n.PathValue 40 return n.PathValue
34} 41}
35 42
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
index 73509c8..3a0570c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
@@ -2,10 +2,16 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "strings" 5 "log"
6 "sort"
6 7
7 "github.com/hashicorp/terraform/config" 8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/configs"
10 "github.com/hashicorp/terraform/configs/configschema"
8 "github.com/hashicorp/terraform/dag" 11 "github.com/hashicorp/terraform/dag"
12 "github.com/hashicorp/terraform/lang"
13 "github.com/hashicorp/terraform/states"
14 "github.com/hashicorp/terraform/tfdiags"
9) 15)
10 16
11// ConcreteResourceNodeFunc is a callback type used to convert an 17// ConcreteResourceNodeFunc is a callback type used to convert an
@@ -16,225 +22,420 @@ type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
16// The type of operation cannot be assumed, only that this node represents 22// The type of operation cannot be assumed, only that this node represents
17// the given resource. 23// the given resource.
18type GraphNodeResource interface { 24type GraphNodeResource interface {
19 ResourceAddr() *ResourceAddress 25 ResourceAddr() addrs.AbsResource
26}
27
28// ConcreteResourceInstanceNodeFunc is a callback type used to convert an
29// abstract resource instance to a concrete one of some type.
30type ConcreteResourceInstanceNodeFunc func(*NodeAbstractResourceInstance) dag.Vertex
31
32// GraphNodeResourceInstance is implemented by any nodes that represent
33// a resource instance. A single resource may have multiple instances if,
34// for example, the "count" or "for_each" argument is used for it in
35// configuration.
36type GraphNodeResourceInstance interface {
37 ResourceInstanceAddr() addrs.AbsResourceInstance
20} 38}
21 39
22// NodeAbstractResource represents a resource that has no associated 40// NodeAbstractResource represents a resource that has no associated
23// operations. It registers all the interfaces for a resource that common 41// operations. It registers all the interfaces for a resource that common
24// across multiple operation types. 42// across multiple operation types.
25type NodeAbstractResource struct { 43type NodeAbstractResource struct {
26 Addr *ResourceAddress // Addr is the address for this resource 44 Addr addrs.AbsResource // Addr is the address for this resource
27 45
28 // The fields below will be automatically set using the Attach 46 // The fields below will be automatically set using the Attach
29 // interfaces if you're running those transforms, but also be explicitly 47 // interfaces if you're running those transforms, but also be explicitly
30 // set if you already have that information. 48 // set if you already have that information.
31 49
32 Config *config.Resource // Config is the resource in the config 50 Schema *configschema.Block // Schema for processing the configuration body
33 ResourceState *ResourceState // ResourceState is the ResourceState for this 51 SchemaVersion uint64 // Schema version of "Schema", as decided by the provider
52 Config *configs.Resource // Config is the resource in the config
34 53
35 Targets []ResourceAddress // Set from GraphNodeTargetable 54 ProvisionerSchemas map[string]*configschema.Block
55
56 Targets []addrs.Targetable // Set from GraphNodeTargetable
36 57
37 // The address of the provider this resource will use 58 // The address of the provider this resource will use
38 ResolvedProvider string 59 ResolvedProvider addrs.AbsProviderConfig
60}
61
62var (
63 _ GraphNodeSubPath = (*NodeAbstractResource)(nil)
64 _ GraphNodeReferenceable = (*NodeAbstractResource)(nil)
65 _ GraphNodeReferencer = (*NodeAbstractResource)(nil)
66 _ GraphNodeProviderConsumer = (*NodeAbstractResource)(nil)
67 _ GraphNodeProvisionerConsumer = (*NodeAbstractResource)(nil)
68 _ GraphNodeResource = (*NodeAbstractResource)(nil)
69 _ GraphNodeAttachResourceConfig = (*NodeAbstractResource)(nil)
70 _ GraphNodeAttachResourceSchema = (*NodeAbstractResource)(nil)
71 _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResource)(nil)
72 _ GraphNodeTargetable = (*NodeAbstractResource)(nil)
73 _ dag.GraphNodeDotter = (*NodeAbstractResource)(nil)
74)
75
76// NewNodeAbstractResource creates an abstract resource graph node for
77// the given absolute resource address.
78func NewNodeAbstractResource(addr addrs.AbsResource) *NodeAbstractResource {
79 return &NodeAbstractResource{
80 Addr: addr,
81 }
82}
83
84// NodeAbstractResourceInstance represents a resource instance with no
85// associated operations. It embeds NodeAbstractResource but additionally
86// contains an instance key, used to identify one of potentially many
87// instances that were created from a resource in configuration, e.g. using
88// the "count" or "for_each" arguments.
89type NodeAbstractResourceInstance struct {
90 NodeAbstractResource
91 InstanceKey addrs.InstanceKey
92
93 // The fields below will be automatically set using the Attach
94 // interfaces if you're running those transforms, but also be explicitly
95 // set if you already have that information.
96
97 ResourceState *states.Resource
98}
99
100var (
101 _ GraphNodeSubPath = (*NodeAbstractResourceInstance)(nil)
102 _ GraphNodeReferenceable = (*NodeAbstractResourceInstance)(nil)
103 _ GraphNodeReferencer = (*NodeAbstractResourceInstance)(nil)
104 _ GraphNodeProviderConsumer = (*NodeAbstractResourceInstance)(nil)
105 _ GraphNodeProvisionerConsumer = (*NodeAbstractResourceInstance)(nil)
106 _ GraphNodeResource = (*NodeAbstractResourceInstance)(nil)
107 _ GraphNodeResourceInstance = (*NodeAbstractResourceInstance)(nil)
108 _ GraphNodeAttachResourceState = (*NodeAbstractResourceInstance)(nil)
109 _ GraphNodeAttachResourceConfig = (*NodeAbstractResourceInstance)(nil)
110 _ GraphNodeAttachResourceSchema = (*NodeAbstractResourceInstance)(nil)
111 _ GraphNodeAttachProvisionerSchema = (*NodeAbstractResourceInstance)(nil)
112 _ GraphNodeTargetable = (*NodeAbstractResourceInstance)(nil)
113 _ dag.GraphNodeDotter = (*NodeAbstractResourceInstance)(nil)
114)
115
116// NewNodeAbstractResourceInstance creates an abstract resource instance graph
117// node for the given absolute resource instance address.
118func NewNodeAbstractResourceInstance(addr addrs.AbsResourceInstance) *NodeAbstractResourceInstance {
119 // Due to the fact that we embed NodeAbstractResource, the given address
120 // actually ends up split between the resource address in the embedded
121 // object and the InstanceKey field in our own struct. The
122 // ResourceInstanceAddr method will stick these back together again on
123 // request.
124 return &NodeAbstractResourceInstance{
125 NodeAbstractResource: NodeAbstractResource{
126 Addr: addr.ContainingResource(),
127 },
128 InstanceKey: addr.Resource.Key,
129 }
39} 130}
40 131
41func (n *NodeAbstractResource) Name() string { 132func (n *NodeAbstractResource) Name() string {
42 return n.Addr.String() 133 return n.ResourceAddr().String()
134}
135
136func (n *NodeAbstractResourceInstance) Name() string {
137 return n.ResourceInstanceAddr().String()
43} 138}
44 139
45// GraphNodeSubPath 140// GraphNodeSubPath
46func (n *NodeAbstractResource) Path() []string { 141func (n *NodeAbstractResource) Path() addrs.ModuleInstance {
47 return n.Addr.Path 142 return n.Addr.Module
48} 143}
49 144
50// GraphNodeReferenceable 145// GraphNodeReferenceable
51func (n *NodeAbstractResource) ReferenceableName() []string { 146func (n *NodeAbstractResource) ReferenceableAddrs() []addrs.Referenceable {
52 // We always are referenceable as "type.name" as long as 147 return []addrs.Referenceable{n.Addr.Resource}
53 // we have a config or address. Determine what that value is. 148}
54 var id string
55 if n.Config != nil {
56 id = n.Config.Id()
57 } else if n.Addr != nil {
58 addrCopy := n.Addr.Copy()
59 addrCopy.Path = nil // ReferenceTransformer handles paths
60 addrCopy.Index = -1 // We handle indexes below
61 id = addrCopy.String()
62 } else {
63 // No way to determine our type.name, just return
64 return nil
65 }
66 149
67 var result []string 150// GraphNodeReferenceable
151func (n *NodeAbstractResourceInstance) ReferenceableAddrs() []addrs.Referenceable {
152 addr := n.ResourceInstanceAddr()
153 return []addrs.Referenceable{
154 addr.Resource,
155
156 // A resource instance can also be referenced by the address of its
157 // containing resource, so that e.g. a reference to aws_instance.foo
158 // would match both aws_instance.foo[0] and aws_instance.foo[1].
159 addr.ContainingResource().Resource,
160 }
161}
68 162
69 // Always include our own ID. This is primarily for backwards 163// GraphNodeReferencer
70 // compatibility with states that didn't yet support the more 164func (n *NodeAbstractResource) References() []*addrs.Reference {
71 // specific dep string. 165 // If we have a config then we prefer to use that.
72 result = append(result, id) 166 if c := n.Config; c != nil {
167 var result []*addrs.Reference
168
169 for _, traversal := range c.DependsOn {
170 ref, err := addrs.ParseRef(traversal)
171 if err != nil {
172 // We ignore this here, because this isn't a suitable place to return
173 // errors. This situation should be caught and rejected during
174 // validation.
175 log.Printf("[ERROR] Can't parse %#v from depends_on as reference: %s", traversal, err)
176 continue
177 }
73 178
74 // We represent all multi-access 179 result = append(result, ref)
75 result = append(result, fmt.Sprintf("%s.*", id)) 180 }
76 181
77 // We represent either a specific number, or all numbers 182 if n.Schema == nil {
78 suffix := "N" 183 // Should never happens, but we'll log if it does so that we can
79 if n.Addr != nil { 184 // see this easily when debugging.
80 idx := n.Addr.Index 185 log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
81 if idx == -1 {
82 idx = 0
83 } 186 }
84 187
85 suffix = fmt.Sprintf("%d", idx) 188 refs, _ := lang.ReferencesInExpr(c.Count)
189 result = append(result, refs...)
190 refs, _ = lang.ReferencesInBlock(c.Config, n.Schema)
191 result = append(result, refs...)
192 if c.Managed != nil {
193 for _, p := range c.Managed.Provisioners {
194 if p.When != configs.ProvisionerWhenCreate {
195 continue
196 }
197 if p.Connection != nil {
198 refs, _ = lang.ReferencesInBlock(p.Connection.Config, connectionBlockSupersetSchema)
199 result = append(result, refs...)
200 }
201
202 schema := n.ProvisionerSchemas[p.Type]
203 if schema == nil {
204 log.Printf("[WARN] no schema for provisioner %q is attached to %s, so provisioner block references cannot be detected", p.Type, n.Name())
205 }
206 refs, _ = lang.ReferencesInBlock(p.Config, schema)
207 result = append(result, refs...)
208 }
209 }
210 return result
86 } 211 }
87 result = append(result, fmt.Sprintf("%s.%s", id, suffix))
88 212
89 return result 213 // Otherwise, we have no references.
214 return nil
90} 215}
91 216
92// GraphNodeReferencer 217// GraphNodeReferencer
93func (n *NodeAbstractResource) References() []string { 218func (n *NodeAbstractResourceInstance) References() []*addrs.Reference {
94 // If we have a config, that is our source of truth 219 // If we have a configuration attached then we'll delegate to our
95 if c := n.Config; c != nil { 220 // embedded abstract resource, which knows how to extract dependencies
96 // Grab all the references 221 // from configuration.
97 var result []string 222 if n.Config != nil {
98 result = append(result, c.DependsOn...) 223 if n.Schema == nil {
99 result = append(result, ReferencesFromConfig(c.RawCount)...) 224 // We'll produce a log message about this out here so that
100 result = append(result, ReferencesFromConfig(c.RawConfig)...) 225 // we can include the full instance address, since the equivalent
101 for _, p := range c.Provisioners { 226 // message in NodeAbstractResource.References cannot see it.
102 if p.When == config.ProvisionerWhenCreate { 227 log.Printf("[WARN] no schema is attached to %s, so config references cannot be detected", n.Name())
103 result = append(result, ReferencesFromConfig(p.ConnInfo)...) 228 return nil
104 result = append(result, ReferencesFromConfig(p.RawConfig)...)
105 }
106 } 229 }
107 230 return n.NodeAbstractResource.References()
108 return uniqueStrings(result)
109 } 231 }
110 232
111 // If we have state, that is our next source 233 // Otherwise, if we have state then we'll use the values stored in state
112 if s := n.ResourceState; s != nil { 234 // as a fallback.
113 return s.Dependencies 235 if rs := n.ResourceState; rs != nil {
236 if s := rs.Instance(n.InstanceKey); s != nil {
237 // State is still storing dependencies as old-style strings, so we'll
238 // need to do a little work here to massage this to the form we now
239 // want.
240 var result []*addrs.Reference
241 for _, addr := range s.Current.Dependencies {
242 if addr == nil {
243 // Should never happen; indicates a bug in the state loader
244 panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr()))
245 }
246
247 // This is a little weird: we need to manufacture an addrs.Reference
248 // with a fake range here because the state isn't something we can
249 // make source references into.
250 result = append(result, &addrs.Reference{
251 Subject: addr,
252 SourceRange: tfdiags.SourceRange{
253 Filename: "(state file)",
254 },
255 })
256 }
257 return result
258 }
114 } 259 }
115 260
261 // If we have neither config nor state then we have no references.
116 return nil 262 return nil
117} 263}
118 264
265// converts an instance address to the legacy dotted notation
266func dottedInstanceAddr(tr addrs.ResourceInstance) string {
267 // The legacy state format uses dot-separated instance keys,
268 // rather than bracketed as in our modern syntax.
269 var suffix string
270 switch tk := tr.Key.(type) {
271 case addrs.IntKey:
272 suffix = fmt.Sprintf(".%d", int(tk))
273 case addrs.StringKey:
274 suffix = fmt.Sprintf(".%s", string(tk))
275 }
276 return tr.Resource.String() + suffix
277}
278
119// StateReferences returns the dependencies to put into the state for 279// StateReferences returns the dependencies to put into the state for
120// this resource. 280// this resource.
121func (n *NodeAbstractResource) StateReferences() []string { 281func (n *NodeAbstractResourceInstance) StateReferences() []addrs.Referenceable {
122 self := n.ReferenceableName() 282 selfAddrs := n.ReferenceableAddrs()
123 283
124 // Determine what our "prefix" is for checking for references to 284 // Since we don't include the source location references in our
125 // ourself. 285 // results from this method, we'll also filter out duplicates:
126 addrCopy := n.Addr.Copy() 286 // there's no point in listing the same object twice without
127 addrCopy.Index = -1 287 // that additional context.
128 selfPrefix := addrCopy.String() + "." 288 seen := map[string]struct{}{}
289
290 // Pretend that we've already "seen" all of our own addresses so that we
291 // won't record self-references in the state. This can arise if, for
292 // example, a provisioner for a resource refers to the resource itself,
293 // which is valid (since provisioners always run after apply) but should
294 // not create an explicit dependency edge.
295 for _, selfAddr := range selfAddrs {
296 seen[selfAddr.String()] = struct{}{}
297 if riAddr, ok := selfAddr.(addrs.ResourceInstance); ok {
298 seen[riAddr.ContainingResource().String()] = struct{}{}
299 }
300 }
129 301
130 depsRaw := n.References() 302 depsRaw := n.References()
131 deps := make([]string, 0, len(depsRaw)) 303 deps := make([]addrs.Referenceable, 0, len(depsRaw))
132 for _, d := range depsRaw { 304 for _, d := range depsRaw {
133 // Ignore any variable dependencies 305 subj := d.Subject
134 if strings.HasPrefix(d, "var.") { 306 if mco, isOutput := subj.(addrs.ModuleCallOutput); isOutput {
135 continue 307 // For state dependencies, we simplify outputs to just refer
308 // to the module as a whole. It's not really clear why we do this,
309 // but this logic is preserved from before the 0.12 rewrite of
310 // this function.
311 subj = mco.Call
136 } 312 }
137 313
138 // If this has a backup ref, ignore those for now. The old state 314 k := subj.String()
139 // file never contained those and I'd rather store the rich types we 315 if _, exists := seen[k]; exists {
140 // add in the future.
141 if idx := strings.IndexRune(d, '/'); idx != -1 {
142 d = d[:idx]
143 }
144
145 // If we're referencing ourself, then ignore it
146 found := false
147 for _, s := range self {
148 if d == s {
149 found = true
150 }
151 }
152 if found {
153 continue 316 continue
154 } 317 }
155 318 seen[k] = struct{}{}
156 // If this is a reference to ourself and a specific index, we keep 319 switch tr := subj.(type) {
157 // it. For example, if this resource is "foo.bar" and the reference 320 case addrs.ResourceInstance:
158 // is "foo.bar.0" then we keep it exact. Otherwise, we strip it. 321 deps = append(deps, tr)
159 if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) { 322 case addrs.Resource:
160 d = d[:len(d)-2] 323 deps = append(deps, tr)
161 } 324 case addrs.ModuleCallInstance:
162 325 deps = append(deps, tr)
163 // This is sad. The dependencies are currently in the format of 326 default:
164 // "module.foo.bar" (the full field). This strips the field off. 327 // No other reference types are recorded in the state.
165 if strings.HasPrefix(d, "module.") {
166 parts := strings.SplitN(d, ".", 3)
167 d = strings.Join(parts[0:2], ".")
168 } 328 }
169
170 deps = append(deps, d)
171 } 329 }
172 330
331 // We'll also sort them, since that'll avoid creating changes in the
332 // serialized state that make no semantic difference.
333 sort.Slice(deps, func(i, j int) bool {
334 // Simple string-based sort because we just care about consistency,
335 // not user-friendliness.
336 return deps[i].String() < deps[j].String()
337 })
338
173 return deps 339 return deps
174} 340}
175 341
176func (n *NodeAbstractResource) SetProvider(p string) { 342func (n *NodeAbstractResource) SetProvider(p addrs.AbsProviderConfig) {
177 n.ResolvedProvider = p 343 n.ResolvedProvider = p
178} 344}
179 345
180// GraphNodeProviderConsumer 346// GraphNodeProviderConsumer
181func (n *NodeAbstractResource) ProvidedBy() string { 347func (n *NodeAbstractResource) ProvidedBy() (addrs.AbsProviderConfig, bool) {
182 // If we have a config we prefer that above all else 348 // If we have a config we prefer that above all else
183 if n.Config != nil { 349 if n.Config != nil {
184 return resourceProvider(n.Config.Type, n.Config.Provider) 350 relAddr := n.Config.ProviderConfigAddr()
351 return relAddr.Absolute(n.Path()), false
352 }
353
354 // Use our type and containing module path to guess a provider configuration address
355 return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Addr.Module), false
356}
357
358// GraphNodeProviderConsumer
359func (n *NodeAbstractResourceInstance) ProvidedBy() (addrs.AbsProviderConfig, bool) {
360 // If we have a config we prefer that above all else
361 if n.Config != nil {
362 relAddr := n.Config.ProviderConfigAddr()
363 return relAddr.Absolute(n.Path()), false
185 } 364 }
186 365
187 // If we have state, then we will use the provider from there 366 // If we have state, then we will use the provider from there
188 if n.ResourceState != nil && n.ResourceState.Provider != "" { 367 if n.ResourceState != nil {
189 return n.ResourceState.Provider 368 // An address from the state must match exactly, since we must ensure
369 // we refresh/destroy a resource with the same provider configuration
370 // that created it.
371 return n.ResourceState.ProviderConfig, true
190 } 372 }
191 373
192 // Use our type 374 // Use our type and containing module path to guess a provider configuration address
193 return resourceProvider(n.Addr.Type, "") 375 return n.Addr.Resource.DefaultProviderConfig().Absolute(n.Path()), false
194} 376}
195 377
196// GraphNodeProvisionerConsumer 378// GraphNodeProvisionerConsumer
197func (n *NodeAbstractResource) ProvisionedBy() []string { 379func (n *NodeAbstractResource) ProvisionedBy() []string {
198 // If we have no configuration, then we have no provisioners 380 // If we have no configuration, then we have no provisioners
199 if n.Config == nil { 381 if n.Config == nil || n.Config.Managed == nil {
200 return nil 382 return nil
201 } 383 }
202 384
203 // Build the list of provisioners we need based on the configuration. 385 // Build the list of provisioners we need based on the configuration.
204 // It is okay to have duplicates here. 386 // It is okay to have duplicates here.
205 result := make([]string, len(n.Config.Provisioners)) 387 result := make([]string, len(n.Config.Managed.Provisioners))
206 for i, p := range n.Config.Provisioners { 388 for i, p := range n.Config.Managed.Provisioners {
207 result[i] = p.Type 389 result[i] = p.Type
208 } 390 }
209 391
210 return result 392 return result
211} 393}
212 394
213// GraphNodeResource, GraphNodeAttachResourceState 395// GraphNodeProvisionerConsumer
214func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress { 396func (n *NodeAbstractResource) AttachProvisionerSchema(name string, schema *configschema.Block) {
397 if n.ProvisionerSchemas == nil {
398 n.ProvisionerSchemas = make(map[string]*configschema.Block)
399 }
400 n.ProvisionerSchemas[name] = schema
401}
402
403// GraphNodeResource
404func (n *NodeAbstractResource) ResourceAddr() addrs.AbsResource {
215 return n.Addr 405 return n.Addr
216} 406}
217 407
408// GraphNodeResourceInstance
409func (n *NodeAbstractResourceInstance) ResourceInstanceAddr() addrs.AbsResourceInstance {
410 return n.NodeAbstractResource.Addr.Instance(n.InstanceKey)
411}
412
218// GraphNodeAddressable, TODO: remove, used by target, should unify 413// GraphNodeAddressable, TODO: remove, used by target, should unify
219func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress { 414func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
220 return n.ResourceAddr() 415 return NewLegacyResourceAddress(n.Addr)
221} 416}
222 417
223// GraphNodeTargetable 418// GraphNodeTargetable
224func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) { 419func (n *NodeAbstractResource) SetTargets(targets []addrs.Targetable) {
225 n.Targets = targets 420 n.Targets = targets
226} 421}
227 422
228// GraphNodeAttachResourceState 423// GraphNodeAttachResourceState
229func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) { 424func (n *NodeAbstractResourceInstance) AttachResourceState(s *states.Resource) {
230 n.ResourceState = s 425 n.ResourceState = s
231} 426}
232 427
233// GraphNodeAttachResourceConfig 428// GraphNodeAttachResourceConfig
234func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) { 429func (n *NodeAbstractResource) AttachResourceConfig(c *configs.Resource) {
235 n.Config = c 430 n.Config = c
236} 431}
237 432
433// GraphNodeAttachResourceSchema impl
434func (n *NodeAbstractResource) AttachResourceSchema(schema *configschema.Block, version uint64) {
435 n.Schema = schema
436 n.SchemaVersion = version
437}
438
238// GraphNodeDotter impl. 439// GraphNodeDotter impl.
239func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode { 440func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
240 return &dag.DotNode{ 441 return &dag.DotNode{
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
deleted file mode 100644
index 573570d..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
+++ /dev/null
@@ -1,50 +0,0 @@
1package terraform
2
3// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
4// if the resource has a `count` value that needs to be expanded.
5//
6// The embedder should implement `DynamicExpand` to process the count.
7type NodeAbstractCountResource struct {
8 *NodeAbstractResource
9
10 // Validate, if true, will perform the validation for the count.
11 // This should only be turned on for the "validate" operation.
12 Validate bool
13}
14
15// GraphNodeEvalable
16func (n *NodeAbstractCountResource) EvalTree() EvalNode {
17 // We only check if the count is computed if we're not validating.
18 // If we're validating we allow computed counts since they just turn
19 // into more computed values.
20 var evalCountCheckComputed EvalNode
21 if !n.Validate {
22 evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
23 }
24
25 return &EvalSequence{
26 Nodes: []EvalNode{
27 // The EvalTree for a plannable resource primarily involves
28 // interpolating the count since it can contain variables
29 // we only just received access to.
30 //
31 // With the interpolated count, we can then DynamicExpand
32 // into the proper number of instances.
33 &EvalInterpolate{Config: n.Config.RawCount},
34
35 // Check if the count is computed
36 evalCountCheckComputed,
37
38 // If validation is enabled, perform the validation
39 &EvalIf{
40 If: func(ctx EvalContext) (bool, error) {
41 return n.Validate, nil
42 },
43
44 Then: &EvalValidateCount{Resource: n.Config},
45 },
46
47 &EvalCountFixZeroOneBoundary{Resource: n.Config},
48 },
49 }
50}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
index 40ee1cf..3e2fff3 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
@@ -1,400 +1,71 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/lang"
7) 9)
8 10
9// NodeApplyableResource represents a resource that is "applyable": 11// NodeApplyableResource represents a resource that is "applyable":
10// it is ready to be applied and is represented by a diff. 12// it may need to have its record in the state adjusted to match configuration.
13//
14// Unlike in the plan walk, this resource node does not DynamicExpand. Instead,
15// it should be inserted into the same graph as any instances of the nodes
16// with dependency edges ensuring that the resource is evaluated before any
17// of its instances, which will turn ensure that the whole-resource record
18// in the state is suitably prepared to receive any updates to instances.
11type NodeApplyableResource struct { 19type NodeApplyableResource struct {
12 *NodeAbstractResource 20 *NodeAbstractResource
13} 21}
14 22
15// GraphNodeCreator 23var (
16func (n *NodeApplyableResource) CreateAddr() *ResourceAddress { 24 _ GraphNodeResource = (*NodeApplyableResource)(nil)
17 return n.NodeAbstractResource.Addr 25 _ GraphNodeEvalable = (*NodeApplyableResource)(nil)
18} 26 _ GraphNodeProviderConsumer = (*NodeApplyableResource)(nil)
19 27 _ GraphNodeAttachResourceConfig = (*NodeApplyableResource)(nil)
20// GraphNodeReferencer, overriding NodeAbstractResource 28 _ GraphNodeReferencer = (*NodeApplyableResource)(nil)
21func (n *NodeApplyableResource) References() []string { 29)
22 result := n.NodeAbstractResource.References()
23
24 // The "apply" side of a resource generally also depends on the
25 // destruction of its dependencies as well. For example, if a LB
26 // references a set of VMs with ${vm.foo.*.id}, then we must wait for
27 // the destruction so we get the newly updated list of VMs.
28 //
29 // The exception here is CBD. When CBD is set, we don't do this since
30 // it would create a cycle. By not creating a cycle, we require two
31 // applies since the first apply the creation step will use the OLD
32 // values (pre-destroy) and the second step will update.
33 //
34 // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
35 // We mimic that behavior here now and can improve upon it in the future.
36 //
37 // This behavior is tested in graph_build_apply_test.go to test ordering.
38 cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
39 if !cbd {
40 // The "apply" side of a resource always depends on the destruction
41 // of all its dependencies in addition to the creation.
42 for _, v := range result {
43 result = append(result, v+".destroy")
44 }
45 }
46 30
47 return result 31func (n *NodeApplyableResource) Name() string {
32 return n.NodeAbstractResource.Name() + " (prepare state)"
48} 33}
49 34
50// GraphNodeEvalable 35func (n *NodeApplyableResource) References() []*addrs.Reference {
51func (n *NodeApplyableResource) EvalTree() EvalNode { 36 if n.Config == nil {
52 addr := n.NodeAbstractResource.Addr 37 log.Printf("[WARN] NodeApplyableResource %q: no configuration, so can't determine References", dag.VertexName(n))
53 38 return nil
54 // stateId is the ID to put into the state
55 stateId := addr.stateId()
56
57 // Build the instance info. More of this will be populated during eval
58 info := &InstanceInfo{
59 Id: stateId,
60 Type: addr.Type,
61 } 39 }
62 40
63 // Build the resource for eval 41 var result []*addrs.Reference
64 resource := &Resource{
65 Name: addr.Name,
66 Type: addr.Type,
67 CountIndex: addr.Index,
68 }
69 if resource.CountIndex < 0 {
70 resource.CountIndex = 0
71 }
72 42
73 // Determine the dependencies for the state. 43 // Since this node type only updates resource-level metadata, we only
74 stateDeps := n.StateReferences() 44 // need to worry about the parts of the configuration that affect
45 // our "each mode": the count and for_each meta-arguments.
46 refs, _ := lang.ReferencesInExpr(n.Config.Count)
47 result = append(result, refs...)
48 refs, _ = lang.ReferencesInExpr(n.Config.ForEach)
49 result = append(result, refs...)
75 50
76 // Eval info is different depending on what kind of resource this is 51 return result
77 switch n.Config.Mode {
78 case config.ManagedResourceMode:
79 return n.evalTreeManagedResource(
80 stateId, info, resource, stateDeps,
81 )
82 case config.DataResourceMode:
83 return n.evalTreeDataResource(
84 stateId, info, resource, stateDeps)
85 default:
86 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
87 }
88} 52}
89 53
90func (n *NodeApplyableResource) evalTreeDataResource( 54// GraphNodeEvalable
91 stateId string, info *InstanceInfo, 55func (n *NodeApplyableResource) EvalTree() EvalNode {
92 resource *Resource, stateDeps []string) EvalNode { 56 addr := n.ResourceAddr()
93 var provider ResourceProvider 57 config := n.Config
94 var config *ResourceConfig 58 providerAddr := n.ResolvedProvider
95 var diff *InstanceDiff 59
96 var state *InstanceState 60 if config == nil {
97 61 // Nothing to do, then.
98 return &EvalSequence{ 62 log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr)
99 Nodes: []EvalNode{ 63 return &EvalNoop{}
100 // Build the instance info
101 &EvalInstanceInfo{
102 Info: info,
103 },
104
105 // Get the saved diff for apply
106 &EvalReadDiff{
107 Name: stateId,
108 Diff: &diff,
109 },
110
111 // Stop here if we don't actually have a diff
112 &EvalIf{
113 If: func(ctx EvalContext) (bool, error) {
114 if diff == nil {
115 return true, EvalEarlyExitError{}
116 }
117
118 if diff.GetAttributesLen() == 0 {
119 return true, EvalEarlyExitError{}
120 }
121
122 return true, nil
123 },
124 Then: EvalNoop{},
125 },
126
127 // Normally we interpolate count as a preparation step before
128 // a DynamicExpand, but an apply graph has pre-expanded nodes
129 // and so the count would otherwise never be interpolated.
130 //
131 // This is redundant when there are multiple instances created
132 // from the same config (count > 1) but harmless since the
133 // underlying structures have mutexes to make this concurrency-safe.
134 //
135 // In most cases this isn't actually needed because we dealt with
136 // all of the counts during the plan walk, but we do it here
137 // for completeness because other code assumes that the
138 // final count is always available during interpolation.
139 //
140 // Here we are just populating the interpolated value in-place
141 // inside this RawConfig object, like we would in
142 // NodeAbstractCountResource.
143 &EvalInterpolate{
144 Config: n.Config.RawCount,
145 ContinueOnErr: true,
146 },
147
148 // We need to re-interpolate the config here, rather than
149 // just using the diff's values directly, because we've
150 // potentially learned more variable values during the
151 // apply pass that weren't known when the diff was produced.
152 &EvalInterpolate{
153 Config: n.Config.RawConfig.Copy(),
154 Resource: resource,
155 Output: &config,
156 },
157
158 &EvalGetProvider{
159 Name: n.ResolvedProvider,
160 Output: &provider,
161 },
162
163 // Make a new diff with our newly-interpolated config.
164 &EvalReadDataDiff{
165 Info: info,
166 Config: &config,
167 Previous: &diff,
168 Provider: &provider,
169 Output: &diff,
170 },
171
172 &EvalReadDataApply{
173 Info: info,
174 Diff: &diff,
175 Provider: &provider,
176 Output: &state,
177 },
178
179 &EvalWriteState{
180 Name: stateId,
181 ResourceType: n.Config.Type,
182 Provider: n.ResolvedProvider,
183 Dependencies: stateDeps,
184 State: &state,
185 },
186
187 // Clear the diff now that we've applied it, so
188 // later nodes won't see a diff that's now a no-op.
189 &EvalWriteDiff{
190 Name: stateId,
191 Diff: nil,
192 },
193
194 &EvalUpdateStateHook{},
195 },
196 } 64 }
197}
198
199func (n *NodeApplyableResource) evalTreeManagedResource(
200 stateId string, info *InstanceInfo,
201 resource *Resource, stateDeps []string) EvalNode {
202 // Declare a bunch of variables that are used for state during
203 // evaluation. Most of this are written to by-address below.
204 var provider ResourceProvider
205 var diff, diffApply *InstanceDiff
206 var state *InstanceState
207 var resourceConfig *ResourceConfig
208 var err error
209 var createNew bool
210 var createBeforeDestroyEnabled bool
211
212 return &EvalSequence{
213 Nodes: []EvalNode{
214 // Build the instance info
215 &EvalInstanceInfo{
216 Info: info,
217 },
218
219 // Get the saved diff for apply
220 &EvalReadDiff{
221 Name: stateId,
222 Diff: &diffApply,
223 },
224
225 // We don't want to do any destroys
226 &EvalIf{
227 If: func(ctx EvalContext) (bool, error) {
228 if diffApply == nil {
229 return true, EvalEarlyExitError{}
230 }
231
232 if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
233 return true, EvalEarlyExitError{}
234 }
235
236 diffApply.SetDestroy(false)
237 return true, nil
238 },
239 Then: EvalNoop{},
240 },
241
242 &EvalIf{
243 If: func(ctx EvalContext) (bool, error) {
244 destroy := false
245 if diffApply != nil {
246 destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
247 }
248
249 createBeforeDestroyEnabled =
250 n.Config.Lifecycle.CreateBeforeDestroy &&
251 destroy
252
253 return createBeforeDestroyEnabled, nil
254 },
255 Then: &EvalDeposeState{
256 Name: stateId,
257 },
258 },
259
260 // Normally we interpolate count as a preparation step before
261 // a DynamicExpand, but an apply graph has pre-expanded nodes
262 // and so the count would otherwise never be interpolated.
263 //
264 // This is redundant when there are multiple instances created
265 // from the same config (count > 1) but harmless since the
266 // underlying structures have mutexes to make this concurrency-safe.
267 //
268 // In most cases this isn't actually needed because we dealt with
269 // all of the counts during the plan walk, but we need to do this
270 // in order to support interpolation of resource counts from
271 // apply-time-interpolated expressions, such as those in
272 // "provisioner" blocks.
273 //
274 // Here we are just populating the interpolated value in-place
275 // inside this RawConfig object, like we would in
276 // NodeAbstractCountResource.
277 &EvalInterpolate{
278 Config: n.Config.RawCount,
279 ContinueOnErr: true,
280 },
281
282 &EvalInterpolate{
283 Config: n.Config.RawConfig.Copy(),
284 Resource: resource,
285 Output: &resourceConfig,
286 },
287 &EvalGetProvider{
288 Name: n.ResolvedProvider,
289 Output: &provider,
290 },
291 &EvalReadState{
292 Name: stateId,
293 Output: &state,
294 },
295 // Re-run validation to catch any errors we missed, e.g. type
296 // mismatches on computed values.
297 &EvalValidateResource{
298 Provider: &provider,
299 Config: &resourceConfig,
300 ResourceName: n.Config.Name,
301 ResourceType: n.Config.Type,
302 ResourceMode: n.Config.Mode,
303 IgnoreWarnings: true,
304 },
305 &EvalDiff{
306 Info: info,
307 Config: &resourceConfig,
308 Resource: n.Config,
309 Provider: &provider,
310 Diff: &diffApply,
311 State: &state,
312 OutputDiff: &diffApply,
313 },
314
315 // Get the saved diff
316 &EvalReadDiff{
317 Name: stateId,
318 Diff: &diff,
319 },
320
321 // Compare the diffs
322 &EvalCompareDiff{
323 Info: info,
324 One: &diff,
325 Two: &diffApply,
326 },
327
328 &EvalGetProvider{
329 Name: n.ResolvedProvider,
330 Output: &provider,
331 },
332 &EvalReadState{
333 Name: stateId,
334 Output: &state,
335 },
336 // Call pre-apply hook
337 &EvalApplyPre{
338 Info: info,
339 State: &state,
340 Diff: &diffApply,
341 },
342 &EvalApply{
343 Info: info,
344 State: &state,
345 Diff: &diffApply,
346 Provider: &provider,
347 Output: &state,
348 Error: &err,
349 CreateNew: &createNew,
350 },
351 &EvalWriteState{
352 Name: stateId,
353 ResourceType: n.Config.Type,
354 Provider: n.ResolvedProvider,
355 Dependencies: stateDeps,
356 State: &state,
357 },
358 &EvalApplyProvisioners{
359 Info: info,
360 State: &state,
361 Resource: n.Config,
362 InterpResource: resource,
363 CreateNew: &createNew,
364 Error: &err,
365 When: config.ProvisionerWhenCreate,
366 },
367 &EvalIf{
368 If: func(ctx EvalContext) (bool, error) {
369 return createBeforeDestroyEnabled && err != nil, nil
370 },
371 Then: &EvalUndeposeState{
372 Name: stateId,
373 State: &state,
374 },
375 Else: &EvalWriteState{
376 Name: stateId,
377 ResourceType: n.Config.Type,
378 Provider: n.ResolvedProvider,
379 Dependencies: stateDeps,
380 State: &state,
381 },
382 },
383
384 // We clear the diff out here so that future nodes
385 // don't see a diff that is already complete. There
386 // is no longer a diff!
387 &EvalWriteDiff{
388 Name: stateId,
389 Diff: nil,
390 },
391 65
392 &EvalApplyPost{ 66 return &EvalWriteResourceState{
393 Info: info, 67 Addr: addr.Resource,
394 State: &state, 68 Config: config,
395 Error: &err, 69 ProviderAddr: providerAddr,
396 },
397 &EvalUpdateStateHook{},
398 },
399 } 70 }
400} 71}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
new file mode 100644
index 0000000..dad7bfc
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
@@ -0,0 +1,433 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/zclconf/go-cty/cty"
7
8 "github.com/hashicorp/terraform/addrs"
9 "github.com/hashicorp/terraform/configs"
10 "github.com/hashicorp/terraform/plans"
11 "github.com/hashicorp/terraform/providers"
12 "github.com/hashicorp/terraform/states"
13 "github.com/hashicorp/terraform/tfdiags"
14)
15
16// NodeApplyableResourceInstance represents a resource instance that is
17// "applyable": it is ready to be applied and is represented by a diff.
18//
19// This node is for a specific instance of a resource. It will usually be
20// accompanied in the graph by a NodeApplyableResource representing its
21// containing resource, and should depend on that node to ensure that the
22// state is properly prepared to receive changes to instances.
23type NodeApplyableResourceInstance struct {
24 *NodeAbstractResourceInstance
25
26 destroyNode GraphNodeDestroyerCBD
27 graphNodeDeposer // implementation of GraphNodeDeposer
28}
29
30var (
31 _ GraphNodeResource = (*NodeApplyableResourceInstance)(nil)
32 _ GraphNodeResourceInstance = (*NodeApplyableResourceInstance)(nil)
33 _ GraphNodeCreator = (*NodeApplyableResourceInstance)(nil)
34 _ GraphNodeReferencer = (*NodeApplyableResourceInstance)(nil)
35 _ GraphNodeDeposer = (*NodeApplyableResourceInstance)(nil)
36 _ GraphNodeEvalable = (*NodeApplyableResourceInstance)(nil)
37)
38
39// GraphNodeAttachDestroyer
40func (n *NodeApplyableResourceInstance) AttachDestroyNode(d GraphNodeDestroyerCBD) {
41 n.destroyNode = d
42}
43
44// createBeforeDestroy checks this nodes config status and the status af any
45// companion destroy node for CreateBeforeDestroy.
46func (n *NodeApplyableResourceInstance) createBeforeDestroy() bool {
47 cbd := false
48
49 if n.Config != nil && n.Config.Managed != nil {
50 cbd = n.Config.Managed.CreateBeforeDestroy
51 }
52
53 if n.destroyNode != nil {
54 cbd = cbd || n.destroyNode.CreateBeforeDestroy()
55 }
56
57 return cbd
58}
59
60// GraphNodeCreator
61func (n *NodeApplyableResourceInstance) CreateAddr() *addrs.AbsResourceInstance {
62 addr := n.ResourceInstanceAddr()
63 return &addr
64}
65
66// GraphNodeReferencer, overriding NodeAbstractResourceInstance
67func (n *NodeApplyableResourceInstance) References() []*addrs.Reference {
68 // Start with the usual resource instance implementation
69 ret := n.NodeAbstractResourceInstance.References()
70
71 // Applying a resource must also depend on the destruction of any of its
72 // dependencies, since this may for example affect the outcome of
73 // evaluating an entire list of resources with "count" set (by reducing
74 // the count).
75 //
76 // However, we can't do this in create_before_destroy mode because that
77 // would create a dependency cycle. We make a compromise here of requiring
78 // changes to be updated across two applies in this case, since the first
79 // plan will use the old values.
80 if !n.createBeforeDestroy() {
81 for _, ref := range ret {
82 switch tr := ref.Subject.(type) {
83 case addrs.ResourceInstance:
84 newRef := *ref // shallow copy so we can mutate
85 newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
86 newRef.Remaining = nil // can't access attributes of something being destroyed
87 ret = append(ret, &newRef)
88 case addrs.Resource:
89 newRef := *ref // shallow copy so we can mutate
90 newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
91 newRef.Remaining = nil // can't access attributes of something being destroyed
92 ret = append(ret, &newRef)
93 }
94 }
95 }
96
97 return ret
98}
99
100// GraphNodeEvalable
101func (n *NodeApplyableResourceInstance) EvalTree() EvalNode {
102 addr := n.ResourceInstanceAddr()
103
104 // State still uses legacy-style internal ids, so we need to shim to get
105 // a suitable key to use.
106 stateId := NewLegacyResourceInstanceAddress(addr).stateId()
107
108 // Determine the dependencies for the state.
109 stateDeps := n.StateReferences()
110
111 if n.Config == nil {
112 // This should not be possible, but we've got here in at least one
113 // case as discussed in the following issue:
114 // https://github.com/hashicorp/terraform/issues/21258
115 // To avoid an outright crash here, we'll instead return an explicit
116 // error.
117 var diags tfdiags.Diagnostics
118 diags = diags.Append(tfdiags.Sourceless(
119 tfdiags.Error,
120 "Resource node has no configuration attached",
121 fmt.Sprintf(
122 "The graph node for %s has no configuration attached to it. This suggests a bug in Terraform's apply graph builder; please report it!",
123 addr,
124 ),
125 ))
126 err := diags.Err()
127 return &EvalReturnError{
128 Error: &err,
129 }
130 }
131
132 // Eval info is different depending on what kind of resource this is
133 switch n.Config.Mode {
134 case addrs.ManagedResourceMode:
135 return n.evalTreeManagedResource(addr, stateId, stateDeps)
136 case addrs.DataResourceMode:
137 return n.evalTreeDataResource(addr, stateId, stateDeps)
138 default:
139 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
140 }
141}
142
143func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
144 var provider providers.Interface
145 var providerSchema *ProviderSchema
146 var change *plans.ResourceInstanceChange
147 var state *states.ResourceInstanceObject
148
149 return &EvalSequence{
150 Nodes: []EvalNode{
151 &EvalGetProvider{
152 Addr: n.ResolvedProvider,
153 Output: &provider,
154 Schema: &providerSchema,
155 },
156
157 // Get the saved diff for apply
158 &EvalReadDiff{
159 Addr: addr.Resource,
160 ProviderSchema: &providerSchema,
161 Change: &change,
162 },
163
164 // Stop early if we don't actually have a diff
165 &EvalIf{
166 If: func(ctx EvalContext) (bool, error) {
167 if change == nil {
168 return true, EvalEarlyExitError{}
169 }
170 return true, nil
171 },
172 Then: EvalNoop{},
173 },
174
175 // In this particular call to EvalReadData we include our planned
176 // change, which signals that we expect this read to complete fully
177 // with no unknown values; it'll produce an error if not.
178 &EvalReadData{
179 Addr: addr.Resource,
180 Config: n.Config,
181 Dependencies: n.StateReferences(),
182 Planned: &change, // setting this indicates that the result must be complete
183 Provider: &provider,
184 ProviderAddr: n.ResolvedProvider,
185 ProviderSchema: &providerSchema,
186 OutputState: &state,
187 },
188
189 &EvalWriteState{
190 Addr: addr.Resource,
191 ProviderAddr: n.ResolvedProvider,
192 ProviderSchema: &providerSchema,
193 State: &state,
194 },
195
196 // Clear the diff now that we've applied it, so
197 // later nodes won't see a diff that's now a no-op.
198 &EvalWriteDiff{
199 Addr: addr.Resource,
200 ProviderSchema: &providerSchema,
201 Change: nil,
202 },
203
204 &EvalUpdateStateHook{},
205 },
206 }
207}
208
209func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
210 // Declare a bunch of variables that are used for state during
211 // evaluation. Most of this are written to by-address below.
212 var provider providers.Interface
213 var providerSchema *ProviderSchema
214 var diff, diffApply *plans.ResourceInstanceChange
215 var state *states.ResourceInstanceObject
216 var err error
217 var createNew bool
218 var createBeforeDestroyEnabled bool
219 var configVal cty.Value
220 var deposedKey states.DeposedKey
221
222 return &EvalSequence{
223 Nodes: []EvalNode{
224 &EvalGetProvider{
225 Addr: n.ResolvedProvider,
226 Output: &provider,
227 Schema: &providerSchema,
228 },
229
230 // Get the saved diff for apply
231 &EvalReadDiff{
232 Addr: addr.Resource,
233 ProviderSchema: &providerSchema,
234 Change: &diffApply,
235 },
236
237 // We don't want to do any destroys
238 // (these are handled by NodeDestroyResourceInstance instead)
239 &EvalIf{
240 If: func(ctx EvalContext) (bool, error) {
241 if diffApply == nil {
242 return true, EvalEarlyExitError{}
243 }
244 if diffApply.Action == plans.Delete {
245 return true, EvalEarlyExitError{}
246 }
247 return true, nil
248 },
249 Then: EvalNoop{},
250 },
251
252 &EvalIf{
253 If: func(ctx EvalContext) (bool, error) {
254 destroy := false
255 if diffApply != nil {
256 destroy = (diffApply.Action == plans.Delete || diffApply.Action.IsReplace())
257 }
258 if destroy && n.createBeforeDestroy() {
259 createBeforeDestroyEnabled = true
260 }
261 return createBeforeDestroyEnabled, nil
262 },
263 Then: &EvalDeposeState{
264 Addr: addr.Resource,
265 ForceKey: n.PreallocatedDeposedKey,
266 OutputKey: &deposedKey,
267 },
268 },
269
270 &EvalReadState{
271 Addr: addr.Resource,
272 Provider: &provider,
273 ProviderSchema: &providerSchema,
274
275 Output: &state,
276 },
277
278 // Get the saved diff
279 &EvalReadDiff{
280 Addr: addr.Resource,
281 ProviderSchema: &providerSchema,
282 Change: &diff,
283 },
284
285 // Make a new diff, in case we've learned new values in the state
286 // during apply which we can now incorporate.
287 &EvalDiff{
288 Addr: addr.Resource,
289 Config: n.Config,
290 Provider: &provider,
291 ProviderAddr: n.ResolvedProvider,
292 ProviderSchema: &providerSchema,
293 State: &state,
294 PreviousDiff: &diff,
295 OutputChange: &diffApply,
296 OutputValue: &configVal,
297 OutputState: &state,
298 },
299
300 // Compare the diffs
301 &EvalCheckPlannedChange{
302 Addr: addr.Resource,
303 ProviderAddr: n.ResolvedProvider,
304 ProviderSchema: &providerSchema,
305 Planned: &diff,
306 Actual: &diffApply,
307 },
308
309 &EvalGetProvider{
310 Addr: n.ResolvedProvider,
311 Output: &provider,
312 Schema: &providerSchema,
313 },
314 &EvalReadState{
315 Addr: addr.Resource,
316 Provider: &provider,
317 ProviderSchema: &providerSchema,
318
319 Output: &state,
320 },
321
322 &EvalReduceDiff{
323 Addr: addr.Resource,
324 InChange: &diffApply,
325 Destroy: false,
326 OutChange: &diffApply,
327 },
328
329 // EvalReduceDiff may have simplified our planned change
330 // into a NoOp if it only requires destroying, since destroying
331 // is handled by NodeDestroyResourceInstance.
332 &EvalIf{
333 If: func(ctx EvalContext) (bool, error) {
334 if diffApply == nil || diffApply.Action == plans.NoOp {
335 return true, EvalEarlyExitError{}
336 }
337 return true, nil
338 },
339 Then: EvalNoop{},
340 },
341
342 // Call pre-apply hook
343 &EvalApplyPre{
344 Addr: addr.Resource,
345 State: &state,
346 Change: &diffApply,
347 },
348 &EvalApply{
349 Addr: addr.Resource,
350 Config: n.Config,
351 Dependencies: n.StateReferences(),
352 State: &state,
353 Change: &diffApply,
354 Provider: &provider,
355 ProviderAddr: n.ResolvedProvider,
356 ProviderSchema: &providerSchema,
357 Output: &state,
358 Error: &err,
359 CreateNew: &createNew,
360 },
361 &EvalMaybeTainted{
362 Addr: addr.Resource,
363 State: &state,
364 Change: &diffApply,
365 Error: &err,
366 StateOutput: &state,
367 },
368 &EvalWriteState{
369 Addr: addr.Resource,
370 ProviderAddr: n.ResolvedProvider,
371 ProviderSchema: &providerSchema,
372 State: &state,
373 },
374 &EvalApplyProvisioners{
375 Addr: addr.Resource,
376 State: &state, // EvalApplyProvisioners will skip if already tainted
377 ResourceConfig: n.Config,
378 CreateNew: &createNew,
379 Error: &err,
380 When: configs.ProvisionerWhenCreate,
381 },
382 &EvalMaybeTainted{
383 Addr: addr.Resource,
384 State: &state,
385 Change: &diffApply,
386 Error: &err,
387 StateOutput: &state,
388 },
389 &EvalWriteState{
390 Addr: addr.Resource,
391 ProviderAddr: n.ResolvedProvider,
392 ProviderSchema: &providerSchema,
393 State: &state,
394 },
395 &EvalIf{
396 If: func(ctx EvalContext) (bool, error) {
397 return createBeforeDestroyEnabled && err != nil, nil
398 },
399 Then: &EvalMaybeRestoreDeposedObject{
400 Addr: addr.Resource,
401 Key: &deposedKey,
402 },
403 },
404
405 // We clear the diff out here so that future nodes
406 // don't see a diff that is already complete. There
407 // is no longer a diff!
408 &EvalIf{
409 If: func(ctx EvalContext) (bool, error) {
410 if !diff.Action.IsReplace() {
411 return true, nil
412 }
413 if !n.createBeforeDestroy() {
414 return true, nil
415 }
416 return false, nil
417 },
418 Then: &EvalWriteDiff{
419 Addr: addr.Resource,
420 ProviderSchema: &providerSchema,
421 Change: nil,
422 },
423 },
424
425 &EvalApplyPost{
426 Addr: addr.Resource,
427 State: &state,
428 Error: &err,
429 },
430 &EvalUpdateStateHook{},
431 },
432 }
433}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
index 657bbee..ca2267e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
@@ -2,81 +2,114 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
5 6
6 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/plans"
8 "github.com/hashicorp/terraform/providers"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/configs"
12 "github.com/hashicorp/terraform/states"
7) 13)
8 14
9// NodeDestroyResource represents a resource that is to be destroyed. 15// NodeDestroyResourceInstance represents a resource instance that is to be
10type NodeDestroyResource struct { 16// destroyed.
11 *NodeAbstractResource 17type NodeDestroyResourceInstance struct {
18 *NodeAbstractResourceInstance
19
20 // If DeposedKey is set to anything other than states.NotDeposed then
21 // this node destroys a deposed object of the associated instance
22 // rather than its current object.
23 DeposedKey states.DeposedKey
24
25 CreateBeforeDestroyOverride *bool
12} 26}
13 27
14func (n *NodeDestroyResource) Name() string { 28var (
15 return n.NodeAbstractResource.Name() + " (destroy)" 29 _ GraphNodeResource = (*NodeDestroyResourceInstance)(nil)
30 _ GraphNodeResourceInstance = (*NodeDestroyResourceInstance)(nil)
31 _ GraphNodeDestroyer = (*NodeDestroyResourceInstance)(nil)
32 _ GraphNodeDestroyerCBD = (*NodeDestroyResourceInstance)(nil)
33 _ GraphNodeReferenceable = (*NodeDestroyResourceInstance)(nil)
34 _ GraphNodeReferencer = (*NodeDestroyResourceInstance)(nil)
35 _ GraphNodeEvalable = (*NodeDestroyResourceInstance)(nil)
36 _ GraphNodeProviderConsumer = (*NodeDestroyResourceInstance)(nil)
37 _ GraphNodeProvisionerConsumer = (*NodeDestroyResourceInstance)(nil)
38)
39
40func (n *NodeDestroyResourceInstance) Name() string {
41 if n.DeposedKey != states.NotDeposed {
42 return fmt.Sprintf("%s (destroy deposed %s)", n.ResourceInstanceAddr(), n.DeposedKey)
43 }
44 return n.ResourceInstanceAddr().String() + " (destroy)"
16} 45}
17 46
18// GraphNodeDestroyer 47// GraphNodeDestroyer
19func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress { 48func (n *NodeDestroyResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
20 return n.Addr 49 addr := n.ResourceInstanceAddr()
50 return &addr
21} 51}
22 52
23// GraphNodeDestroyerCBD 53// GraphNodeDestroyerCBD
24func (n *NodeDestroyResource) CreateBeforeDestroy() bool { 54func (n *NodeDestroyResourceInstance) CreateBeforeDestroy() bool {
55 if n.CreateBeforeDestroyOverride != nil {
56 return *n.CreateBeforeDestroyOverride
57 }
58
25 // If we have no config, we just assume no 59 // If we have no config, we just assume no
26 if n.Config == nil { 60 if n.Config == nil || n.Config.Managed == nil {
27 return false 61 return false
28 } 62 }
29 63
30 return n.Config.Lifecycle.CreateBeforeDestroy 64 return n.Config.Managed.CreateBeforeDestroy
31} 65}
32 66
33// GraphNodeDestroyerCBD 67// GraphNodeDestroyerCBD
34func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error { 68func (n *NodeDestroyResourceInstance) ModifyCreateBeforeDestroy(v bool) error {
35 // If we have no config, do nothing since it won't affect the 69 n.CreateBeforeDestroyOverride = &v
36 // create step anyways.
37 if n.Config == nil {
38 return nil
39 }
40
41 // Set CBD to true
42 n.Config.Lifecycle.CreateBeforeDestroy = true
43
44 return nil 70 return nil
45} 71}
46 72
47// GraphNodeReferenceable, overriding NodeAbstractResource 73// GraphNodeReferenceable, overriding NodeAbstractResource
48func (n *NodeDestroyResource) ReferenceableName() []string { 74func (n *NodeDestroyResourceInstance) ReferenceableAddrs() []addrs.Referenceable {
49 // We modify our referenceable name to have the suffix of ".destroy" 75 normalAddrs := n.NodeAbstractResourceInstance.ReferenceableAddrs()
50 // since depending on the creation side doesn't necessarilly mean 76 destroyAddrs := make([]addrs.Referenceable, len(normalAddrs))
51 // depending on destruction. 77
52 suffix := ".destroy" 78 phaseType := addrs.ResourceInstancePhaseDestroy
53
54 // If we're CBD, we also append "-cbd". This is because CBD will setup
55 // its own edges (in CBDEdgeTransformer). Depending on the "destroy"
56 // side generally doesn't mean depending on CBD as well. See GH-11349
57 if n.CreateBeforeDestroy() { 79 if n.CreateBeforeDestroy() {
58 suffix += "-cbd" 80 phaseType = addrs.ResourceInstancePhaseDestroyCBD
59 } 81 }
60 82
61 result := n.NodeAbstractResource.ReferenceableName() 83 for i, normalAddr := range normalAddrs {
62 for i, v := range result { 84 switch ta := normalAddr.(type) {
63 result[i] = v + suffix 85 case addrs.Resource:
86 destroyAddrs[i] = ta.Phase(phaseType)
87 case addrs.ResourceInstance:
88 destroyAddrs[i] = ta.Phase(phaseType)
89 default:
90 destroyAddrs[i] = normalAddr
91 }
64 } 92 }
65 93
66 return result 94 return destroyAddrs
67} 95}
68 96
69// GraphNodeReferencer, overriding NodeAbstractResource 97// GraphNodeReferencer, overriding NodeAbstractResource
70func (n *NodeDestroyResource) References() []string { 98func (n *NodeDestroyResourceInstance) References() []*addrs.Reference {
71 // If we have a config, then we need to include destroy-time dependencies 99 // If we have a config, then we need to include destroy-time dependencies
72 if c := n.Config; c != nil { 100 if c := n.Config; c != nil && c.Managed != nil {
73 var result []string 101 var result []*addrs.Reference
74 for _, p := range c.Provisioners { 102
75 // We include conn info and config for destroy time provisioners 103 // We include conn info and config for destroy time provisioners
76 // as dependencies that we have. 104 // as dependencies that we have.
77 if p.When == config.ProvisionerWhenDestroy { 105 for _, p := range c.Managed.Provisioners {
78 result = append(result, ReferencesFromConfig(p.ConnInfo)...) 106 schema := n.ProvisionerSchemas[p.Type]
79 result = append(result, ReferencesFromConfig(p.RawConfig)...) 107
108 if p.When == configs.ProvisionerWhenDestroy {
109 if p.Connection != nil {
110 result = append(result, ReferencesFromConfig(p.Connection.Config, connectionBlockSupersetSchema)...)
111 }
112 result = append(result, ReferencesFromConfig(p.Config, schema)...)
80 } 113 }
81 } 114 }
82 115
@@ -86,117 +119,66 @@ func (n *NodeDestroyResource) References() []string {
86 return nil 119 return nil
87} 120}
88 121
89// GraphNodeDynamicExpandable
90func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
91 // If we have no config we do nothing
92 if n.Addr == nil {
93 return nil, nil
94 }
95
96 state, lock := ctx.State()
97 lock.RLock()
98 defer lock.RUnlock()
99
100 // Start creating the steps
101 steps := make([]GraphTransformer, 0, 5)
102
103 // We want deposed resources in the state to be destroyed
104 steps = append(steps, &DeposedTransformer{
105 State: state,
106 View: n.Addr.stateId(),
107 ResolvedProvider: n.ResolvedProvider,
108 })
109
110 // Target
111 steps = append(steps, &TargetsTransformer{
112 ParsedTargets: n.Targets,
113 })
114
115 // Always end with the root being added
116 steps = append(steps, &RootTransformer{})
117
118 // Build the graph
119 b := &BasicGraphBuilder{
120 Steps: steps,
121 Name: "NodeResourceDestroy",
122 }
123 return b.Build(ctx.Path())
124}
125
126// GraphNodeEvalable 122// GraphNodeEvalable
127func (n *NodeDestroyResource) EvalTree() EvalNode { 123func (n *NodeDestroyResourceInstance) EvalTree() EvalNode {
128 // stateId is the ID to put into the state 124 addr := n.ResourceInstanceAddr()
129 stateId := n.Addr.stateId()
130
131 // Build the instance info. More of this will be populated during eval
132 info := &InstanceInfo{
133 Id: stateId,
134 Type: n.Addr.Type,
135 uniqueExtra: "destroy",
136 }
137
138 // Build the resource for eval
139 addr := n.Addr
140 resource := &Resource{
141 Name: addr.Name,
142 Type: addr.Type,
143 CountIndex: addr.Index,
144 }
145 if resource.CountIndex < 0 {
146 resource.CountIndex = 0
147 }
148 125
149 // Get our state 126 // Get our state
150 rs := n.ResourceState 127 rs := n.ResourceState
151 if rs == nil { 128 var is *states.ResourceInstance
152 rs = &ResourceState{ 129 if rs != nil {
153 Provider: n.ResolvedProvider, 130 is = rs.Instance(n.InstanceKey)
154 } 131 }
132 if is == nil {
133 log.Printf("[WARN] NodeDestroyResourceInstance for %s with no state", addr)
155 } 134 }
156 135
157 var diffApply *InstanceDiff 136 var changeApply *plans.ResourceInstanceChange
158 var provider ResourceProvider 137 var provider providers.Interface
159 var state *InstanceState 138 var providerSchema *ProviderSchema
139 var state *states.ResourceInstanceObject
160 var err error 140 var err error
161 return &EvalOpFilter{ 141 return &EvalOpFilter{
162 Ops: []walkOperation{walkApply, walkDestroy}, 142 Ops: []walkOperation{walkApply, walkDestroy},
163 Node: &EvalSequence{ 143 Node: &EvalSequence{
164 Nodes: []EvalNode{ 144 Nodes: []EvalNode{
145 &EvalGetProvider{
146 Addr: n.ResolvedProvider,
147 Output: &provider,
148 Schema: &providerSchema,
149 },
150
165 // Get the saved diff for apply 151 // Get the saved diff for apply
166 &EvalReadDiff{ 152 &EvalReadDiff{
167 Name: stateId, 153 Addr: addr.Resource,
168 Diff: &diffApply, 154 ProviderSchema: &providerSchema,
155 Change: &changeApply,
169 }, 156 },
170 157
171 // Filter the diff so we only get the destroy 158 &EvalReduceDiff{
172 &EvalFilterDiff{ 159 Addr: addr.Resource,
173 Diff: &diffApply, 160 InChange: &changeApply,
174 Output: &diffApply, 161 Destroy: true,
175 Destroy: true, 162 OutChange: &changeApply,
176 }, 163 },
177 164
178 // If we're not destroying, then compare diffs 165 // EvalReduceDiff may have simplified our planned change
166 // into a NoOp if it does not require destroying.
179 &EvalIf{ 167 &EvalIf{
180 If: func(ctx EvalContext) (bool, error) { 168 If: func(ctx EvalContext) (bool, error) {
181 if diffApply != nil && diffApply.GetDestroy() { 169 if changeApply == nil || changeApply.Action == plans.NoOp {
182 return true, nil 170 return true, EvalEarlyExitError{}
183 } 171 }
184 172 return true, nil
185 return true, EvalEarlyExitError{}
186 }, 173 },
187 Then: EvalNoop{}, 174 Then: EvalNoop{},
188 }, 175 },
189 176
190 // Load the instance info so we have the module path set
191 &EvalInstanceInfo{Info: info},
192
193 &EvalGetProvider{
194 Name: n.ResolvedProvider,
195 Output: &provider,
196 },
197 &EvalReadState{ 177 &EvalReadState{
198 Name: stateId, 178 Addr: addr.Resource,
199 Output: &state, 179 Output: &state,
180 Provider: &provider,
181 ProviderSchema: &providerSchema,
200 }, 182 },
201 &EvalRequireState{ 183 &EvalRequireState{
202 State: &state, 184 State: &state,
@@ -204,15 +186,15 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
204 186
205 // Call pre-apply hook 187 // Call pre-apply hook
206 &EvalApplyPre{ 188 &EvalApplyPre{
207 Info: info, 189 Addr: addr.Resource,
208 State: &state, 190 State: &state,
209 Diff: &diffApply, 191 Change: &changeApply,
210 }, 192 },
211 193
212 // Run destroy provisioners if not tainted 194 // Run destroy provisioners if not tainted
213 &EvalIf{ 195 &EvalIf{
214 If: func(ctx EvalContext) (bool, error) { 196 If: func(ctx EvalContext) (bool, error) {
215 if state != nil && state.Tainted { 197 if state != nil && state.Status == states.ObjectTainted {
216 return false, nil 198 return false, nil
217 } 199 }
218 200
@@ -220,12 +202,11 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
220 }, 202 },
221 203
222 Then: &EvalApplyProvisioners{ 204 Then: &EvalApplyProvisioners{
223 Info: info, 205 Addr: addr.Resource,
224 State: &state, 206 State: &state,
225 Resource: n.Config, 207 ResourceConfig: n.Config,
226 InterpResource: resource,
227 Error: &err, 208 Error: &err,
228 When: config.ProvisionerWhenDestroy, 209 When: configs.ProvisionerWhenDestroy,
229 }, 210 },
230 }, 211 },
231 212
@@ -237,7 +218,7 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
237 }, 218 },
238 219
239 Then: &EvalApplyPost{ 220 Then: &EvalApplyPost{
240 Info: info, 221 Addr: addr.Resource,
241 State: &state, 222 State: &state,
242 Error: &err, 223 Error: &err,
243 }, 224 },
@@ -246,41 +227,38 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
246 // Make sure we handle data sources properly. 227 // Make sure we handle data sources properly.
247 &EvalIf{ 228 &EvalIf{
248 If: func(ctx EvalContext) (bool, error) { 229 If: func(ctx EvalContext) (bool, error) {
249 if n.Addr == nil { 230 return addr.Resource.Resource.Mode == addrs.DataResourceMode, nil
250 return false, fmt.Errorf("nil address")
251 }
252
253 if n.Addr.Mode == config.DataResourceMode {
254 return true, nil
255 }
256
257 return false, nil
258 }, 231 },
259 232
260 Then: &EvalReadDataApply{ 233 Then: &EvalReadDataApply{
261 Info: info, 234 Addr: addr.Resource,
262 Diff: &diffApply, 235 Config: n.Config,
263 Provider: &provider, 236 Change: &changeApply,
264 Output: &state, 237 Provider: &provider,
238 ProviderAddr: n.ResolvedProvider,
239 ProviderSchema: &providerSchema,
240 Output: &state,
265 }, 241 },
266 Else: &EvalApply{ 242 Else: &EvalApply{
267 Info: info, 243 Addr: addr.Resource,
268 State: &state, 244 Config: nil, // No configuration because we are destroying
269 Diff: &diffApply, 245 State: &state,
270 Provider: &provider, 246 Change: &changeApply,
271 Output: &state, 247 Provider: &provider,
272 Error: &err, 248 ProviderAddr: n.ResolvedProvider,
249 ProviderSchema: &providerSchema,
250 Output: &state,
251 Error: &err,
273 }, 252 },
274 }, 253 },
275 &EvalWriteState{ 254 &EvalWriteState{
276 Name: stateId, 255 Addr: addr.Resource,
277 ResourceType: n.Addr.Type, 256 ProviderAddr: n.ResolvedProvider,
278 Provider: n.ResolvedProvider, 257 ProviderSchema: &providerSchema,
279 Dependencies: rs.Dependencies, 258 State: &state,
280 State: &state,
281 }, 259 },
282 &EvalApplyPost{ 260 &EvalApplyPost{
283 Info: info, 261 Addr: addr.Resource,
284 State: &state, 262 State: &state,
285 Error: &err, 263 Error: &err,
286 }, 264 },
@@ -289,3 +267,55 @@ func (n *NodeDestroyResource) EvalTree() EvalNode {
289 }, 267 },
290 } 268 }
291} 269}
270
271// NodeDestroyResourceInstance represents a resource that is to be destroyed.
272//
273// Destroying a resource is a state-only operation: it is the individual
274// instances being destroyed that affects remote objects. During graph
275// construction, NodeDestroyResource should always depend on any other node
276// related to the given resource, since it's just a final cleanup to avoid
277// leaving skeleton resource objects in state after their instances have
278// all been destroyed.
279type NodeDestroyResource struct {
280 *NodeAbstractResource
281}
282
283var (
284 _ GraphNodeResource = (*NodeDestroyResource)(nil)
285 _ GraphNodeReferenceable = (*NodeDestroyResource)(nil)
286 _ GraphNodeReferencer = (*NodeDestroyResource)(nil)
287 _ GraphNodeEvalable = (*NodeDestroyResource)(nil)
288)
289
290func (n *NodeDestroyResource) Name() string {
291 return n.ResourceAddr().String() + " (clean up state)"
292}
293
294// GraphNodeReferenceable, overriding NodeAbstractResource
295func (n *NodeDestroyResource) ReferenceableAddrs() []addrs.Referenceable {
296 // NodeDestroyResource doesn't participate in references: the graph
297 // builder that created it should ensure directly that it already depends
298 // on every other node related to its resource, without relying on
299 // references.
300 return nil
301}
302
303// GraphNodeReferencer, overriding NodeAbstractResource
304func (n *NodeDestroyResource) References() []*addrs.Reference {
305 // NodeDestroyResource doesn't participate in references: the graph
306 // builder that created it should ensure directly that it already depends
307 // on every other node related to its resource, without relying on
308 // references.
309 return nil
310}
311
312// GraphNodeEvalable
313func (n *NodeDestroyResource) EvalTree() EvalNode {
314 // This EvalNode will produce an error if the resource isn't already
315 // empty by the time it is called, since it should just be pruning the
316 // leftover husk of a resource in state after all of the child instances
317 // and their objects were destroyed.
318 return &EvalForgetResourceState{
319 Addr: n.ResourceAddr().Resource,
320 }
321}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go
new file mode 100644
index 0000000..67c4691
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy_deposed.go
@@ -0,0 +1,313 @@
1package terraform
2
3import (
4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/plans"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/states"
11)
12
13// ConcreteResourceInstanceDeposedNodeFunc is a callback type used to convert
14// an abstract resource instance to a concrete one of some type that has
15// an associated deposed object key.
16type ConcreteResourceInstanceDeposedNodeFunc func(*NodeAbstractResourceInstance, states.DeposedKey) dag.Vertex
17
18type GraphNodeDeposedResourceInstanceObject interface {
19 DeposedInstanceObjectKey() states.DeposedKey
20}
21
22// NodePlanDeposedResourceInstanceObject represents deposed resource
23// instance objects during plan. These are distinct from the primary object
24// for each resource instance since the only valid operation to do with them
25// is to destroy them.
26//
27// This node type is also used during the refresh walk to ensure that the
28// record of a deposed object is up-to-date before we plan to destroy it.
29type NodePlanDeposedResourceInstanceObject struct {
30 *NodeAbstractResourceInstance
31 DeposedKey states.DeposedKey
32}
33
34var (
35 _ GraphNodeDeposedResourceInstanceObject = (*NodePlanDeposedResourceInstanceObject)(nil)
36 _ GraphNodeResource = (*NodePlanDeposedResourceInstanceObject)(nil)
37 _ GraphNodeResourceInstance = (*NodePlanDeposedResourceInstanceObject)(nil)
38 _ GraphNodeReferenceable = (*NodePlanDeposedResourceInstanceObject)(nil)
39 _ GraphNodeReferencer = (*NodePlanDeposedResourceInstanceObject)(nil)
40 _ GraphNodeEvalable = (*NodePlanDeposedResourceInstanceObject)(nil)
41 _ GraphNodeProviderConsumer = (*NodePlanDeposedResourceInstanceObject)(nil)
42 _ GraphNodeProvisionerConsumer = (*NodePlanDeposedResourceInstanceObject)(nil)
43)
44
45func (n *NodePlanDeposedResourceInstanceObject) Name() string {
46 return fmt.Sprintf("%s (deposed %s)", n.ResourceInstanceAddr().String(), n.DeposedKey)
47}
48
49func (n *NodePlanDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey {
50 return n.DeposedKey
51}
52
53// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance
54func (n *NodePlanDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable {
55 // Deposed objects don't participate in references.
56 return nil
57}
58
59// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance
60func (n *NodePlanDeposedResourceInstanceObject) References() []*addrs.Reference {
61 // We don't evaluate configuration for deposed objects, so they effectively
62 // make no references.
63 return nil
64}
65
66// GraphNodeEvalable impl.
67func (n *NodePlanDeposedResourceInstanceObject) EvalTree() EvalNode {
68 addr := n.ResourceInstanceAddr()
69
70 var provider providers.Interface
71 var providerSchema *ProviderSchema
72 var state *states.ResourceInstanceObject
73
74 seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
75
76 // During the refresh walk we will ensure that our record of the deposed
77 // object is up-to-date. If it was already deleted outside of Terraform
78 // then this will remove it from state and thus avoid us planning a
79 // destroy for it during the subsequent plan walk.
80 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
81 Ops: []walkOperation{walkRefresh},
82 Node: &EvalSequence{
83 Nodes: []EvalNode{
84 &EvalGetProvider{
85 Addr: n.ResolvedProvider,
86 Output: &provider,
87 Schema: &providerSchema,
88 },
89 &EvalReadStateDeposed{
90 Addr: addr.Resource,
91 Provider: &provider,
92 ProviderSchema: &providerSchema,
93 Key: n.DeposedKey,
94 Output: &state,
95 },
96 &EvalRefresh{
97 Addr: addr.Resource,
98 ProviderAddr: n.ResolvedProvider,
99 Provider: &provider,
100 ProviderSchema: &providerSchema,
101 State: &state,
102 Output: &state,
103 },
104 &EvalWriteStateDeposed{
105 Addr: addr.Resource,
106 Key: n.DeposedKey,
107 ProviderAddr: n.ResolvedProvider,
108 ProviderSchema: &providerSchema,
109 State: &state,
110 },
111 },
112 },
113 })
114
115 // During the plan walk we always produce a planned destroy change, because
116 // destroying is the only supported action for deposed objects.
117 var change *plans.ResourceInstanceChange
118 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
119 Ops: []walkOperation{walkPlan, walkPlanDestroy},
120 Node: &EvalSequence{
121 Nodes: []EvalNode{
122 &EvalGetProvider{
123 Addr: n.ResolvedProvider,
124 Output: &provider,
125 Schema: &providerSchema,
126 },
127 &EvalReadStateDeposed{
128 Addr: addr.Resource,
129 Output: &state,
130 Key: n.DeposedKey,
131 Provider: &provider,
132 ProviderSchema: &providerSchema,
133 },
134 &EvalDiffDestroy{
135 Addr: addr.Resource,
136 ProviderAddr: n.ResolvedProvider,
137 DeposedKey: n.DeposedKey,
138 State: &state,
139 Output: &change,
140 },
141 &EvalWriteDiff{
142 Addr: addr.Resource,
143 DeposedKey: n.DeposedKey,
144 ProviderSchema: &providerSchema,
145 Change: &change,
146 },
147 // Since deposed objects cannot be referenced by expressions
148 // elsewhere, we don't need to also record the planned new
149 // state in this case.
150 },
151 },
152 })
153
154 return seq
155}
156
157// NodeDestroyDeposedResourceInstanceObject represents deposed resource
158// instance objects during apply. Nodes of this type are inserted by
159// DiffTransformer when the planned changeset contains "delete" changes for
160// deposed instance objects, and its only supported operation is to destroy
161// and then forget the associated object.
162type NodeDestroyDeposedResourceInstanceObject struct {
163 *NodeAbstractResourceInstance
164 DeposedKey states.DeposedKey
165}
166
167var (
168 _ GraphNodeDeposedResourceInstanceObject = (*NodeDestroyDeposedResourceInstanceObject)(nil)
169 _ GraphNodeResource = (*NodeDestroyDeposedResourceInstanceObject)(nil)
170 _ GraphNodeResourceInstance = (*NodeDestroyDeposedResourceInstanceObject)(nil)
171 _ GraphNodeDestroyer = (*NodeDestroyDeposedResourceInstanceObject)(nil)
172 _ GraphNodeDestroyerCBD = (*NodeDestroyDeposedResourceInstanceObject)(nil)
173 _ GraphNodeReferenceable = (*NodeDestroyDeposedResourceInstanceObject)(nil)
174 _ GraphNodeReferencer = (*NodeDestroyDeposedResourceInstanceObject)(nil)
175 _ GraphNodeEvalable = (*NodeDestroyDeposedResourceInstanceObject)(nil)
176 _ GraphNodeProviderConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil)
177 _ GraphNodeProvisionerConsumer = (*NodeDestroyDeposedResourceInstanceObject)(nil)
178)
179
180func (n *NodeDestroyDeposedResourceInstanceObject) Name() string {
181 return fmt.Sprintf("%s (destroy deposed %s)", n.Addr.String(), n.DeposedKey)
182}
183
184func (n *NodeDestroyDeposedResourceInstanceObject) DeposedInstanceObjectKey() states.DeposedKey {
185 return n.DeposedKey
186}
187
188// GraphNodeReferenceable implementation, overriding the one from NodeAbstractResourceInstance
189func (n *NodeDestroyDeposedResourceInstanceObject) ReferenceableAddrs() []addrs.Referenceable {
190 // Deposed objects don't participate in references.
191 return nil
192}
193
194// GraphNodeReferencer implementation, overriding the one from NodeAbstractResourceInstance
195func (n *NodeDestroyDeposedResourceInstanceObject) References() []*addrs.Reference {
196 // We don't evaluate configuration for deposed objects, so they effectively
197 // make no references.
198 return nil
199}
200
201// GraphNodeDestroyer
202func (n *NodeDestroyDeposedResourceInstanceObject) DestroyAddr() *addrs.AbsResourceInstance {
203 addr := n.ResourceInstanceAddr()
204 return &addr
205}
206
207// GraphNodeDestroyerCBD
208func (n *NodeDestroyDeposedResourceInstanceObject) CreateBeforeDestroy() bool {
209 // A deposed instance is always CreateBeforeDestroy by definition, since
210 // we use deposed only to handle create-before-destroy.
211 return true
212}
213
214// GraphNodeDestroyerCBD
215func (n *NodeDestroyDeposedResourceInstanceObject) ModifyCreateBeforeDestroy(v bool) error {
216 if !v {
217 // Should never happen: deposed instances are _always_ create_before_destroy.
218 return fmt.Errorf("can't deactivate create_before_destroy for a deposed instance")
219 }
220 return nil
221}
222
223// GraphNodeEvalable impl.
224func (n *NodeDestroyDeposedResourceInstanceObject) EvalTree() EvalNode {
225 addr := n.ResourceInstanceAddr()
226
227 var provider providers.Interface
228 var providerSchema *ProviderSchema
229 var state *states.ResourceInstanceObject
230 var change *plans.ResourceInstanceChange
231 var err error
232
233 return &EvalSequence{
234 Nodes: []EvalNode{
235 &EvalGetProvider{
236 Addr: n.ResolvedProvider,
237 Output: &provider,
238 Schema: &providerSchema,
239 },
240 &EvalReadStateDeposed{
241 Addr: addr.Resource,
242 Output: &state,
243 Key: n.DeposedKey,
244 Provider: &provider,
245 ProviderSchema: &providerSchema,
246 },
247 &EvalDiffDestroy{
248 Addr: addr.Resource,
249 ProviderAddr: n.ResolvedProvider,
250 State: &state,
251 Output: &change,
252 },
253 // Call pre-apply hook
254 &EvalApplyPre{
255 Addr: addr.Resource,
256 State: &state,
257 Change: &change,
258 },
259 &EvalApply{
260 Addr: addr.Resource,
261 Config: nil, // No configuration because we are destroying
262 State: &state,
263 Change: &change,
264 Provider: &provider,
265 ProviderAddr: n.ResolvedProvider,
266 ProviderSchema: &providerSchema,
267 Output: &state,
268 Error: &err,
269 },
270 // Always write the resource back to the state deposed... if it
271 // was successfully destroyed it will be pruned. If it was not, it will
272 // be caught on the next run.
273 &EvalWriteStateDeposed{
274 Addr: addr.Resource,
275 Key: n.DeposedKey,
276 ProviderAddr: n.ResolvedProvider,
277 ProviderSchema: &providerSchema,
278 State: &state,
279 },
280 &EvalApplyPost{
281 Addr: addr.Resource,
282 State: &state,
283 Error: &err,
284 },
285 &EvalReturnError{
286 Error: &err,
287 },
288 &EvalUpdateStateHook{},
289 },
290 }
291}
292
293// GraphNodeDeposer is an optional interface implemented by graph nodes that
294// might create a single new deposed object for a specific associated resource
295// instance, allowing a caller to optionally pre-allocate a DeposedKey for
296// it.
297type GraphNodeDeposer interface {
298 // SetPreallocatedDeposedKey will be called during graph construction
299 // if a particular node must use a pre-allocated deposed key if/when it
300 // "deposes" the current object of its associated resource instance.
301 SetPreallocatedDeposedKey(key states.DeposedKey)
302}
303
304// graphNodeDeposer is an embeddable implementation of GraphNodeDeposer.
305// Embed it in a node type to get automatic support for it, and then access
306// the field PreallocatedDeposedKey to access any pre-allocated key.
307type graphNodeDeposer struct {
308 PreallocatedDeposedKey states.DeposedKey
309}
310
311func (n *graphNodeDeposer) SetPreallocatedDeposedKey(key states.DeposedKey) {
312 n.PreallocatedDeposedKey = key
313}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
index 1afae7a..633c1c4 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
@@ -1,47 +1,119 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "log"
5
4 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/tfdiags"
5) 8)
6 9
7// NodePlannableResource represents a resource that is "plannable": 10// NodePlannableResource represents a resource that is "plannable":
8// it is ready to be planned in order to create a diff. 11// it is ready to be planned in order to create a diff.
9type NodePlannableResource struct { 12type NodePlannableResource struct {
10 *NodeAbstractCountResource 13 *NodeAbstractResource
14
15 // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD
16 // during graph construction, if dependencies require us to force this
17 // on regardless of what the configuration says.
18 ForceCreateBeforeDestroy *bool
19}
20
21var (
22 _ GraphNodeSubPath = (*NodePlannableResource)(nil)
23 _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil)
24 _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil)
25 _ GraphNodeReferenceable = (*NodePlannableResource)(nil)
26 _ GraphNodeReferencer = (*NodePlannableResource)(nil)
27 _ GraphNodeResource = (*NodePlannableResource)(nil)
28 _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil)
29)
30
31// GraphNodeEvalable
32func (n *NodePlannableResource) EvalTree() EvalNode {
33 addr := n.ResourceAddr()
34 config := n.Config
35
36 if config == nil {
37 // Nothing to do, then.
38 log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", addr)
39 return &EvalNoop{}
40 }
41
42 // this ensures we can reference the resource even if the count is 0
43 return &EvalWriteResourceState{
44 Addr: addr.Resource,
45 Config: config,
46 ProviderAddr: n.ResolvedProvider,
47 }
48}
49
50// GraphNodeDestroyerCBD
51func (n *NodePlannableResource) CreateBeforeDestroy() bool {
52 if n.ForceCreateBeforeDestroy != nil {
53 return *n.ForceCreateBeforeDestroy
54 }
55
56 // If we have no config, we just assume no
57 if n.Config == nil || n.Config.Managed == nil {
58 return false
59 }
60
61 return n.Config.Managed.CreateBeforeDestroy
62}
63
64// GraphNodeDestroyerCBD
65func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error {
66 n.ForceCreateBeforeDestroy = &v
67 return nil
11} 68}
12 69
13// GraphNodeDynamicExpandable 70// GraphNodeDynamicExpandable
14func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) { 71func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
15 // Grab the state which we read 72 var diags tfdiags.Diagnostics
16 state, lock := ctx.State() 73
17 lock.RLock() 74 count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
18 defer lock.RUnlock() 75 diags = diags.Append(countDiags)
19 76 if countDiags.HasErrors() {
20 // Expand the resource count which must be available by now from EvalTree 77 return nil, diags.Err()
21 count, err := n.Config.Count()
22 if err != nil {
23 return nil, err
24 } 78 }
25 79
80 // Next we need to potentially rename an instance address in the state
81 // if we're transitioning whether "count" is set at all.
82 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
83
84 // Our graph transformers require access to the full state, so we'll
85 // temporarily lock it while we work on this.
86 state := ctx.State().Lock()
87 defer ctx.State().Unlock()
88
26 // The concrete resource factory we'll use 89 // The concrete resource factory we'll use
27 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 90 concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
28 // Add the config and state since we don't do that via transforms 91 // Add the config and state since we don't do that via transforms
29 a.Config = n.Config 92 a.Config = n.Config
30 a.ResolvedProvider = n.ResolvedProvider 93 a.ResolvedProvider = n.ResolvedProvider
94 a.Schema = n.Schema
95 a.ProvisionerSchemas = n.ProvisionerSchemas
31 96
32 return &NodePlannableResourceInstance{ 97 return &NodePlannableResourceInstance{
33 NodeAbstractResource: a, 98 NodeAbstractResourceInstance: a,
99
100 // By the time we're walking, we've figured out whether we need
101 // to force on CreateBeforeDestroy due to dependencies on other
102 // nodes that have it.
103 ForceCreateBeforeDestroy: n.CreateBeforeDestroy(),
34 } 104 }
35 } 105 }
36 106
37 // The concrete resource factory we'll use for oprhans 107 // The concrete resource factory we'll use for orphans
38 concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex { 108 concreteResourceOrphan := func(a *NodeAbstractResourceInstance) dag.Vertex {
39 // Add the config and state since we don't do that via transforms 109 // Add the config and state since we don't do that via transforms
40 a.Config = n.Config 110 a.Config = n.Config
41 a.ResolvedProvider = n.ResolvedProvider 111 a.ResolvedProvider = n.ResolvedProvider
112 a.Schema = n.Schema
113 a.ProvisionerSchemas = n.ProvisionerSchemas
42 114
43 return &NodePlannableResourceOrphan{ 115 return &NodePlannableResourceInstanceOrphan{
44 NodeAbstractResource: a, 116 NodeAbstractResourceInstance: a,
45 } 117 }
46 } 118 }
47 119
@@ -50,6 +122,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
50 // Expand the count. 122 // Expand the count.
51 &ResourceCountTransformer{ 123 &ResourceCountTransformer{
52 Concrete: concreteResource, 124 Concrete: concreteResource,
125 Schema: n.Schema,
53 Count: count, 126 Count: count,
54 Addr: n.ResourceAddr(), 127 Addr: n.ResourceAddr(),
55 }, 128 },
@@ -66,7 +139,7 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
66 &AttachStateTransformer{State: state}, 139 &AttachStateTransformer{State: state},
67 140
68 // Targeting 141 // Targeting
69 &TargetsTransformer{ParsedTargets: n.Targets}, 142 &TargetsTransformer{Targets: n.Targets},
70 143
71 // Connect references so ordering is correct 144 // Connect references so ordering is correct
72 &ReferenceTransformer{}, 145 &ReferenceTransformer{},
@@ -81,5 +154,6 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
81 Validate: true, 154 Validate: true,
82 Name: "NodePlannableResource", 155 Name: "NodePlannableResource",
83 } 156 }
84 return b.Build(ctx.Path()) 157 graph, diags := b.Build(ctx.Path())
158 return graph, diags.ErrWithWarnings()
85} 159}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
index 9b02362..38746f0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
@@ -1,52 +1,87 @@
1package terraform 1package terraform
2 2
3// NodePlanDestroyableResource represents a resource that is "applyable": 3import (
4// it is ready to be applied and is represented by a diff. 4 "fmt"
5type NodePlanDestroyableResource struct { 5
6 *NodeAbstractResource 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/plans"
9 "github.com/hashicorp/terraform/providers"
10 "github.com/hashicorp/terraform/states"
11)
12
13// NodePlanDestroyableResourceInstance represents a resource that is ready
14// to be planned for destruction.
15type NodePlanDestroyableResourceInstance struct {
16 *NodeAbstractResourceInstance
7} 17}
8 18
19var (
20 _ GraphNodeSubPath = (*NodePlanDestroyableResourceInstance)(nil)
21 _ GraphNodeReferenceable = (*NodePlanDestroyableResourceInstance)(nil)
22 _ GraphNodeReferencer = (*NodePlanDestroyableResourceInstance)(nil)
23 _ GraphNodeDestroyer = (*NodePlanDestroyableResourceInstance)(nil)
24 _ GraphNodeResource = (*NodePlanDestroyableResourceInstance)(nil)
25 _ GraphNodeResourceInstance = (*NodePlanDestroyableResourceInstance)(nil)
26 _ GraphNodeAttachResourceConfig = (*NodePlanDestroyableResourceInstance)(nil)
27 _ GraphNodeAttachResourceState = (*NodePlanDestroyableResourceInstance)(nil)
28 _ GraphNodeEvalable = (*NodePlanDestroyableResourceInstance)(nil)
29 _ GraphNodeProviderConsumer = (*NodePlanDestroyableResourceInstance)(nil)
30)
31
9// GraphNodeDestroyer 32// GraphNodeDestroyer
10func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress { 33func (n *NodePlanDestroyableResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
11 return n.Addr 34 addr := n.ResourceInstanceAddr()
35 return &addr
12} 36}
13 37
14// GraphNodeEvalable 38// GraphNodeEvalable
15func (n *NodePlanDestroyableResource) EvalTree() EvalNode { 39func (n *NodePlanDestroyableResourceInstance) EvalTree() EvalNode {
16 addr := n.NodeAbstractResource.Addr 40 addr := n.ResourceInstanceAddr()
17 41
18 // stateId is the ID to put into the state 42 // Declare a bunch of variables that are used for state during
19 stateId := addr.stateId() 43 // evaluation. These are written to by address in the EvalNodes we
44 // declare below.
45 var provider providers.Interface
46 var providerSchema *ProviderSchema
47 var change *plans.ResourceInstanceChange
48 var state *states.ResourceInstanceObject
20 49
21 // Build the instance info. More of this will be populated during eval 50 if n.ResolvedProvider.ProviderConfig.Type == "" {
22 info := &InstanceInfo{ 51 // Should never happen; indicates that the graph was not constructed
23 Id: stateId, 52 // correctly since we didn't get our provider attached.
24 Type: addr.Type, 53 panic(fmt.Sprintf("%T %q was not assigned a resolved provider", n, dag.VertexName(n)))
25 } 54 }
26 55
27 // Declare a bunch of variables that are used for state during
28 // evaluation. Most of this are written to by-address below.
29 var diff *InstanceDiff
30 var state *InstanceState
31
32 return &EvalSequence{ 56 return &EvalSequence{
33 Nodes: []EvalNode{ 57 Nodes: []EvalNode{
58 &EvalGetProvider{
59 Addr: n.ResolvedProvider,
60 Output: &provider,
61 Schema: &providerSchema,
62 },
34 &EvalReadState{ 63 &EvalReadState{
35 Name: stateId, 64 Addr: addr.Resource,
65 Provider: &provider,
66 ProviderSchema: &providerSchema,
67
36 Output: &state, 68 Output: &state,
37 }, 69 },
38 &EvalDiffDestroy{ 70 &EvalDiffDestroy{
39 Info: info, 71 Addr: addr.Resource,
40 State: &state, 72 ProviderAddr: n.ResolvedProvider,
41 Output: &diff, 73 State: &state,
74 Output: &change,
42 }, 75 },
43 &EvalCheckPreventDestroy{ 76 &EvalCheckPreventDestroy{
44 Resource: n.Config, 77 Addr: addr.Resource,
45 Diff: &diff, 78 Config: n.Config,
79 Change: &change,
46 }, 80 },
47 &EvalWriteDiff{ 81 &EvalWriteDiff{
48 Name: stateId, 82 Addr: addr.Resource,
49 Diff: &diff, 83 ProviderSchema: &providerSchema,
84 Change: &change,
50 }, 85 },
51 }, 86 },
52 } 87 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
index 7d9fcdd..75e0bcd 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
@@ -3,187 +3,205 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/plans"
7 "github.com/hashicorp/terraform/providers"
8 "github.com/hashicorp/terraform/states"
9
10 "github.com/hashicorp/terraform/addrs"
11 "github.com/zclconf/go-cty/cty"
7) 12)
8 13
9// NodePlannableResourceInstance represents a _single_ resource 14// NodePlannableResourceInstance represents a _single_ resource
10// instance that is plannable. This means this represents a single 15// instance that is plannable. This means this represents a single
11// count index, for example. 16// count index, for example.
12type NodePlannableResourceInstance struct { 17type NodePlannableResourceInstance struct {
13 *NodeAbstractResource 18 *NodeAbstractResourceInstance
19 ForceCreateBeforeDestroy bool
14} 20}
15 21
22var (
23 _ GraphNodeSubPath = (*NodePlannableResourceInstance)(nil)
24 _ GraphNodeReferenceable = (*NodePlannableResourceInstance)(nil)
25 _ GraphNodeReferencer = (*NodePlannableResourceInstance)(nil)
26 _ GraphNodeResource = (*NodePlannableResourceInstance)(nil)
27 _ GraphNodeResourceInstance = (*NodePlannableResourceInstance)(nil)
28 _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstance)(nil)
29 _ GraphNodeAttachResourceState = (*NodePlannableResourceInstance)(nil)
30 _ GraphNodeEvalable = (*NodePlannableResourceInstance)(nil)
31)
32
16// GraphNodeEvalable 33// GraphNodeEvalable
17func (n *NodePlannableResourceInstance) EvalTree() EvalNode { 34func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
18 addr := n.NodeAbstractResource.Addr 35 addr := n.ResourceInstanceAddr()
19
20 // stateId is the ID to put into the state
21 stateId := addr.stateId()
22 36
23 // Build the instance info. More of this will be populated during eval 37 // State still uses legacy-style internal ids, so we need to shim to get
24 info := &InstanceInfo{ 38 // a suitable key to use.
25 Id: stateId, 39 stateId := NewLegacyResourceInstanceAddress(addr).stateId()
26 Type: addr.Type,
27 ModulePath: normalizeModulePath(addr.Path),
28 }
29
30 // Build the resource for eval
31 resource := &Resource{
32 Name: addr.Name,
33 Type: addr.Type,
34 CountIndex: addr.Index,
35 }
36 if resource.CountIndex < 0 {
37 resource.CountIndex = 0
38 }
39 40
40 // Determine the dependencies for the state. 41 // Determine the dependencies for the state.
41 stateDeps := n.StateReferences() 42 stateDeps := n.StateReferences()
42 43
43 // Eval info is different depending on what kind of resource this is 44 // Eval info is different depending on what kind of resource this is
44 switch n.Config.Mode { 45 switch addr.Resource.Resource.Mode {
45 case config.ManagedResourceMode: 46 case addrs.ManagedResourceMode:
46 return n.evalTreeManagedResource( 47 return n.evalTreeManagedResource(addr, stateId, stateDeps)
47 stateId, info, resource, stateDeps, 48 case addrs.DataResourceMode:
48 ) 49 return n.evalTreeDataResource(addr, stateId, stateDeps)
49 case config.DataResourceMode:
50 return n.evalTreeDataResource(
51 stateId, info, resource, stateDeps)
52 default: 50 default:
53 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode)) 51 panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
54 } 52 }
55} 53}
56 54
57func (n *NodePlannableResourceInstance) evalTreeDataResource( 55func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
58 stateId string, info *InstanceInfo, 56 config := n.Config
59 resource *Resource, stateDeps []string) EvalNode { 57 var provider providers.Interface
60 var provider ResourceProvider 58 var providerSchema *ProviderSchema
61 var config *ResourceConfig 59 var change *plans.ResourceInstanceChange
62 var diff *InstanceDiff 60 var state *states.ResourceInstanceObject
63 var state *InstanceState 61 var configVal cty.Value
64 62
65 return &EvalSequence{ 63 return &EvalSequence{
66 Nodes: []EvalNode{ 64 Nodes: []EvalNode{
67 &EvalReadState{ 65 &EvalGetProvider{
68 Name: stateId, 66 Addr: n.ResolvedProvider,
69 Output: &state, 67 Output: &provider,
68 Schema: &providerSchema,
70 }, 69 },
71 70
72 // We need to re-interpolate the config here because some 71 &EvalReadState{
73 // of the attributes may have become computed during 72 Addr: addr.Resource,
74 // earlier planning, due to other resources having 73 Provider: &provider,
75 // "requires new resource" diffs. 74 ProviderSchema: &providerSchema,
76 &EvalInterpolate{ 75
77 Config: n.Config.RawConfig.Copy(), 76 Output: &state,
78 Resource: resource,
79 Output: &config,
80 }, 77 },
81 78
79 // If we already have a non-planned state then we already dealt
80 // with this during the refresh walk and so we have nothing to do
81 // here.
82 &EvalIf{ 82 &EvalIf{
83 If: func(ctx EvalContext) (bool, error) { 83 If: func(ctx EvalContext) (bool, error) {
84 computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0 84 depChanges := false
85 85
86 // If the configuration is complete and we 86 // Check and see if any of our dependencies have changes.
87 // already have a state then we don't need to 87 changes := ctx.Changes()
88 // do any further work during apply, because we 88 for _, d := range n.StateReferences() {
89 // already populated the state during refresh. 89 ri, ok := d.(addrs.ResourceInstance)
90 if !computed && state != nil { 90 if !ok {
91 return true, EvalEarlyExitError{} 91 continue
92 }
93 change := changes.GetResourceInstanceChange(ri.Absolute(ctx.Path()), states.CurrentGen)
94 if change != nil && change.Action != plans.NoOp {
95 depChanges = true
96 break
97 }
92 } 98 }
93 99
100 refreshed := state != nil && state.Status != states.ObjectPlanned
101
102 // If there are no dependency changes, and it's not a forced
103 // read because we there was no Refresh, then we don't need
104 // to re-read. If any dependencies have changes, it means
105 // our config may also have changes and we need to Read the
106 // data source again.
107 if !depChanges && refreshed {
108 return false, EvalEarlyExitError{}
109 }
94 return true, nil 110 return true, nil
95 }, 111 },
96 Then: EvalNoop{}, 112 Then: EvalNoop{},
97 }, 113 },
98 114
99 &EvalGetProvider{ 115 &EvalValidateSelfRef{
100 Name: n.ResolvedProvider, 116 Addr: addr.Resource,
101 Output: &provider, 117 Config: config.Config,
118 ProviderSchema: &providerSchema,
102 }, 119 },
103 120
104 &EvalReadDataDiff{ 121 &EvalReadData{
105 Info: info, 122 Addr: addr.Resource,
106 Config: &config, 123 Config: n.Config,
107 Provider: &provider, 124 Dependencies: n.StateReferences(),
108 Output: &diff, 125 Provider: &provider,
109 OutputState: &state, 126 ProviderAddr: n.ResolvedProvider,
127 ProviderSchema: &providerSchema,
128 ForcePlanRead: true, // _always_ produce a Read change, even if the config seems ready
129 OutputChange: &change,
130 OutputValue: &configVal,
131 OutputState: &state,
110 }, 132 },
111 133
112 &EvalWriteState{ 134 &EvalWriteState{
113 Name: stateId, 135 Addr: addr.Resource,
114 ResourceType: n.Config.Type, 136 ProviderAddr: n.ResolvedProvider,
115 Provider: n.ResolvedProvider, 137 ProviderSchema: &providerSchema,
116 Dependencies: stateDeps, 138 State: &state,
117 State: &state,
118 }, 139 },
119 140
120 &EvalWriteDiff{ 141 &EvalWriteDiff{
121 Name: stateId, 142 Addr: addr.Resource,
122 Diff: &diff, 143 ProviderSchema: &providerSchema,
144 Change: &change,
123 }, 145 },
124 }, 146 },
125 } 147 }
126} 148}
127 149
128func (n *NodePlannableResourceInstance) evalTreeManagedResource( 150func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
129 stateId string, info *InstanceInfo, 151 config := n.Config
130 resource *Resource, stateDeps []string) EvalNode { 152 var provider providers.Interface
131 // Declare a bunch of variables that are used for state during 153 var providerSchema *ProviderSchema
132 // evaluation. Most of this are written to by-address below. 154 var change *plans.ResourceInstanceChange
133 var provider ResourceProvider 155 var state *states.ResourceInstanceObject
134 var diff *InstanceDiff
135 var state *InstanceState
136 var resourceConfig *ResourceConfig
137 156
138 return &EvalSequence{ 157 return &EvalSequence{
139 Nodes: []EvalNode{ 158 Nodes: []EvalNode{
140 &EvalInterpolate{
141 Config: n.Config.RawConfig.Copy(),
142 Resource: resource,
143 Output: &resourceConfig,
144 },
145 &EvalGetProvider{ 159 &EvalGetProvider{
146 Name: n.ResolvedProvider, 160 Addr: n.ResolvedProvider,
147 Output: &provider, 161 Output: &provider,
162 Schema: &providerSchema,
148 }, 163 },
149 // Re-run validation to catch any errors we missed, e.g. type 164
150 // mismatches on computed values.
151 &EvalValidateResource{
152 Provider: &provider,
153 Config: &resourceConfig,
154 ResourceName: n.Config.Name,
155 ResourceType: n.Config.Type,
156 ResourceMode: n.Config.Mode,
157 IgnoreWarnings: true,
158 },
159 &EvalReadState{ 165 &EvalReadState{
160 Name: stateId, 166 Addr: addr.Resource,
167 Provider: &provider,
168 ProviderSchema: &providerSchema,
169
161 Output: &state, 170 Output: &state,
162 }, 171 },
172
173 &EvalValidateSelfRef{
174 Addr: addr.Resource,
175 Config: config.Config,
176 ProviderSchema: &providerSchema,
177 },
178
163 &EvalDiff{ 179 &EvalDiff{
164 Name: stateId, 180 Addr: addr.Resource,
165 Info: info, 181 Config: n.Config,
166 Config: &resourceConfig, 182 CreateBeforeDestroy: n.ForceCreateBeforeDestroy,
167 Resource: n.Config, 183 Provider: &provider,
168 Provider: &provider, 184 ProviderAddr: n.ResolvedProvider,
169 State: &state, 185 ProviderSchema: &providerSchema,
170 OutputDiff: &diff, 186 State: &state,
171 OutputState: &state, 187 OutputChange: &change,
188 OutputState: &state,
172 }, 189 },
173 &EvalCheckPreventDestroy{ 190 &EvalCheckPreventDestroy{
174 Resource: n.Config, 191 Addr: addr.Resource,
175 Diff: &diff, 192 Config: n.Config,
193 Change: &change,
176 }, 194 },
177 &EvalWriteState{ 195 &EvalWriteState{
178 Name: stateId, 196 Addr: addr.Resource,
179 ResourceType: n.Config.Type, 197 ProviderAddr: n.ResolvedProvider,
180 Provider: n.ResolvedProvider, 198 State: &state,
181 Dependencies: stateDeps, 199 ProviderSchema: &providerSchema,
182 State: &state,
183 }, 200 },
184 &EvalWriteDiff{ 201 &EvalWriteDiff{
185 Name: stateId, 202 Addr: addr.Resource,
186 Diff: &diff, 203 ProviderSchema: &providerSchema,
204 Change: &change,
187 }, 205 },
188 }, 206 },
189 } 207 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
index 73d6e41..8416694 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
@@ -1,53 +1,83 @@
1package terraform 1package terraform
2 2
3// NodePlannableResourceOrphan represents a resource that is "applyable": 3import (
4 "github.com/hashicorp/terraform/plans"
5 "github.com/hashicorp/terraform/providers"
6 "github.com/hashicorp/terraform/states"
7)
8
9// NodePlannableResourceInstanceOrphan represents a resource that is "applyable":
4// it is ready to be applied and is represented by a diff. 10// it is ready to be applied and is represented by a diff.
5type NodePlannableResourceOrphan struct { 11type NodePlannableResourceInstanceOrphan struct {
6 *NodeAbstractResource 12 *NodeAbstractResourceInstance
7} 13}
8 14
9func (n *NodePlannableResourceOrphan) Name() string { 15var (
10 return n.NodeAbstractResource.Name() + " (orphan)" 16 _ GraphNodeSubPath = (*NodePlannableResourceInstanceOrphan)(nil)
11} 17 _ GraphNodeReferenceable = (*NodePlannableResourceInstanceOrphan)(nil)
18 _ GraphNodeReferencer = (*NodePlannableResourceInstanceOrphan)(nil)
19 _ GraphNodeResource = (*NodePlannableResourceInstanceOrphan)(nil)
20 _ GraphNodeResourceInstance = (*NodePlannableResourceInstanceOrphan)(nil)
21 _ GraphNodeAttachResourceConfig = (*NodePlannableResourceInstanceOrphan)(nil)
22 _ GraphNodeAttachResourceState = (*NodePlannableResourceInstanceOrphan)(nil)
23 _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil)
24)
12 25
13// GraphNodeEvalable 26var (
14func (n *NodePlannableResourceOrphan) EvalTree() EvalNode { 27 _ GraphNodeEvalable = (*NodePlannableResourceInstanceOrphan)(nil)
15 addr := n.NodeAbstractResource.Addr 28)
16 29
17 // stateId is the ID to put into the state 30func (n *NodePlannableResourceInstanceOrphan) Name() string {
18 stateId := addr.stateId() 31 return n.ResourceInstanceAddr().String() + " (orphan)"
32}
19 33
20 // Build the instance info. More of this will be populated during eval 34// GraphNodeEvalable
21 info := &InstanceInfo{ 35func (n *NodePlannableResourceInstanceOrphan) EvalTree() EvalNode {
22 Id: stateId, 36 addr := n.ResourceInstanceAddr()
23 Type: addr.Type,
24 ModulePath: normalizeModulePath(addr.Path),
25 }
26 37
27 // Declare a bunch of variables that are used for state during 38 // Declare a bunch of variables that are used for state during
28 // evaluation. Most of this are written to by-address below. 39 // evaluation. Most of this are written to by-address below.
29 var diff *InstanceDiff 40 var change *plans.ResourceInstanceChange
30 var state *InstanceState 41 var state *states.ResourceInstanceObject
42 var provider providers.Interface
43 var providerSchema *ProviderSchema
31 44
32 return &EvalSequence{ 45 return &EvalSequence{
33 Nodes: []EvalNode{ 46 Nodes: []EvalNode{
47 &EvalGetProvider{
48 Addr: n.ResolvedProvider,
49 Output: &provider,
50 Schema: &providerSchema,
51 },
34 &EvalReadState{ 52 &EvalReadState{
35 Name: stateId, 53 Addr: addr.Resource,
54 Provider: &provider,
55 ProviderSchema: &providerSchema,
56
36 Output: &state, 57 Output: &state,
37 }, 58 },
38 &EvalDiffDestroy{ 59 &EvalDiffDestroy{
39 Info: info, 60 Addr: addr.Resource,
40 State: &state, 61 State: &state,
41 Output: &diff, 62 ProviderAddr: n.ResolvedProvider,
63 Output: &change,
64 OutputState: &state, // Will point to a nil state after this complete, signalling destroyed
42 }, 65 },
43 &EvalCheckPreventDestroy{ 66 &EvalCheckPreventDestroy{
44 Resource: n.Config, 67 Addr: addr.Resource,
45 ResourceId: stateId, 68 Config: n.Config,
46 Diff: &diff, 69 Change: &change,
47 }, 70 },
48 &EvalWriteDiff{ 71 &EvalWriteDiff{
49 Name: stateId, 72 Addr: addr.Resource,
50 Diff: &diff, 73 ProviderSchema: &providerSchema,
74 Change: &change,
75 },
76 &EvalWriteState{
77 Addr: addr.Resource,
78 ProviderAddr: n.ResolvedProvider,
79 ProviderSchema: &providerSchema,
80 State: &state,
51 }, 81 },
52 }, 82 },
53 } 83 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
index 697bd49..9506023 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
@@ -2,38 +2,60 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
5 6
6 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/plans"
8 "github.com/hashicorp/terraform/providers"
9
10 "github.com/hashicorp/terraform/states"
11
12 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/dag" 13 "github.com/hashicorp/terraform/dag"
14 "github.com/hashicorp/terraform/tfdiags"
8) 15)
9 16
10// NodeRefreshableManagedResource represents a resource that is expanabled into 17// NodeRefreshableManagedResource represents a resource that is expanabled into
11// NodeRefreshableManagedResourceInstance. Resource count orphans are also added. 18// NodeRefreshableManagedResourceInstance. Resource count orphans are also added.
12type NodeRefreshableManagedResource struct { 19type NodeRefreshableManagedResource struct {
13 *NodeAbstractCountResource 20 *NodeAbstractResource
14} 21}
15 22
23var (
24 _ GraphNodeSubPath = (*NodeRefreshableManagedResource)(nil)
25 _ GraphNodeDynamicExpandable = (*NodeRefreshableManagedResource)(nil)
26 _ GraphNodeReferenceable = (*NodeRefreshableManagedResource)(nil)
27 _ GraphNodeReferencer = (*NodeRefreshableManagedResource)(nil)
28 _ GraphNodeResource = (*NodeRefreshableManagedResource)(nil)
29 _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResource)(nil)
30)
31
16// GraphNodeDynamicExpandable 32// GraphNodeDynamicExpandable
17func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) { 33func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
18 // Grab the state which we read 34 var diags tfdiags.Diagnostics
19 state, lock := ctx.State() 35
20 lock.RLock() 36 count, countDiags := evaluateResourceCountExpression(n.Config.Count, ctx)
21 defer lock.RUnlock() 37 diags = diags.Append(countDiags)
22 38 if countDiags.HasErrors() {
23 // Expand the resource count which must be available by now from EvalTree 39 return nil, diags.Err()
24 count, err := n.Config.Count()
25 if err != nil {
26 return nil, err
27 } 40 }
28 41
42 // Next we need to potentially rename an instance address in the state
43 // if we're transitioning whether "count" is set at all.
44 fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
45
46 // Our graph transformers require access to the full state, so we'll
47 // temporarily lock it while we work on this.
48 state := ctx.State().Lock()
49 defer ctx.State().Unlock()
50
29 // The concrete resource factory we'll use 51 // The concrete resource factory we'll use
30 concreteResource := func(a *NodeAbstractResource) dag.Vertex { 52 concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
31 // Add the config and state since we don't do that via transforms 53 // Add the config and state since we don't do that via transforms
32 a.Config = n.Config 54 a.Config = n.Config
33 a.ResolvedProvider = n.ResolvedProvider 55 a.ResolvedProvider = n.ResolvedProvider
34 56
35 return &NodeRefreshableManagedResourceInstance{ 57 return &NodeRefreshableManagedResourceInstance{
36 NodeAbstractResource: a, 58 NodeAbstractResourceInstance: a,
37 } 59 }
38 } 60 }
39 61
@@ -42,6 +64,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
42 // Expand the count. 64 // Expand the count.
43 &ResourceCountTransformer{ 65 &ResourceCountTransformer{
44 Concrete: concreteResource, 66 Concrete: concreteResource,
67 Schema: n.Schema,
45 Count: count, 68 Count: count,
46 Addr: n.ResourceAddr(), 69 Addr: n.ResourceAddr(),
47 }, 70 },
@@ -59,7 +82,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
59 &AttachStateTransformer{State: state}, 82 &AttachStateTransformer{State: state},
60 83
61 // Targeting 84 // Targeting
62 &TargetsTransformer{ParsedTargets: n.Targets}, 85 &TargetsTransformer{Targets: n.Targets},
63 86
64 // Connect references so ordering is correct 87 // Connect references so ordering is correct
65 &ReferenceTransformer{}, 88 &ReferenceTransformer{},
@@ -75,66 +98,76 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
75 Name: "NodeRefreshableManagedResource", 98 Name: "NodeRefreshableManagedResource",
76 } 99 }
77 100
78 return b.Build(ctx.Path()) 101 graph, diags := b.Build(ctx.Path())
102 return graph, diags.ErrWithWarnings()
79} 103}
80 104
81// NodeRefreshableManagedResourceInstance represents a resource that is "applyable": 105// NodeRefreshableManagedResourceInstance represents a resource that is "applyable":
82// it is ready to be applied and is represented by a diff. 106// it is ready to be applied and is represented by a diff.
83type NodeRefreshableManagedResourceInstance struct { 107type NodeRefreshableManagedResourceInstance struct {
84 *NodeAbstractResource 108 *NodeAbstractResourceInstance
85} 109}
86 110
111var (
112 _ GraphNodeSubPath = (*NodeRefreshableManagedResourceInstance)(nil)
113 _ GraphNodeReferenceable = (*NodeRefreshableManagedResourceInstance)(nil)
114 _ GraphNodeReferencer = (*NodeRefreshableManagedResourceInstance)(nil)
115 _ GraphNodeDestroyer = (*NodeRefreshableManagedResourceInstance)(nil)
116 _ GraphNodeResource = (*NodeRefreshableManagedResourceInstance)(nil)
117 _ GraphNodeResourceInstance = (*NodeRefreshableManagedResourceInstance)(nil)
118 _ GraphNodeAttachResourceConfig = (*NodeRefreshableManagedResourceInstance)(nil)
119 _ GraphNodeAttachResourceState = (*NodeRefreshableManagedResourceInstance)(nil)
120 _ GraphNodeEvalable = (*NodeRefreshableManagedResourceInstance)(nil)
121)
122
87// GraphNodeDestroyer 123// GraphNodeDestroyer
88func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *ResourceAddress { 124func (n *NodeRefreshableManagedResourceInstance) DestroyAddr() *addrs.AbsResourceInstance {
89 return n.Addr 125 addr := n.ResourceInstanceAddr()
126 return &addr
90} 127}
91 128
92// GraphNodeEvalable 129// GraphNodeEvalable
93func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode { 130func (n *NodeRefreshableManagedResourceInstance) EvalTree() EvalNode {
131 addr := n.ResourceInstanceAddr()
132
94 // Eval info is different depending on what kind of resource this is 133 // Eval info is different depending on what kind of resource this is
95 switch mode := n.Addr.Mode; mode { 134 switch addr.Resource.Resource.Mode {
96 case config.ManagedResourceMode: 135 case addrs.ManagedResourceMode:
97 if n.ResourceState == nil { 136 if n.ResourceState == nil {
137 log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s has no existing state to refresh", addr)
98 return n.evalTreeManagedResourceNoState() 138 return n.evalTreeManagedResourceNoState()
99 } 139 }
140 log.Printf("[TRACE] NodeRefreshableManagedResourceInstance: %s will be refreshed", addr)
100 return n.evalTreeManagedResource() 141 return n.evalTreeManagedResource()
101 142
102 case config.DataResourceMode: 143 case addrs.DataResourceMode:
103 // Get the data source node. If we don't have a configuration 144 // Get the data source node. If we don't have a configuration
104 // then it is an orphan so we destroy it (remove it from the state). 145 // then it is an orphan so we destroy it (remove it from the state).
105 var dn GraphNodeEvalable 146 var dn GraphNodeEvalable
106 if n.Config != nil { 147 if n.Config != nil {
107 dn = &NodeRefreshableDataResourceInstance{ 148 dn = &NodeRefreshableDataResourceInstance{
108 NodeAbstractResource: n.NodeAbstractResource, 149 NodeAbstractResourceInstance: n.NodeAbstractResourceInstance,
109 } 150 }
110 } else { 151 } else {
111 dn = &NodeDestroyableDataResource{ 152 dn = &NodeDestroyableDataResourceInstance{
112 NodeAbstractResource: n.NodeAbstractResource, 153 NodeAbstractResourceInstance: n.NodeAbstractResourceInstance,
113 } 154 }
114 } 155 }
115 156
116 return dn.EvalTree() 157 return dn.EvalTree()
117 default: 158 default:
118 panic(fmt.Errorf("unsupported resource mode %s", mode)) 159 panic(fmt.Errorf("unsupported resource mode %s", addr.Resource.Resource.Mode))
119 } 160 }
120} 161}
121 162
122func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode { 163func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalNode {
123 addr := n.NodeAbstractResource.Addr 164 addr := n.ResourceInstanceAddr()
124
125 // stateId is the ID to put into the state
126 stateId := addr.stateId()
127
128 // Build the instance info. More of this will be populated during eval
129 info := &InstanceInfo{
130 Id: stateId,
131 Type: addr.Type,
132 }
133 165
134 // Declare a bunch of variables that are used for state during 166 // Declare a bunch of variables that are used for state during
135 // evaluation. Most of this are written to by-address below. 167 // evaluation. Most of this are written to by-address below.
136 var provider ResourceProvider 168 var provider providers.Interface
137 var state *InstanceState 169 var providerSchema *ProviderSchema
170 var state *states.ResourceInstanceObject
138 171
139 // This happened during initial development. All known cases were 172 // This happened during initial development. All known cases were
140 // fixed and tested but as a sanity check let's assert here. 173 // fixed and tested but as a sanity check let's assert here.
@@ -150,25 +183,33 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
150 return &EvalSequence{ 183 return &EvalSequence{
151 Nodes: []EvalNode{ 184 Nodes: []EvalNode{
152 &EvalGetProvider{ 185 &EvalGetProvider{
153 Name: n.ResolvedProvider, 186 Addr: n.ResolvedProvider,
154 Output: &provider, 187 Output: &provider,
188 Schema: &providerSchema,
155 }, 189 },
190
156 &EvalReadState{ 191 &EvalReadState{
157 Name: stateId, 192 Addr: addr.Resource,
193 Provider: &provider,
194 ProviderSchema: &providerSchema,
195
158 Output: &state, 196 Output: &state,
159 }, 197 },
198
160 &EvalRefresh{ 199 &EvalRefresh{
161 Info: info, 200 Addr: addr.Resource,
162 Provider: &provider, 201 ProviderAddr: n.ResolvedProvider,
163 State: &state, 202 Provider: &provider,
164 Output: &state, 203 ProviderSchema: &providerSchema,
204 State: &state,
205 Output: &state,
165 }, 206 },
207
166 &EvalWriteState{ 208 &EvalWriteState{
167 Name: stateId, 209 Addr: addr.Resource,
168 ResourceType: n.ResourceState.Type, 210 ProviderAddr: n.ResolvedProvider,
169 Provider: n.ResolvedProvider, 211 ProviderSchema: &providerSchema,
170 Dependencies: n.ResourceState.Dependencies, 212 State: &state,
171 State: &state,
172 }, 213 },
173 }, 214 },
174 } 215 }
@@ -186,80 +227,62 @@ func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResource() EvalN
186// plan, but nothing is done with the diff after it is created - it is dropped, 227// plan, but nothing is done with the diff after it is created - it is dropped,
187// and its changes are not counted in the UI. 228// and its changes are not counted in the UI.
188func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode { 229func (n *NodeRefreshableManagedResourceInstance) evalTreeManagedResourceNoState() EvalNode {
230 addr := n.ResourceInstanceAddr()
231
189 // Declare a bunch of variables that are used for state during 232 // Declare a bunch of variables that are used for state during
190 // evaluation. Most of this are written to by-address below. 233 // evaluation. Most of this are written to by-address below.
191 var provider ResourceProvider 234 var provider providers.Interface
192 var state *InstanceState 235 var providerSchema *ProviderSchema
193 var resourceConfig *ResourceConfig 236 var change *plans.ResourceInstanceChange
194 237 var state *states.ResourceInstanceObject
195 addr := n.NodeAbstractResource.Addr
196 stateID := addr.stateId()
197 info := &InstanceInfo{
198 Id: stateID,
199 Type: addr.Type,
200 ModulePath: normalizeModulePath(addr.Path),
201 }
202
203 // Build the resource for eval
204 resource := &Resource{
205 Name: addr.Name,
206 Type: addr.Type,
207 CountIndex: addr.Index,
208 }
209 if resource.CountIndex < 0 {
210 resource.CountIndex = 0
211 }
212
213 // Determine the dependencies for the state.
214 stateDeps := n.StateReferences()
215
216 // n.Config can be nil if the config and state don't match
217 var raw *config.RawConfig
218 if n.Config != nil {
219 raw = n.Config.RawConfig.Copy()
220 }
221 238
222 return &EvalSequence{ 239 return &EvalSequence{
223 Nodes: []EvalNode{ 240 Nodes: []EvalNode{
224 &EvalInterpolate{
225 Config: raw,
226 Resource: resource,
227 Output: &resourceConfig,
228 },
229 &EvalGetProvider{ 241 &EvalGetProvider{
230 Name: n.ResolvedProvider, 242 Addr: n.ResolvedProvider,
231 Output: &provider, 243 Output: &provider,
244 Schema: &providerSchema,
232 }, 245 },
233 // Re-run validation to catch any errors we missed, e.g. type 246
234 // mismatches on computed values.
235 &EvalValidateResource{
236 Provider: &provider,
237 Config: &resourceConfig,
238 ResourceName: n.Config.Name,
239 ResourceType: n.Config.Type,
240 ResourceMode: n.Config.Mode,
241 IgnoreWarnings: true,
242 },
243 &EvalReadState{ 247 &EvalReadState{
244 Name: stateID, 248 Addr: addr.Resource,
249 Provider: &provider,
250 ProviderSchema: &providerSchema,
251
245 Output: &state, 252 Output: &state,
246 }, 253 },
254
247 &EvalDiff{ 255 &EvalDiff{
248 Name: stateID, 256 Addr: addr.Resource,
249 Info: info, 257 Config: n.Config,
250 Config: &resourceConfig, 258 Provider: &provider,
251 Resource: n.Config, 259 ProviderAddr: n.ResolvedProvider,
252 Provider: &provider, 260 ProviderSchema: &providerSchema,
253 State: &state, 261 State: &state,
254 OutputState: &state, 262 OutputChange: &change,
255 Stub: true, 263 OutputState: &state,
264 Stub: true,
256 }, 265 },
266
257 &EvalWriteState{ 267 &EvalWriteState{
258 Name: stateID, 268 Addr: addr.Resource,
259 ResourceType: n.Config.Type, 269 ProviderAddr: n.ResolvedProvider,
260 Provider: n.ResolvedProvider, 270 ProviderSchema: &providerSchema,
261 Dependencies: stateDeps, 271 State: &state,
262 State: &state, 272 },
273
274 // We must also save the planned change, so that expressions in
275 // other nodes, such as provider configurations and data resources,
276 // can work with the planned new value.
277 //
278 // This depends on the fact that Context.Refresh creates a
279 // temporary new empty changeset for the duration of its graph
280 // walk, and so this recorded change will be discarded immediately
281 // after the refresh walk completes.
282 &EvalWriteDiff{
283 Addr: addr.Resource,
284 Change: &change,
285 ProviderSchema: &providerSchema,
263 }, 286 },
264 }, 287 },
265 } 288 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
index 0df223d..734ec9e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
@@ -1,158 +1,87 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/dag" 4 "github.com/hashicorp/terraform/configs"
5 "github.com/hashicorp/terraform/configs/configschema"
6 "github.com/hashicorp/terraform/providers"
7 "github.com/hashicorp/terraform/provisioners"
8 "github.com/zclconf/go-cty/cty"
5) 9)
6 10
7// NodeValidatableResource represents a resource that is used for validation 11// NodeValidatableResource represents a resource that is used for validation
8// only. 12// only.
9type NodeValidatableResource struct { 13type NodeValidatableResource struct {
10 *NodeAbstractCountResource
11}
12
13// GraphNodeEvalable
14func (n *NodeValidatableResource) EvalTree() EvalNode {
15 // Ensure we're validating
16 c := n.NodeAbstractCountResource
17 c.Validate = true
18 return c.EvalTree()
19}
20
21// GraphNodeDynamicExpandable
22func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
23 // Grab the state which we read
24 state, lock := ctx.State()
25 lock.RLock()
26 defer lock.RUnlock()
27
28 // Expand the resource count which must be available by now from EvalTree
29 count := 1
30 if n.Config.RawCount.Value() != unknownValue() {
31 var err error
32 count, err = n.Config.Count()
33 if err != nil {
34 return nil, err
35 }
36 }
37
38 // The concrete resource factory we'll use
39 concreteResource := func(a *NodeAbstractResource) dag.Vertex {
40 // Add the config and state since we don't do that via transforms
41 a.Config = n.Config
42 a.ResolvedProvider = n.ResolvedProvider
43
44 return &NodeValidatableResourceInstance{
45 NodeAbstractResource: a,
46 }
47 }
48
49 // Start creating the steps
50 steps := []GraphTransformer{
51 // Expand the count.
52 &ResourceCountTransformer{
53 Concrete: concreteResource,
54 Count: count,
55 Addr: n.ResourceAddr(),
56 },
57
58 // Attach the state
59 &AttachStateTransformer{State: state},
60
61 // Targeting
62 &TargetsTransformer{ParsedTargets: n.Targets},
63
64 // Connect references so ordering is correct
65 &ReferenceTransformer{},
66
67 // Make sure there is a single root
68 &RootTransformer{},
69 }
70
71 // Build the graph
72 b := &BasicGraphBuilder{
73 Steps: steps,
74 Validate: true,
75 Name: "NodeValidatableResource",
76 }
77
78 return b.Build(ctx.Path())
79}
80
81// This represents a _single_ resource instance to validate.
82type NodeValidatableResourceInstance struct {
83 *NodeAbstractResource 14 *NodeAbstractResource
84} 15}
85 16
86// GraphNodeEvalable 17var (
87func (n *NodeValidatableResourceInstance) EvalTree() EvalNode { 18 _ GraphNodeSubPath = (*NodeValidatableResource)(nil)
88 addr := n.NodeAbstractResource.Addr 19 _ GraphNodeEvalable = (*NodeValidatableResource)(nil)
20 _ GraphNodeReferenceable = (*NodeValidatableResource)(nil)
21 _ GraphNodeReferencer = (*NodeValidatableResource)(nil)
22 _ GraphNodeResource = (*NodeValidatableResource)(nil)
23 _ GraphNodeAttachResourceConfig = (*NodeValidatableResource)(nil)
24)
89 25
90 // Build the resource for eval 26// GraphNodeEvalable
91 resource := &Resource{ 27func (n *NodeValidatableResource) EvalTree() EvalNode {
92 Name: addr.Name, 28 addr := n.ResourceAddr()
93 Type: addr.Type, 29 config := n.Config
94 CountIndex: addr.Index,
95 }
96 if resource.CountIndex < 0 {
97 resource.CountIndex = 0
98 }
99 30
100 // Declare a bunch of variables that are used for state during 31 // Declare the variables will be used are used to pass values along
101 // evaluation. Most of this are written to by-address below. 32 // the evaluation sequence below. These are written to via pointers
102 var config *ResourceConfig 33 // passed to the EvalNodes.
103 var provider ResourceProvider 34 var provider providers.Interface
35 var providerSchema *ProviderSchema
36 var configVal cty.Value
104 37
105 seq := &EvalSequence{ 38 seq := &EvalSequence{
106 Nodes: []EvalNode{ 39 Nodes: []EvalNode{
107 &EvalValidateResourceSelfRef{
108 Addr: &addr,
109 Config: &n.Config.RawConfig,
110 },
111 &EvalGetProvider{ 40 &EvalGetProvider{
112 Name: n.ResolvedProvider, 41 Addr: n.ResolvedProvider,
113 Output: &provider, 42 Output: &provider,
114 }, 43 Schema: &providerSchema,
115 &EvalInterpolate{
116 Config: n.Config.RawConfig.Copy(),
117 Resource: resource,
118 Output: &config,
119 }, 44 },
120 &EvalValidateResource{ 45 &EvalValidateResource{
121 Provider: &provider, 46 Addr: addr.Resource,
122 Config: &config, 47 Provider: &provider,
123 ResourceName: n.Config.Name, 48 ProviderSchema: &providerSchema,
124 ResourceType: n.Config.Type, 49 Config: config,
125 ResourceMode: n.Config.Mode, 50 ConfigVal: &configVal,
126 }, 51 },
127 }, 52 },
128 } 53 }
129 54
130 // Validate all the provisioners 55 if managed := n.Config.Managed; managed != nil {
131 for _, p := range n.Config.Provisioners { 56 hasCount := n.Config.Count != nil
132 var provisioner ResourceProvisioner 57
133 var connConfig *ResourceConfig 58 // Validate all the provisioners
134 seq.Nodes = append( 59 for _, p := range managed.Provisioners {
135 seq.Nodes, 60 var provisioner provisioners.Interface
136 &EvalGetProvisioner{ 61 var provisionerSchema *configschema.Block
137 Name: p.Type, 62
138 Output: &provisioner, 63 if p.Connection == nil {
139 }, 64 p.Connection = config.Managed.Connection
140 &EvalInterpolate{ 65 } else if config.Managed.Connection != nil {
141 Config: p.RawConfig.Copy(), 66 p.Connection.Config = configs.MergeBodies(config.Managed.Connection.Config, p.Connection.Config)
142 Resource: resource, 67 }
143 Output: &config, 68
144 }, 69 seq.Nodes = append(
145 &EvalInterpolate{ 70 seq.Nodes,
146 Config: p.ConnInfo.Copy(), 71 &EvalGetProvisioner{
147 Resource: resource, 72 Name: p.Type,
148 Output: &connConfig, 73 Output: &provisioner,
149 }, 74 Schema: &provisionerSchema,
150 &EvalValidateProvisioner{ 75 },
151 Provisioner: &provisioner, 76 &EvalValidateProvisioner{
152 Config: &config, 77 ResourceAddr: addr.Resource,
153 ConnConfig: &connConfig, 78 Provisioner: &provisioner,
154 }, 79 Schema: &provisionerSchema,
155 ) 80 Config: p,
81 ResourceHasCount: hasCount,
82 },
83 )
84 }
156 } 85 }
157 86
158 return seq 87 return seq
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
index cb61a4e..1c30290 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
@@ -1,22 +1,44 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/addrs"
5 5 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/dag"
7) 7)
8 8
9// NodeRootVariable represents a root variable input. 9// NodeRootVariable represents a root variable input.
10type NodeRootVariable struct { 10type NodeRootVariable struct {
11 Config *config.Variable 11 Addr addrs.InputVariable
12 Config *configs.Variable
12} 13}
13 14
15var (
16 _ GraphNodeSubPath = (*NodeRootVariable)(nil)
17 _ GraphNodeReferenceable = (*NodeRootVariable)(nil)
18 _ dag.GraphNodeDotter = (*NodeApplyableModuleVariable)(nil)
19)
20
14func (n *NodeRootVariable) Name() string { 21func (n *NodeRootVariable) Name() string {
15 result := fmt.Sprintf("var.%s", n.Config.Name) 22 return n.Addr.String()
16 return result 23}
24
25// GraphNodeSubPath
26func (n *NodeRootVariable) Path() addrs.ModuleInstance {
27 return addrs.RootModuleInstance
17} 28}
18 29
19// GraphNodeReferenceable 30// GraphNodeReferenceable
20func (n *NodeRootVariable) ReferenceableName() []string { 31func (n *NodeRootVariable) ReferenceableAddrs() []addrs.Referenceable {
21 return []string{n.Name()} 32 return []addrs.Referenceable{n.Addr}
33}
34
35// dag.GraphNodeDotter impl.
36func (n *NodeRootVariable) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
37 return &dag.DotNode{
38 Name: name,
39 Attrs: map[string]string{
40 "label": n.Name(),
41 "shape": "note",
42 },
43 }
22} 44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
index 51dd412..9757446 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/path.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/path.go
@@ -1,10 +1,17 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "strings" 4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
5) 7)
6 8
7// PathCacheKey returns a cache key for a module path. 9// PathObjectCacheKey is like PathCacheKey but includes an additional name
8func PathCacheKey(path []string) string { 10// to be included in the key, for module-namespaced objects.
9 return strings.Join(path, "|") 11//
12// The result of this function is guaranteed unique for any distinct pair
13// of path and name, but is not guaranteed to be in any particular format
14// and in particular should never be shown to end-users.
15func PathObjectCacheKey(path addrs.ModuleInstance, objectName string) string {
16 return fmt.Sprintf("%s|%s", path.String(), objectName)
10} 17}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
index 30db195..af04c6c 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/plan.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/plan.go
@@ -3,14 +3,13 @@ package terraform
3import ( 3import (
4 "bytes" 4 "bytes"
5 "encoding/gob" 5 "encoding/gob"
6 "errors"
7 "fmt" 6 "fmt"
8 "io" 7 "io"
9 "log"
10 "sync" 8 "sync"
11 9
12 "github.com/hashicorp/terraform/config/module" 10 "github.com/zclconf/go-cty/cty"
13 "github.com/hashicorp/terraform/version" 11
12 "github.com/hashicorp/terraform/configs"
14) 13)
15 14
16func init() { 15func init() {
@@ -31,9 +30,9 @@ type Plan struct {
31 // plan is applied. 30 // plan is applied.
32 Diff *Diff 31 Diff *Diff
33 32
34 // Module represents the entire configuration that was present when this 33 // Config represents the entire configuration that was present when this
35 // plan was created. 34 // plan was created.
36 Module *module.Tree 35 Config *configs.Config
37 36
38 // State is the Terraform state that was current when this plan was 37 // State is the Terraform state that was current when this plan was
39 // created. 38 // created.
@@ -44,7 +43,7 @@ type Plan struct {
44 43
45 // Vars retains the variables that were set when creating the plan, so 44 // Vars retains the variables that were set when creating the plan, so
46 // that the same variables can be applied during apply. 45 // that the same variables can be applied during apply.
47 Vars map[string]interface{} 46 Vars map[string]cty.Value
48 47
49 // Targets, if non-empty, contains a set of resource address strings that 48 // Targets, if non-empty, contains a set of resource address strings that
50 // identify graph nodes that were selected as targets for plan. 49 // identify graph nodes that were selected as targets for plan.
@@ -78,64 +77,6 @@ type Plan struct {
78 once sync.Once 77 once sync.Once
79} 78}
80 79
81// Context returns a Context with the data encapsulated in this plan.
82//
83// The following fields in opts are overridden by the plan: Config,
84// Diff, Variables.
85//
86// If State is not provided, it is set from the plan. If it _is_ provided,
87// it must be Equal to the state stored in plan, but may have a newer
88// serial.
89func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
90 var err error
91 opts, err = p.contextOpts(opts)
92 if err != nil {
93 return nil, err
94 }
95 return NewContext(opts)
96}
97
98// contextOpts mutates the given base ContextOpts in place to use input
99// objects obtained from the receiving plan.
100func (p *Plan) contextOpts(base *ContextOpts) (*ContextOpts, error) {
101 opts := base
102
103 opts.Diff = p.Diff
104 opts.Module = p.Module
105 opts.Targets = p.Targets
106 opts.ProviderSHA256s = p.ProviderSHA256s
107 opts.Destroy = p.Destroy
108
109 if opts.State == nil {
110 opts.State = p.State
111 } else if !opts.State.Equal(p.State) {
112 // Even if we're overriding the state, it should be logically equal
113 // to what's in plan. The only valid change to have made by the time
114 // we get here is to have incremented the serial.
115 //
116 // Due to the fact that serialization may change the representation of
117 // the state, there is little chance that these aren't actually equal.
118 // Log the error condition for reference, but continue with the state
119 // we have.
120 log.Println("[WARN] Plan state and ContextOpts state are not equal")
121 }
122
123 thisVersion := version.String()
124 if p.TerraformVersion != "" && p.TerraformVersion != thisVersion {
125 return nil, fmt.Errorf(
126 "plan was created with a different version of Terraform (created with %s, but running %s)",
127 p.TerraformVersion, thisVersion,
128 )
129 }
130
131 opts.Variables = make(map[string]interface{})
132 for k, v := range p.Vars {
133 opts.Variables[k] = v
134 }
135
136 return opts, nil
137}
138
139func (p *Plan) String() string { 80func (p *Plan) String() string {
140 buf := new(bytes.Buffer) 81 buf := new(bytes.Buffer)
141 buf.WriteString("DIFF:\n\n") 82 buf.WriteString("DIFF:\n\n")
@@ -158,7 +99,7 @@ func (p *Plan) init() {
158 } 99 }
159 100
160 if p.Vars == nil { 101 if p.Vars == nil {
161 p.Vars = make(map[string]interface{}) 102 p.Vars = make(map[string]cty.Value)
162 } 103 }
163 }) 104 })
164} 105}
@@ -172,63 +113,10 @@ const planFormatVersion byte = 2
172// ReadPlan reads a plan structure out of a reader in the format that 113// ReadPlan reads a plan structure out of a reader in the format that
173// was written by WritePlan. 114// was written by WritePlan.
174func ReadPlan(src io.Reader) (*Plan, error) { 115func ReadPlan(src io.Reader) (*Plan, error) {
175 var result *Plan 116 return nil, fmt.Errorf("terraform.ReadPlan is no longer in use; use planfile.Open instead")
176 var err error
177 n := 0
178
179 // Verify the magic bytes
180 magic := make([]byte, len(planFormatMagic))
181 for n < len(magic) {
182 n, err = src.Read(magic[n:])
183 if err != nil {
184 return nil, fmt.Errorf("error while reading magic bytes: %s", err)
185 }
186 }
187 if string(magic) != planFormatMagic {
188 return nil, fmt.Errorf("not a valid plan file")
189 }
190
191 // Verify the version is something we can read
192 var formatByte [1]byte
193 n, err = src.Read(formatByte[:])
194 if err != nil {
195 return nil, err
196 }
197 if n != len(formatByte) {
198 return nil, errors.New("failed to read plan version byte")
199 }
200
201 if formatByte[0] != planFormatVersion {
202 return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0])
203 }
204
205 dec := gob.NewDecoder(src)
206 if err := dec.Decode(&result); err != nil {
207 return nil, err
208 }
209
210 return result, nil
211} 117}
212 118
213// WritePlan writes a plan somewhere in a binary format. 119// WritePlan writes a plan somewhere in a binary format.
214func WritePlan(d *Plan, dst io.Writer) error { 120func WritePlan(d *Plan, dst io.Writer) error {
215 // Write the magic bytes so we can determine the file format later 121 return fmt.Errorf("terraform.WritePlan is no longer in use; use planfile.Create instead")
216 n, err := dst.Write([]byte(planFormatMagic))
217 if err != nil {
218 return err
219 }
220 if n != len(planFormatMagic) {
221 return errors.New("failed to write plan format magic bytes")
222 }
223
224 // Write a version byte so we can iterate on version at some point
225 n, err = dst.Write([]byte{planFormatVersion})
226 if err != nil {
227 return err
228 }
229 if n != 1 {
230 return errors.New("failed to write plan version byte")
231 }
232
233 return gob.NewEncoder(dst).Encode(d)
234} 122}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go
new file mode 100644
index 0000000..4ae346d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/provider_mock.go
@@ -0,0 +1,522 @@
1package terraform
2
3import (
4 "encoding/json"
5 "fmt"
6 "sync"
7
8 "github.com/zclconf/go-cty/cty"
9 ctyjson "github.com/zclconf/go-cty/cty/json"
10
11 "github.com/hashicorp/terraform/config"
12 "github.com/hashicorp/terraform/config/hcl2shim"
13 "github.com/hashicorp/terraform/providers"
14 "github.com/hashicorp/terraform/tfdiags"
15)
16
17var _ providers.Interface = (*MockProvider)(nil)
18
19// MockProvider implements providers.Interface but mocks out all the
20// calls for testing purposes.
21type MockProvider struct {
22 sync.Mutex
23
24 // Anything you want, in case you need to store extra data with the mock.
25 Meta interface{}
26
27 GetSchemaCalled bool
28 GetSchemaReturn *ProviderSchema // This is using ProviderSchema directly rather than providers.GetSchemaResponse for compatibility with old tests
29
30 PrepareProviderConfigCalled bool
31 PrepareProviderConfigResponse providers.PrepareProviderConfigResponse
32 PrepareProviderConfigRequest providers.PrepareProviderConfigRequest
33 PrepareProviderConfigFn func(providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse
34
35 ValidateResourceTypeConfigCalled bool
36 ValidateResourceTypeConfigTypeName string
37 ValidateResourceTypeConfigResponse providers.ValidateResourceTypeConfigResponse
38 ValidateResourceTypeConfigRequest providers.ValidateResourceTypeConfigRequest
39 ValidateResourceTypeConfigFn func(providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse
40
41 ValidateDataSourceConfigCalled bool
42 ValidateDataSourceConfigTypeName string
43 ValidateDataSourceConfigResponse providers.ValidateDataSourceConfigResponse
44 ValidateDataSourceConfigRequest providers.ValidateDataSourceConfigRequest
45 ValidateDataSourceConfigFn func(providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse
46
47 UpgradeResourceStateCalled bool
48 UpgradeResourceStateTypeName string
49 UpgradeResourceStateResponse providers.UpgradeResourceStateResponse
50 UpgradeResourceStateRequest providers.UpgradeResourceStateRequest
51 UpgradeResourceStateFn func(providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse
52
53 ConfigureCalled bool
54 ConfigureResponse providers.ConfigureResponse
55 ConfigureRequest providers.ConfigureRequest
56 ConfigureNewFn func(providers.ConfigureRequest) providers.ConfigureResponse // Named ConfigureNewFn so we can still have the legacy ConfigureFn declared below
57
58 StopCalled bool
59 StopFn func() error
60 StopResponse error
61
62 ReadResourceCalled bool
63 ReadResourceResponse providers.ReadResourceResponse
64 ReadResourceRequest providers.ReadResourceRequest
65 ReadResourceFn func(providers.ReadResourceRequest) providers.ReadResourceResponse
66
67 PlanResourceChangeCalled bool
68 PlanResourceChangeResponse providers.PlanResourceChangeResponse
69 PlanResourceChangeRequest providers.PlanResourceChangeRequest
70 PlanResourceChangeFn func(providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse
71
72 ApplyResourceChangeCalled bool
73 ApplyResourceChangeResponse providers.ApplyResourceChangeResponse
74 ApplyResourceChangeRequest providers.ApplyResourceChangeRequest
75 ApplyResourceChangeFn func(providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse
76
77 ImportResourceStateCalled bool
78 ImportResourceStateResponse providers.ImportResourceStateResponse
79 ImportResourceStateRequest providers.ImportResourceStateRequest
80 ImportResourceStateFn func(providers.ImportResourceStateRequest) providers.ImportResourceStateResponse
81 // Legacy return type for existing tests, which will be shimmed into an
82 // ImportResourceStateResponse if set
83 ImportStateReturn []*InstanceState
84
85 ReadDataSourceCalled bool
86 ReadDataSourceResponse providers.ReadDataSourceResponse
87 ReadDataSourceRequest providers.ReadDataSourceRequest
88 ReadDataSourceFn func(providers.ReadDataSourceRequest) providers.ReadDataSourceResponse
89
90 CloseCalled bool
91 CloseError error
92
93 // Legacy callbacks: if these are set, we will shim incoming calls for
94 // new-style methods to these old-fashioned terraform.ResourceProvider
95 // mock callbacks, for the benefit of older tests that were written against
96 // the old mock API.
97 ValidateFn func(c *ResourceConfig) (ws []string, es []error)
98 ConfigureFn func(c *ResourceConfig) error
99 DiffFn func(info *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error)
100 ApplyFn func(info *InstanceInfo, s *InstanceState, d *InstanceDiff) (*InstanceState, error)
101}
102
103func (p *MockProvider) GetSchema() providers.GetSchemaResponse {
104 p.Lock()
105 defer p.Unlock()
106 p.GetSchemaCalled = true
107 return p.getSchema()
108}
109
110func (p *MockProvider) getSchema() providers.GetSchemaResponse {
111 // This version of getSchema doesn't do any locking, so it's suitable to
112 // call from other methods of this mock as long as they are already
113 // holding the lock.
114
115 ret := providers.GetSchemaResponse{
116 Provider: providers.Schema{},
117 DataSources: map[string]providers.Schema{},
118 ResourceTypes: map[string]providers.Schema{},
119 }
120 if p.GetSchemaReturn != nil {
121 ret.Provider.Block = p.GetSchemaReturn.Provider
122 for n, s := range p.GetSchemaReturn.DataSources {
123 ret.DataSources[n] = providers.Schema{
124 Block: s,
125 }
126 }
127 for n, s := range p.GetSchemaReturn.ResourceTypes {
128 ret.ResourceTypes[n] = providers.Schema{
129 Version: int64(p.GetSchemaReturn.ResourceTypeSchemaVersions[n]),
130 Block: s,
131 }
132 }
133 }
134
135 return ret
136}
137
138func (p *MockProvider) PrepareProviderConfig(r providers.PrepareProviderConfigRequest) providers.PrepareProviderConfigResponse {
139 p.Lock()
140 defer p.Unlock()
141
142 p.PrepareProviderConfigCalled = true
143 p.PrepareProviderConfigRequest = r
144 if p.PrepareProviderConfigFn != nil {
145 return p.PrepareProviderConfigFn(r)
146 }
147 return p.PrepareProviderConfigResponse
148}
149
150func (p *MockProvider) ValidateResourceTypeConfig(r providers.ValidateResourceTypeConfigRequest) providers.ValidateResourceTypeConfigResponse {
151 p.Lock()
152 defer p.Unlock()
153
154 p.ValidateResourceTypeConfigCalled = true
155 p.ValidateResourceTypeConfigRequest = r
156
157 if p.ValidateFn != nil {
158 resp := p.getSchema()
159 schema := resp.Provider.Block
160 rc := NewResourceConfigShimmed(r.Config, schema)
161 warns, errs := p.ValidateFn(rc)
162 ret := providers.ValidateResourceTypeConfigResponse{}
163 for _, warn := range warns {
164 ret.Diagnostics = ret.Diagnostics.Append(tfdiags.SimpleWarning(warn))
165 }
166 for _, err := range errs {
167 ret.Diagnostics = ret.Diagnostics.Append(err)
168 }
169 }
170 if p.ValidateResourceTypeConfigFn != nil {
171 return p.ValidateResourceTypeConfigFn(r)
172 }
173
174 return p.ValidateResourceTypeConfigResponse
175}
176
177func (p *MockProvider) ValidateDataSourceConfig(r providers.ValidateDataSourceConfigRequest) providers.ValidateDataSourceConfigResponse {
178 p.Lock()
179 defer p.Unlock()
180
181 p.ValidateDataSourceConfigCalled = true
182 p.ValidateDataSourceConfigRequest = r
183
184 if p.ValidateDataSourceConfigFn != nil {
185 return p.ValidateDataSourceConfigFn(r)
186 }
187
188 return p.ValidateDataSourceConfigResponse
189}
190
191func (p *MockProvider) UpgradeResourceState(r providers.UpgradeResourceStateRequest) providers.UpgradeResourceStateResponse {
192 p.Lock()
193 defer p.Unlock()
194
195 schemas := p.getSchema()
196 schema := schemas.ResourceTypes[r.TypeName]
197 schemaType := schema.Block.ImpliedType()
198
199 p.UpgradeResourceStateCalled = true
200 p.UpgradeResourceStateRequest = r
201
202 if p.UpgradeResourceStateFn != nil {
203 return p.UpgradeResourceStateFn(r)
204 }
205
206 resp := p.UpgradeResourceStateResponse
207
208 if resp.UpgradedState == cty.NilVal {
209 switch {
210 case r.RawStateFlatmap != nil:
211 v, err := hcl2shim.HCL2ValueFromFlatmap(r.RawStateFlatmap, schemaType)
212 if err != nil {
213 resp.Diagnostics = resp.Diagnostics.Append(err)
214 return resp
215 }
216 resp.UpgradedState = v
217 case len(r.RawStateJSON) > 0:
218 v, err := ctyjson.Unmarshal(r.RawStateJSON, schemaType)
219
220 if err != nil {
221 resp.Diagnostics = resp.Diagnostics.Append(err)
222 return resp
223 }
224 resp.UpgradedState = v
225 }
226 }
227 return resp
228}
229
230func (p *MockProvider) Configure(r providers.ConfigureRequest) providers.ConfigureResponse {
231 p.Lock()
232 defer p.Unlock()
233
234 p.ConfigureCalled = true
235 p.ConfigureRequest = r
236
237 if p.ConfigureFn != nil {
238 resp := p.getSchema()
239 schema := resp.Provider.Block
240 rc := NewResourceConfigShimmed(r.Config, schema)
241 ret := providers.ConfigureResponse{}
242
243 err := p.ConfigureFn(rc)
244 if err != nil {
245 ret.Diagnostics = ret.Diagnostics.Append(err)
246 }
247 return ret
248 }
249 if p.ConfigureNewFn != nil {
250 return p.ConfigureNewFn(r)
251 }
252
253 return p.ConfigureResponse
254}
255
256func (p *MockProvider) Stop() error {
257 // We intentionally don't lock in this one because the whole point of this
258 // method is to be called concurrently with another operation that can
259 // be cancelled. The provider itself is responsible for handling
260 // any concurrency concerns in this case.
261
262 p.StopCalled = true
263 if p.StopFn != nil {
264 return p.StopFn()
265 }
266
267 return p.StopResponse
268}
269
270func (p *MockProvider) ReadResource(r providers.ReadResourceRequest) providers.ReadResourceResponse {
271 p.Lock()
272 defer p.Unlock()
273
274 p.ReadResourceCalled = true
275 p.ReadResourceRequest = r
276
277 if p.ReadResourceFn != nil {
278 return p.ReadResourceFn(r)
279 }
280
281 // make sure the NewState fits the schema
282 newState, err := p.GetSchemaReturn.ResourceTypes[r.TypeName].CoerceValue(p.ReadResourceResponse.NewState)
283 if err != nil {
284 panic(err)
285 }
286 resp := p.ReadResourceResponse
287 resp.NewState = newState
288
289 return resp
290}
291
292func (p *MockProvider) PlanResourceChange(r providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
293 p.Lock()
294 defer p.Unlock()
295
296 p.PlanResourceChangeCalled = true
297 p.PlanResourceChangeRequest = r
298
299 if p.DiffFn != nil {
300 ps := p.getSchema()
301 if ps.ResourceTypes == nil || ps.ResourceTypes[r.TypeName].Block == nil {
302 return providers.PlanResourceChangeResponse{
303 Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Printf("mock provider has no schema for resource type %s", r.TypeName)),
304 }
305 }
306 schema := ps.ResourceTypes[r.TypeName].Block
307 info := &InstanceInfo{
308 Type: r.TypeName,
309 }
310 priorState := NewInstanceStateShimmedFromValue(r.PriorState, 0)
311 cfg := NewResourceConfigShimmed(r.Config, schema)
312
313 legacyDiff, err := p.DiffFn(info, priorState, cfg)
314
315 var res providers.PlanResourceChangeResponse
316 res.PlannedState = r.ProposedNewState
317 if err != nil {
318 res.Diagnostics = res.Diagnostics.Append(err)
319 }
320 if legacyDiff != nil {
321 newVal, err := legacyDiff.ApplyToValue(r.PriorState, schema)
322 if err != nil {
323 res.Diagnostics = res.Diagnostics.Append(err)
324 }
325
326 res.PlannedState = newVal
327
328 var requiresNew []string
329 for attr, d := range legacyDiff.Attributes {
330 if d.RequiresNew {
331 requiresNew = append(requiresNew, attr)
332 }
333 }
334 requiresReplace, err := hcl2shim.RequiresReplace(requiresNew, schema.ImpliedType())
335 if err != nil {
336 res.Diagnostics = res.Diagnostics.Append(err)
337 }
338 res.RequiresReplace = requiresReplace
339 }
340 return res
341 }
342 if p.PlanResourceChangeFn != nil {
343 return p.PlanResourceChangeFn(r)
344 }
345
346 return p.PlanResourceChangeResponse
347}
348
349func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeRequest) providers.ApplyResourceChangeResponse {
350 p.Lock()
351 p.ApplyResourceChangeCalled = true
352 p.ApplyResourceChangeRequest = r
353 p.Unlock()
354
355 if p.ApplyFn != nil {
356 // ApplyFn is a special callback fashioned after our old provider
357 // interface, which expected to be given an actual diff rather than
358 // separate old/new values to apply. Therefore we need to approximate
359 // a diff here well enough that _most_ of our legacy ApplyFns in old
360 // tests still see the behavior they are expecting. New tests should
361 // not use this, and should instead use ApplyResourceChangeFn directly.
362 providerSchema := p.getSchema()
363 schema, ok := providerSchema.ResourceTypes[r.TypeName]
364 if !ok {
365 return providers.ApplyResourceChangeResponse{
366 Diagnostics: tfdiags.Diagnostics(nil).Append(fmt.Errorf("no mocked schema available for resource type %s", r.TypeName)),
367 }
368 }
369
370 info := &InstanceInfo{
371 Type: r.TypeName,
372 }
373
374 priorVal := r.PriorState
375 plannedVal := r.PlannedState
376 priorMap := hcl2shim.FlatmapValueFromHCL2(priorVal)
377 plannedMap := hcl2shim.FlatmapValueFromHCL2(plannedVal)
378 s := NewInstanceStateShimmedFromValue(priorVal, 0)
379 d := &InstanceDiff{
380 Attributes: make(map[string]*ResourceAttrDiff),
381 }
382 if plannedMap == nil { // destroying, then
383 d.Destroy = true
384 // Destroy diffs don't have any attribute diffs
385 } else {
386 if priorMap == nil { // creating, then
387 // We'll just make an empty prior map to make things easier below.
388 priorMap = make(map[string]string)
389 }
390
391 for k, new := range plannedMap {
392 old := priorMap[k]
393 newComputed := false
394 if new == config.UnknownVariableValue {
395 new = ""
396 newComputed = true
397 }
398 d.Attributes[k] = &ResourceAttrDiff{
399 Old: old,
400 New: new,
401 NewComputed: newComputed,
402 Type: DiffAttrInput, // not generally used in tests, so just hard-coded
403 }
404 }
405 // Also need any attributes that were removed in "planned"
406 for k, old := range priorMap {
407 if _, ok := plannedMap[k]; ok {
408 continue
409 }
410 d.Attributes[k] = &ResourceAttrDiff{
411 Old: old,
412 NewRemoved: true,
413 Type: DiffAttrInput,
414 }
415 }
416 }
417 newState, err := p.ApplyFn(info, s, d)
418 resp := providers.ApplyResourceChangeResponse{}
419 if err != nil {
420 resp.Diagnostics = resp.Diagnostics.Append(err)
421 }
422 if newState != nil {
423 var newVal cty.Value
424 if newState != nil {
425 var err error
426 newVal, err = newState.AttrsAsObjectValue(schema.Block.ImpliedType())
427 if err != nil {
428 resp.Diagnostics = resp.Diagnostics.Append(err)
429 }
430 } else {
431 // If apply returned a nil new state then that's the old way to
432 // indicate that the object was destroyed. Our new interface calls
433 // for that to be signalled as a null value.
434 newVal = cty.NullVal(schema.Block.ImpliedType())
435 }
436 resp.NewState = newVal
437 }
438
439 return resp
440 }
441 if p.ApplyResourceChangeFn != nil {
442 return p.ApplyResourceChangeFn(r)
443 }
444
445 return p.ApplyResourceChangeResponse
446}
447
448func (p *MockProvider) ImportResourceState(r providers.ImportResourceStateRequest) providers.ImportResourceStateResponse {
449 p.Lock()
450 defer p.Unlock()
451
452 if p.ImportStateReturn != nil {
453 for _, is := range p.ImportStateReturn {
454 if is.Attributes == nil {
455 is.Attributes = make(map[string]string)
456 }
457 is.Attributes["id"] = is.ID
458
459 typeName := is.Ephemeral.Type
460 // Use the requested type if the resource has no type of it's own.
461 // We still return the empty type, which will error, but this prevents a panic.
462 if typeName == "" {
463 typeName = r.TypeName
464 }
465
466 schema := p.GetSchemaReturn.ResourceTypes[typeName]
467 if schema == nil {
468 panic("no schema found for " + typeName)
469 }
470
471 private, err := json.Marshal(is.Meta)
472 if err != nil {
473 panic(err)
474 }
475
476 state, err := hcl2shim.HCL2ValueFromFlatmap(is.Attributes, schema.ImpliedType())
477 if err != nil {
478 panic(err)
479 }
480
481 state, err = schema.CoerceValue(state)
482 if err != nil {
483 panic(err)
484 }
485
486 p.ImportResourceStateResponse.ImportedResources = append(
487 p.ImportResourceStateResponse.ImportedResources,
488 providers.ImportedResource{
489 TypeName: is.Ephemeral.Type,
490 State: state,
491 Private: private,
492 })
493 }
494 }
495
496 p.ImportResourceStateCalled = true
497 p.ImportResourceStateRequest = r
498 if p.ImportResourceStateFn != nil {
499 return p.ImportResourceStateFn(r)
500 }
501
502 return p.ImportResourceStateResponse
503}
504
505func (p *MockProvider) ReadDataSource(r providers.ReadDataSourceRequest) providers.ReadDataSourceResponse {
506 p.Lock()
507 defer p.Unlock()
508
509 p.ReadDataSourceCalled = true
510 p.ReadDataSourceRequest = r
511
512 if p.ReadDataSourceFn != nil {
513 return p.ReadDataSourceFn(r)
514 }
515
516 return p.ReadDataSourceResponse
517}
518
519func (p *MockProvider) Close() error {
520 p.CloseCalled = true
521 return p.CloseError
522}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go
new file mode 100644
index 0000000..f595891
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/provisioner_mock.go
@@ -0,0 +1,154 @@
1package terraform
2
3import (
4 "fmt"
5 "sync"
6
7 "github.com/zclconf/go-cty/cty"
8 "github.com/zclconf/go-cty/cty/convert"
9
10 "github.com/hashicorp/terraform/provisioners"
11)
12
// Compile-time check that MockProvisioner satisfies the real interface.
var _ provisioners.Interface = (*MockProvisioner)(nil)

// MockProvisioner implements provisioners.Interface but mocks out all the
// calls for testing purposes.
//
// For each interface method Xxx there is an XxxCalled flag recording that
// the method ran, an XxxRequest capturing the last argument (where the
// method takes one), an XxxResponse canned return value, and an optional
// XxxFn hook that, when non-nil, overrides the canned response.
type MockProvisioner struct {
	sync.Mutex
	// Anything you want, in case you need to store extra data with the mock.
	Meta interface{}

	GetSchemaCalled   bool
	GetSchemaResponse provisioners.GetSchemaResponse

	ValidateProvisionerConfigCalled   bool
	ValidateProvisionerConfigRequest  provisioners.ValidateProvisionerConfigRequest
	ValidateProvisionerConfigResponse provisioners.ValidateProvisionerConfigResponse
	ValidateProvisionerConfigFn       func(provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse

	ProvisionResourceCalled   bool
	ProvisionResourceRequest  provisioners.ProvisionResourceRequest
	ProvisionResourceResponse provisioners.ProvisionResourceResponse
	ProvisionResourceFn       func(provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse

	StopCalled   bool
	StopResponse error
	StopFn       func() error

	CloseCalled   bool
	CloseResponse error
	CloseFn       func() error

	// Legacy callbacks: if these are set, we will shim incoming calls for
	// new-style methods to these old-fashioned terraform.ResourceProvider
	// mock callbacks, for the benefit of older tests that were written against
	// the old mock API.
	ApplyFn func(rs *InstanceState, c *ResourceConfig) error
}
49
50func (p *MockProvisioner) GetSchema() provisioners.GetSchemaResponse {
51 p.Lock()
52 defer p.Unlock()
53
54 p.GetSchemaCalled = true
55 return p.getSchema()
56}
57
// getSchema is the implementation of GetSchema, which can be called from other
// methods on MockProvisioner that may already be holding the lock. It does no
// locking or call-recording of its own; it just returns the canned response.
func (p *MockProvisioner) getSchema() provisioners.GetSchemaResponse {
	return p.GetSchemaResponse
}
63
64func (p *MockProvisioner) ValidateProvisionerConfig(r provisioners.ValidateProvisionerConfigRequest) provisioners.ValidateProvisionerConfigResponse {
65 p.Lock()
66 defer p.Unlock()
67
68 p.ValidateProvisionerConfigCalled = true
69 p.ValidateProvisionerConfigRequest = r
70 if p.ValidateProvisionerConfigFn != nil {
71 return p.ValidateProvisionerConfigFn(r)
72 }
73 return p.ValidateProvisionerConfigResponse
74}
75
76func (p *MockProvisioner) ProvisionResource(r provisioners.ProvisionResourceRequest) provisioners.ProvisionResourceResponse {
77 p.Lock()
78 defer p.Unlock()
79
80 p.ProvisionResourceCalled = true
81 p.ProvisionResourceRequest = r
82 if p.ApplyFn != nil {
83 if !r.Config.IsKnown() {
84 panic(fmt.Sprintf("cannot provision with unknown value: %#v", r.Config))
85 }
86
87 schema := p.getSchema()
88 rc := NewResourceConfigShimmed(r.Config, schema.Provisioner)
89 connVal := r.Connection
90 connMap := map[string]string{}
91
92 if !connVal.IsNull() && connVal.IsKnown() {
93 for it := connVal.ElementIterator(); it.Next(); {
94 ak, av := it.Element()
95 name := ak.AsString()
96
97 if !av.IsKnown() || av.IsNull() {
98 continue
99 }
100
101 av, _ = convert.Convert(av, cty.String)
102 connMap[name] = av.AsString()
103 }
104 }
105
106 // We no longer pass the full instance state to a provisioner, so we'll
107 // construct a partial one that should be good enough for what existing
108 // test mocks need.
109 is := &InstanceState{
110 Ephemeral: EphemeralState{
111 ConnInfo: connMap,
112 },
113 }
114 var resp provisioners.ProvisionResourceResponse
115 err := p.ApplyFn(is, rc)
116 if err != nil {
117 resp.Diagnostics = resp.Diagnostics.Append(err)
118 }
119 return resp
120 }
121 if p.ProvisionResourceFn != nil {
122 fn := p.ProvisionResourceFn
123 p.Unlock()
124 return fn(r)
125 }
126
127 return p.ProvisionResourceResponse
128}
129
130func (p *MockProvisioner) Stop() error {
131 // We intentionally don't lock in this one because the whole point of this
132 // method is to be called concurrently with another operation that can
133 // be cancelled. The provisioner itself is responsible for handling
134 // any concurrency concerns in this case.
135
136 p.StopCalled = true
137 if p.StopFn != nil {
138 return p.StopFn()
139 }
140
141 return p.StopResponse
142}
143
144func (p *MockProvisioner) Close() error {
145 p.Lock()
146 defer p.Unlock()
147
148 p.CloseCalled = true
149 if p.CloseFn != nil {
150 return p.CloseFn()
151 }
152
153 return p.CloseResponse
154}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
index 2f5ebb5..2cd6c5b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go
@@ -7,9 +7,14 @@ import (
7 "strconv" 7 "strconv"
8 "strings" 8 "strings"
9 9
10 "github.com/hashicorp/terraform/config"
11 "github.com/mitchellh/copystructure" 10 "github.com/mitchellh/copystructure"
12 "github.com/mitchellh/reflectwalk" 11 "github.com/mitchellh/reflectwalk"
12 "github.com/zclconf/go-cty/cty"
13
14 "github.com/hashicorp/terraform/addrs"
15 "github.com/hashicorp/terraform/config"
16 "github.com/hashicorp/terraform/config/hcl2shim"
17 "github.com/hashicorp/terraform/configs/configschema"
13) 18)
14 19
15// ResourceProvisionerConfig is used to pair a provisioner 20// ResourceProvisionerConfig is used to pair a provisioner
@@ -25,9 +30,10 @@ type ResourceProvisionerConfig struct {
25 ConnInfo *config.RawConfig 30 ConnInfo *config.RawConfig
26} 31}
27 32
28// Resource encapsulates a resource, its configuration, its provider, 33// Resource is a legacy way to identify a particular resource instance.
29// its current state, and potentially a desired diff from the state it 34//
30// wants to reach. 35// New code should use addrs.ResourceInstance instead. This is still here
36// only for codepaths that haven't been updated yet.
31type Resource struct { 37type Resource struct {
32 // These are all used by the new EvalNode stuff. 38 // These are all used by the new EvalNode stuff.
33 Name string 39 Name string
@@ -47,6 +53,31 @@ type Resource struct {
47 Flags ResourceFlag 53 Flags ResourceFlag
48} 54}
49 55
56// NewResource constructs a legacy Resource object from an
57// addrs.ResourceInstance value.
58//
59// This is provided to shim to old codepaths that haven't been updated away
60// from this type yet. Since this old type is not able to represent instances
61// that have string keys, this function will panic if given a resource address
62// that has a string key.
63func NewResource(addr addrs.ResourceInstance) *Resource {
64 ret := &Resource{
65 Name: addr.Resource.Name,
66 Type: addr.Resource.Type,
67 }
68
69 if addr.Key != addrs.NoKey {
70 switch tk := addr.Key.(type) {
71 case addrs.IntKey:
72 ret.CountIndex = int(tk)
73 default:
74 panic(fmt.Errorf("resource instance with key %#v is not supported", addr.Key))
75 }
76 }
77
78 return ret
79}
80
50// ResourceKind specifies what kind of instance we're working with, whether 81// ResourceKind specifies what kind of instance we're working with, whether
51// its a primary instance, a tainted instance, or an orphan. 82// its a primary instance, a tainted instance, or an orphan.
52type ResourceFlag byte 83type ResourceFlag byte
@@ -72,20 +103,53 @@ type InstanceInfo struct {
72 uniqueExtra string 103 uniqueExtra string
73} 104}
74 105
75// HumanId is a unique Id that is human-friendly and useful for UI elements. 106// NewInstanceInfo constructs an InstanceInfo from an addrs.AbsResourceInstance.
76func (i *InstanceInfo) HumanId() string { 107//
77 if i == nil { 108// InstanceInfo is a legacy type, and uses of it should be gradually replaced
78 return "<nil>" 109// by direct use of addrs.AbsResource or addrs.AbsResourceInstance as
110// appropriate.
111//
112// The legacy InstanceInfo type cannot represent module instances with instance
113// keys, so this function will panic if given such a path. Uses of this type
114// should all be removed or replaced before implementing "count" and "for_each"
115// arguments on modules in order to avoid such panics.
116//
117// This legacy type also cannot represent resource instances with string
118// instance keys. It will panic if the given key is not either NoKey or an
119// IntKey.
120func NewInstanceInfo(addr addrs.AbsResourceInstance) *InstanceInfo {
121 // We need an old-style []string module path for InstanceInfo.
122 path := make([]string, len(addr.Module))
123 for i, step := range addr.Module {
124 if step.InstanceKey != addrs.NoKey {
125 panic("NewInstanceInfo cannot convert module instance with key")
126 }
127 path[i] = step.Name
79 } 128 }
80 129
81 if len(i.ModulePath) <= 1 { 130 // This is a funny old meaning of "id" that is no longer current. It should
82 return i.Id 131 // not be used for anything users might see. Note that it does not include
132 // a representation of the resource mode, and so it's impossible to
133 // determine from an InstanceInfo alone whether it is a managed or data
134 // resource that is being referred to.
135 id := fmt.Sprintf("%s.%s", addr.Resource.Resource.Type, addr.Resource.Resource.Name)
136 if addr.Resource.Resource.Mode == addrs.DataResourceMode {
137 id = "data." + id
138 }
139 if addr.Resource.Key != addrs.NoKey {
140 switch k := addr.Resource.Key.(type) {
141 case addrs.IntKey:
142 id = id + fmt.Sprintf(".%d", int(k))
143 default:
144 panic(fmt.Sprintf("NewInstanceInfo cannot convert resource instance with %T instance key", addr.Resource.Key))
145 }
83 } 146 }
84 147
85 return fmt.Sprintf( 148 return &InstanceInfo{
86 "module.%s.%s", 149 Id: id,
87 strings.Join(i.ModulePath[1:], "."), 150 ModulePath: path,
88 i.Id) 151 Type: addr.Resource.Resource.Type,
152 }
89} 153}
90 154
91// ResourceAddress returns the address of the resource that the receiver is describing. 155// ResourceAddress returns the address of the resource that the receiver is describing.
@@ -128,18 +192,9 @@ func (i *InstanceInfo) ResourceAddress() *ResourceAddress {
128 return addr 192 return addr
129} 193}
130 194
131func (i *InstanceInfo) uniqueId() string { 195// ResourceConfig is a legacy type that was formerly used to represent
132 prefix := i.HumanId() 196// interpolatable configuration blocks. It is now only used to shim to old
133 if v := i.uniqueExtra; v != "" { 197// APIs that still use this type, via NewResourceConfigShimmed.
134 prefix += " " + v
135 }
136
137 return prefix
138}
139
140// ResourceConfig holds the configuration given for a resource. This is
141// done instead of a raw `map[string]interface{}` type so that rich
142// methods can be added to it to make dealing with it easier.
143type ResourceConfig struct { 198type ResourceConfig struct {
144 ComputedKeys []string 199 ComputedKeys []string
145 Raw map[string]interface{} 200 Raw map[string]interface{}
@@ -155,6 +210,85 @@ func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
155 return result 210 return result
156} 211}
157 212
213// NewResourceConfigShimmed wraps a cty.Value of object type in a legacy
214// ResourceConfig object, so that it can be passed to older APIs that expect
215// this wrapping.
216//
217// The returned ResourceConfig is already interpolated and cannot be
218// re-interpolated. It is, therefore, useful only to functions that expect
219// an already-populated ResourceConfig which they then treat as read-only.
220//
221// If the given value is not of an object type that conforms to the given
222// schema then this function will panic.
223func NewResourceConfigShimmed(val cty.Value, schema *configschema.Block) *ResourceConfig {
224 if !val.Type().IsObjectType() {
225 panic(fmt.Errorf("NewResourceConfigShimmed given %#v; an object type is required", val.Type()))
226 }
227 ret := &ResourceConfig{}
228
229 legacyVal := hcl2shim.ConfigValueFromHCL2Block(val, schema)
230 if legacyVal != nil {
231 ret.Config = legacyVal
232
233 // Now we need to walk through our structure and find any unknown values,
234 // producing the separate list ComputedKeys to represent these. We use the
235 // schema here so that we can preserve the expected invariant
236 // that an attribute is always either wholly known or wholly unknown, while
237 // a child block can be partially unknown.
238 ret.ComputedKeys = newResourceConfigShimmedComputedKeys(val, "")
239 } else {
240 ret.Config = make(map[string]interface{})
241 }
242 ret.Raw = ret.Config
243
244 return ret
245}
246
 247// Record any unknown config values in ComputedKeys. This field had been unused in
248// helper/schema, but in the new protocol we're using this so that the SDK can
249// now handle having an unknown collection. The legacy diff code doesn't
250// properly handle the unknown, because it can't be expressed in the same way
251// between the config and diff.
252func newResourceConfigShimmedComputedKeys(val cty.Value, path string) []string {
253 var ret []string
254 ty := val.Type()
255
256 if val.IsNull() {
257 return ret
258 }
259
260 if !val.IsKnown() {
261 // we shouldn't have an entirely unknown resource, but prevent empty
262 // strings just in case
263 if len(path) > 0 {
264 ret = append(ret, path)
265 }
266 return ret
267 }
268
269 if path != "" {
270 path += "."
271 }
272 switch {
273 case ty.IsListType(), ty.IsTupleType(), ty.IsSetType():
274 i := 0
275 for it := val.ElementIterator(); it.Next(); i++ {
276 _, subVal := it.Element()
277 keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%d", path, i))
278 ret = append(ret, keys...)
279 }
280
281 case ty.IsMapType(), ty.IsObjectType():
282 for it := val.ElementIterator(); it.Next(); {
283 subK, subVal := it.Element()
284 keys := newResourceConfigShimmedComputedKeys(subVal, fmt.Sprintf("%s%s", path, subK.AsString()))
285 ret = append(ret, keys...)
286 }
287 }
288
289 return ret
290}
291
158// DeepCopy performs a deep copy of the configuration. This makes it safe 292// DeepCopy performs a deep copy of the configuration. This makes it safe
159// to modify any of the structures that are part of the resource config without 293// to modify any of the structures that are part of the resource config without
160// affecting the original configuration. 294// affecting the original configuration.
@@ -374,6 +508,14 @@ func (c *ResourceConfig) get(
374// refactor is complete. 508// refactor is complete.
375func (c *ResourceConfig) interpolateForce() { 509func (c *ResourceConfig) interpolateForce() {
376 if c.raw == nil { 510 if c.raw == nil {
511 // If we don't have a lowercase "raw" but we _do_ have the uppercase
 512 // Raw populated then this indicates that we're receiving a shim
513 // ResourceConfig created by NewResourceConfigShimmed, which is already
514 // fully evaluated and thus this function doesn't need to do anything.
515 if c.Raw != nil {
516 return
517 }
518
377 var err error 519 var err error
378 c.raw, err = config.NewRawConfig(make(map[string]interface{})) 520 c.raw, err = config.NewRawConfig(make(map[string]interface{}))
379 if err != nil { 521 if err != nil {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
index a64f5d8..156ecf5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
@@ -7,8 +7,10 @@ import (
7 "strconv" 7 "strconv"
8 "strings" 8 "strings"
9 9
10 "github.com/hashicorp/terraform/addrs"
11
10 "github.com/hashicorp/terraform/config" 12 "github.com/hashicorp/terraform/config"
11 "github.com/hashicorp/terraform/config/module" 13 "github.com/hashicorp/terraform/configs"
12) 14)
13 15
14// ResourceAddress is a way of identifying an individual resource (or, 16// ResourceAddress is a way of identifying an individual resource (or,
@@ -109,30 +111,47 @@ func (r *ResourceAddress) WholeModuleAddress() *ResourceAddress {
109 } 111 }
110} 112}
111 113
112// MatchesConfig returns true if the receiver matches the given 114// MatchesResourceConfig returns true if the receiver matches the given
113// configuration resource within the given configuration module. 115// configuration resource within the given _static_ module path. Note that
116// the module path in a resource address is a _dynamic_ module path, and
117// multiple dynamic resource paths may map to a single static path if
118// count and for_each are in use on module calls.
114// 119//
115// Since resource configuration blocks represent all of the instances of 120// Since resource configuration blocks represent all of the instances of
116// a multi-instance resource, the index of the address (if any) is not 121// a multi-instance resource, the index of the address (if any) is not
117// considered. 122// considered.
118func (r *ResourceAddress) MatchesConfig(mod *module.Tree, rc *config.Resource) bool { 123func (r *ResourceAddress) MatchesResourceConfig(path addrs.Module, rc *configs.Resource) bool {
119 if r.HasResourceSpec() { 124 if r.HasResourceSpec() {
120 if r.Mode != rc.Mode || r.Type != rc.Type || r.Name != rc.Name { 125 // FIXME: Some ugliness while we are between worlds. Functionality
126 // in "addrs" should eventually replace this ResourceAddress idea
127 // completely, but for now we'll need to translate to the old
128 // way of representing resource modes.
129 switch r.Mode {
130 case config.ManagedResourceMode:
131 if rc.Mode != addrs.ManagedResourceMode {
132 return false
133 }
134 case config.DataResourceMode:
135 if rc.Mode != addrs.DataResourceMode {
136 return false
137 }
138 }
139 if r.Type != rc.Type || r.Name != rc.Name {
121 return false 140 return false
122 } 141 }
123 } 142 }
124 143
125 addrPath := r.Path 144 addrPath := r.Path
126 cfgPath := mod.Path()
127 145
128 // normalize 146 // normalize
129 if len(addrPath) == 0 { 147 if len(addrPath) == 0 {
130 addrPath = nil 148 addrPath = nil
131 } 149 }
132 if len(cfgPath) == 0 { 150 if len(path) == 0 {
133 cfgPath = nil 151 path = nil
134 } 152 }
135 return reflect.DeepEqual(addrPath, cfgPath) 153 rawPath := []string(path)
154 return reflect.DeepEqual(addrPath, rawPath)
136} 155}
137 156
138// stateId returns the ID that this resource should be entered with 157// stateId returns the ID that this resource should be entered with
@@ -270,6 +289,144 @@ func ParseResourceAddressForInstanceDiff(path []string, key string) (*ResourceAd
270 return addr, nil 289 return addr, nil
271} 290}
272 291
292// NewLegacyResourceAddress creates a ResourceAddress from a new-style
293// addrs.AbsResource value.
294//
295// This is provided for shimming purposes so that we can still easily call into
296// older functions that expect the ResourceAddress type.
297func NewLegacyResourceAddress(addr addrs.AbsResource) *ResourceAddress {
298 ret := &ResourceAddress{
299 Type: addr.Resource.Type,
300 Name: addr.Resource.Name,
301 }
302
303 switch addr.Resource.Mode {
304 case addrs.ManagedResourceMode:
305 ret.Mode = config.ManagedResourceMode
306 case addrs.DataResourceMode:
307 ret.Mode = config.DataResourceMode
308 default:
309 panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Mode))
310 }
311
312 path := make([]string, len(addr.Module))
313 for i, step := range addr.Module {
314 if step.InstanceKey != addrs.NoKey {
315 // At the time of writing this can't happen because we don't
 316 // yet generate keyed module instances. This legacy codepath must
317 // be removed before we can support "count" and "for_each" for
318 // modules.
319 panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
320 }
321
322 path[i] = step.Name
323 }
324 ret.Path = path
325 ret.Index = -1
326
327 return ret
328}
329
330// NewLegacyResourceInstanceAddress creates a ResourceAddress from a new-style
331// addrs.AbsResource value.
332//
333// This is provided for shimming purposes so that we can still easily call into
334// older functions that expect the ResourceAddress type.
335func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceAddress {
336 ret := &ResourceAddress{
337 Type: addr.Resource.Resource.Type,
338 Name: addr.Resource.Resource.Name,
339 }
340
341 switch addr.Resource.Resource.Mode {
342 case addrs.ManagedResourceMode:
343 ret.Mode = config.ManagedResourceMode
344 case addrs.DataResourceMode:
345 ret.Mode = config.DataResourceMode
346 default:
347 panic(fmt.Errorf("cannot shim %s to legacy config.ResourceMode value", addr.Resource.Resource.Mode))
348 }
349
350 path := make([]string, len(addr.Module))
351 for i, step := range addr.Module {
352 if step.InstanceKey != addrs.NoKey {
353 // At the time of writing this can't happen because we don't
 354 // yet generate keyed module instances. This legacy codepath must
355 // be removed before we can support "count" and "for_each" for
356 // modules.
357 panic(fmt.Errorf("cannot shim module instance step with key %#v to legacy ResourceAddress.Path", step.InstanceKey))
358 }
359
360 path[i] = step.Name
361 }
362 ret.Path = path
363
364 if addr.Resource.Key == addrs.NoKey {
365 ret.Index = -1
366 } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok {
367 ret.Index = int(ik)
368 } else {
369 panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key))
370 }
371
372 return ret
373}
374
375// AbsResourceInstanceAddr converts the receiver, a legacy resource address, to
376// the new resource address type addrs.AbsResourceInstance.
377//
378// This method can be used only on an address that has a resource specification.
379// It will panic if called on a module-path-only ResourceAddress. Use
380// method HasResourceSpec to check before calling, in contexts where it is
381// unclear.
382//
383// addrs.AbsResourceInstance does not represent the "tainted" and "deposed"
384// states, and so if these are present on the receiver then they are discarded.
385//
386// This is provided for shimming purposes so that we can easily adapt functions
387// that are returning the legacy ResourceAddress type, for situations where
388// the new type is required.
389func (addr *ResourceAddress) AbsResourceInstanceAddr() addrs.AbsResourceInstance {
390 if !addr.HasResourceSpec() {
391 panic("AbsResourceInstanceAddr called on ResourceAddress with no resource spec")
392 }
393
394 ret := addrs.AbsResourceInstance{
395 Module: addr.ModuleInstanceAddr(),
396 Resource: addrs.ResourceInstance{
397 Resource: addrs.Resource{
398 Type: addr.Type,
399 Name: addr.Name,
400 },
401 },
402 }
403
404 switch addr.Mode {
405 case config.ManagedResourceMode:
406 ret.Resource.Resource.Mode = addrs.ManagedResourceMode
407 case config.DataResourceMode:
408 ret.Resource.Resource.Mode = addrs.DataResourceMode
409 default:
410 panic(fmt.Errorf("cannot shim %s to addrs.ResourceMode value", addr.Mode))
411 }
412
413 if addr.Index != -1 {
414 ret.Resource.Key = addrs.IntKey(addr.Index)
415 }
416
417 return ret
418}
419
420// ModuleInstanceAddr returns the module path portion of the receiver as a
421// addrs.ModuleInstance value.
422func (addr *ResourceAddress) ModuleInstanceAddr() addrs.ModuleInstance {
423 path := make(addrs.ModuleInstance, len(addr.Path))
424 for i, name := range addr.Path {
425 path[i] = addrs.ModuleInstanceStep{Name: name}
426 }
427 return path
428}
429
273// Contains returns true if and only if the given node is contained within 430// Contains returns true if and only if the given node is contained within
274// the receiver. 431// the receiver.
275// 432//
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
index 93fd14f..3455ad8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
@@ -3,8 +3,10 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 multierror "github.com/hashicorp/go-multierror" 6 "github.com/hashicorp/terraform/tfdiags"
7
7 "github.com/hashicorp/terraform/plugin/discovery" 8 "github.com/hashicorp/terraform/plugin/discovery"
9 "github.com/hashicorp/terraform/providers"
8) 10)
9 11
10// ResourceProvider is an interface that must be implemented by any 12// ResourceProvider is an interface that must be implemented by any
@@ -30,13 +32,12 @@ type ResourceProvider interface {
30 // resource or data source has the SchemaAvailable flag set. 32 // resource or data source has the SchemaAvailable flag set.
31 GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error) 33 GetSchema(*ProviderSchemaRequest) (*ProviderSchema, error)
32 34
33 // Input is called to ask the provider to ask the user for input 35 // Input was used prior to v0.12 to ask the provider to prompt the user
34 // for completing the configuration if necesarry. 36 // for input to complete the configuration.
35 // 37 //
36 // This may or may not be called, so resource provider writers shouldn't 38 // From v0.12 onwards this method is never called because Terraform Core
37 // rely on this being available to set some default values for validate 39 // is able to handle the necessary input logic itself based on the
38 // later. Example of a situation where this wouldn't be called is if 40 // schema returned from GetSchema.
39 // the user is not using a TTY.
40 Input(UIInput, *ResourceConfig) (*ResourceConfig, error) 41 Input(UIInput, *ResourceConfig) (*ResourceConfig, error)
41 42
42 // Validate is called once at the beginning with the raw configuration 43 // Validate is called once at the beginning with the raw configuration
@@ -170,18 +171,6 @@ type ResourceProvider interface {
170 ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error) 171 ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
171} 172}
172 173
173// ResourceProviderError may be returned when creating a Context if the
174// required providers cannot be satisfied. This error can then be used to
175// format a more useful message for the user.
176type ResourceProviderError struct {
177 Errors []error
178}
179
180func (e *ResourceProviderError) Error() string {
181 // use multierror to format the default output
182 return multierror.Append(nil, e.Errors...).Error()
183}
184
185// ResourceProviderCloser is an interface that providers that can close 174// ResourceProviderCloser is an interface that providers that can close
186// connections that aren't needed anymore must implement. 175// connections that aren't needed anymore must implement.
187type ResourceProviderCloser interface { 176type ResourceProviderCloser interface {
@@ -296,13 +285,35 @@ func ProviderHasDataSource(p ResourceProvider, n string) bool {
296// This should be called only with configurations that have passed calls 285// This should be called only with configurations that have passed calls
297// to config.Validate(), which ensures that all of the given version 286// to config.Validate(), which ensures that all of the given version
298// constraints are valid. It will panic if any invalid constraints are present. 287// constraints are valid. It will panic if any invalid constraints are present.
299func resourceProviderFactories(resolver ResourceProviderResolver, reqd discovery.PluginRequirements) (map[string]ResourceProviderFactory, error) { 288func resourceProviderFactories(resolver providers.Resolver, reqd discovery.PluginRequirements) (map[string]providers.Factory, tfdiags.Diagnostics) {
289 var diags tfdiags.Diagnostics
300 ret, errs := resolver.ResolveProviders(reqd) 290 ret, errs := resolver.ResolveProviders(reqd)
301 if errs != nil { 291 if errs != nil {
302 return nil, &ResourceProviderError{ 292 diags = diags.Append(
303 Errors: errs, 293 tfdiags.Sourceless(tfdiags.Error,
294 "Could not satisfy plugin requirements",
295 errPluginInit,
296 ),
297 )
298
299 for _, err := range errs {
300 diags = diags.Append(err)
304 } 301 }
302
303 return nil, diags
305 } 304 }
306 305
307 return ret, nil 306 return ret, nil
308} 307}
308
309const errPluginInit = `
310Plugin reinitialization required. Please run "terraform init".
311
312Plugins are external binaries that Terraform uses to access and manipulate
313resources. The configuration provided requires plugins which can't be located,
314don't satisfy the version constraints, or are otherwise incompatible.
315
316Terraform automatically discovers provider requirements from your
317configuration, including providers used in child modules. To see the
318requirements and constraints from each module, run "terraform providers".
319`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
index 361ec1e..2743dd7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
@@ -1,9 +1,21 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/hashicorp/terraform/configs/configschema"
5 "github.com/hashicorp/terraform/provisioners"
6)
7
3// ResourceProvisioner is an interface that must be implemented by any 8// ResourceProvisioner is an interface that must be implemented by any
4// resource provisioner: the thing that initializes resources in 9// resource provisioner: the thing that initializes resources in
5// a Terraform configuration. 10// a Terraform configuration.
6type ResourceProvisioner interface { 11type ResourceProvisioner interface {
12 // GetConfigSchema returns the schema for the provisioner type's main
13 // configuration block. This is called prior to Validate to enable some
14 // basic structural validation to be performed automatically and to allow
15 // the configuration to be properly extracted from potentially-ambiguous
16 // configuration file formats.
17 GetConfigSchema() (*configschema.Block, error)
18
7 // Validate is called once at the beginning with the raw 19 // Validate is called once at the beginning with the raw
8 // configuration (no interpolation done) and can return a list of warnings 20 // configuration (no interpolation done) and can return a list of warnings
9 // and/or errors. 21 // and/or errors.
@@ -52,3 +64,7 @@ type ResourceProvisionerCloser interface {
52// ResourceProvisionerFactory is a function type that creates a new instance 64// ResourceProvisionerFactory is a function type that creates a new instance
53// of a resource provisioner. 65// of a resource provisioner.
54type ResourceProvisionerFactory func() (ResourceProvisioner, error) 66type ResourceProvisionerFactory func() (ResourceProvisioner, error)
67
68// ProvisionerFactory is a function type that creates a new instance
69// of a provisioners.Interface.
70type ProvisionerFactory = provisioners.Factory
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
index f471a51..7b88cf7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
@@ -1,6 +1,10 @@
1package terraform 1package terraform
2 2
3import "sync" 3import (
4 "sync"
5
6 "github.com/hashicorp/terraform/configs/configschema"
7)
4 8
5// MockResourceProvisioner implements ResourceProvisioner but mocks out all the 9// MockResourceProvisioner implements ResourceProvisioner but mocks out all the
6// calls for testing purposes. 10// calls for testing purposes.
@@ -9,6 +13,10 @@ type MockResourceProvisioner struct {
9 // Anything you want, in case you need to store extra data with the mock. 13 // Anything you want, in case you need to store extra data with the mock.
10 Meta interface{} 14 Meta interface{}
11 15
16 GetConfigSchemaCalled bool
17 GetConfigSchemaReturnSchema *configschema.Block
18 GetConfigSchemaReturnError error
19
12 ApplyCalled bool 20 ApplyCalled bool
13 ApplyOutput UIOutput 21 ApplyOutput UIOutput
14 ApplyState *InstanceState 22 ApplyState *InstanceState
@@ -27,6 +35,13 @@ type MockResourceProvisioner struct {
27 StopReturnError error 35 StopReturnError error
28} 36}
29 37
38var _ ResourceProvisioner = (*MockResourceProvisioner)(nil)
39
40func (p *MockResourceProvisioner) GetConfigSchema() (*configschema.Block, error) {
41 p.GetConfigSchemaCalled = true
42 return p.GetConfigSchemaReturnSchema, p.GetConfigSchemaReturnError
43}
44
30func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { 45func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
31 p.Lock() 46 p.Lock()
32 defer p.Unlock() 47 defer p.Unlock()
diff --git a/vendor/github.com/hashicorp/terraform/terraform/schemas.go b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
index ec46efc..62991c8 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/schemas.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/schemas.go
@@ -1,18 +1,239 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/configschema" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/configs"
9 "github.com/hashicorp/terraform/configs/configschema"
10 "github.com/hashicorp/terraform/providers"
11 "github.com/hashicorp/terraform/states"
12 "github.com/hashicorp/terraform/tfdiags"
5) 13)
6 14
15// Schemas is a container for various kinds of schema that Terraform needs
16// during processing.
7type Schemas struct { 17type Schemas struct {
8 Providers ProviderSchemas 18 Providers map[string]*ProviderSchema
19 Provisioners map[string]*configschema.Block
20}
21
22// ProviderSchema returns the entire ProviderSchema object that was produced
23// by the plugin for the given provider, or nil if no such schema is available.
24//
25// It's usually better to go use the more precise methods offered by type
26// Schemas to handle this detail automatically.
27func (ss *Schemas) ProviderSchema(typeName string) *ProviderSchema {
28 if ss.Providers == nil {
29 return nil
30 }
31 return ss.Providers[typeName]
32}
33
34// ProviderConfig returns the schema for the provider configuration of the
35// given provider type, or nil if no such schema is available.
36func (ss *Schemas) ProviderConfig(typeName string) *configschema.Block {
37 ps := ss.ProviderSchema(typeName)
38 if ps == nil {
39 return nil
40 }
41 return ps.Provider
42}
43
44// ResourceTypeConfig returns the schema for the configuration of a given
45// resource type belonging to a given provider type, or nil of no such
46// schema is available.
47//
48// In many cases the provider type is inferrable from the resource type name,
49// but this is not always true because users can override the provider for
50// a resource using the "provider" meta-argument. Therefore it's important to
51// always pass the correct provider name, even though it many cases it feels
52// redundant.
53func (ss *Schemas) ResourceTypeConfig(providerType string, resourceMode addrs.ResourceMode, resourceType string) (block *configschema.Block, schemaVersion uint64) {
54 ps := ss.ProviderSchema(providerType)
55 if ps == nil || ps.ResourceTypes == nil {
56 return nil, 0
57 }
58 return ps.SchemaForResourceType(resourceMode, resourceType)
59}
60
61// ProvisionerConfig returns the schema for the configuration of a given
62// provisioner, or nil of no such schema is available.
63func (ss *Schemas) ProvisionerConfig(name string) *configschema.Block {
64 return ss.Provisioners[name]
9} 65}
10 66
11// ProviderSchemas is a map from provider names to provider schemas. 67// LoadSchemas searches the given configuration, state and plan (any of which
68// may be nil) for constructs that have an associated schema, requests the
69// necessary schemas from the given component factory (which must _not_ be nil),
70// and returns a single object representing all of the necessary schemas.
12// 71//
13// The names in this map are the direct plugin name (e.g. "aws") rather than 72// If an error is returned, it may be a wrapped tfdiags.Diagnostics describing
14// any alias name (e.g. "aws.foo"), since. 73// errors across multiple separate objects. Errors here will usually indicate
15type ProviderSchemas map[string]*ProviderSchema 74// either misbehavior on the part of one of the providers or of the provider
75// protocol itself. When returned with errors, the returned schemas object is
76// still valid but may be incomplete.
77func LoadSchemas(config *configs.Config, state *states.State, components contextComponentFactory) (*Schemas, error) {
78 schemas := &Schemas{
79 Providers: map[string]*ProviderSchema{},
80 Provisioners: map[string]*configschema.Block{},
81 }
82 var diags tfdiags.Diagnostics
83
84 newDiags := loadProviderSchemas(schemas.Providers, config, state, components)
85 diags = diags.Append(newDiags)
86 newDiags = loadProvisionerSchemas(schemas.Provisioners, config, components)
87 diags = diags.Append(newDiags)
88
89 return schemas, diags.Err()
90}
91
92func loadProviderSchemas(schemas map[string]*ProviderSchema, config *configs.Config, state *states.State, components contextComponentFactory) tfdiags.Diagnostics {
93 var diags tfdiags.Diagnostics
94
95 ensure := func(typeName string) {
96 if _, exists := schemas[typeName]; exists {
97 return
98 }
99
100 log.Printf("[TRACE] LoadSchemas: retrieving schema for provider type %q", typeName)
101 provider, err := components.ResourceProvider(typeName, "early/"+typeName)
102 if err != nil {
103 // We'll put a stub in the map so we won't re-attempt this on
104 // future calls.
105 schemas[typeName] = &ProviderSchema{}
106 diags = diags.Append(
107 fmt.Errorf("Failed to instantiate provider %q to obtain schema: %s", typeName, err),
108 )
109 return
110 }
111 defer func() {
112 provider.Close()
113 }()
114
115 resp := provider.GetSchema()
116 if resp.Diagnostics.HasErrors() {
117 // We'll put a stub in the map so we won't re-attempt this on
118 // future calls.
119 schemas[typeName] = &ProviderSchema{}
120 diags = diags.Append(
121 fmt.Errorf("Failed to retrieve schema from provider %q: %s", typeName, resp.Diagnostics.Err()),
122 )
123 return
124 }
125
126 s := &ProviderSchema{
127 Provider: resp.Provider.Block,
128 ResourceTypes: make(map[string]*configschema.Block),
129 DataSources: make(map[string]*configschema.Block),
130
131 ResourceTypeSchemaVersions: make(map[string]uint64),
132 }
133
134 if resp.Provider.Version < 0 {
135 // We're not using the version numbers here yet, but we'll check
136 // for validity anyway in case we start using them in future.
137 diags = diags.Append(
138 fmt.Errorf("invalid negative schema version provider configuration for provider %q", typeName),
139 )
140 }
141
142 for t, r := range resp.ResourceTypes {
143 s.ResourceTypes[t] = r.Block
144 s.ResourceTypeSchemaVersions[t] = uint64(r.Version)
145 if r.Version < 0 {
146 diags = diags.Append(
147 fmt.Errorf("invalid negative schema version for resource type %s in provider %q", t, typeName),
148 )
149 }
150 }
151
152 for t, d := range resp.DataSources {
153 s.DataSources[t] = d.Block
154 if d.Version < 0 {
155 // We're not using the version numbers here yet, but we'll check
156 // for validity anyway in case we start using them in future.
157 diags = diags.Append(
158 fmt.Errorf("invalid negative schema version for data source %s in provider %q", t, typeName),
159 )
160 }
161 }
162
163 schemas[typeName] = s
164 }
165
166 if config != nil {
167 for _, typeName := range config.ProviderTypes() {
168 ensure(typeName)
169 }
170 }
171
172 if state != nil {
173 needed := providers.AddressedTypesAbs(state.ProviderAddrs())
174 for _, typeName := range needed {
175 ensure(typeName)
176 }
177 }
178
179 return diags
180}
181
182func loadProvisionerSchemas(schemas map[string]*configschema.Block, config *configs.Config, components contextComponentFactory) tfdiags.Diagnostics {
183 var diags tfdiags.Diagnostics
184
185 ensure := func(name string) {
186 if _, exists := schemas[name]; exists {
187 return
188 }
189
190 log.Printf("[TRACE] LoadSchemas: retrieving schema for provisioner %q", name)
191 provisioner, err := components.ResourceProvisioner(name, "early/"+name)
192 if err != nil {
193 // We'll put a stub in the map so we won't re-attempt this on
194 // future calls.
195 schemas[name] = &configschema.Block{}
196 diags = diags.Append(
197 fmt.Errorf("Failed to instantiate provisioner %q to obtain schema: %s", name, err),
198 )
199 return
200 }
201 defer func() {
202 if closer, ok := provisioner.(ResourceProvisionerCloser); ok {
203 closer.Close()
204 }
205 }()
206
207 resp := provisioner.GetSchema()
208 if resp.Diagnostics.HasErrors() {
209 // We'll put a stub in the map so we won't re-attempt this on
210 // future calls.
211 schemas[name] = &configschema.Block{}
212 diags = diags.Append(
213 fmt.Errorf("Failed to retrieve schema from provisioner %q: %s", name, resp.Diagnostics.Err()),
214 )
215 return
216 }
217
218 schemas[name] = resp.Provisioner
219 }
220
221 if config != nil {
222 for _, rc := range config.Module.ManagedResources {
223 for _, pc := range rc.Managed.Provisioners {
224 ensure(pc.Type)
225 }
226 }
227
228 // Must also visit our child modules, recursively.
229 for _, cc := range config.Children {
230 childDiags := loadProvisionerSchemas(schemas, cc, components)
231 diags = diags.Append(childDiags)
232 }
233 }
234
235 return diags
236}
16 237
17// ProviderSchema represents the schema for a provider's own configuration 238// ProviderSchema represents the schema for a provider's own configuration
18// and the configuration for some or all of its resources and data sources. 239// and the configuration for some or all of its resources and data sources.
@@ -24,6 +245,29 @@ type ProviderSchema struct {
24 Provider *configschema.Block 245 Provider *configschema.Block
25 ResourceTypes map[string]*configschema.Block 246 ResourceTypes map[string]*configschema.Block
26 DataSources map[string]*configschema.Block 247 DataSources map[string]*configschema.Block
248
249 ResourceTypeSchemaVersions map[string]uint64
250}
251
252// SchemaForResourceType attempts to find a schema for the given mode and type.
253// Returns nil if no such schema is available.
254func (ps *ProviderSchema) SchemaForResourceType(mode addrs.ResourceMode, typeName string) (schema *configschema.Block, version uint64) {
255 switch mode {
256 case addrs.ManagedResourceMode:
257 return ps.ResourceTypes[typeName], ps.ResourceTypeSchemaVersions[typeName]
258 case addrs.DataResourceMode:
259 // Data resources don't have schema versions right now, since state is discarded for each refresh
260 return ps.DataSources[typeName], 0
261 default:
262 // Shouldn't happen, because the above cases are comprehensive.
263 return nil, 0
264 }
265}
266
267// SchemaForResourceAddr attempts to find a schema for the mode and type from
268// the given resource address. Returns nil if no such schema is available.
269func (ps *ProviderSchema) SchemaForResourceAddr(addr addrs.Resource) (schema *configschema.Block, version uint64) {
270 return ps.SchemaForResourceType(addr.Mode, addr.Type)
27} 271}
28 272
29// ProviderSchemaRequest is used to describe to a ResourceProvider which 273// ProviderSchemaRequest is used to describe to a ResourceProvider which
diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
deleted file mode 100644
index 20f1d8a..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/semantics.go
+++ /dev/null
@@ -1,132 +0,0 @@
1package terraform
2
3import (
4 "fmt"
5 "strings"
6
7 "github.com/hashicorp/go-multierror"
8 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag"
10)
11
12// GraphSemanticChecker is the interface that semantic checks across
13// the entire Terraform graph implement.
14//
15// The graph should NOT be modified by the semantic checker.
16type GraphSemanticChecker interface {
17 Check(*dag.Graph) error
18}
19
20// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker
21// that runs a list of SemanticCheckers against the vertices of the graph
22// in no specified order.
23type UnorderedSemanticCheckRunner struct {
24 Checks []SemanticChecker
25}
26
27func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error {
28 var err error
29 for _, v := range g.Vertices() {
30 for _, check := range sc.Checks {
31 if e := check.Check(g, v); e != nil {
32 err = multierror.Append(err, e)
33 }
34 }
35 }
36
37 return err
38}
39
40// SemanticChecker is the interface that semantic checks across the
41// Terraform graph implement. Errors are accumulated. Even after an error
42// is returned, child vertices in the graph will still be visited.
43//
44// The graph should NOT be modified by the semantic checker.
45//
46// The order in which vertices are visited is left unspecified, so the
47// semantic checks should not rely on that.
48type SemanticChecker interface {
49 Check(*dag.Graph, dag.Vertex) error
50}
51
52// smcUserVariables does all the semantic checks to verify that the
53// variables given satisfy the configuration itself.
54func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
55 var errs []error
56
57 cvs := make(map[string]*config.Variable)
58 for _, v := range c.Variables {
59 cvs[v.Name] = v
60 }
61
62 // Check that all required variables are present
63 required := make(map[string]struct{})
64 for _, v := range c.Variables {
65 if v.Required() {
66 required[v.Name] = struct{}{}
67 }
68 }
69 for k, _ := range vs {
70 delete(required, k)
71 }
72 if len(required) > 0 {
73 for k, _ := range required {
74 errs = append(errs, fmt.Errorf(
75 "Required variable not set: %s", k))
76 }
77 }
78
79 // Check that types match up
80 for name, proposedValue := range vs {
81 // Check for "map.key" fields. These stopped working with Terraform
82 // 0.7 but we do this to surface a better error message informing
83 // the user what happened.
84 if idx := strings.Index(name, "."); idx > 0 {
85 key := name[:idx]
86 if _, ok := cvs[key]; ok {
87 errs = append(errs, fmt.Errorf(
88 "%s: Overriding map keys with the format `name.key` is no "+
89 "longer allowed. You may still override keys by setting "+
90 "`name = { key = value }`. The maps will be merged. This "+
91 "behavior appeared in 0.7.0.", name))
92 continue
93 }
94 }
95
96 schema, ok := cvs[name]
97 if !ok {
98 continue
99 }
100
101 declaredType := schema.Type()
102
103 switch declaredType {
104 case config.VariableTypeString:
105 switch proposedValue.(type) {
106 case string:
107 continue
108 }
109 case config.VariableTypeMap:
110 switch v := proposedValue.(type) {
111 case map[string]interface{}:
112 continue
113 case []map[string]interface{}:
114 // if we have a list of 1 map, it will get coerced later as needed
115 if len(v) == 1 {
116 continue
117 }
118 }
119 case config.VariableTypeList:
120 switch proposedValue.(type) {
121 case []interface{}:
122 continue
123 }
124 }
125 errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
126 name, declaredType.Printable(), hclTypeName(proposedValue)))
127 }
128
129 // TODO(mitchellh): variables that are unknown
130
131 return errs
132}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
index 04b14a6..092b690 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/state.go
@@ -16,12 +16,23 @@ import (
16 "strings" 16 "strings"
17 "sync" 17 "sync"
18 18
19 "github.com/hashicorp/go-multierror" 19 "github.com/hashicorp/errwrap"
20 "github.com/hashicorp/go-uuid" 20 multierror "github.com/hashicorp/go-multierror"
21 "github.com/hashicorp/go-version" 21 uuid "github.com/hashicorp/go-uuid"
22 "github.com/hashicorp/terraform/config" 22 version "github.com/hashicorp/go-version"
23 "github.com/hashicorp/hcl2/hcl"
24 "github.com/hashicorp/hcl2/hcl/hclsyntax"
23 "github.com/mitchellh/copystructure" 25 "github.com/mitchellh/copystructure"
26 "github.com/zclconf/go-cty/cty"
27 ctyjson "github.com/zclconf/go-cty/cty/json"
24 28
29 "github.com/hashicorp/terraform/addrs"
30 "github.com/hashicorp/terraform/config"
31 "github.com/hashicorp/terraform/config/hcl2shim"
32 "github.com/hashicorp/terraform/configs"
33 "github.com/hashicorp/terraform/configs/configschema"
34 "github.com/hashicorp/terraform/plans"
35 "github.com/hashicorp/terraform/tfdiags"
25 tfversion "github.com/hashicorp/terraform/version" 36 tfversion "github.com/hashicorp/terraform/version"
26) 37)
27 38
@@ -33,26 +44,38 @@ const (
33// rootModulePath is the path of the root module 44// rootModulePath is the path of the root module
34var rootModulePath = []string{"root"} 45var rootModulePath = []string{"root"}
35 46
47// normalizeModulePath transforms a legacy module path (which may or may not
48// have a redundant "root" label at the start of it) into an
49// addrs.ModuleInstance representing the same module.
50//
51// For legacy reasons, different parts of Terraform disagree about whether the
52// root module has the path []string{} or []string{"root"}, and so this
53// function accepts both and trims off the "root". An implication of this is
54// that it's not possible to actually have a module call in the root module
55// that is itself named "root", since that would be ambiguous.
56//
36// normalizeModulePath takes a raw module path and returns a path that 57// normalizeModulePath takes a raw module path and returns a path that
37// has the rootModulePath prepended to it. If I could go back in time I 58// has the rootModulePath prepended to it. If I could go back in time I
38// would've never had a rootModulePath (empty path would be root). We can 59// would've never had a rootModulePath (empty path would be root). We can
39// still fix this but thats a big refactor that my branch doesn't make sense 60// still fix this but thats a big refactor that my branch doesn't make sense
40// for. Instead, this function normalizes paths. 61// for. Instead, this function normalizes paths.
41func normalizeModulePath(p []string) []string { 62func normalizeModulePath(p []string) addrs.ModuleInstance {
42 k := len(rootModulePath) 63 // FIXME: Remove this once everyone is using addrs.ModuleInstance.
43 64
44 // If we already have a root module prefix, we're done 65 if len(p) > 0 && p[0] == "root" {
45 if len(p) >= len(rootModulePath) { 66 p = p[1:]
46 if reflect.DeepEqual(p[:k], rootModulePath) {
47 return p
48 }
49 } 67 }
50 68
51 // None? Prefix it 69 ret := make(addrs.ModuleInstance, len(p))
52 result := make([]string, len(rootModulePath)+len(p)) 70 for i, name := range p {
53 copy(result, rootModulePath) 71 // For now we don't actually support modules with multiple instances
54 copy(result[k:], p) 72 // identified by keys, so we just treat every path element as a
55 return result 73 // step with no key.
74 ret[i] = addrs.ModuleInstanceStep{
75 Name: name,
76 }
77 }
78 return ret
56} 79}
57 80
58// State keeps track of a snapshot state-of-the-world that Terraform 81// State keeps track of a snapshot state-of-the-world that Terraform
@@ -138,21 +161,43 @@ func (s *State) children(path []string) []*ModuleState {
138// 161//
139// This should be the preferred method to add module states since it 162// This should be the preferred method to add module states since it
140// allows us to optimize lookups later as well as control sorting. 163// allows us to optimize lookups later as well as control sorting.
141func (s *State) AddModule(path []string) *ModuleState { 164func (s *State) AddModule(path addrs.ModuleInstance) *ModuleState {
142 s.Lock() 165 s.Lock()
143 defer s.Unlock() 166 defer s.Unlock()
144 167
145 return s.addModule(path) 168 return s.addModule(path)
146} 169}
147 170
148func (s *State) addModule(path []string) *ModuleState { 171func (s *State) addModule(path addrs.ModuleInstance) *ModuleState {
149 // check if the module exists first 172 // check if the module exists first
150 m := s.moduleByPath(path) 173 m := s.moduleByPath(path)
151 if m != nil { 174 if m != nil {
152 return m 175 return m
153 } 176 }
154 177
155 m = &ModuleState{Path: path} 178 // Lower the new-style address into a legacy-style address.
179 // This requires that none of the steps have instance keys, which is
180 // true for all addresses at the time of implementing this because
181 // "count" and "for_each" are not yet implemented for modules.
182 // For the purposes of state, the legacy address format also includes
183 // a redundant extra prefix element "root". It is important to include
184 // this because the "prune" method will remove any module that has a
185 // path length less than one, and other parts of the state code will
186 // trim off the first element indiscriminately.
187 legacyPath := make([]string, len(path)+1)
188 legacyPath[0] = "root"
189 for i, step := range path {
190 if step.InstanceKey != addrs.NoKey {
191 // FIXME: Once the rest of Terraform is ready to use count and
192 // for_each, remove all of this and just write the addrs.ModuleInstance
193 // value itself into the ModuleState.
194 panic("state cannot represent modules with count or for_each keys")
195 }
196
197 legacyPath[i+1] = step.Name
198 }
199
200 m = &ModuleState{Path: legacyPath}
156 m.init() 201 m.init()
157 s.Modules = append(s.Modules, m) 202 s.Modules = append(s.Modules, m)
158 s.sort() 203 s.sort()
@@ -162,7 +207,7 @@ func (s *State) addModule(path []string) *ModuleState {
162// ModuleByPath is used to lookup the module state for the given path. 207// ModuleByPath is used to lookup the module state for the given path.
163// This should be the preferred lookup mechanism as it allows for future 208// This should be the preferred lookup mechanism as it allows for future
164// lookup optimizations. 209// lookup optimizations.
165func (s *State) ModuleByPath(path []string) *ModuleState { 210func (s *State) ModuleByPath(path addrs.ModuleInstance) *ModuleState {
166 if s == nil { 211 if s == nil {
167 return nil 212 return nil
168 } 213 }
@@ -172,7 +217,7 @@ func (s *State) ModuleByPath(path []string) *ModuleState {
172 return s.moduleByPath(path) 217 return s.moduleByPath(path)
173} 218}
174 219
175func (s *State) moduleByPath(path []string) *ModuleState { 220func (s *State) moduleByPath(path addrs.ModuleInstance) *ModuleState {
176 for _, mod := range s.Modules { 221 for _, mod := range s.Modules {
177 if mod == nil { 222 if mod == nil {
178 continue 223 continue
@@ -180,97 +225,14 @@ func (s *State) moduleByPath(path []string) *ModuleState {
180 if mod.Path == nil { 225 if mod.Path == nil {
181 panic("missing module path") 226 panic("missing module path")
182 } 227 }
183 if reflect.DeepEqual(mod.Path, path) { 228 modPath := normalizeModulePath(mod.Path)
229 if modPath.String() == path.String() {
184 return mod 230 return mod
185 } 231 }
186 } 232 }
187 return nil 233 return nil
188} 234}
189 235
190// ModuleOrphans returns all the module orphans in this state by
191// returning their full paths. These paths can be used with ModuleByPath
192// to return the actual state.
193func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
194 s.Lock()
195 defer s.Unlock()
196
197 return s.moduleOrphans(path, c)
198
199}
200
201func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
202 // direct keeps track of what direct children we have both in our config
203 // and in our state. childrenKeys keeps track of what isn't an orphan.
204 direct := make(map[string]struct{})
205 childrenKeys := make(map[string]struct{})
206 if c != nil {
207 for _, m := range c.Modules {
208 childrenKeys[m.Name] = struct{}{}
209 direct[m.Name] = struct{}{}
210 }
211 }
212
213 // Go over the direct children and find any that aren't in our keys.
214 var orphans [][]string
215 for _, m := range s.children(path) {
216 key := m.Path[len(m.Path)-1]
217
218 // Record that we found this key as a direct child. We use this
219 // later to find orphan nested modules.
220 direct[key] = struct{}{}
221
222 // If we have a direct child still in our config, it is not an orphan
223 if _, ok := childrenKeys[key]; ok {
224 continue
225 }
226
227 orphans = append(orphans, m.Path)
228 }
229
230 // Find the orphans that are nested...
231 for _, m := range s.Modules {
232 if m == nil {
233 continue
234 }
235
236 // We only want modules that are at least grandchildren
237 if len(m.Path) < len(path)+2 {
238 continue
239 }
240
241 // If it isn't part of our tree, continue
242 if !reflect.DeepEqual(path, m.Path[:len(path)]) {
243 continue
244 }
245
246 // If we have the direct child, then just skip it.
247 key := m.Path[len(path)]
248 if _, ok := direct[key]; ok {
249 continue
250 }
251
252 orphanPath := m.Path[:len(path)+1]
253
254 // Don't double-add if we've already added this orphan (which can happen if
255 // there are multiple nested sub-modules that get orphaned together).
256 alreadyAdded := false
257 for _, o := range orphans {
258 if reflect.DeepEqual(o, orphanPath) {
259 alreadyAdded = true
260 break
261 }
262 }
263 if alreadyAdded {
264 continue
265 }
266
267 // Add this orphan
268 orphans = append(orphans, orphanPath)
269 }
270
271 return orphans
272}
273
274// Empty returns true if the state is empty. 236// Empty returns true if the state is empty.
275func (s *State) Empty() bool { 237func (s *State) Empty() bool {
276 if s == nil { 238 if s == nil {
@@ -443,7 +405,7 @@ func (s *State) removeModule(path []string, v *ModuleState) {
443 405
444func (s *State) removeResource(path []string, v *ResourceState) { 406func (s *State) removeResource(path []string, v *ResourceState) {
445 // Get the module this resource lives in. If it doesn't exist, we're done. 407 // Get the module this resource lives in. If it doesn't exist, we're done.
446 mod := s.moduleByPath(path) 408 mod := s.moduleByPath(normalizeModulePath(path))
447 if mod == nil { 409 if mod == nil {
448 return 410 return
449 } 411 }
@@ -487,7 +449,7 @@ func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState
487 449
488// RootModule returns the ModuleState for the root module 450// RootModule returns the ModuleState for the root module
489func (s *State) RootModule() *ModuleState { 451func (s *State) RootModule() *ModuleState {
490 root := s.ModuleByPath(rootModulePath) 452 root := s.ModuleByPath(addrs.RootModuleInstance)
491 if root == nil { 453 if root == nil {
492 panic("missing root module") 454 panic("missing root module")
493 } 455 }
@@ -522,7 +484,7 @@ func (s *State) equal(other *State) bool {
522 } 484 }
523 for _, m := range s.Modules { 485 for _, m := range s.Modules {
524 // This isn't very optimal currently but works. 486 // This isn't very optimal currently but works.
525 otherM := other.moduleByPath(m.Path) 487 otherM := other.moduleByPath(normalizeModulePath(m.Path))
526 if otherM == nil { 488 if otherM == nil {
527 return false 489 return false
528 } 490 }
@@ -681,8 +643,8 @@ func (s *State) init() {
681 s.Version = StateVersion 643 s.Version = StateVersion
682 } 644 }
683 645
684 if s.moduleByPath(rootModulePath) == nil { 646 if s.moduleByPath(addrs.RootModuleInstance) == nil {
685 s.addModule(rootModulePath) 647 s.addModule(addrs.RootModuleInstance)
686 } 648 }
687 s.ensureHasLineage() 649 s.ensureHasLineage()
688 650
@@ -811,13 +773,9 @@ func (s *State) String() string {
811 773
812// BackendState stores the configuration to connect to a remote backend. 774// BackendState stores the configuration to connect to a remote backend.
813type BackendState struct { 775type BackendState struct {
814 Type string `json:"type"` // Backend type 776 Type string `json:"type"` // Backend type
815 Config map[string]interface{} `json:"config"` // Backend raw config 777 ConfigRaw json.RawMessage `json:"config"` // Backend raw config
816 778 Hash uint64 `json:"hash"` // Hash of portion of configuration from config files
817 // Hash is the hash code to uniquely identify the original source
818 // configuration. We use this to detect when there is a change in
819 // configuration even when "type" isn't changed.
820 Hash uint64 `json:"hash"`
821} 779}
822 780
823// Empty returns true if BackendState has no state. 781// Empty returns true if BackendState has no state.
@@ -825,25 +783,50 @@ func (s *BackendState) Empty() bool {
825 return s == nil || s.Type == "" 783 return s == nil || s.Type == ""
826} 784}
827 785
828// Rehash returns a unique content hash for this backend's configuration 786// Config decodes the type-specific configuration object using the provided
829// as a uint64 value. 787// schema and returns the result as a cty.Value.
830// The Hash stored in the backend state needs to match the config itself, but 788//
831// we need to compare the backend config after it has been combined with all 789// An error is returned if the stored configuration does not conform to the
832// options. 790// given schema.
833// This function must match the implementation used by config.Backend. 791func (s *BackendState) Config(schema *configschema.Block) (cty.Value, error) {
834func (s *BackendState) Rehash() uint64 { 792 ty := schema.ImpliedType()
835 if s == nil { 793 if s == nil {
836 return 0 794 return cty.NullVal(ty), nil
837 } 795 }
796 return ctyjson.Unmarshal(s.ConfigRaw, ty)
797}
838 798
839 cfg := config.Backend{ 799// SetConfig replaces (in-place) the type-specific configuration object using
840 Type: s.Type, 800// the provided value and associated schema.
841 RawConfig: &config.RawConfig{ 801//
842 Raw: s.Config, 802// An error is returned if the given value does not conform to the implied
843 }, 803// type of the schema.
804func (s *BackendState) SetConfig(val cty.Value, schema *configschema.Block) error {
805 ty := schema.ImpliedType()
806 buf, err := ctyjson.Marshal(val, ty)
807 if err != nil {
808 return err
844 } 809 }
810 s.ConfigRaw = buf
811 return nil
812}
845 813
846 return cfg.Rehash() 814// ForPlan produces an alternative representation of the reciever that is
815// suitable for storing in a plan. The current workspace must additionally
816// be provided, to be stored alongside the backend configuration.
817//
818// The backend configuration schema is required in order to properly
819// encode the backend-specific configuration settings.
820func (s *BackendState) ForPlan(schema *configschema.Block, workspaceName string) (*plans.Backend, error) {
821 if s == nil {
822 return nil, nil
823 }
824
825 configVal, err := s.Config(schema)
826 if err != nil {
827 return nil, errwrap.Wrapf("failed to decode backend config: {{err}}", err)
828 }
829 return plans.NewBackend(s.Type, configVal, schema, workspaceName)
847} 830}
848 831
849// RemoteState is used to track the information about a remote 832// RemoteState is used to track the information about a remote
@@ -1089,58 +1072,64 @@ func (m *ModuleState) IsDescendent(other *ModuleState) bool {
1089// Orphans returns a list of keys of resources that are in the State 1072// Orphans returns a list of keys of resources that are in the State
1090// but aren't present in the configuration itself. Hence, these keys 1073// but aren't present in the configuration itself. Hence, these keys
1091// represent the state of resources that are orphans. 1074// represent the state of resources that are orphans.
1092func (m *ModuleState) Orphans(c *config.Config) []string { 1075func (m *ModuleState) Orphans(c *configs.Module) []addrs.ResourceInstance {
1093 m.Lock() 1076 m.Lock()
1094 defer m.Unlock() 1077 defer m.Unlock()
1095 1078
1096 keys := make(map[string]struct{}) 1079 inConfig := make(map[string]struct{})
1097 for k := range m.Resources {
1098 keys[k] = struct{}{}
1099 }
1100
1101 if c != nil { 1080 if c != nil {
1102 for _, r := range c.Resources { 1081 for _, r := range c.ManagedResources {
1103 delete(keys, r.Id()) 1082 inConfig[r.Addr().String()] = struct{}{}
1104 1083 }
1105 for k := range keys { 1084 for _, r := range c.DataResources {
1106 if strings.HasPrefix(k, r.Id()+".") { 1085 inConfig[r.Addr().String()] = struct{}{}
1107 delete(keys, k)
1108 }
1109 }
1110 } 1086 }
1111 } 1087 }
1112 1088
1113 result := make([]string, 0, len(keys)) 1089 var result []addrs.ResourceInstance
1114 for k := range keys { 1090 for k := range m.Resources {
1115 result = append(result, k) 1091 // Since we've not yet updated state to use our new address format,
1116 } 1092 // we need to do some shimming here.
1093 legacyAddr, err := parseResourceAddressInternal(k)
1094 if err != nil {
1095 // Suggests that the user tampered with the state, since we always
1096 // generate valid internal addresses.
1097 log.Printf("ModuleState has invalid resource key %q. Ignoring.", k)
1098 continue
1099 }
1117 1100
1101 addr := legacyAddr.AbsResourceInstanceAddr().Resource
1102 compareKey := addr.Resource.String() // compare by resource address, ignoring instance key
1103 if _, exists := inConfig[compareKey]; !exists {
1104 result = append(result, addr)
1105 }
1106 }
1118 return result 1107 return result
1119} 1108}
1120 1109
1121// RemovedOutputs returns a list of outputs that are in the State but aren't 1110// RemovedOutputs returns a list of outputs that are in the State but aren't
1122// present in the configuration itself. 1111// present in the configuration itself.
1123func (m *ModuleState) RemovedOutputs(c *config.Config) []string { 1112func (s *ModuleState) RemovedOutputs(outputs map[string]*configs.Output) []addrs.OutputValue {
1124 m.Lock() 1113 if outputs == nil {
1125 defer m.Unlock() 1114 // If we got no output map at all then we'll just treat our set of
1126 1115 // configured outputs as empty, since that suggests that they've all
1127 keys := make(map[string]struct{}) 1116 // been removed by removing their containing module.
1128 for k := range m.Outputs { 1117 outputs = make(map[string]*configs.Output)
1129 keys[k] = struct{}{}
1130 } 1118 }
1131 1119
1132 if c != nil { 1120 s.Lock()
1133 for _, o := range c.Outputs { 1121 defer s.Unlock()
1134 delete(keys, o.Name)
1135 }
1136 }
1137 1122
1138 result := make([]string, 0, len(keys)) 1123 var ret []addrs.OutputValue
1139 for k := range keys { 1124 for n := range s.Outputs {
1140 result = append(result, k) 1125 if _, declared := outputs[n]; !declared {
1126 ret = append(ret, addrs.OutputValue{
1127 Name: n,
1128 })
1129 }
1141 } 1130 }
1142 1131
1143 return result 1132 return ret
1144} 1133}
1145 1134
1146// View returns a view with the given resource prefix. 1135// View returns a view with the given resource prefix.
@@ -1543,6 +1532,24 @@ func (s *ResourceState) Untaint() {
1543 } 1532 }
1544} 1533}
1545 1534
1535// ProviderAddr returns the provider address for the receiver, by parsing the
1536// string representation saved in state. An error can be returned if the
1537// value in state is corrupt.
1538func (s *ResourceState) ProviderAddr() (addrs.AbsProviderConfig, error) {
1539 var diags tfdiags.Diagnostics
1540
1541 str := s.Provider
1542 traversal, travDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
1543 diags = diags.Append(travDiags)
1544 if travDiags.HasErrors() {
1545 return addrs.AbsProviderConfig{}, diags.Err()
1546 }
1547
1548 addr, addrDiags := addrs.ParseAbsProviderConfig(traversal)
1549 diags = diags.Append(addrDiags)
1550 return addr, diags.Err()
1551}
1552
1546func (s *ResourceState) init() { 1553func (s *ResourceState) init() {
1547 s.Lock() 1554 s.Lock()
1548 defer s.Unlock() 1555 defer s.Unlock()
@@ -1651,6 +1658,51 @@ func (s *InstanceState) init() {
1651 s.Ephemeral.init() 1658 s.Ephemeral.init()
1652} 1659}
1653 1660
1661// NewInstanceStateShimmedFromValue is a shim method to lower a new-style
1662// object value representing the attributes of an instance object into the
1663// legacy InstanceState representation.
1664//
1665// This is for shimming to old components only and should not be used in new code.
1666func NewInstanceStateShimmedFromValue(state cty.Value, schemaVersion int) *InstanceState {
1667 attrs := hcl2shim.FlatmapValueFromHCL2(state)
1668 return &InstanceState{
1669 ID: attrs["id"],
1670 Attributes: attrs,
1671 Meta: map[string]interface{}{
1672 "schema_version": schemaVersion,
1673 },
1674 }
1675}
1676
1677// AttrsAsObjectValue shims from the legacy InstanceState representation to
1678// a new-style cty object value representation of the state attributes, using
1679// the given type for guidance.
1680//
1681// The given type must be the implied type of the schema of the resource type
1682// of the object whose state is being converted, or the result is undefined.
1683//
1684// This is for shimming from old components only and should not be used in
1685// new code.
1686func (s *InstanceState) AttrsAsObjectValue(ty cty.Type) (cty.Value, error) {
1687 if s == nil {
1688 // if the state is nil, we need to construct a complete cty.Value with
1689 // null attributes, rather than a single cty.NullVal(ty)
1690 s = &InstanceState{}
1691 }
1692
1693 if s.Attributes == nil {
1694 s.Attributes = map[string]string{}
1695 }
1696
1697 // make sure ID is included in the attributes. The InstanceState.ID value
1698 // takes precedence.
1699 if s.ID != "" {
1700 s.Attributes["id"] = s.ID
1701 }
1702
1703 return hcl2shim.HCL2ValueFromFlatmap(s.Attributes, ty)
1704}
1705
1654// Copy all the Fields from another InstanceState 1706// Copy all the Fields from another InstanceState
1655func (s *InstanceState) Set(from *InstanceState) { 1707func (s *InstanceState) Set(from *InstanceState) {
1656 s.Lock() 1708 s.Lock()
@@ -1787,13 +1839,19 @@ func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
1787} 1839}
1788 1840
1789func (s *InstanceState) String() string { 1841func (s *InstanceState) String() string {
1842 notCreated := "<not created>"
1843
1844 if s == nil {
1845 return notCreated
1846 }
1847
1790 s.Lock() 1848 s.Lock()
1791 defer s.Unlock() 1849 defer s.Unlock()
1792 1850
1793 var buf bytes.Buffer 1851 var buf bytes.Buffer
1794 1852
1795 if s == nil || s.ID == "" { 1853 if s.ID == "" {
1796 return "<not created>" 1854 return notCreated
1797 } 1855 }
1798 1856
1799 buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID)) 1857 buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
@@ -2187,19 +2245,6 @@ func (s moduleStateSort) Swap(i, j int) {
2187 s[i], s[j] = s[j], s[i] 2245 s[i], s[j] = s[j], s[i]
2188} 2246}
2189 2247
2190// StateCompatible returns an error if the state is not compatible with the
2191// current version of terraform.
2192func CheckStateVersion(state *State) error {
2193 if state == nil {
2194 return nil
2195 }
2196
2197 if state.FromFutureTerraform() {
2198 return fmt.Errorf(stateInvalidTerraformVersionErr, state.TFVersion)
2199 }
2200 return nil
2201}
2202
2203const stateValidateErrMultiModule = ` 2248const stateValidateErrMultiModule = `
2204Multiple modules with the same path: %s 2249Multiple modules with the same path: %s
2205 2250
@@ -2208,11 +2253,3 @@ in your state file that point to the same module. This will cause Terraform
2208to behave in unexpected and error prone ways and is invalid. Please back up 2253to behave in unexpected and error prone ways and is invalid. Please back up
2209and modify your state file manually to resolve this. 2254and modify your state file manually to resolve this.
2210` 2255`
2211
2212const stateInvalidTerraformVersionErr = `
2213Terraform doesn't allow running any operations against a state
2214that was written by a future Terraform version. The state is
2215reporting it is written by Terraform '%s'
2216
2217Please run at least that version of Terraform to continue.
2218`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
deleted file mode 100644
index 1163730..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/state_add.go
+++ /dev/null
@@ -1,374 +0,0 @@
1package terraform
2
3import "fmt"
4
5// Add adds the item in the state at the given address.
6//
7// The item can be a ModuleState, ResourceState, or InstanceState. Depending
8// on the item type, the address may or may not be valid. For example, a
9// module cannot be moved to a resource address, however a resource can be
10// moved to a module address (it retains the same name, under that resource).
11//
12// The item can also be a []*ModuleState, which is the case for nested
13// modules. In this case, Add will expect the zero-index to be the top-most
14// module to add and will only nest children from there. For semantics, this
15// is equivalent to module => module.
16//
17// The full semantics of Add:
18//
19// ┌───────────────────┬───────────────────┬───────────────────┐
20// │ Module Address │ Resource Address │ Instance Address │
21// ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤
22// │ ModuleState │ ✓ │ x │ x │
23// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
24// │ ResourceState │ ✓ │ ✓ │ maybe* │
25// ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
26// │ Instance State │ ✓ │ ✓ │ ✓ │
27// └─────────────────┴───────────────────┴───────────────────┴───────────────────┘
28//
29// *maybe - Resources can be added at an instance address only if the resource
30// represents a single instance (primary). Example:
31// "aws_instance.foo" can be moved to "aws_instance.bar.tainted"
32//
33func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error {
34 // Parse the address
35
36 toAddr, err := ParseResourceAddress(toAddrRaw)
37 if err != nil {
38 return err
39 }
40
41 // Parse the from address
42 fromAddr, err := ParseResourceAddress(fromAddrRaw)
43 if err != nil {
44 return err
45 }
46
47 // Determine the types
48 from := detectValueAddLoc(raw)
49 to := detectAddrAddLoc(toAddr)
50
51 // Find the function to do this
52 fromMap, ok := stateAddFuncs[from]
53 if !ok {
54 return fmt.Errorf("invalid source to add to state: %T", raw)
55 }
56 f, ok := fromMap[to]
57 if !ok {
58 return fmt.Errorf("invalid destination: %s (%d)", toAddr, to)
59 }
60
61 // Call the migrator
62 if err := f(s, fromAddr, toAddr, raw); err != nil {
63 return err
64 }
65
66 // Prune the state
67 s.prune()
68 return nil
69}
70
71func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
72 // raw can be either *ModuleState or []*ModuleState. The former means
73 // we're moving just one module. The latter means we're moving a module
74 // and children.
75 root := raw
76 var rest []*ModuleState
77 if list, ok := raw.([]*ModuleState); ok {
78 // We need at least one item
79 if len(list) == 0 {
80 return fmt.Errorf("module move with no value to: %s", addr)
81 }
82
83 // The first item is always the root
84 root = list[0]
85 if len(list) > 1 {
86 rest = list[1:]
87 }
88 }
89
90 // Get the actual module state
91 src := root.(*ModuleState).deepcopy()
92
93 // If the target module exists, it is an error
94 path := append([]string{"root"}, addr.Path...)
95 if s.ModuleByPath(path) != nil {
96 return fmt.Errorf("module target is not empty: %s", addr)
97 }
98
99 // Create it and copy our outputs and dependencies
100 mod := s.AddModule(path)
101 mod.Outputs = src.Outputs
102 mod.Dependencies = src.Dependencies
103
104 // Go through the resources perform an add for each of those
105 for k, v := range src.Resources {
106 resourceKey, err := ParseResourceStateKey(k)
107 if err != nil {
108 return err
109 }
110
111 // Update the resource address for this
112 addrCopy := *addr
113 addrCopy.Type = resourceKey.Type
114 addrCopy.Name = resourceKey.Name
115 addrCopy.Index = resourceKey.Index
116 addrCopy.Mode = resourceKey.Mode
117
118 // Perform an add
119 if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil {
120 return err
121 }
122 }
123
124 // Add all the children if we have them
125 for _, item := range rest {
126 // If item isn't a descendent of our root, then ignore it
127 if !src.IsDescendent(item) {
128 continue
129 }
130
131 // It is! Strip the leading prefix and attach that to our address
132 extra := item.Path[len(src.Path):]
133 addrCopy := addr.Copy()
134 addrCopy.Path = append(addrCopy.Path, extra...)
135
136 // Add it
137 s.Add(fromAddr.String(), addrCopy.String(), item)
138 }
139
140 return nil
141}
142
143func stateAddFunc_Resource_Module(
144 s *State, from, to *ResourceAddress, raw interface{}) error {
145 // Build the more specific to addr
146 addr := *to
147 addr.Type = from.Type
148 addr.Name = from.Name
149
150 return s.Add(from.String(), addr.String(), raw)
151}
152
153func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
154 // raw can be either *ResourceState or []*ResourceState. The former means
155 // we're moving just one resource. The latter means we're moving a count
156 // of resources.
157 if list, ok := raw.([]*ResourceState); ok {
158 // We need at least one item
159 if len(list) == 0 {
160 return fmt.Errorf("resource move with no value to: %s", addr)
161 }
162
163 // If there is an index, this is an error since we can't assign
164 // a set of resources to a single index
165 if addr.Index >= 0 && len(list) > 1 {
166 return fmt.Errorf(
167 "multiple resources can't be moved to a single index: "+
168 "%s => %s", fromAddr, addr)
169 }
170
171 // Add each with a specific index
172 for i, rs := range list {
173 addrCopy := addr.Copy()
174 addrCopy.Index = i
175
176 if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
177 return err
178 }
179 }
180
181 return nil
182 }
183
184 src := raw.(*ResourceState).deepcopy()
185
186 // Initialize the resource
187 resourceRaw, exists := stateAddInitAddr(s, addr)
188 if exists {
189 return fmt.Errorf("resource exists and not empty: %s", addr)
190 }
191 resource := resourceRaw.(*ResourceState)
192 resource.Type = src.Type
193 resource.Dependencies = src.Dependencies
194 resource.Provider = src.Provider
195
196 // Move the primary
197 if src.Primary != nil {
198 addrCopy := *addr
199 addrCopy.InstanceType = TypePrimary
200 addrCopy.InstanceTypeSet = true
201 if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
202 return err
203 }
204 }
205
206 // Move all deposed
207 if len(src.Deposed) > 0 {
208 resource.Deposed = src.Deposed
209 }
210
211 return nil
212}
213
214func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
215 src := raw.(*InstanceState).DeepCopy()
216
217 // Create the instance
218 instanceRaw, _ := stateAddInitAddr(s, addr)
219 instance := instanceRaw.(*InstanceState)
220
221 // Set it
222 instance.Set(src)
223
224 return nil
225}
226
227func stateAddFunc_Instance_Module(
228 s *State, from, to *ResourceAddress, raw interface{}) error {
229 addr := *to
230 addr.Type = from.Type
231 addr.Name = from.Name
232
233 return s.Add(from.String(), addr.String(), raw)
234}
235
236func stateAddFunc_Instance_Resource(
237 s *State, from, to *ResourceAddress, raw interface{}) error {
238 addr := *to
239 addr.InstanceType = TypePrimary
240 addr.InstanceTypeSet = true
241
242 return s.Add(from.String(), addr.String(), raw)
243}
244
245// stateAddFunc is the type of function for adding an item to a state
246type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error
247
248// stateAddFuncs has the full matrix mapping of the state adders.
249var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc
250
251func init() {
252 stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{
253 stateAddModule: {
254 stateAddModule: stateAddFunc_Module_Module,
255 },
256 stateAddResource: {
257 stateAddModule: stateAddFunc_Resource_Module,
258 stateAddResource: stateAddFunc_Resource_Resource,
259 },
260 stateAddInstance: {
261 stateAddInstance: stateAddFunc_Instance_Instance,
262 stateAddModule: stateAddFunc_Instance_Module,
263 stateAddResource: stateAddFunc_Instance_Resource,
264 },
265 }
266}
267
268// stateAddLoc is an enum to represent the location where state is being
269// moved from/to. We use this for quick lookups in a function map.
270type stateAddLoc uint
271
272const (
273 stateAddInvalid stateAddLoc = iota
274 stateAddModule
275 stateAddResource
276 stateAddInstance
277)
278
279// detectAddrAddLoc detects the state type for the given address. This
280// function is specifically not unit tested since we consider the State.Add
281// functionality to be comprehensive enough to cover this.
282func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc {
283 if addr.Name == "" {
284 return stateAddModule
285 }
286
287 if !addr.InstanceTypeSet {
288 return stateAddResource
289 }
290
291 return stateAddInstance
292}
293
294// detectValueAddLoc determines the stateAddLoc value from the raw value
295// that is some State structure.
296func detectValueAddLoc(raw interface{}) stateAddLoc {
297 switch raw.(type) {
298 case *ModuleState:
299 return stateAddModule
300 case []*ModuleState:
301 return stateAddModule
302 case *ResourceState:
303 return stateAddResource
304 case []*ResourceState:
305 return stateAddResource
306 case *InstanceState:
307 return stateAddInstance
308 default:
309 return stateAddInvalid
310 }
311}
312
313// stateAddInitAddr takes a ResourceAddress and creates the non-existing
314// resources up to that point, returning the empty (or existing) interface
315// at that address.
316func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
317 addType := detectAddrAddLoc(addr)
318
319 // Get the module
320 path := append([]string{"root"}, addr.Path...)
321 exists := true
322 mod := s.ModuleByPath(path)
323 if mod == nil {
324 mod = s.AddModule(path)
325 exists = false
326 }
327 if addType == stateAddModule {
328 return mod, exists
329 }
330
331 // Add the resource
332 resourceKey := (&ResourceStateKey{
333 Name: addr.Name,
334 Type: addr.Type,
335 Index: addr.Index,
336 Mode: addr.Mode,
337 }).String()
338 exists = true
339 resource, ok := mod.Resources[resourceKey]
340 if !ok {
341 resource = &ResourceState{Type: addr.Type}
342 resource.init()
343 mod.Resources[resourceKey] = resource
344 exists = false
345 }
346 if addType == stateAddResource {
347 return resource, exists
348 }
349
350 // Get the instance
351 exists = true
352 instance := &InstanceState{}
353 switch addr.InstanceType {
354 case TypePrimary, TypeTainted:
355 if v := resource.Primary; v != nil {
356 instance = resource.Primary
357 } else {
358 exists = false
359 }
360 case TypeDeposed:
361 idx := addr.Index
362 if addr.Index < 0 {
363 idx = 0
364 }
365 if len(resource.Deposed) > idx {
366 instance = resource.Deposed[idx]
367 } else {
368 resource.Deposed = append(resource.Deposed, instance)
369 exists = false
370 }
371 }
372
373 return instance, exists
374}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
index 0e47f20..fd3f5c7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform.go
@@ -38,13 +38,18 @@ type graphTransformerMulti struct {
38} 38}
39 39
40func (t *graphTransformerMulti) Transform(g *Graph) error { 40func (t *graphTransformerMulti) Transform(g *Graph) error {
41 var lastStepStr string
41 for _, t := range t.Transforms { 42 for _, t := range t.Transforms {
43 log.Printf("[TRACE] (graphTransformerMulti) Executing graph transform %T", t)
42 if err := t.Transform(g); err != nil { 44 if err := t.Transform(g); err != nil {
43 return err 45 return err
44 } 46 }
45 log.Printf( 47 if thisStepStr := g.StringWithNodeTypes(); thisStepStr != lastStepStr {
46 "[TRACE] Graph after step %T:\n\n%s", 48 log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T with new graph:\n%s------", t, thisStepStr)
47 t, g.StringWithNodeTypes()) 49 lastStepStr = thisStepStr
50 } else {
51 log.Printf("[TRACE] (graphTransformerMulti) Completed graph transform %T (no changes)", t)
52 }
48 } 53 }
49 54
50 return nil 55 return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
index 39cf097..897a7e7 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
@@ -1,7 +1,8 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5) 6)
6 7
7// GraphNodeAttachProvider is an interface that must be implemented by nodes 8// GraphNodeAttachProvider is an interface that must be implemented by nodes
@@ -11,8 +12,8 @@ type GraphNodeAttachProvider interface {
11 GraphNodeSubPath 12 GraphNodeSubPath
12 13
13 // ProviderName with no module prefix. Example: "aws". 14 // ProviderName with no module prefix. Example: "aws".
14 ProviderName() string 15 ProviderAddr() addrs.AbsProviderConfig
15 16
16 // Sets the configuration 17 // Sets the configuration
17 AttachProvider(*config.ProviderConfig) 18 AttachProvider(*configs.Provider)
18} 19}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
index f2ee37e..03f8564 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
@@ -1,35 +1,32 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt"
5 "log" 4 "log"
6 5
7 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/dag"
9) 8)
10 9
11// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes 10// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
12// that want resource configurations attached. 11// that want resource configurations attached.
13type GraphNodeAttachResourceConfig interface { 12type GraphNodeAttachResourceConfig interface {
14 // ResourceAddr is the address to the resource 13 GraphNodeResource
15 ResourceAddr() *ResourceAddress
16 14
17 // Sets the configuration 15 // Sets the configuration
18 AttachResourceConfig(*config.Resource) 16 AttachResourceConfig(*configs.Resource)
19} 17}
20 18
21// AttachResourceConfigTransformer goes through the graph and attaches 19// AttachResourceConfigTransformer goes through the graph and attaches
22// resource configuration structures to nodes that implement the interfaces 20// resource configuration structures to nodes that implement
23// above. 21// GraphNodeAttachManagedResourceConfig or GraphNodeAttachDataResourceConfig.
24// 22//
25// The attached configuration structures are directly from the configuration. 23// The attached configuration structures are directly from the configuration.
26// If they're going to be modified, a copy should be made. 24// If they're going to be modified, a copy should be made.
27type AttachResourceConfigTransformer struct { 25type AttachResourceConfigTransformer struct {
28 Module *module.Tree // Module is the root module for the config 26 Config *configs.Config // Config is the root node in the config tree
29} 27}
30 28
31func (t *AttachResourceConfigTransformer) Transform(g *Graph) error { 29func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
32 log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")
33 30
34 // Go through and find GraphNodeAttachResource 31 // Go through and find GraphNodeAttachResource
35 for _, v := range g.Vertices() { 32 for _, v := range g.Vertices() {
@@ -41,36 +38,35 @@ func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
41 38
42 // Determine what we're looking for 39 // Determine what we're looking for
43 addr := arn.ResourceAddr() 40 addr := arn.ResourceAddr()
44 log.Printf(
45 "[TRACE] AttachResourceConfigTransformer: Attach resource "+
46 "config request: %s", addr)
47 41
48 // Get the configuration. 42 // Get the configuration.
49 path := normalizeModulePath(addr.Path) 43 config := t.Config.DescendentForInstance(addr.Module)
50 path = path[1:] 44 if config == nil {
51 tree := t.Module.Child(path) 45 log.Printf("[TRACE] AttachResourceConfigTransformer: %q (%T) has no configuration available", dag.VertexName(v), v)
52 if tree == nil {
53 continue 46 continue
54 } 47 }
55 48
56 // Go through the resource configs to find the matching config 49 for _, r := range config.Module.ManagedResources {
57 for _, r := range tree.Config().Resources { 50 rAddr := r.Addr()
58 // Get a resource address so we can compare 51
59 a, err := parseResourceAddressConfig(r) 52 if rAddr != addr.Resource {
60 if err != nil { 53 // Not the same resource
61 panic(fmt.Sprintf( 54 continue
62 "Error parsing config address, this is a bug: %#v", r))
63 } 55 }
64 a.Path = addr.Path
65 56
66 // If this is not the same resource, then continue 57 log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %s", dag.VertexName(v), v, r.DeclRange)
67 if !a.Equals(addr) { 58 arn.AttachResourceConfig(r)
59 }
60 for _, r := range config.Module.DataResources {
61 rAddr := r.Addr()
62
63 if rAddr != addr.Resource {
64 // Not the same resource
68 continue 65 continue
69 } 66 }
70 67
71 log.Printf("[TRACE] Attaching resource config: %#v", r) 68 log.Printf("[TRACE] AttachResourceConfigTransformer: attaching to %q (%T) config from %#v", dag.VertexName(v), v, r.DeclRange)
72 arn.AttachResourceConfig(r) 69 arn.AttachResourceConfig(r)
73 break
74 } 70 }
75 } 71 }
76 72
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go
new file mode 100644
index 0000000..c7695dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_schema.go
@@ -0,0 +1,99 @@
1package terraform
2
3import (
4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/configs/configschema"
8 "github.com/hashicorp/terraform/dag"
9)
10
11// GraphNodeAttachResourceSchema is an interface implemented by node types
12// that need a resource schema attached.
13type GraphNodeAttachResourceSchema interface {
14 GraphNodeResource
15 GraphNodeProviderConsumer
16
17 AttachResourceSchema(schema *configschema.Block, version uint64)
18}
19
20// GraphNodeAttachProviderConfigSchema is an interface implemented by node types
21// that need a provider configuration schema attached.
22type GraphNodeAttachProviderConfigSchema interface {
23 GraphNodeProvider
24
25 AttachProviderConfigSchema(*configschema.Block)
26}
27
28// GraphNodeAttachProvisionerSchema is an interface implemented by node types
29// that need one or more provisioner schemas attached.
30type GraphNodeAttachProvisionerSchema interface {
31 ProvisionedBy() []string
32
33 // SetProvisionerSchema is called during transform for each provisioner
34 // type returned from ProvisionedBy, providing the configuration schema
35 // for each provisioner in turn. The implementer should save these for
36 // later use in evaluating provisioner configuration blocks.
37 AttachProvisionerSchema(name string, schema *configschema.Block)
38}
39
40// AttachSchemaTransformer finds nodes that implement
41// GraphNodeAttachResourceSchema, GraphNodeAttachProviderConfigSchema, or
42// GraphNodeAttachProvisionerSchema, looks up the needed schemas for each
43// and then passes them to a method implemented by the node.
44type AttachSchemaTransformer struct {
45 Schemas *Schemas
46}
47
48func (t *AttachSchemaTransformer) Transform(g *Graph) error {
49 if t.Schemas == nil {
50 // Should never happen with a reasonable caller, but we'll return a
51 // proper error here anyway so that we'll fail gracefully.
52 return fmt.Errorf("AttachSchemaTransformer used with nil Schemas")
53 }
54
55 for _, v := range g.Vertices() {
56
57 if tv, ok := v.(GraphNodeAttachResourceSchema); ok {
58 addr := tv.ResourceAddr()
59 mode := addr.Resource.Mode
60 typeName := addr.Resource.Type
61 providerAddr, _ := tv.ProvidedBy()
62 providerType := providerAddr.ProviderConfig.Type
63
64 schema, version := t.Schemas.ResourceTypeConfig(providerType, mode, typeName)
65 if schema == nil {
66 log.Printf("[ERROR] AttachSchemaTransformer: No resource schema available for %s", addr)
67 continue
68 }
69 log.Printf("[TRACE] AttachSchemaTransformer: attaching resource schema to %s", dag.VertexName(v))
70 tv.AttachResourceSchema(schema, version)
71 }
72
73 if tv, ok := v.(GraphNodeAttachProviderConfigSchema); ok {
74 providerAddr := tv.ProviderAddr()
75 schema := t.Schemas.ProviderConfig(providerAddr.ProviderConfig.Type)
76 if schema == nil {
77 log.Printf("[ERROR] AttachSchemaTransformer: No provider config schema available for %s", providerAddr)
78 continue
79 }
80 log.Printf("[TRACE] AttachSchemaTransformer: attaching provider config schema to %s", dag.VertexName(v))
81 tv.AttachProviderConfigSchema(schema)
82 }
83
84 if tv, ok := v.(GraphNodeAttachProvisionerSchema); ok {
85 names := tv.ProvisionedBy()
86 for _, name := range names {
87 schema := t.Schemas.ProvisionerConfig(name)
88 if schema == nil {
89 log.Printf("[ERROR] AttachSchemaTransformer: No schema available for provisioner %q on %q", name, dag.VertexName(v))
90 continue
91 }
92 log.Printf("[TRACE] AttachSchemaTransformer: attaching provisioner %q config schema to %s", name, dag.VertexName(v))
93 tv.AttachProvisionerSchema(name, schema)
94 }
95 }
96 }
97
98 return nil
99}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
index 564ff08..3af7b98 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
@@ -4,64 +4,64 @@ import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7 "github.com/hashicorp/terraform/states"
7) 8)
8 9
9// GraphNodeAttachResourceState is an interface that can be implemented 10// GraphNodeAttachResourceState is an interface that can be implemented
10// to request that a ResourceState is attached to the node. 11// to request that a ResourceState is attached to the node.
12//
13// Due to a historical naming inconsistency, the type ResourceState actually
14// represents the state for a particular _instance_, while InstanceState
15// represents the values for that instance during a particular phase
16// (e.g. primary vs. deposed). Consequently, GraphNodeAttachResourceState
17// is supported only for nodes that represent resource instances, even though
18// the name might suggest it is for containing resources.
11type GraphNodeAttachResourceState interface { 19type GraphNodeAttachResourceState interface {
12 // The address to the resource for the state 20 GraphNodeResourceInstance
13 ResourceAddr() *ResourceAddress
14 21
15 // Sets the state 22 // Sets the state
16 AttachResourceState(*ResourceState) 23 AttachResourceState(*states.Resource)
17} 24}
18 25
19// AttachStateTransformer goes through the graph and attaches 26// AttachStateTransformer goes through the graph and attaches
20// state to nodes that implement the interfaces above. 27// state to nodes that implement the interfaces above.
21type AttachStateTransformer struct { 28type AttachStateTransformer struct {
22 State *State // State is the root state 29 State *states.State // State is the root state
23} 30}
24 31
25func (t *AttachStateTransformer) Transform(g *Graph) error { 32func (t *AttachStateTransformer) Transform(g *Graph) error {
26 // If no state, then nothing to do 33 // If no state, then nothing to do
27 if t.State == nil { 34 if t.State == nil {
28 log.Printf("[DEBUG] Not attaching any state: state is nil") 35 log.Printf("[DEBUG] Not attaching any node states: overall state is nil")
29 return nil 36 return nil
30 } 37 }
31 38
32 filter := &StateFilter{State: t.State}
33 for _, v := range g.Vertices() { 39 for _, v := range g.Vertices() {
34 // Only care about nodes requesting we're adding state 40 // Nodes implement this interface to request state attachment.
35 an, ok := v.(GraphNodeAttachResourceState) 41 an, ok := v.(GraphNodeAttachResourceState)
36 if !ok { 42 if !ok {
37 continue 43 continue
38 } 44 }
39 addr := an.ResourceAddr() 45 addr := an.ResourceInstanceAddr()
40 46
41 // Get the module state 47 rs := t.State.Resource(addr.ContainingResource())
42 results, err := filter.Filter(addr.String()) 48 if rs == nil {
43 if err != nil { 49 log.Printf("[DEBUG] Resource state not found for node %q, instance %s", dag.VertexName(v), addr)
44 return err 50 continue
45 } 51 }
46 52
47 // Attach the first resource state we get 53 is := rs.Instance(addr.Resource.Key)
48 found := false 54 if is == nil {
49 for _, result := range results { 55 // We don't actually need this here, since we'll attach the whole
50 if rs, ok := result.Value.(*ResourceState); ok { 56 // resource state, but we still check because it'd be weird
51 log.Printf( 57 // for the specific instance we're attaching to not to exist.
52 "[DEBUG] Attaching resource state to %q: %#v", 58 log.Printf("[DEBUG] Resource instance state not found for node %q, instance %s", dag.VertexName(v), addr)
53 dag.VertexName(v), rs) 59 continue
54 an.AttachResourceState(rs)
55 found = true
56 break
57 }
58 } 60 }
59 61
60 if !found { 62 // make sure to attach a copy of the state, so instances can modify the
61 log.Printf( 63 // same ResourceState.
62 "[DEBUG] Resource state not found for %q: %s", 64 an.AttachResourceState(rs.DeepCopy())
63 dag.VertexName(v), addr)
64 }
65 } 65 }
66 66
67 return nil 67 return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
index 61bce85..9d3b6f4 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
@@ -1,13 +1,11 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "errors"
5 "fmt"
6 "log" 4 "log"
7 "sync" 5 "sync"
8 6
9 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/addrs"
10 "github.com/hashicorp/terraform/config/module" 8 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/dag" 9 "github.com/hashicorp/terraform/dag"
12) 10)
13 11
@@ -26,14 +24,14 @@ type ConfigTransformer struct {
26 Concrete ConcreteResourceNodeFunc 24 Concrete ConcreteResourceNodeFunc
27 25
28 // Module is the module to add resources from. 26 // Module is the module to add resources from.
29 Module *module.Tree 27 Config *configs.Config
30 28
31 // Unique will only add resources that aren't already present in the graph. 29 // Unique will only add resources that aren't already present in the graph.
32 Unique bool 30 Unique bool
33 31
34 // Mode will only add resources that match the given mode 32 // Mode will only add resources that match the given mode
35 ModeFilter bool 33 ModeFilter bool
36 Mode config.ResourceMode 34 Mode addrs.ResourceMode
37 35
38 l sync.Mutex 36 l sync.Mutex
39 uniqueMap map[string]struct{} 37 uniqueMap map[string]struct{}
@@ -44,16 +42,11 @@ func (t *ConfigTransformer) Transform(g *Graph) error {
44 t.l.Lock() 42 t.l.Lock()
45 defer t.l.Unlock() 43 defer t.l.Unlock()
46 44
47 // If no module is given, we don't do anything 45 // If no configuration is available, we don't do anything
48 if t.Module == nil { 46 if t.Config == nil {
49 return nil 47 return nil
50 } 48 }
51 49
52 // If the module isn't loaded, that is simply an error
53 if !t.Module.Loaded() {
54 return errors.New("module must be loaded for ConfigTransformer")
55 }
56
57 // Reset the uniqueness map. If we're tracking uniques, then populate 50 // Reset the uniqueness map. If we're tracking uniques, then populate
58 // it with addresses. 51 // it with addresses.
59 t.uniqueMap = make(map[string]struct{}) 52 t.uniqueMap = make(map[string]struct{})
@@ -67,22 +60,22 @@ func (t *ConfigTransformer) Transform(g *Graph) error {
67 } 60 }
68 61
69 // Start the transformation process 62 // Start the transformation process
70 return t.transform(g, t.Module) 63 return t.transform(g, t.Config)
71} 64}
72 65
73func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error { 66func (t *ConfigTransformer) transform(g *Graph, config *configs.Config) error {
74 // If no config, do nothing 67 // If no config, do nothing
75 if m == nil { 68 if config == nil {
76 return nil 69 return nil
77 } 70 }
78 71
79 // Add our resources 72 // Add our resources
80 if err := t.transformSingle(g, m); err != nil { 73 if err := t.transformSingle(g, config); err != nil {
81 return err 74 return err
82 } 75 }
83 76
84 // Transform all the children. 77 // Transform all the children.
85 for _, c := range m.Children() { 78 for _, c := range config.Children {
86 if err := t.transform(g, c); err != nil { 79 if err := t.transform(g, c); err != nil {
87 return err 80 return err
88 } 81 }
@@ -91,43 +84,48 @@ func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
91 return nil 84 return nil
92} 85}
93 86
94func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { 87func (t *ConfigTransformer) transformSingle(g *Graph, config *configs.Config) error {
95 log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path()) 88 path := config.Path
96 89 module := config.Module
97 // Get the configuration for this module 90 log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", path)
98 conf := m.Config() 91
99 92 // For now we assume that each module call produces only one module
100 // Build the path we're at 93 // instance with no key, since we don't yet support "count" and "for_each"
101 path := m.Path() 94 // on modules.
95 // FIXME: As part of supporting "count" and "for_each" on modules, rework
96 // this so that we'll "expand" the module call first and then create graph
97 // nodes for each module instance separately.
98 instPath := path.UnkeyedInstanceShim()
99
100 allResources := make([]*configs.Resource, 0, len(module.ManagedResources)+len(module.DataResources))
101 for _, r := range module.ManagedResources {
102 allResources = append(allResources, r)
103 }
104 for _, r := range module.DataResources {
105 allResources = append(allResources, r)
106 }
102 107
103 // Write all the resources out 108 for _, r := range allResources {
104 for _, r := range conf.Resources { 109 relAddr := r.Addr()
105 // Build the resource address
106 addr, err := parseResourceAddressConfig(r)
107 if err != nil {
108 panic(fmt.Sprintf(
109 "Error parsing config address, this is a bug: %#v", r))
110 }
111 addr.Path = path
112 110
113 // If this is already in our uniqueness map, don't add it again 111 if t.ModeFilter && relAddr.Mode != t.Mode {
114 if _, ok := t.uniqueMap[addr.String()]; ok { 112 // Skip non-matching modes
115 continue 113 continue
116 } 114 }
117 115
118 // Remove non-matching modes 116 addr := relAddr.Absolute(instPath)
119 if t.ModeFilter && addr.Mode != t.Mode { 117 if _, ok := t.uniqueMap[addr.String()]; ok {
118 // We've already seen a resource with this address. This should
119 // never happen, because we enforce uniqueness in the config loader.
120 continue 120 continue
121 } 121 }
122 122
123 // Build the abstract node and the concrete one
124 abstract := &NodeAbstractResource{Addr: addr} 123 abstract := &NodeAbstractResource{Addr: addr}
125 var node dag.Vertex = abstract 124 var node dag.Vertex = abstract
126 if f := t.Concrete; f != nil { 125 if f := t.Concrete; f != nil {
127 node = f(abstract) 126 node = f(abstract)
128 } 127 }
129 128
130 // Add it to the graph
131 g.Add(node) 129 g.Add(node)
132 } 130 }
133 131
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
index 92f9888..866c917 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
@@ -1,9 +1,7 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "errors" 4 "github.com/hashicorp/terraform/configs"
5
6 "github.com/hashicorp/terraform/config/module"
7 "github.com/hashicorp/terraform/dag" 5 "github.com/hashicorp/terraform/dag"
8) 6)
9 7
@@ -20,54 +18,47 @@ import (
20type FlatConfigTransformer struct { 18type FlatConfigTransformer struct {
21 Concrete ConcreteResourceNodeFunc // What to turn resources into 19 Concrete ConcreteResourceNodeFunc // What to turn resources into
22 20
23 Module *module.Tree 21 Config *configs.Config
24} 22}
25 23
26func (t *FlatConfigTransformer) Transform(g *Graph) error { 24func (t *FlatConfigTransformer) Transform(g *Graph) error {
27 // If no module, we do nothing 25 // We have nothing to do if there is no configuration.
28 if t.Module == nil { 26 if t.Config == nil {
29 return nil 27 return nil
30 } 28 }
31 29
32 // If the module is not loaded, that is an error 30 return t.transform(g, t.Config)
33 if !t.Module.Loaded() {
34 return errors.New("module must be loaded")
35 }
36
37 return t.transform(g, t.Module)
38} 31}
39 32
40func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error { 33func (t *FlatConfigTransformer) transform(g *Graph, config *configs.Config) error {
41 // If no module, no problem 34 // If we have no configuration then there's nothing to do.
42 if m == nil { 35 if config == nil {
43 return nil 36 return nil
44 } 37 }
45 38
46 // Transform all the children. 39 // Transform all the children.
47 for _, c := range m.Children() { 40 for _, c := range config.Children {
48 if err := t.transform(g, c); err != nil { 41 if err := t.transform(g, c); err != nil {
49 return err 42 return err
50 } 43 }
51 } 44 }
52 45
53 // Get the configuration for this module 46 module := config.Module
54 config := m.Config() 47 // For now we assume that each module call produces only one module
55 48 // instance with no key, since we don't yet support "count" and "for_each"
56 // Write all the resources out 49 // on modules.
57 for _, r := range config.Resources { 50 // FIXME: As part of supporting "count" and "for_each" on modules, rework
58 // Grab the address for this resource 51 // this so that we'll "expand" the module call first and then create graph
59 addr, err := parseResourceAddressConfig(r) 52 // nodes for each module instance separately.
60 if err != nil { 53 instPath := config.Path.UnkeyedInstanceShim()
61 return err
62 }
63 addr.Path = m.Path()
64 54
65 // Build the abstract resource. We have the config already so 55 for _, r := range module.ManagedResources {
66 // we'll just pre-populate that. 56 addr := r.Addr().Absolute(instPath)
67 abstract := &NodeAbstractResource{ 57 abstract := &NodeAbstractResource{
68 Addr: addr, 58 Addr: addr,
69 Config: r, 59 Config: r,
70 } 60 }
61 // Grab the address for this resource
71 var node dag.Vertex = abstract 62 var node dag.Vertex = abstract
72 if f := t.Concrete; f != nil { 63 if f := t.Concrete; f != nil {
73 node = f(abstract) 64 node = f(abstract)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
index 83415f3..01601bd 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
@@ -1,16 +1,21 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/configs"
4 "github.com/hashicorp/terraform/dag" 5 "github.com/hashicorp/terraform/dag"
5) 6)
6 7
7// CountBoundaryTransformer adds a node that depends on everything else 8// CountBoundaryTransformer adds a node that depends on everything else
8// so that it runs last in order to clean up the state for nodes that 9// so that it runs last in order to clean up the state for nodes that
9// are on the "count boundary": "foo.0" when only one exists becomes "foo" 10// are on the "count boundary": "foo.0" when only one exists becomes "foo"
10type CountBoundaryTransformer struct{} 11type CountBoundaryTransformer struct {
12 Config *configs.Config
13}
11 14
12func (t *CountBoundaryTransformer) Transform(g *Graph) error { 15func (t *CountBoundaryTransformer) Transform(g *Graph) error {
13 node := &NodeCountBoundary{} 16 node := &NodeCountBoundary{
17 Config: t.Config,
18 }
14 g.Add(node) 19 g.Add(node)
15 20
16 // Depends on everything 21 // Depends on everything
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
deleted file mode 100644
index 87a1f9c..0000000
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
+++ /dev/null
@@ -1,178 +0,0 @@
1package terraform
2
3import "fmt"
4
5// DeposedTransformer is a GraphTransformer that adds deposed resources
6// to the graph.
7type DeposedTransformer struct {
8 // State is the global state. We'll automatically find the correct
9 // ModuleState based on the Graph.Path that is being transformed.
10 State *State
11
12 // View, if non-empty, is the ModuleState.View used around the state
13 // to find deposed resources.
14 View string
15
16 // The provider used by the resourced which were deposed
17 ResolvedProvider string
18}
19
20func (t *DeposedTransformer) Transform(g *Graph) error {
21 state := t.State.ModuleByPath(g.Path)
22 if state == nil {
23 // If there is no state for our module there can't be any deposed
24 // resources, since they live in the state.
25 return nil
26 }
27
28 // If we have a view, apply it now
29 if t.View != "" {
30 state = state.View(t.View)
31 }
32
33 // Go through all the resources in our state to look for deposed resources
34 for k, rs := range state.Resources {
35 // If we have no deposed resources, then move on
36 if len(rs.Deposed) == 0 {
37 continue
38 }
39
40 deposed := rs.Deposed
41
42 for i, _ := range deposed {
43 g.Add(&graphNodeDeposedResource{
44 Index: i,
45 ResourceName: k,
46 ResourceType: rs.Type,
47 ProviderName: rs.Provider,
48 ResolvedProvider: t.ResolvedProvider,
49 })
50 }
51 }
52
53 return nil
54}
55
56// graphNodeDeposedResource is the graph vertex representing a deposed resource.
57type graphNodeDeposedResource struct {
58 Index int
59 ResourceName string
60 ResourceType string
61 ProviderName string
62 ResolvedProvider string
63}
64
65func (n *graphNodeDeposedResource) Name() string {
66 return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
67}
68
69func (n *graphNodeDeposedResource) ProvidedBy() string {
70 return resourceProvider(n.ResourceName, n.ProviderName)
71}
72
73func (n *graphNodeDeposedResource) SetProvider(p string) {
74 n.ResolvedProvider = p
75}
76
77// GraphNodeEvalable impl.
78func (n *graphNodeDeposedResource) EvalTree() EvalNode {
79 var provider ResourceProvider
80 var state *InstanceState
81
82 seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
83
84 // Build instance info
85 info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
86 seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})
87
88 // Refresh the resource
89 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
90 Ops: []walkOperation{walkRefresh},
91 Node: &EvalSequence{
92 Nodes: []EvalNode{
93 &EvalGetProvider{
94 Name: n.ResolvedProvider,
95 Output: &provider,
96 },
97 &EvalReadStateDeposed{
98 Name: n.ResourceName,
99 Output: &state,
100 Index: n.Index,
101 },
102 &EvalRefresh{
103 Info: info,
104 Provider: &provider,
105 State: &state,
106 Output: &state,
107 },
108 &EvalWriteStateDeposed{
109 Name: n.ResourceName,
110 ResourceType: n.ResourceType,
111 Provider: n.ResolvedProvider,
112 State: &state,
113 Index: n.Index,
114 },
115 },
116 },
117 })
118
119 // Apply
120 var diff *InstanceDiff
121 var err error
122 seq.Nodes = append(seq.Nodes, &EvalOpFilter{
123 Ops: []walkOperation{walkApply, walkDestroy},
124 Node: &EvalSequence{
125 Nodes: []EvalNode{
126 &EvalGetProvider{
127 Name: n.ResolvedProvider,
128 Output: &provider,
129 },
130 &EvalReadStateDeposed{
131 Name: n.ResourceName,
132 Output: &state,
133 Index: n.Index,
134 },
135 &EvalDiffDestroy{
136 Info: info,
137 State: &state,
138 Output: &diff,
139 },
140 // Call pre-apply hook
141 &EvalApplyPre{
142 Info: info,
143 State: &state,
144 Diff: &diff,
145 },
146 &EvalApply{
147 Info: info,
148 State: &state,
149 Diff: &diff,
150 Provider: &provider,
151 Output: &state,
152 Error: &err,
153 },
154 // Always write the resource back to the state deposed... if it
155 // was successfully destroyed it will be pruned. If it was not, it will
156 // be caught on the next run.
157 &EvalWriteStateDeposed{
158 Name: n.ResourceName,
159 ResourceType: n.ResourceType,
160 Provider: n.ResolvedProvider,
161 State: &state,
162 Index: n.Index,
163 },
164 &EvalApplyPost{
165 Info: info,
166 State: &state,
167 Error: &err,
168 },
169 &EvalReturnError{
170 Error: &err,
171 },
172 &EvalUpdateStateHook{},
173 },
174 },
175 })
176
177 return seq
178}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
index edfb460..2f4d5ed 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
@@ -4,15 +4,15 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 6
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/dag" 8 "github.com/hashicorp/terraform/dag"
9 "github.com/hashicorp/terraform/states"
9) 10)
10 11
11// GraphNodeDestroyerCBD must be implemented by nodes that might be 12// GraphNodeDestroyerCBD must be implemented by nodes that might be
12// create-before-destroy destroyers. 13// create-before-destroy destroyers, or might plan a create-before-destroy
14// action.
13type GraphNodeDestroyerCBD interface { 15type GraphNodeDestroyerCBD interface {
14 GraphNodeDestroyer
15
16 // CreateBeforeDestroy returns true if this node represents a node 16 // CreateBeforeDestroy returns true if this node represents a node
17 // that is doing a CBD. 17 // that is doing a CBD.
18 CreateBeforeDestroy() bool 18 CreateBeforeDestroy() bool
@@ -23,6 +23,89 @@ type GraphNodeDestroyerCBD interface {
23 ModifyCreateBeforeDestroy(bool) error 23 ModifyCreateBeforeDestroy(bool) error
24} 24}
25 25
26// GraphNodeAttachDestroyer is implemented by applyable nodes that have a
27// companion destroy node. This allows the creation node to look up the status
28// of the destroy node and determine if it needs to depose the existing state,
29// or replace it.
30// If a node is not marked as create-before-destroy in the configuration, but a
31// dependency forces that status, only the destroy node will be aware of that
32// status.
33type GraphNodeAttachDestroyer interface {
34 // AttachDestroyNode takes a destroy node and saves a reference to that
35 // node in the receiver, so it can later check the status of
36 // CreateBeforeDestroy().
37 AttachDestroyNode(n GraphNodeDestroyerCBD)
38}
39
40// ForcedCBDTransformer detects when a particular CBD-able graph node has
41// dependencies with another that has create_before_destroy set that require
42// it to be forced on, and forces it on.
43//
44// This must be used in the plan graph builder to ensure that
45// create_before_destroy settings are properly propagated before constructing
46// the planned changes. This requires that the plannable resource nodes
47// implement GraphNodeDestroyerCBD.
48type ForcedCBDTransformer struct {
49}
50
51func (t *ForcedCBDTransformer) Transform(g *Graph) error {
52 for _, v := range g.Vertices() {
53 dn, ok := v.(GraphNodeDestroyerCBD)
54 if !ok {
55 continue
56 }
57
58 if !dn.CreateBeforeDestroy() {
59 // If there are no CBD decendent (dependent nodes), then we
60 // do nothing here.
61 if !t.hasCBDDescendent(g, v) {
62 log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) has no CBD descendent, so skipping", dag.VertexName(v), v)
63 continue
64 }
65
66 // If this isn't naturally a CBD node, this means that an descendent is
67 // and we need to auto-upgrade this node to CBD. We do this because
68 // a CBD node depending on non-CBD will result in cycles. To avoid this,
69 // we always attempt to upgrade it.
70 log.Printf("[TRACE] ForcedCBDTransformer: forcing create_before_destroy on for %q (%T)", dag.VertexName(v), v)
71 if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
72 return fmt.Errorf(
73 "%s: must have create before destroy enabled because "+
74 "a dependent resource has CBD enabled. However, when "+
75 "attempting to automatically do this, an error occurred: %s",
76 dag.VertexName(v), err)
77 }
78 } else {
79 log.Printf("[TRACE] ForcedCBDTransformer: %q (%T) already has create_before_destroy set", dag.VertexName(v), v)
80 }
81 }
82 return nil
83}
84
85// hasCBDDescendent returns true if any descendent (node that depends on this)
86// has CBD set.
87func (t *ForcedCBDTransformer) hasCBDDescendent(g *Graph, v dag.Vertex) bool {
88 s, _ := g.Descendents(v)
89 if s == nil {
90 return true
91 }
92
93 for _, ov := range s.List() {
94 dn, ok := ov.(GraphNodeDestroyerCBD)
95 if !ok {
96 continue
97 }
98
99 if dn.CreateBeforeDestroy() {
100 // some descendent is CreateBeforeDestroy, so we need to follow suit
101 log.Printf("[TRACE] ForcedCBDTransformer: %q has CBD descendent %q", dag.VertexName(v), dag.VertexName(ov))
102 return true
103 }
104 }
105
106 return false
107}
108
26// CBDEdgeTransformer modifies the edges of CBD nodes that went through 109// CBDEdgeTransformer modifies the edges of CBD nodes that went through
27// the DestroyEdgeTransformer to have the right dependencies. There are 110// the DestroyEdgeTransformer to have the right dependencies. There are
28// two real tasks here: 111// two real tasks here:
@@ -35,16 +118,25 @@ type GraphNodeDestroyerCBD interface {
35// update to A. Example: adding a web server updates the load balancer 118// update to A. Example: adding a web server updates the load balancer
36// before deleting the old web server. 119// before deleting the old web server.
37// 120//
121// This transformer requires that a previous transformer has already forced
122// create_before_destroy on for nodes that are depended on by explicit CBD
123// nodes. This is the logic in ForcedCBDTransformer, though in practice we
124// will get here by recording the CBD-ness of each change in the plan during
125// the plan walk and then forcing the nodes into the appropriate setting during
126// DiffTransformer when building the apply graph.
38type CBDEdgeTransformer struct { 127type CBDEdgeTransformer struct {
39 // Module and State are only needed to look up dependencies in 128 // Module and State are only needed to look up dependencies in
40 // any way possible. Either can be nil if not availabile. 129 // any way possible. Either can be nil if not availabile.
41 Module *module.Tree 130 Config *configs.Config
42 State *State 131 State *states.State
132
133 // If configuration is present then Schemas is required in order to
134 // obtain schema information from providers and provisioners so we can
135 // properly resolve implicit dependencies.
136 Schemas *Schemas
43} 137}
44 138
45func (t *CBDEdgeTransformer) Transform(g *Graph) error { 139func (t *CBDEdgeTransformer) Transform(g *Graph) error {
46 log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
47
48 // Go through and reverse any destroy edges 140 // Go through and reverse any destroy edges
49 destroyMap := make(map[string][]dag.Vertex) 141 destroyMap := make(map[string][]dag.Vertex)
50 for _, v := range g.Vertices() { 142 for _, v := range g.Vertices() {
@@ -52,25 +144,13 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
52 if !ok { 144 if !ok {
53 continue 145 continue
54 } 146 }
147 dern, ok := v.(GraphNodeDestroyer)
148 if !ok {
149 continue
150 }
55 151
56 if !dn.CreateBeforeDestroy() { 152 if !dn.CreateBeforeDestroy() {
57 // If there are no CBD ancestors (dependent nodes), then we 153 continue
58 // do nothing here.
59 if !t.hasCBDAncestor(g, v) {
60 continue
61 }
62
63 // If this isn't naturally a CBD node, this means that an ancestor is
64 // and we need to auto-upgrade this node to CBD. We do this because
65 // a CBD node depending on non-CBD will result in cycles. To avoid this,
66 // we always attempt to upgrade it.
67 if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
68 return fmt.Errorf(
69 "%s: must have create before destroy enabled because "+
70 "a dependent resource has CBD enabled. However, when "+
71 "attempting to automatically do this, an error occurred: %s",
72 dag.VertexName(v), err)
73 }
74 } 154 }
75 155
76 // Find the destroy edge. There should only be one. 156 // Find the destroy edge. There should only be one.
@@ -86,7 +166,9 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
86 166
87 // Found it! Invert. 167 // Found it! Invert.
88 g.RemoveEdge(de) 168 g.RemoveEdge(de)
89 g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()}) 169 applyNode := de.Source()
170 destroyNode := de.Target()
171 g.Connect(&DestroyEdge{S: destroyNode, T: applyNode})
90 } 172 }
91 173
92 // If the address has an index, we strip that. Our depMap creation 174 // If the address has an index, we strip that. Our depMap creation
@@ -94,15 +176,11 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
94 // dependencies. One day when we limit dependencies more exactly 176 // dependencies. One day when we limit dependencies more exactly
95 // this will have to change. We have a test case covering this 177 // this will have to change. We have a test case covering this
96 // (depNonCBDCountBoth) so it'll be caught. 178 // (depNonCBDCountBoth) so it'll be caught.
97 addr := dn.DestroyAddr() 179 addr := dern.DestroyAddr()
98 if addr.Index >= 0 { 180 key := addr.ContainingResource().String()
99 addr = addr.Copy() // Copy so that we don't modify any pointers
100 addr.Index = -1
101 }
102 181
103 // Add this to the list of nodes that we need to fix up 182 // Add this to the list of nodes that we need to fix up
104 // the edges for (step 2 above in the docs). 183 // the edges for (step 2 above in the docs).
105 key := addr.String()
106 destroyMap[key] = append(destroyMap[key], v) 184 destroyMap[key] = append(destroyMap[key], v)
107 } 185 }
108 186
@@ -151,13 +229,9 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
151 // dependencies. One day when we limit dependencies more exactly 229 // dependencies. One day when we limit dependencies more exactly
152 // this will have to change. We have a test case covering this 230 // this will have to change. We have a test case covering this
153 // (depNonCBDCount) so it'll be caught. 231 // (depNonCBDCount) so it'll be caught.
154 if addr.Index >= 0 { 232 key := addr.ContainingResource().String()
155 addr = addr.Copy() // Copy so that we don't modify any pointers
156 addr.Index = -1
157 }
158 233
159 // If there is nothing this resource should depend on, ignore it 234 // If there is nothing this resource should depend on, ignore it
160 key := addr.String()
161 dns, ok := depMap[key] 235 dns, ok := depMap[key]
162 if !ok { 236 if !ok {
163 continue 237 continue
@@ -174,21 +248,21 @@ func (t *CBDEdgeTransformer) Transform(g *Graph) error {
174 return nil 248 return nil
175} 249}
176 250
177func (t *CBDEdgeTransformer) depMap( 251func (t *CBDEdgeTransformer) depMap(destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
178 destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
179 // Build the graph of our config, this ensures that all resources 252 // Build the graph of our config, this ensures that all resources
180 // are present in the graph. 253 // are present in the graph.
181 g, err := (&BasicGraphBuilder{ 254 g, diags := (&BasicGraphBuilder{
182 Steps: []GraphTransformer{ 255 Steps: []GraphTransformer{
183 &FlatConfigTransformer{Module: t.Module}, 256 &FlatConfigTransformer{Config: t.Config},
184 &AttachResourceConfigTransformer{Module: t.Module}, 257 &AttachResourceConfigTransformer{Config: t.Config},
185 &AttachStateTransformer{State: t.State}, 258 &AttachStateTransformer{State: t.State},
259 &AttachSchemaTransformer{Schemas: t.Schemas},
186 &ReferenceTransformer{}, 260 &ReferenceTransformer{},
187 }, 261 },
188 Name: "CBDEdgeTransformer", 262 Name: "CBDEdgeTransformer",
189 }).Build(nil) 263 }).Build(nil)
190 if err != nil { 264 if diags.HasErrors() {
191 return nil, err 265 return nil, diags.Err()
192 } 266 }
193 267
194 // Using this graph, build the list of destroy nodes that each resource 268 // Using this graph, build the list of destroy nodes that each resource
@@ -232,26 +306,3 @@ func (t *CBDEdgeTransformer) depMap(
232 306
233 return depMap, nil 307 return depMap, nil
234} 308}
235
236// hasCBDAncestor returns true if any ancestor (node that depends on this)
237// has CBD set.
238func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
239 s, _ := g.Ancestors(v)
240 if s == nil {
241 return true
242 }
243
244 for _, v := range s.List() {
245 dn, ok := v.(GraphNodeDestroyerCBD)
246 if !ok {
247 continue
248 }
249
250 if dn.CreateBeforeDestroy() {
251 // some ancestor is CreateBeforeDestroy, so we need to follow suit
252 return true
253 }
254 }
255
256 return false
257}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
index a06ff29..7fb415b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
@@ -3,7 +3,10 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config/module" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/states"
8
9 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/dag" 10 "github.com/hashicorp/terraform/dag"
8) 11)
9 12
@@ -11,16 +14,16 @@ import (
11type GraphNodeDestroyer interface { 14type GraphNodeDestroyer interface {
12 dag.Vertex 15 dag.Vertex
13 16
14 // ResourceAddr is the address of the resource that is being 17 // DestroyAddr is the address of the resource that is being
15 // destroyed by this node. If this returns nil, then this node 18 // destroyed by this node. If this returns nil, then this node
16 // is not destroying anything. 19 // is not destroying anything.
17 DestroyAddr() *ResourceAddress 20 DestroyAddr() *addrs.AbsResourceInstance
18} 21}
19 22
20// GraphNodeCreator must be implemented by nodes that create OR update resources. 23// GraphNodeCreator must be implemented by nodes that create OR update resources.
21type GraphNodeCreator interface { 24type GraphNodeCreator interface {
22 // ResourceAddr is the address of the resource being created or updated 25 // CreateAddr is the address of the resource being created or updated
23 CreateAddr() *ResourceAddress 26 CreateAddr() *addrs.AbsResourceInstance
24} 27}
25 28
26// DestroyEdgeTransformer is a GraphTransformer that creates the proper 29// DestroyEdgeTransformer is a GraphTransformer that creates the proper
@@ -40,33 +43,37 @@ type GraphNodeCreator interface {
40type DestroyEdgeTransformer struct { 43type DestroyEdgeTransformer struct {
41 // These are needed to properly build the graph of dependencies 44 // These are needed to properly build the graph of dependencies
42 // to determine what a destroy node depends on. Any of these can be nil. 45 // to determine what a destroy node depends on. Any of these can be nil.
43 Module *module.Tree 46 Config *configs.Config
44 State *State 47 State *states.State
48
49 // If configuration is present then Schemas is required in order to
50 // obtain schema information from providers and provisioners in order
51 // to properly resolve implicit dependencies.
52 Schemas *Schemas
45} 53}
46 54
47func (t *DestroyEdgeTransformer) Transform(g *Graph) error { 55func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
48 log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
49
50 // Build a map of what is being destroyed (by address string) to 56 // Build a map of what is being destroyed (by address string) to
51 // the list of destroyers. In general there will only be one destroyer 57 // the list of destroyers. Usually there will be at most one destroyer
52 // but to make it more robust we support multiple. 58 // per node, but we allow multiple if present for completeness.
53 destroyers := make(map[string][]GraphNodeDestroyer) 59 destroyers := make(map[string][]GraphNodeDestroyer)
60 destroyerAddrs := make(map[string]addrs.AbsResourceInstance)
54 for _, v := range g.Vertices() { 61 for _, v := range g.Vertices() {
55 dn, ok := v.(GraphNodeDestroyer) 62 dn, ok := v.(GraphNodeDestroyer)
56 if !ok { 63 if !ok {
57 continue 64 continue
58 } 65 }
59 66
60 addr := dn.DestroyAddr() 67 addrP := dn.DestroyAddr()
61 if addr == nil { 68 if addrP == nil {
62 continue 69 continue
63 } 70 }
71 addr := *addrP
64 72
65 key := addr.String() 73 key := addr.String()
66 log.Printf( 74 log.Printf("[TRACE] DestroyEdgeTransformer: %q (%T) destroys %s", dag.VertexName(dn), v, key)
67 "[TRACE] DestroyEdgeTransformer: %s destroying %q",
68 dag.VertexName(dn), key)
69 destroyers[key] = append(destroyers[key], dn) 75 destroyers[key] = append(destroyers[key], dn)
76 destroyerAddrs[key] = addr
70 } 77 }
71 78
72 // If we aren't destroying anything, there will be no edges to make 79 // If we aren't destroying anything, there will be no edges to make
@@ -100,10 +107,20 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
100 a := v 107 a := v
101 108
102 log.Printf( 109 log.Printf(
103 "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s", 110 "[TRACE] DestroyEdgeTransformer: connecting creator %q with destroyer %q",
104 dag.VertexName(a), dag.VertexName(a_d)) 111 dag.VertexName(a), dag.VertexName(a_d))
105 112
106 g.Connect(&DestroyEdge{S: a, T: a_d}) 113 g.Connect(&DestroyEdge{S: a, T: a_d})
114
115 // Attach the destroy node to the creator
116 // There really shouldn't be more than one destroyer, but even if
117 // there are, any of them will represent the correct
118 // CreateBeforeDestroy status.
119 if n, ok := cn.(GraphNodeAttachDestroyer); ok {
120 if d, ok := d.(GraphNodeDestroyerCBD); ok {
121 n.AttachDestroyNode(d)
122 }
123 }
107 } 124 }
108 } 125 }
109 126
@@ -120,20 +137,24 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
120 } 137 }
121 steps := []GraphTransformer{ 138 steps := []GraphTransformer{
122 // Add the local values 139 // Add the local values
123 &LocalTransformer{Module: t.Module}, 140 &LocalTransformer{Config: t.Config},
124 141
125 // Add outputs and metadata 142 // Add outputs and metadata
126 &OutputTransformer{Module: t.Module}, 143 &OutputTransformer{Config: t.Config},
127 &AttachResourceConfigTransformer{Module: t.Module}, 144 &AttachResourceConfigTransformer{Config: t.Config},
128 &AttachStateTransformer{State: t.State}, 145 &AttachStateTransformer{State: t.State},
129 146
130 TransformProviders(nil, providerFn, t.Module),
131
132 // Add all the variables. We can depend on resources through 147 // Add all the variables. We can depend on resources through
133 // variables due to module parameters, and we need to properly 148 // variables due to module parameters, and we need to properly
134 // determine that. 149 // determine that.
135 &RootVariableTransformer{Module: t.Module}, 150 &RootVariableTransformer{Config: t.Config},
136 &ModuleVariableTransformer{Module: t.Module}, 151 &ModuleVariableTransformer{Config: t.Config},
152
153 TransformProviders(nil, providerFn, t.Config),
154
155 // Must attach schemas before ReferenceTransformer so that we can
156 // analyze the configuration to find references.
157 &AttachSchemaTransformer{Schemas: t.Schemas},
137 158
138 &ReferenceTransformer{}, 159 &ReferenceTransformer{},
139 } 160 }
@@ -146,37 +167,36 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
146 // 167 //
147 var tempG Graph 168 var tempG Graph
148 var tempDestroyed []dag.Vertex 169 var tempDestroyed []dag.Vertex
149 for d, _ := range destroyers { 170 for d := range destroyers {
150 // d is what is being destroyed. We parse the resource address 171 // d is the string key for the resource being destroyed. We actually
151 // which it came from it is a panic if this fails. 172 // want the address value, which we stashed earlier.
152 addr, err := ParseResourceAddress(d) 173 addr := destroyerAddrs[d]
153 if err != nil {
154 panic(err)
155 }
156 174
157 // This part is a little bit weird but is the best way to 175 // This part is a little bit weird but is the best way to
158 // find the dependencies we need to: build a graph and use the 176 // find the dependencies we need to: build a graph and use the
159 // attach config and state transformers then ask for references. 177 // attach config and state transformers then ask for references.
160 abstract := &NodeAbstractResource{Addr: addr} 178 abstract := NewNodeAbstractResourceInstance(addr)
161 tempG.Add(abstract) 179 tempG.Add(abstract)
162 tempDestroyed = append(tempDestroyed, abstract) 180 tempDestroyed = append(tempDestroyed, abstract)
163 181
164 // We also add the destroy version here since the destroy can 182 // We also add the destroy version here since the destroy can
165 // depend on things that the creation doesn't (destroy provisioners). 183 // depend on things that the creation doesn't (destroy provisioners).
166 destroy := &NodeDestroyResource{NodeAbstractResource: abstract} 184 destroy := &NodeDestroyResourceInstance{NodeAbstractResourceInstance: abstract}
167 tempG.Add(destroy) 185 tempG.Add(destroy)
168 tempDestroyed = append(tempDestroyed, destroy) 186 tempDestroyed = append(tempDestroyed, destroy)
169 } 187 }
170 188
171 // Run the graph transforms so we have the information we need to 189 // Run the graph transforms so we have the information we need to
172 // build references. 190 // build references.
191 log.Printf("[TRACE] DestroyEdgeTransformer: constructing temporary graph for analysis of references, starting from:\n%s", tempG.StringWithNodeTypes())
173 for _, s := range steps { 192 for _, s := range steps {
193 log.Printf("[TRACE] DestroyEdgeTransformer: running %T on temporary graph", s)
174 if err := s.Transform(&tempG); err != nil { 194 if err := s.Transform(&tempG); err != nil {
195 log.Printf("[TRACE] DestroyEdgeTransformer: %T failed: %s", s, err)
175 return err 196 return err
176 } 197 }
177 } 198 }
178 199 log.Printf("[TRACE] DestroyEdgeTransformer: temporary reference graph:\n%s", tempG.String())
179 log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
180 200
181 // Go through all the nodes in the graph and determine what they 201 // Go through all the nodes in the graph and determine what they
182 // depend on. 202 // depend on.
@@ -207,16 +227,13 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
207 227
208 // Get the destroy node for this. In the example of our struct, 228 // Get the destroy node for this. In the example of our struct,
209 // we are currently at B and we're looking for B_d. 229 // we are currently at B and we're looking for B_d.
210 rn, ok := v.(GraphNodeResource) 230 rn, ok := v.(GraphNodeResourceInstance)
211 if !ok { 231 if !ok {
232 log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s, since it's not a resource", dag.VertexName(v))
212 continue 233 continue
213 } 234 }
214 235
215 addr := rn.ResourceAddr() 236 addr := rn.ResourceInstanceAddr()
216 if addr == nil {
217 continue
218 }
219
220 dns := destroyers[addr.String()] 237 dns := destroyers[addr.String()]
221 238
222 // We have dependencies, check if any are being destroyed 239 // We have dependencies, check if any are being destroyed
@@ -231,16 +248,12 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
231 // to see if A_d exists. 248 // to see if A_d exists.
232 var depDestroyers []dag.Vertex 249 var depDestroyers []dag.Vertex
233 for _, v := range refs { 250 for _, v := range refs {
234 rn, ok := v.(GraphNodeResource) 251 rn, ok := v.(GraphNodeResourceInstance)
235 if !ok { 252 if !ok {
236 continue 253 continue
237 } 254 }
238 255
239 addr := rn.ResourceAddr() 256 addr := rn.ResourceInstanceAddr()
240 if addr == nil {
241 continue
242 }
243
244 key := addr.String() 257 key := addr.String()
245 if ds, ok := destroyers[key]; ok { 258 if ds, ok := destroyers[key]; ok {
246 for _, d := range ds { 259 for _, d := range ds {
@@ -257,6 +270,7 @@ func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
257 for _, a_d := range dns { 270 for _, a_d := range dns {
258 for _, b_d := range depDestroyers { 271 for _, b_d := range depDestroyers {
259 if b_d != a_d { 272 if b_d != a_d {
273 log.Printf("[TRACE] DestroyEdgeTransformer: %q depends on %q", dag.VertexName(b_d), dag.VertexName(a_d))
260 g.Connect(dag.BasicEdge(b_d, a_d)) 274 g.Connect(dag.BasicEdge(b_d, a_d))
261 } 275 }
262 } 276 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
index ad46d3c..6fb915f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
@@ -4,83 +4,189 @@ import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 6
7 "github.com/hashicorp/terraform/config/module"
8 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/plans"
9 "github.com/hashicorp/terraform/states"
10 "github.com/hashicorp/terraform/tfdiags"
9) 11)
10 12
11// DiffTransformer is a GraphTransformer that adds the elements of 13// DiffTransformer is a GraphTransformer that adds graph nodes representing
12// the diff to the graph. 14// each of the resource changes described in the given Changes object.
13//
14// This transform is used for example by the ApplyGraphBuilder to ensure
15// that only resources that are being modified are represented in the graph.
16//
17// Module and State is still required for the DiffTransformer for annotations
18// since the Diff doesn't contain all the information required to build the
19// complete graph (such as create-before-destroy information). The graph
20// is built based on the diff first, though, ensuring that only resources
21// that are being modified are present in the graph.
22type DiffTransformer struct { 15type DiffTransformer struct {
23 Concrete ConcreteResourceNodeFunc 16 Concrete ConcreteResourceInstanceNodeFunc
24 17 State *states.State
25 Diff *Diff 18 Changes *plans.Changes
26 Module *module.Tree
27 State *State
28} 19}
29 20
30func (t *DiffTransformer) Transform(g *Graph) error { 21func (t *DiffTransformer) Transform(g *Graph) error {
31 // If the diff is nil or empty (nil is empty) then do nothing 22 if t.Changes == nil || len(t.Changes.Resources) == 0 {
32 if t.Diff.Empty() { 23 // Nothing to do!
33 return nil 24 return nil
34 } 25 }
35 26
36 // Go through all the modules in the diff. 27 // Go through all the modules in the diff.
37 log.Printf("[TRACE] DiffTransformer: starting") 28 log.Printf("[TRACE] DiffTransformer starting")
38 var nodes []dag.Vertex 29
39 for _, m := range t.Diff.Modules { 30 var diags tfdiags.Diagnostics
40 log.Printf("[TRACE] DiffTransformer: Module: %s", m) 31 state := t.State
41 // TODO: If this is a destroy diff then add a module destroy node 32 changes := t.Changes
42 33
43 // Go through all the resources in this module. 34 // DiffTransformer creates resource _instance_ nodes. If there are any
44 for name, inst := range m.Resources { 35 // whole-resource nodes already in the graph, we must ensure that they
45 log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst) 36 // get evaluated before any of the corresponding instances by creating
46 37 // dependency edges, so we'll do some prep work here to ensure we'll only
47 // We have changes! This is a create or update operation. 38 // create connections to nodes that existed before we started here.
48 // First grab the address so we have a unique way to 39 resourceNodes := map[string][]GraphNodeResource{}
49 // reference this resource. 40 for _, node := range g.Vertices() {
50 addr, err := parseResourceAddressInternal(name) 41 rn, ok := node.(GraphNodeResource)
51 if err != nil { 42 if !ok {
52 panic(fmt.Sprintf( 43 continue
53 "Error parsing internal name, this is a bug: %q", name)) 44 }
54 } 45 // We ignore any instances that _also_ implement
46 // GraphNodeResourceInstance, since in the unlikely event that they
47 // do exist we'd probably end up creating cycles by connecting them.
48 if _, ok := node.(GraphNodeResourceInstance); ok {
49 continue
50 }
51
52 addr := rn.ResourceAddr().String()
53 resourceNodes[addr] = append(resourceNodes[addr], rn)
54 }
55
56 for _, rc := range changes.Resources {
57 addr := rc.Addr
58 dk := rc.DeposedKey
59
60 log.Printf("[TRACE] DiffTransformer: found %s change for %s %s", rc.Action, addr, dk)
61
62 // Depending on the action we'll need some different combinations of
63 // nodes, because destroying uses a special node type separate from
64 // other actions.
65 var update, delete, createBeforeDestroy bool
66 switch rc.Action {
67 case plans.NoOp:
68 continue
69 case plans.Delete:
70 delete = true
71 case plans.DeleteThenCreate, plans.CreateThenDelete:
72 update = true
73 delete = true
74 createBeforeDestroy = (rc.Action == plans.CreateThenDelete)
75 default:
76 update = true
77 }
78
79 if dk != states.NotDeposed && update {
80 diags = diags.Append(tfdiags.Sourceless(
81 tfdiags.Error,
82 "Invalid planned change for deposed object",
83 fmt.Sprintf("The plan contains a non-delete change for %s deposed object %s. The only valid action for a deposed object is to destroy it, so this is a bug in Terraform.", addr, dk),
84 ))
85 continue
86 }
55 87
56 // Very important: add the module path for this resource to 88 // If we're going to do a create_before_destroy Replace operation then
57 // the address. Remove "root" from it. 89 // we need to allocate a DeposedKey to use to retain the
58 addr.Path = m.Path[1:] 90 // not-yet-destroyed prior object, so that the delete node can destroy
91 // _that_ rather than the newly-created node, which will be current
92 // by the time the delete node is visited.
93 if update && delete && createBeforeDestroy {
94 // In this case, variable dk will be the _pre-assigned_ DeposedKey
95 // that must be used if the update graph node deposes the current
96 // instance, which will then align with the same key we pass
97 // into the destroy node to ensure we destroy exactly the deposed
98 // object we expect.
99 if state != nil {
100 ris := state.ResourceInstance(addr)
101 if ris == nil {
102 // Should never happen, since we don't plan to replace an
103 // instance that doesn't exist yet.
104 diags = diags.Append(tfdiags.Sourceless(
105 tfdiags.Error,
106 "Invalid planned change",
107 fmt.Sprintf("The plan contains a replace change for %s, which doesn't exist yet. This is a bug in Terraform.", addr),
108 ))
109 continue
110 }
111
112 // Allocating a deposed key separately from using it can be racy
113 // in general, but we assume here that nothing except the apply
114 // node we instantiate below will actually make new deposed objects
115 // in practice, and so the set of already-used keys will not change
116 // between now and then.
117 dk = ris.FindUnusedDeposedKey()
118 } else {
119 // If we have no state at all yet then we can use _any_
120 // DeposedKey.
121 dk = states.NewDeposedKey()
122 }
123 }
59 124
60 // If we're destroying, add the destroy node 125 if update {
61 if inst.Destroy || inst.GetDestroyDeposed() { 126 // All actions except destroying the node type chosen by t.Concrete
62 abstract := &NodeAbstractResource{Addr: addr} 127 abstract := NewNodeAbstractResourceInstance(addr)
63 g.Add(&NodeDestroyResource{NodeAbstractResource: abstract}) 128 var node dag.Vertex = abstract
129 if f := t.Concrete; f != nil {
130 node = f(abstract)
64 } 131 }
65 132
66 // If we have changes, then add the applyable version 133 if createBeforeDestroy {
67 if len(inst.Attributes) > 0 { 134 // We'll attach our pre-allocated DeposedKey to the node if
68 // Add the resource to the graph 135 // it supports that. NodeApplyableResourceInstance is the
69 abstract := &NodeAbstractResource{Addr: addr} 136 // specific concrete node type we are looking for here really,
70 var node dag.Vertex = abstract 137 // since that's the only node type that might depose objects.
71 if f := t.Concrete; f != nil { 138 if dn, ok := node.(GraphNodeDeposer); ok {
72 node = f(abstract) 139 dn.SetPreallocatedDeposedKey(dk)
73 } 140 }
141 log.Printf("[TRACE] DiffTransformer: %s will be represented by %s, deposing prior object to %s", addr, dag.VertexName(node), dk)
142 } else {
143 log.Printf("[TRACE] DiffTransformer: %s will be represented by %s", addr, dag.VertexName(node))
144 }
74 145
75 nodes = append(nodes, node) 146 g.Add(node)
147 rsrcAddr := addr.ContainingResource().String()
148 for _, rsrcNode := range resourceNodes[rsrcAddr] {
149 g.Connect(dag.BasicEdge(node, rsrcNode))
150 }
151 }
152
153 if delete {
154 // Destroying always uses a destroy-specific node type, though
155 // which one depends on whether we're destroying a current object
156 // or a deposed object.
157 var node GraphNodeResourceInstance
158 abstract := NewNodeAbstractResourceInstance(addr)
159 if dk == states.NotDeposed {
160 node = &NodeDestroyResourceInstance{
161 NodeAbstractResourceInstance: abstract,
162 DeposedKey: dk,
163 }
164 node.(*NodeDestroyResourceInstance).ModifyCreateBeforeDestroy(createBeforeDestroy)
165 } else {
166 node = &NodeDestroyDeposedResourceInstanceObject{
167 NodeAbstractResourceInstance: abstract,
168 DeposedKey: dk,
169 }
170 }
171 if dk == states.NotDeposed {
172 log.Printf("[TRACE] DiffTransformer: %s will be represented for destruction by %s", addr, dag.VertexName(node))
173 } else {
174 log.Printf("[TRACE] DiffTransformer: %s deposed object %s will be represented for destruction by %s", addr, dk, dag.VertexName(node))
175 }
176 g.Add(node)
177 rsrcAddr := addr.ContainingResource().String()
178 for _, rsrcNode := range resourceNodes[rsrcAddr] {
179 // We connect this edge "forwards" (even though destroy dependencies
180 // are often inverted) because evaluating the resource node
181 // after the destroy node could cause an unnecessary husk of
182 // a resource state to be re-added.
183 g.Connect(dag.BasicEdge(node, rsrcNode))
76 } 184 }
77 } 185 }
78 }
79 186
80 // Add all the nodes to the graph
81 for _, n := range nodes {
82 g.Add(n)
83 } 187 }
84 188
85 return nil 189 log.Printf("[TRACE] DiffTransformer complete")
190
191 return diags.Err()
86} 192}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
index 3673771..c1945f0 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
@@ -2,7 +2,10 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "strings" 5
6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/tfdiags"
6) 9)
7 10
8// ImportProviderValidateTransformer is a GraphTransformer that goes through 11// ImportProviderValidateTransformer is a GraphTransformer that goes through
@@ -10,6 +13,8 @@ import (
10type ImportProviderValidateTransformer struct{} 13type ImportProviderValidateTransformer struct{}
11 14
12func (t *ImportProviderValidateTransformer) Transform(g *Graph) error { 15func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
16 var diags tfdiags.Diagnostics
17
13 for _, v := range g.Vertices() { 18 for _, v := range g.Vertices() {
14 // We only care about providers 19 // We only care about providers
15 pv, ok := v.(GraphNodeProvider) 20 pv, ok := v.(GraphNodeProvider)
@@ -24,15 +29,16 @@ func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
24 } 29 }
25 30
26 for _, ref := range rn.References() { 31 for _, ref := range rn.References() {
27 if !strings.HasPrefix(ref, "var.") { 32 if _, ok := ref.Subject.(addrs.InputVariable); !ok {
28 return fmt.Errorf( 33 diags = diags.Append(&hcl.Diagnostic{
29 "Provider %q depends on non-var %q. Providers for import can currently\n"+ 34 Severity: hcl.DiagError,
30 "only depend on variables or must be hardcoded. You can stop import\n"+ 35 Summary: "Invalid provider dependency for import",
31 "from loading configurations by specifying `-config=\"\"`.", 36 Detail: fmt.Sprintf("The configuration for %s depends on %s. Providers used with import must either have literal configuration or refer only to input variables.", pv.ProviderAddr(), ref.Subject.String()),
32 pv.ProviderName(), ref) 37 Subject: ref.SourceRange.ToHCL().Ptr(),
38 })
33 } 39 }
34 } 40 }
35 } 41 }
36 42
37 return nil 43 return diags.Err()
38} 44}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
index fcbff65..ab0ecae 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
@@ -2,6 +2,10 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5
6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/providers"
8 "github.com/hashicorp/terraform/tfdiags"
5) 9)
6 10
7// ImportStateTransformer is a GraphTransformer that adds nodes to the 11// ImportStateTransformer is a GraphTransformer that adds nodes to the
@@ -11,64 +15,68 @@ type ImportStateTransformer struct {
11} 15}
12 16
13func (t *ImportStateTransformer) Transform(g *Graph) error { 17func (t *ImportStateTransformer) Transform(g *Graph) error {
14 nodes := make([]*graphNodeImportState, 0, len(t.Targets))
15 for _, target := range t.Targets { 18 for _, target := range t.Targets {
16 addr, err := ParseResourceAddress(target.Addr) 19 // The ProviderAddr may not be supplied for non-aliased providers.
17 if err != nil { 20 // This will be populated if the targets come from the cli, but tests
18 return fmt.Errorf( 21 // may not specify implied provider addresses.
19 "failed to parse resource address '%s': %s", 22 providerAddr := target.ProviderAddr
20 target.Addr, err) 23 if providerAddr.ProviderConfig.Type == "" {
24 providerAddr = target.Addr.Resource.Resource.DefaultProviderConfig().Absolute(target.Addr.Module)
21 } 25 }
22 26
23 nodes = append(nodes, &graphNodeImportState{ 27 node := &graphNodeImportState{
24 Addr: addr, 28 Addr: target.Addr,
25 ID: target.ID, 29 ID: target.ID,
26 ProviderName: target.Provider, 30 ProviderAddr: providerAddr,
27 }) 31 }
28 } 32 g.Add(node)
29
30 // Build the graph vertices
31 for _, n := range nodes {
32 g.Add(n)
33 } 33 }
34
35 return nil 34 return nil
36} 35}
37 36
38type graphNodeImportState struct { 37type graphNodeImportState struct {
39 Addr *ResourceAddress // Addr is the resource address to import to 38 Addr addrs.AbsResourceInstance // Addr is the resource address to import into
40 ID string // ID is the ID to import as 39 ID string // ID is the ID to import as
41 ProviderName string // Provider string 40 ProviderAddr addrs.AbsProviderConfig // Provider address given by the user, or implied by the resource type
42 ResolvedProvider string // provider node address 41 ResolvedProvider addrs.AbsProviderConfig // provider node address after resolution
43 42
44 states []*InstanceState 43 states []providers.ImportedResource
45} 44}
46 45
46var (
47 _ GraphNodeSubPath = (*graphNodeImportState)(nil)
48 _ GraphNodeEvalable = (*graphNodeImportState)(nil)
49 _ GraphNodeProviderConsumer = (*graphNodeImportState)(nil)
50 _ GraphNodeDynamicExpandable = (*graphNodeImportState)(nil)
51)
52
47func (n *graphNodeImportState) Name() string { 53func (n *graphNodeImportState) Name() string {
48 return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID) 54 return fmt.Sprintf("%s (import id %q)", n.Addr, n.ID)
49} 55}
50 56
51func (n *graphNodeImportState) ProvidedBy() string { 57// GraphNodeProviderConsumer
52 return resourceProvider(n.Addr.Type, n.ProviderName) 58func (n *graphNodeImportState) ProvidedBy() (addrs.AbsProviderConfig, bool) {
59 // We assume that n.ProviderAddr has been properly populated here.
60 // It's the responsibility of the code creating a graphNodeImportState
61 // to populate this, possibly by calling DefaultProviderConfig() on the
62 // resource address to infer an implied provider from the resource type
63 // name.
64 return n.ProviderAddr, false
53} 65}
54 66
55func (n *graphNodeImportState) SetProvider(p string) { 67// GraphNodeProviderConsumer
56 n.ResolvedProvider = p 68func (n *graphNodeImportState) SetProvider(addr addrs.AbsProviderConfig) {
69 n.ResolvedProvider = addr
57} 70}
58 71
59// GraphNodeSubPath 72// GraphNodeSubPath
60func (n *graphNodeImportState) Path() []string { 73func (n *graphNodeImportState) Path() addrs.ModuleInstance {
61 return normalizeModulePath(n.Addr.Path) 74 return n.Addr.Module
62} 75}
63 76
64// GraphNodeEvalable impl. 77// GraphNodeEvalable impl.
65func (n *graphNodeImportState) EvalTree() EvalNode { 78func (n *graphNodeImportState) EvalTree() EvalNode {
66 var provider ResourceProvider 79 var provider providers.Interface
67 info := &InstanceInfo{
68 Id: fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
69 ModulePath: n.Path(),
70 Type: n.Addr.Type,
71 }
72 80
73 // Reset our states 81 // Reset our states
74 n.states = nil 82 n.states = nil
@@ -77,13 +85,13 @@ func (n *graphNodeImportState) EvalTree() EvalNode {
77 return &EvalSequence{ 85 return &EvalSequence{
78 Nodes: []EvalNode{ 86 Nodes: []EvalNode{
79 &EvalGetProvider{ 87 &EvalGetProvider{
80 Name: n.ResolvedProvider, 88 Addr: n.ResolvedProvider,
81 Output: &provider, 89 Output: &provider,
82 }, 90 },
83 &EvalImportState{ 91 &EvalImportState{
92 Addr: n.Addr.Resource,
84 Provider: &provider, 93 Provider: &provider,
85 Info: info, 94 ID: n.ID,
86 Id: n.ID,
87 Output: &n.states, 95 Output: &n.states,
88 }, 96 },
89 }, 97 },
@@ -97,6 +105,8 @@ func (n *graphNodeImportState) EvalTree() EvalNode {
97// resources they don't depend on anything else and refreshes are isolated 105// resources they don't depend on anything else and refreshes are isolated
98// so this is nearly a perfect use case for dynamic expand. 106// so this is nearly a perfect use case for dynamic expand.
99func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) { 107func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
108 var diags tfdiags.Diagnostics
109
100 g := &Graph{Path: ctx.Path()} 110 g := &Graph{Path: ctx.Path()}
101 111
102 // nameCounter is used to de-dup names in the state. 112 // nameCounter is used to de-dup names in the state.
@@ -105,11 +115,11 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
105 // Compile the list of addresses that we'll be inserting into the state. 115 // Compile the list of addresses that we'll be inserting into the state.
106 // We do this ahead of time so we can verify that we aren't importing 116 // We do this ahead of time so we can verify that we aren't importing
107 // something that already exists. 117 // something that already exists.
108 addrs := make([]*ResourceAddress, len(n.states)) 118 addrs := make([]addrs.AbsResourceInstance, len(n.states))
109 for i, state := range n.states { 119 for i, state := range n.states {
110 addr := *n.Addr 120 addr := n.Addr
111 if t := state.Ephemeral.Type; t != "" { 121 if t := state.TypeName; t != "" {
112 addr.Type = t 122 addr.Resource.Resource.Type = t
113 } 123 }
114 124
115 // Determine if we need to suffix the name to de-dup 125 // Determine if we need to suffix the name to de-dup
@@ -117,36 +127,31 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
117 count, ok := nameCounter[key] 127 count, ok := nameCounter[key]
118 if ok { 128 if ok {
119 count++ 129 count++
120 addr.Name += fmt.Sprintf("-%d", count) 130 addr.Resource.Resource.Name += fmt.Sprintf("-%d", count)
121 } 131 }
122 nameCounter[key] = count 132 nameCounter[key] = count
123 133
124 // Add it to our list 134 // Add it to our list
125 addrs[i] = &addr 135 addrs[i] = addr
126 } 136 }
127 137
128 // Verify that all the addresses are clear 138 // Verify that all the addresses are clear
129 state, lock := ctx.State() 139 state := ctx.State()
130 lock.RLock()
131 defer lock.RUnlock()
132 filter := &StateFilter{State: state}
133 for _, addr := range addrs { 140 for _, addr := range addrs {
134 result, err := filter.Filter(addr.String()) 141 existing := state.ResourceInstance(addr)
135 if err != nil { 142 if existing != nil {
136 return nil, fmt.Errorf("Error verifying address %s: %s", addr, err) 143 diags = diags.Append(tfdiags.Sourceless(
137 } 144 tfdiags.Error,
138 145 "Resource already managed by Terraform",
139 // Go through the filter results and it is an error if we find 146 fmt.Sprintf("Terraform is already managing a remote object for %s. To import to this address you must first remove the existing object from the state.", addr),
140 // a matching InstanceState, meaning that we would have a collision. 147 ))
141 for _, r := range result { 148 continue
142 if _, ok := r.Value.(*InstanceState); ok {
143 return nil, fmt.Errorf(
144 "Can't import %s, would collide with an existing resource.\n\n"+
145 "Please remove or rename this resource before continuing.",
146 addr)
147 }
148 } 149 }
149 } 150 }
151 if diags.HasErrors() {
152 // Bail out early, then.
153 return nil, diags.Err()
154 }
150 155
151 // For each of the states, we add a node to handle the refresh/add to state. 156 // For each of the states, we add a node to handle the refresh/add to state.
152 // "n.states" is populated by our own EvalTree with the result of 157 // "n.states" is populated by our own EvalTree with the result of
@@ -154,10 +159,8 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
154 // is safe. 159 // is safe.
155 for i, state := range n.states { 160 for i, state := range n.states {
156 g.Add(&graphNodeImportStateSub{ 161 g.Add(&graphNodeImportStateSub{
157 Target: addrs[i], 162 TargetAddr: addrs[i],
158 Path_: n.Path(),
159 State: state, 163 State: state,
160 ProviderName: n.ProviderName,
161 ResolvedProvider: n.ResolvedProvider, 164 ResolvedProvider: n.ResolvedProvider,
162 }) 165 })
163 } 166 }
@@ -169,79 +172,67 @@ func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
169 } 172 }
170 173
171 // Done! 174 // Done!
172 return g, nil 175 return g, diags.Err()
173} 176}
174 177
175// graphNodeImportStateSub is the sub-node of graphNodeImportState 178// graphNodeImportStateSub is the sub-node of graphNodeImportState
176// and is part of the subgraph. This node is responsible for refreshing 179// and is part of the subgraph. This node is responsible for refreshing
177// and adding a resource to the state once it is imported. 180// and adding a resource to the state once it is imported.
178type graphNodeImportStateSub struct { 181type graphNodeImportStateSub struct {
179 Target *ResourceAddress 182 TargetAddr addrs.AbsResourceInstance
180 State *InstanceState 183 State providers.ImportedResource
181 Path_ []string 184 ResolvedProvider addrs.AbsProviderConfig
182 ProviderName string
183 ResolvedProvider string
184} 185}
185 186
187var (
188 _ GraphNodeSubPath = (*graphNodeImportStateSub)(nil)
189 _ GraphNodeEvalable = (*graphNodeImportStateSub)(nil)
190)
191
186func (n *graphNodeImportStateSub) Name() string { 192func (n *graphNodeImportStateSub) Name() string {
187 return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID) 193 return fmt.Sprintf("import %s result", n.TargetAddr)
188} 194}
189 195
190func (n *graphNodeImportStateSub) Path() []string { 196func (n *graphNodeImportStateSub) Path() addrs.ModuleInstance {
191 return n.Path_ 197 return n.TargetAddr.Module
192} 198}
193 199
194// GraphNodeEvalable impl. 200// GraphNodeEvalable impl.
195func (n *graphNodeImportStateSub) EvalTree() EvalNode { 201func (n *graphNodeImportStateSub) EvalTree() EvalNode {
196 // If the Ephemeral type isn't set, then it is an error 202 // If the Ephemeral type isn't set, then it is an error
197 if n.State.Ephemeral.Type == "" { 203 if n.State.TypeName == "" {
198 err := fmt.Errorf( 204 err := fmt.Errorf("import of %s didn't set type", n.TargetAddr.String())
199 "import of %s didn't set type for %s",
200 n.Target.String(), n.State.ID)
201 return &EvalReturnError{Error: &err} 205 return &EvalReturnError{Error: &err}
202 } 206 }
203 207
204 // DeepCopy so we're only modifying our local copy 208 state := n.State.AsInstanceObject()
205 state := n.State.DeepCopy()
206 209
207 // Build the resource info 210 var provider providers.Interface
208 info := &InstanceInfo{ 211 var providerSchema *ProviderSchema
209 Id: fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
210 ModulePath: n.Path_,
211 Type: n.State.Ephemeral.Type,
212 }
213
214 // Key is the resource key
215 key := &ResourceStateKey{
216 Name: n.Target.Name,
217 Type: info.Type,
218 Index: n.Target.Index,
219 }
220
221 // The eval sequence
222 var provider ResourceProvider
223 return &EvalSequence{ 212 return &EvalSequence{
224 Nodes: []EvalNode{ 213 Nodes: []EvalNode{
225 &EvalGetProvider{ 214 &EvalGetProvider{
226 Name: n.ResolvedProvider, 215 Addr: n.ResolvedProvider,
227 Output: &provider, 216 Output: &provider,
217 Schema: &providerSchema,
228 }, 218 },
229 &EvalRefresh{ 219 &EvalRefresh{
230 Provider: &provider, 220 Addr: n.TargetAddr.Resource,
231 State: &state, 221 ProviderAddr: n.ResolvedProvider,
232 Info: info, 222 Provider: &provider,
233 Output: &state, 223 ProviderSchema: &providerSchema,
224 State: &state,
225 Output: &state,
234 }, 226 },
235 &EvalImportStateVerify{ 227 &EvalImportStateVerify{
236 Info: info, 228 Addr: n.TargetAddr.Resource,
237 Id: n.State.ID,
238 State: &state, 229 State: &state,
239 }, 230 },
240 &EvalWriteState{ 231 &EvalWriteState{
241 Name: key.String(), 232 Addr: n.TargetAddr.Resource,
242 ResourceType: info.Type, 233 ProviderAddr: n.ResolvedProvider,
243 Provider: n.ResolvedProvider, 234 ProviderSchema: &providerSchema,
244 State: &state, 235 State: &state,
245 }, 236 },
246 }, 237 },
247 } 238 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go
index 95ecfc0..84eb26b 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_local.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_local.go
@@ -1,37 +1,45 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/configs"
5) 5)
6 6
7// LocalTransformer is a GraphTransformer that adds all the local values 7// LocalTransformer is a GraphTransformer that adds all the local values
8// from the configuration to the graph. 8// from the configuration to the graph.
9type LocalTransformer struct { 9type LocalTransformer struct {
10 Module *module.Tree 10 Config *configs.Config
11} 11}
12 12
13func (t *LocalTransformer) Transform(g *Graph) error { 13func (t *LocalTransformer) Transform(g *Graph) error {
14 return t.transformModule(g, t.Module) 14 return t.transformModule(g, t.Config)
15} 15}
16 16
17func (t *LocalTransformer) transformModule(g *Graph, m *module.Tree) error { 17func (t *LocalTransformer) transformModule(g *Graph, c *configs.Config) error {
18 if m == nil { 18 if c == nil {
19 // Can't have any locals if there's no config 19 // Can't have any locals if there's no config
20 return nil 20 return nil
21 } 21 }
22 22
23 for _, local := range m.Config().Locals { 23 // Our addressing system distinguishes between modules and module instances,
24 // but we're not yet ready to make that distinction here (since we don't
25 // support "count"/"for_each" on modules) and so we just do a naive
26 // transform of the module path into a module instance path, assuming that
27 // no keys are in use. This should be removed when "count" and "for_each"
28 // are implemented for modules.
29 path := c.Path.UnkeyedInstanceShim()
30
31 for _, local := range c.Module.Locals {
32 addr := path.LocalValue(local.Name)
24 node := &NodeLocal{ 33 node := &NodeLocal{
25 PathValue: normalizeModulePath(m.Path()), 34 Addr: addr,
26 Config: local, 35 Config: local,
27 } 36 }
28
29 g.Add(node) 37 g.Add(node)
30 } 38 }
31 39
32 // Also populate locals for child modules 40 // Also populate locals for child modules
33 for _, c := range m.Children() { 41 for _, cc := range c.Children {
34 if err := t.transformModule(g, c); err != nil { 42 if err := t.transformModule(g, cc); err != nil {
35 return err 43 return err
36 } 44 }
37 } 45 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
index 467950b..a994bd4 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
@@ -1,46 +1,54 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "log" 4 "fmt"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/hcl2/hcl/hclsyntax"
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/tfdiags"
8 "github.com/hashicorp/terraform/dag" 8 "github.com/zclconf/go-cty/cty"
9
10 "github.com/hashicorp/hcl2/hcl"
11 "github.com/hashicorp/terraform/configs"
9) 12)
10 13
11// ModuleVariableTransformer is a GraphTransformer that adds all the variables 14// ModuleVariableTransformer is a GraphTransformer that adds all the variables
12// in the configuration to the graph. 15// in the configuration to the graph.
13// 16//
14// This only adds variables that are referenced by other things in the graph. 17// Any "variable" block present in any non-root module is included here, even
15// If a module variable is not referenced, it won't be added to the graph. 18// if a particular variable is not referenced from anywhere.
19//
20// The transform will produce errors if a call to a module does not conform
21// to the expected set of arguments, but this transformer is not in a good
22// position to return errors and so the validate walk should include specific
23// steps for validating module blocks, separate from this transform.
16type ModuleVariableTransformer struct { 24type ModuleVariableTransformer struct {
17 Module *module.Tree 25 Config *configs.Config
18
19 DisablePrune bool // True if pruning unreferenced should be disabled
20} 26}
21 27
22func (t *ModuleVariableTransformer) Transform(g *Graph) error { 28func (t *ModuleVariableTransformer) Transform(g *Graph) error {
23 return t.transform(g, nil, t.Module) 29 return t.transform(g, nil, t.Config)
24} 30}
25 31
26func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error { 32func (t *ModuleVariableTransformer) transform(g *Graph, parent, c *configs.Config) error {
27 // If no config, no variables 33 // We can have no variables if we have no configuration.
28 if m == nil { 34 if c == nil {
29 return nil 35 return nil
30 } 36 }
31 37
32 // Transform all the children. This must be done BEFORE the transform 38 // Transform all the children first.
33 // above since child module variables can reference parent module variables. 39 for _, cc := range c.Children {
34 for _, c := range m.Children() { 40 if err := t.transform(g, c, cc); err != nil {
35 if err := t.transform(g, m, c); err != nil {
36 return err 41 return err
37 } 42 }
38 } 43 }
39 44
45 // If we're processing anything other than the root module then we'll
46 // add graph nodes for variables defined inside. (Variables for the root
47 // module are dealt with in RootVariableTransformer).
40 // If we have a parent, we can determine if a module variable is being 48 // If we have a parent, we can determine if a module variable is being
41 // used, so we transform this. 49 // used, so we transform this.
42 if parent != nil { 50 if parent != nil {
43 if err := t.transformSingle(g, parent, m); err != nil { 51 if err := t.transformSingle(g, parent, c); err != nil {
44 return err 52 return err
45 } 53 }
46 } 54 }
@@ -48,71 +56,69 @@ func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree)
48 return nil 56 return nil
49} 57}
50 58
51func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error { 59func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, c *configs.Config) error {
52 // If we have no vars, we're done! 60
53 vars := m.Config().Variables 61 // Our addressing system distinguishes between modules and module instances,
54 if len(vars) == 0 { 62 // but we're not yet ready to make that distinction here (since we don't
55 log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path()) 63 // support "count"/"for_each" on modules) and so we just do a naive
56 return nil 64 // transform of the module path into a module instance path, assuming that
65 // no keys are in use. This should be removed when "count" and "for_each"
66 // are implemented for modules.
67 path := c.Path.UnkeyedInstanceShim()
68 _, call := path.Call()
69
70 // Find the call in the parent module configuration, so we can get the
71 // expressions given for each input variable at the call site.
72 callConfig, exists := parent.Module.ModuleCalls[call.Name]
73 if !exists {
74 // This should never happen, since it indicates an improperly-constructed
75 // configuration tree.
76 panic(fmt.Errorf("no module call block found for %s", path))
57 } 77 }
58 78
59 // Look for usage of this module 79 // We need to construct a schema for the expected call arguments based on
60 var mod *config.Module 80 // the configured variables in our config, which we can then use to
61 for _, modUse := range parent.Config().Modules { 81 // decode the content of the call block.
62 if modUse.Name == m.Name() { 82 schema := &hcl.BodySchema{}
63 mod = modUse 83 for _, v := range c.Module.Variables {
64 break 84 schema.Attributes = append(schema.Attributes, hcl.AttributeSchema{
65 } 85 Name: v.Name,
86 Required: v.Default == cty.NilVal,
87 })
66 } 88 }
67 if mod == nil { 89
68 log.Printf("[INFO] Module %#v not used, not adding variables", m.Path()) 90 content, contentDiags := callConfig.Config.Content(schema)
69 return nil 91 if contentDiags.HasErrors() {
92 // Validation code elsewhere should deal with any errors before we
93 // get in here, but we'll report them out here just in case, to
94 // avoid crashes.
95 var diags tfdiags.Diagnostics
96 diags = diags.Append(contentDiags)
97 return diags.Err()
70 } 98 }
71 99
72 // Build the reference map so we can determine if we're referencing things. 100 for _, v := range c.Module.Variables {
73 refMap := NewReferenceMap(g.Vertices()) 101 var expr hcl.Expression
74 102 if attr := content.Attributes[v.Name]; attr != nil {
75 // Add all variables here 103 expr = attr.Expr
76 for _, v := range vars { 104 } else {
77 // Determine the value of the variable. If it isn't in the 105 // No expression provided for this variable, so we'll make a
78 // configuration then it was never set and that's not a problem. 106 // synthetic one using the variable's default value.
79 var value *config.RawConfig 107 expr = &hclsyntax.LiteralValueExpr{
80 if raw, ok := mod.RawConfig.Raw[v.Name]; ok { 108 Val: v.Default,
81 var err error 109 SrcRange: v.DeclRange, // This is not exact, but close enough
82 value, err = config.NewRawConfig(map[string]interface{}{
83 v.Name: raw,
84 })
85 if err != nil {
86 // This shouldn't happen because it is already in
87 // a RawConfig above meaning it worked once before.
88 panic(err)
89 } 110 }
90 } 111 }
91 112
92 // Build the node. 113 // For now we treat all module variables as "applyable", even though
93 // 114 // such nodes are valid to use on other walks too. We may specialize
94 // NOTE: For now this is just an "applyable" variable. As we build 115 // this in future if we find reasons to employ different behaviors
95 // new graph builders for the other operations I suspect we'll 116 // in different scenarios.
96 // find a way to parameterize this, require new transforms, etc.
97 node := &NodeApplyableModuleVariable{ 117 node := &NodeApplyableModuleVariable{
98 PathValue: normalizeModulePath(m.Path()), 118 Addr: path.InputVariable(v.Name),
99 Config: v, 119 Config: v,
100 Value: value, 120 Expr: expr,
101 Module: t.Module,
102 } 121 }
103
104 if !t.DisablePrune {
105 // If the node is not referenced by anything, then we don't need
106 // to include it since it won't be used.
107 if matches := refMap.ReferencedBy(node); len(matches) == 0 {
108 log.Printf(
109 "[INFO] Not including %q in graph, nothing depends on it",
110 dag.VertexName(node))
111 continue
112 }
113 }
114
115 // Add it!
116 g.Add(node) 122 g.Add(node)
117 } 123 }
118 124
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
index b256a25..eec762e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
@@ -3,7 +3,9 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/states"
7) 9)
8 10
9// OrphanResourceCountTransformer is a GraphTransformer that adds orphans 11// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
@@ -14,95 +16,106 @@ import (
14// This transform assumes that if an element in the state is within the count 16// This transform assumes that if an element in the state is within the count
15// bounds given, that it is not an orphan. 17// bounds given, that it is not an orphan.
16type OrphanResourceCountTransformer struct { 18type OrphanResourceCountTransformer struct {
17 Concrete ConcreteResourceNodeFunc 19 Concrete ConcreteResourceInstanceNodeFunc
18 20
19 Count int // Actual count of the resource 21 Count int // Actual count of the resource, or -1 if count is not set at all
20 Addr *ResourceAddress // Addr of the resource to look for orphans 22 Addr addrs.AbsResource // Addr of the resource to look for orphans
21 State *State // Full global state 23 State *states.State // Full global state
22} 24}
23 25
24func (t *OrphanResourceCountTransformer) Transform(g *Graph) error { 26func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
25 log.Printf("[TRACE] OrphanResourceCount: Starting...") 27 rs := t.State.Resource(t.Addr)
28 if rs == nil {
29 return nil // Resource doesn't exist in state, so nothing to do!
30 }
26 31
27 // Grab the module in the state just for this resource address 32 haveKeys := make(map[addrs.InstanceKey]struct{})
28 ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path)) 33 for key := range rs.Instances {
29 if ms == nil { 34 haveKeys[key] = struct{}{}
30 // If no state, there can't be orphans
31 return nil
32 } 35 }
33 36
34 orphanIndex := -1 37 if t.Count < 0 {
35 if t.Count == 1 { 38 return t.transformNoCount(haveKeys, g)
36 orphanIndex = 0 39 }
40 if t.Count == 0 {
41 return t.transformZeroCount(haveKeys, g)
37 } 42 }
43 return t.transformCount(haveKeys, g)
44}
45
46func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
47 // Due to the logic in Transform, we only get in here if our count is
48 // at least one.
38 49
39 // Go through the orphans and add them all to the state 50 _, have0Key := haveKeys[addrs.IntKey(0)]
40 for key, _ := range ms.Resources { 51
41 // Build the address 52 for key := range haveKeys {
42 addr, err := parseResourceAddressInternal(key) 53 if key == addrs.NoKey && !have0Key {
43 if err != nil { 54 // If we have no 0-key then we will accept a no-key instance
44 return err 55 // as an alias for it.
56 continue
45 } 57 }
46 addr.Path = ms.Path[1:]
47 58
48 // Copy the address for comparison. If we aren't looking at 59 i, isInt := key.(addrs.IntKey)
49 // the same resource, then just ignore it. 60 if isInt && int(i) < t.Count {
50 addrCopy := addr.Copy()
51 addrCopy.Index = -1
52 if !addrCopy.Equals(t.Addr) {
53 continue 61 continue
54 } 62 }
55 63
56 log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr) 64 abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key))
65 var node dag.Vertex = abstract
66 if f := t.Concrete; f != nil {
67 node = f(abstract)
68 }
69 log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node)
70 g.Add(node)
71 }
72
73 return nil
74}
57 75
58 idx := addr.Index 76func (t *OrphanResourceCountTransformer) transformZeroCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
77 // This case is easy: we need to orphan any keys we have at all.
59 78
60 // If we have zero and the index here is 0 or 1, then we 79 for key := range haveKeys {
61 // change the index to a high number so that we treat it as 80 abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key))
62 // an orphan. 81 var node dag.Vertex = abstract
63 if t.Count <= 0 && idx <= 0 { 82 if f := t.Concrete; f != nil {
64 idx = t.Count + 1 83 node = f(abstract)
65 } 84 }
85 log.Printf("[TRACE] OrphanResourceCount(zero): adding %s as %T", t.Addr, node)
86 g.Add(node)
87 }
66 88
67 // If we have a count greater than 0 and we're at the zero index, 89 return nil
68 // we do a special case check to see if our state also has a 90}
69 // -1 index value. If so, this is an orphan because our rules are
70 // that if both a -1 and 0 are in the state, the 0 is destroyed.
71 if t.Count > 0 && idx == orphanIndex {
72 // This is a piece of cleverness (beware), but its simple:
73 // if orphanIndex is 0, then check -1, else check 0.
74 checkIndex := (orphanIndex + 1) * -1
75
76 key := &ResourceStateKey{
77 Name: addr.Name,
78 Type: addr.Type,
79 Mode: addr.Mode,
80 Index: checkIndex,
81 }
82
83 if _, ok := ms.Resources[key.String()]; ok {
84 // We have a -1 index, too. Make an arbitrarily high
85 // index so that we always mark this as an orphan.
86 log.Printf(
87 "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
88 addr, orphanIndex)
89 idx = t.Count + 1
90 }
91 }
92 91
93 // If the index is within the count bounds, it is not an orphan 92func (t *OrphanResourceCountTransformer) transformNoCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
94 if idx < t.Count { 93 // Negative count indicates that count is not set at all, in which
94 // case we expect to have a single instance with no key set at all.
95 // However, we'll also accept an instance with key 0 set as an alias
96 // for it, in case the user has just deleted the "count" argument and
97 // so wants to keep the first instance in the set.
98
99 _, haveNoKey := haveKeys[addrs.NoKey]
100 _, have0Key := haveKeys[addrs.IntKey(0)]
101 keepKey := addrs.NoKey
102 if have0Key && !haveNoKey {
103 // If we don't have a no-key instance then we can use the 0-key instance
104 // instead.
105 keepKey = addrs.IntKey(0)
106 }
107
108 for key := range haveKeys {
109 if key == keepKey {
95 continue 110 continue
96 } 111 }
97 112
98 // Build the abstract node and the concrete one 113 abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key))
99 abstract := &NodeAbstractResource{Addr: addr}
100 var node dag.Vertex = abstract 114 var node dag.Vertex = abstract
101 if f := t.Concrete; f != nil { 115 if f := t.Concrete; f != nil {
102 node = f(abstract) 116 node = f(abstract)
103 } 117 }
104 118 log.Printf("[TRACE] OrphanResourceCount(no-count): adding %s as %T", t.Addr, node)
105 // Add it to the graph
106 g.Add(node) 119 g.Add(node)
107 } 120 }
108 121
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
index aea2bd0..c675409 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
@@ -3,16 +3,17 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/terraform/addrs"
7 "github.com/hashicorp/terraform/config/module" 7 "github.com/hashicorp/terraform/configs"
8 "github.com/hashicorp/terraform/states"
8) 9)
9 10
10// OrphanOutputTransformer finds the outputs that aren't present 11// OrphanOutputTransformer finds the outputs that aren't present
11// in the given config that are in the state and adds them to the graph 12// in the given config that are in the state and adds them to the graph
12// for deletion. 13// for deletion.
13type OrphanOutputTransformer struct { 14type OrphanOutputTransformer struct {
14 Module *module.Tree // Root module 15 Config *configs.Config // Root of config tree
15 State *State // State is the root state 16 State *states.State // State is the root state
16} 17}
17 18
18func (t *OrphanOutputTransformer) Transform(g *Graph) error { 19func (t *OrphanOutputTransformer) Transform(g *Graph) error {
@@ -29,24 +30,30 @@ func (t *OrphanOutputTransformer) Transform(g *Graph) error {
29 return nil 30 return nil
30} 31}
31 32
32func (t *OrphanOutputTransformer) transform(g *Graph, ms *ModuleState) error { 33func (t *OrphanOutputTransformer) transform(g *Graph, ms *states.Module) error {
33 if ms == nil { 34 if ms == nil {
34 return nil 35 return nil
35 } 36 }
36 37
37 path := normalizeModulePath(ms.Path) 38 moduleAddr := ms.Addr
38 39
39 // Get the config for this path, which is nil if the entire module has been 40 // Get the config for this path, which is nil if the entire module has been
40 // removed. 41 // removed.
41 var c *config.Config 42 var outputs map[string]*configs.Output
42 if m := t.Module.Child(path[1:]); m != nil { 43 if c := t.Config.DescendentForInstance(moduleAddr); c != nil {
43 c = m.Config() 44 outputs = c.Module.Outputs
44 } 45 }
45 46
46 // add all the orphaned outputs to the graph 47 // An output is "orphaned" if it's present in the state but not declared
47 for _, n := range ms.RemovedOutputs(c) { 48 // in the configuration.
48 g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path}) 49 for name := range ms.OutputValues {
50 if _, exists := outputs[name]; exists {
51 continue
52 }
49 53
54 g.Add(&NodeOutputOrphan{
55 Addr: addrs.OutputValue{Name: name}.Absolute(moduleAddr),
56 })
50 } 57 }
51 58
52 return nil 59 return nil
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
index e42d3c8..50df178 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
@@ -1,34 +1,43 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config" 4 "log"
5 "github.com/hashicorp/terraform/config/module" 5
6 "github.com/hashicorp/terraform/configs"
6 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
8 "github.com/hashicorp/terraform/states"
7) 9)
8 10
9// OrphanResourceTransformer is a GraphTransformer that adds resource 11// OrphanResourceInstanceTransformer is a GraphTransformer that adds orphaned
10// orphans to the graph. A resource orphan is a resource that is 12// resource instances to the graph. An "orphan" is an instance that is present
11// represented in the state but not in the configuration. 13// in the state but belongs to a resource that is no longer present in the
12//
13// This only adds orphans that have no representation at all in the
14// configuration. 14// configuration.
15type OrphanResourceTransformer struct { 15//
16 Concrete ConcreteResourceNodeFunc 16// This is not the transformer that deals with "count orphans" (instances that
17// are no longer covered by a resource's "count" or "for_each" setting); that's
18// handled instead by OrphanResourceCountTransformer.
19type OrphanResourceInstanceTransformer struct {
20 Concrete ConcreteResourceInstanceNodeFunc
17 21
18 // State is the global state. We require the global state to 22 // State is the global state. We require the global state to
19 // properly find module orphans at our path. 23 // properly find module orphans at our path.
20 State *State 24 State *states.State
21 25
22 // Module is the root module. We'll look up the proper configuration 26 // Config is the root node in the configuration tree. We'll look up
23 // using the graph path. 27 // the appropriate note in this tree using the path in each node.
24 Module *module.Tree 28 Config *configs.Config
25} 29}
26 30
27func (t *OrphanResourceTransformer) Transform(g *Graph) error { 31func (t *OrphanResourceInstanceTransformer) Transform(g *Graph) error {
28 if t.State == nil { 32 if t.State == nil {
29 // If the entire state is nil, there can't be any orphans 33 // If the entire state is nil, there can't be any orphans
30 return nil 34 return nil
31 } 35 }
36 if t.Config == nil {
37 // Should never happen: we can't be doing any Terraform operations
38 // without at least an empty configuration.
39 panic("OrphanResourceInstanceTransformer used without setting Config")
40 }
32 41
33 // Go through the modules and for each module transform in order 42 // Go through the modules and for each module transform in order
34 // to add the orphan. 43 // to add the orphan.
@@ -41,38 +50,130 @@ func (t *OrphanResourceTransformer) Transform(g *Graph) error {
41 return nil 50 return nil
42} 51}
43 52
44func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error { 53func (t *OrphanResourceInstanceTransformer) transform(g *Graph, ms *states.Module) error {
45 if ms == nil { 54 if ms == nil {
46 return nil 55 return nil
47 } 56 }
48 57
49 // Get the configuration for this path. The configuration might be 58 moduleAddr := ms.Addr
59
60 // Get the configuration for this module. The configuration might be
50 // nil if the module was removed from the configuration. This is okay, 61 // nil if the module was removed from the configuration. This is okay,
51 // this just means that every resource is an orphan. 62 // this just means that every resource is an orphan.
52 var c *config.Config 63 var m *configs.Module
53 if m := t.Module.Child(ms.Path[1:]); m != nil { 64 if c := t.Config.DescendentForInstance(moduleAddr); c != nil {
54 c = m.Config() 65 m = c.Module
55 } 66 }
56 67
57 // Go through the orphans and add them all to the state 68 // An "orphan" is a resource that is in the state but not the configuration,
58 for _, key := range ms.Orphans(c) { 69 // so we'll walk the state resources and try to correlate each of them
59 // Build the abstract resource 70 // with a configuration block. Each orphan gets a node in the graph whose
60 addr, err := parseResourceAddressInternal(key) 71 // type is decided by t.Concrete.
61 if err != nil { 72 //
62 return err 73 // We don't handle orphans related to changes in the "count" and "for_each"
74 // pseudo-arguments here. They are handled by OrphanResourceCountTransformer.
75 for _, rs := range ms.Resources {
76 if m != nil {
77 if r := m.ResourceByAddr(rs.Addr); r != nil {
78 continue
79 }
63 } 80 }
64 addr.Path = ms.Path[1:]
65 81
66 // Build the abstract node and the concrete one 82 for key := range rs.Instances {
67 abstract := &NodeAbstractResource{Addr: addr} 83 addr := rs.Addr.Instance(key).Absolute(moduleAddr)
68 var node dag.Vertex = abstract 84 abstract := NewNodeAbstractResourceInstance(addr)
69 if f := t.Concrete; f != nil { 85 var node dag.Vertex = abstract
70 node = f(abstract) 86 if f := t.Concrete; f != nil {
87 node = f(abstract)
88 }
89 log.Printf("[TRACE] OrphanResourceInstanceTransformer: adding single-instance orphan node for %s", addr)
90 g.Add(node)
71 } 91 }
92 }
93
94 return nil
95}
96
97// OrphanResourceTransformer is a GraphTransformer that adds orphaned
98// resources to the graph. An "orphan" is a resource that is present in
99// the state but no longer present in the config.
100//
101// This is separate to OrphanResourceInstanceTransformer in that it deals with
102// whole resources, rather than individual instances of resources. Orphan
103// resource nodes are only used during apply to clean up leftover empty
104// resource state skeletons, after all of the instances inside have been
105// removed.
106//
107// This transformer will also create edges in the graph to any pre-existing
108// node that creates or destroys the entire orphaned resource or any of its
109// instances, to ensure that the "orphan-ness" of a resource is always dealt
110// with after all other aspects of it.
111type OrphanResourceTransformer struct {
112 Concrete ConcreteResourceNodeFunc
113
114 // State is the global state.
115 State *states.State
72 116
73 // Add it to the graph 117 // Config is the root node in the configuration tree.
74 g.Add(node) 118 Config *configs.Config
119}
120
121func (t *OrphanResourceTransformer) Transform(g *Graph) error {
122 if t.State == nil {
123 // If the entire state is nil, there can't be any orphans
124 return nil
125 }
126 if t.Config == nil {
127 // Should never happen: we can't be doing any Terraform operations
128 // without at least an empty configuration.
129 panic("OrphanResourceTransformer used without setting Config")
130 }
131
132 // We'll first collect up the existing nodes for each resource so we can
133 // create dependency edges for any new nodes we create.
134 deps := map[string][]dag.Vertex{}
135 for _, v := range g.Vertices() {
136 switch tv := v.(type) {
137 case GraphNodeResourceInstance:
138 k := tv.ResourceInstanceAddr().ContainingResource().String()
139 deps[k] = append(deps[k], v)
140 case GraphNodeResource:
141 k := tv.ResourceAddr().String()
142 deps[k] = append(deps[k], v)
143 case GraphNodeDestroyer:
144 k := tv.DestroyAddr().ContainingResource().String()
145 deps[k] = append(deps[k], v)
146 }
147 }
148
149 for _, ms := range t.State.Modules {
150 moduleAddr := ms.Addr
151
152 mc := t.Config.DescendentForInstance(moduleAddr) // might be nil if whole module has been removed
153
154 for _, rs := range ms.Resources {
155 if mc != nil {
156 if r := mc.Module.ResourceByAddr(rs.Addr); r != nil {
157 // It's in the config, so nothing to do for this one.
158 continue
159 }
160 }
161
162 addr := rs.Addr.Absolute(moduleAddr)
163 abstract := NewNodeAbstractResource(addr)
164 var node dag.Vertex = abstract
165 if f := t.Concrete; f != nil {
166 node = f(abstract)
167 }
168 log.Printf("[TRACE] OrphanResourceTransformer: adding whole-resource orphan node for %s", addr)
169 g.Add(node)
170 for _, dn := range deps[addr.String()] {
171 log.Printf("[TRACE] OrphanResourceTransformer: node %q depends on %q", dag.VertexName(node), dag.VertexName(dn))
172 g.Connect(dag.BasicEdge(node, dn))
173 }
174 }
75 } 175 }
76 176
77 return nil 177 return nil
178
78} 179}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
index faa25e4..ed93cdb 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
@@ -3,7 +3,7 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config/module" 6 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
8) 8)
9 9
@@ -14,42 +14,42 @@ import (
14// aren't changing since there is no downside: the state will be available 14// aren't changing since there is no downside: the state will be available
15// even if the dependent items aren't changing. 15// even if the dependent items aren't changing.
16type OutputTransformer struct { 16type OutputTransformer struct {
17 Module *module.Tree 17 Config *configs.Config
18} 18}
19 19
20func (t *OutputTransformer) Transform(g *Graph) error { 20func (t *OutputTransformer) Transform(g *Graph) error {
21 return t.transform(g, t.Module) 21 return t.transform(g, t.Config)
22} 22}
23 23
24func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error { 24func (t *OutputTransformer) transform(g *Graph, c *configs.Config) error {
25 // If no config, no outputs 25 // If we have no config then there can be no outputs.
26 if m == nil { 26 if c == nil {
27 return nil 27 return nil
28 } 28 }
29 29
30 // Transform all the children. We must do this first because 30 // Transform all the children. We must do this first because
31 // we can reference module outputs and they must show up in the 31 // we can reference module outputs and they must show up in the
32 // reference map. 32 // reference map.
33 for _, c := range m.Children() { 33 for _, cc := range c.Children {
34 if err := t.transform(g, c); err != nil { 34 if err := t.transform(g, cc); err != nil {
35 return err 35 return err
36 } 36 }
37 } 37 }
38 38
39 // If we have no outputs, we're done! 39 // Our addressing system distinguishes between modules and module instances,
40 os := m.Config().Outputs 40 // but we're not yet ready to make that distinction here (since we don't
41 if len(os) == 0 { 41 // support "count"/"for_each" on modules) and so we just do a naive
42 return nil 42 // transform of the module path into a module instance path, assuming that
43 } 43 // no keys are in use. This should be removed when "count" and "for_each"
44 // are implemented for modules.
45 path := c.Path.UnkeyedInstanceShim()
44 46
45 // Add all outputs here 47 for _, o := range c.Module.Outputs {
46 for _, o := range os { 48 addr := path.OutputValue(o.Name)
47 node := &NodeApplyableOutput{ 49 node := &NodeApplyableOutput{
48 PathValue: normalizeModulePath(m.Path()), 50 Addr: addr,
49 Config: o, 51 Config: o,
50 } 52 }
51
52 // Add it!
53 g.Add(node) 53 g.Add(node)
54 } 54 }
55 55
@@ -71,8 +71,8 @@ func (t *DestroyOutputTransformer) Transform(g *Graph) error {
71 71
72 // create the destroy node for this output 72 // create the destroy node for this output
73 node := &NodeDestroyableOutput{ 73 node := &NodeDestroyableOutput{
74 PathValue: output.PathValue, 74 Addr: output.Addr,
75 Config: output.Config, 75 Config: output.Config,
76 } 76 }
77 77
78 log.Printf("[TRACE] creating %s", node.Name()) 78 log.Printf("[TRACE] creating %s", node.Name())
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
index c4772b4..6a4fb47 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
@@ -1,22 +1,21 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "errors"
5 "fmt" 4 "fmt"
6 "log" 5 "log"
7 "strings"
8 6
9 "github.com/hashicorp/go-multierror" 7 "github.com/hashicorp/hcl2/hcl"
10 "github.com/hashicorp/terraform/config" 8 "github.com/hashicorp/terraform/addrs"
11 "github.com/hashicorp/terraform/config/module" 9 "github.com/hashicorp/terraform/configs"
12 "github.com/hashicorp/terraform/dag" 10 "github.com/hashicorp/terraform/dag"
11 "github.com/hashicorp/terraform/tfdiags"
13) 12)
14 13
15func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, mod *module.Tree) GraphTransformer { 14func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, config *configs.Config) GraphTransformer {
16 return GraphTransformMulti( 15 return GraphTransformMulti(
17 // Add providers from the config 16 // Add providers from the config
18 &ProviderConfigTransformer{ 17 &ProviderConfigTransformer{
19 Module: mod, 18 Config: config,
20 Providers: providers, 19 Providers: providers,
21 Concrete: concrete, 20 Concrete: concrete,
22 }, 21 },
@@ -26,7 +25,9 @@ func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, m
26 Concrete: concrete, 25 Concrete: concrete,
27 }, 26 },
28 // Connect the providers 27 // Connect the providers
29 &ProviderTransformer{}, 28 &ProviderTransformer{
29 Config: config,
30 },
30 // Remove unused providers and proxies 31 // Remove unused providers and proxies
31 &PruneProviderTransformer{}, 32 &PruneProviderTransformer{},
32 // Connect provider to their parent provider nodes 33 // Connect provider to their parent provider nodes
@@ -36,10 +37,14 @@ func TransformProviders(providers []string, concrete ConcreteProviderNodeFunc, m
36 37
37// GraphNodeProvider is an interface that nodes that can be a provider 38// GraphNodeProvider is an interface that nodes that can be a provider
38// must implement. 39// must implement.
39// ProviderName returns the name of the provider this satisfies. 40//
41// ProviderAddr returns the address of the provider configuration this
42// satisfies, which is relative to the path returned by method Path().
43//
40// Name returns the full name of the provider in the config. 44// Name returns the full name of the provider in the config.
41type GraphNodeProvider interface { 45type GraphNodeProvider interface {
42 ProviderName() string 46 GraphNodeSubPath
47 ProviderAddr() addrs.AbsProviderConfig
43 Name() string 48 Name() string
44} 49}
45 50
@@ -47,62 +52,132 @@ type GraphNodeProvider interface {
47// provider must implement. The CloseProviderName returned is the name of 52// provider must implement. The CloseProviderName returned is the name of
48// the provider they satisfy. 53// the provider they satisfy.
49type GraphNodeCloseProvider interface { 54type GraphNodeCloseProvider interface {
50 CloseProviderName() string 55 GraphNodeSubPath
56 CloseProviderAddr() addrs.AbsProviderConfig
51} 57}
52 58
53// GraphNodeProviderConsumer is an interface that nodes that require 59// GraphNodeProviderConsumer is an interface that nodes that require
54// a provider must implement. ProvidedBy must return the name of the provider 60// a provider must implement. ProvidedBy must return the address of the provider
55// to use. This may be a provider by type, type.alias or a fully resolved 61// to use, which will be resolved to a configuration either in the same module
56// provider name 62// or in an ancestor module, with the resulting absolute address passed to
63// SetProvider.
57type GraphNodeProviderConsumer interface { 64type GraphNodeProviderConsumer interface {
58 ProvidedBy() string 65 // ProvidedBy returns the address of the provider configuration the node
66 // refers to. If the returned "exact" value is true, this address will
67 // be taken exactly. If "exact" is false, a provider configuration from
68 // an ancestor module may be selected instead.
69 ProvidedBy() (addr addrs.AbsProviderConfig, exact bool)
59 // Set the resolved provider address for this resource. 70 // Set the resolved provider address for this resource.
60 SetProvider(string) 71 SetProvider(addrs.AbsProviderConfig)
61} 72}
62 73
63// ProviderTransformer is a GraphTransformer that maps resources to 74// ProviderTransformer is a GraphTransformer that maps resources to
64// providers within the graph. This will error if there are any resources 75// providers within the graph. This will error if there are any resources
65// that don't map to proper resources. 76// that don't map to proper resources.
66type ProviderTransformer struct{} 77type ProviderTransformer struct {
78 Config *configs.Config
79}
67 80
68func (t *ProviderTransformer) Transform(g *Graph) error { 81func (t *ProviderTransformer) Transform(g *Graph) error {
69 // Go through the other nodes and match them to providers they need 82 // We need to find a provider configuration address for each resource
70 var err error 83 // either directly represented by a node or referenced by a node in
71 m := providerVertexMap(g) 84 // the graph, and then create graph edges from provider to provider user
85 // so that the providers will get initialized first.
86
87 var diags tfdiags.Diagnostics
88
89 // To start, we'll collect the _requested_ provider addresses for each
90 // node, which we'll then resolve (handling provider inheritence, etc) in
91 // the next step.
92 // Our "requested" map is from graph vertices to string representations of
93 // provider config addresses (for deduping) to requests.
94 type ProviderRequest struct {
95 Addr addrs.AbsProviderConfig
96 Exact bool // If true, inheritence from parent modules is not attempted
97 }
98 requested := map[dag.Vertex]map[string]ProviderRequest{}
99 needConfigured := map[string]addrs.AbsProviderConfig{}
72 for _, v := range g.Vertices() { 100 for _, v := range g.Vertices() {
101
102 // Does the vertex _directly_ use a provider?
73 if pv, ok := v.(GraphNodeProviderConsumer); ok { 103 if pv, ok := v.(GraphNodeProviderConsumer); ok {
74 p := pv.ProvidedBy() 104 requested[v] = make(map[string]ProviderRequest)
75 105
76 key := providerMapKey(p, pv) 106 p, exact := pv.ProvidedBy()
107 if exact {
108 log.Printf("[TRACE] ProviderTransformer: %s is provided by %s exactly", dag.VertexName(v), p)
109 } else {
110 log.Printf("[TRACE] ProviderTransformer: %s is provided by %s or inherited equivalent", dag.VertexName(v), p)
111 }
112
113 requested[v][p.String()] = ProviderRequest{
114 Addr: p,
115 Exact: exact,
116 }
117
118 // Direct references need the provider configured as well as initialized
119 needConfigured[p.String()] = p
120 }
121 }
122
123 // Now we'll go through all the requested addresses we just collected and
124 // figure out which _actual_ config address each belongs to, after resolving
125 // for provider inheritance and passing.
126 m := providerVertexMap(g)
127 for v, reqs := range requested {
128 for key, req := range reqs {
129 p := req.Addr
77 target := m[key] 130 target := m[key]
78 131
79 sp, ok := pv.(GraphNodeSubPath) 132 _, ok := v.(GraphNodeSubPath)
80 if !ok && target == nil { 133 if !ok && target == nil {
81 // no target, and no path to walk up 134 // No target and no path to traverse up from
82 err = multierror.Append(err, fmt.Errorf( 135 diags = diags.Append(fmt.Errorf("%s: provider %s couldn't be found", dag.VertexName(v), p))
83 "%s: provider %s couldn't be found", 136 continue
84 dag.VertexName(v), p)) 137 }
85 break 138
139 if target != nil {
140 log.Printf("[TRACE] ProviderTransformer: exact match for %s serving %s", p, dag.VertexName(v))
86 } 141 }
87 142
88 // if we don't have a provider at this level, walk up the path looking for one 143 // if we don't have a provider at this level, walk up the path looking for one,
89 for i := 1; target == nil; i++ { 144 // unless we were told to be exact.
90 path := normalizeModulePath(sp.Path()) 145 if target == nil && !req.Exact {
91 if len(path) < i { 146 for pp, ok := p.Inherited(); ok; pp, ok = pp.Inherited() {
92 break 147 key := pp.String()
148 target = m[key]
149 if target != nil {
150 log.Printf("[TRACE] ProviderTransformer: %s uses inherited configuration %s", dag.VertexName(v), pp)
151 break
152 }
153 log.Printf("[TRACE] ProviderTransformer: looking for %s to serve %s", pp, dag.VertexName(v))
93 } 154 }
155 }
94 156
95 key = ResolveProviderName(p, path[:len(path)-i]) 157 // If this provider doesn't need to be configured then we can just
96 target = m[key] 158 // stub it out with an init-only provider node, which will just
97 if target != nil { 159 // start up the provider and fetch its schema.
98 break 160 if _, exists := needConfigured[key]; target == nil && !exists {
161 stubAddr := p.ProviderConfig.Absolute(addrs.RootModuleInstance)
162 stub := &NodeEvalableProvider{
163 &NodeAbstractProvider{
164 Addr: stubAddr,
165 },
99 } 166 }
167 m[stubAddr.String()] = stub
168 log.Printf("[TRACE] ProviderTransformer: creating init-only node for %s", stubAddr)
169 target = stub
170 g.Add(target)
100 } 171 }
101 172
102 if target == nil { 173 if target == nil {
103 err = multierror.Append(err, fmt.Errorf( 174 diags = diags.Append(tfdiags.Sourceless(
104 "%s: configuration for %s is not present; a provider configuration block is required for all operations", 175 tfdiags.Error,
105 dag.VertexName(v), p, 176 "Provider configuration not present",
177 fmt.Sprintf(
178 "To work with %s its original provider configuration at %s is required, but it has been removed. This occurs when a provider configuration is removed while objects created by that provider still exist in the state. Re-add the provider configuration to destroy %s, after which you can remove the provider configuration again.",
179 dag.VertexName(v), p, dag.VertexName(v),
180 ),
106 )) 181 ))
107 break 182 break
108 } 183 }
@@ -111,16 +186,18 @@ func (t *ProviderTransformer) Transform(g *Graph) error {
111 if p, ok := target.(*graphNodeProxyProvider); ok { 186 if p, ok := target.(*graphNodeProxyProvider); ok {
112 g.Remove(p) 187 g.Remove(p)
113 target = p.Target() 188 target = p.Target()
114 key = target.(GraphNodeProvider).Name() 189 key = target.(GraphNodeProvider).ProviderAddr().String()
115 } 190 }
116 191
117 log.Printf("[DEBUG] resource %s using provider %s", dag.VertexName(pv), key) 192 log.Printf("[DEBUG] ProviderTransformer: %q (%T) needs %s", dag.VertexName(v), v, dag.VertexName(target))
118 pv.SetProvider(key) 193 if pv, ok := v.(GraphNodeProviderConsumer); ok {
194 pv.SetProvider(target.ProviderAddr())
195 }
119 g.Connect(dag.BasicEdge(v, target)) 196 g.Connect(dag.BasicEdge(v, target))
120 } 197 }
121 } 198 }
122 199
123 return err 200 return diags.Err()
124} 201}
125 202
126// CloseProviderTransformer is a GraphTransformer that adds nodes to the 203// CloseProviderTransformer is a GraphTransformer that adds nodes to the
@@ -136,15 +213,16 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error {
136 213
137 for _, v := range pm { 214 for _, v := range pm {
138 p := v.(GraphNodeProvider) 215 p := v.(GraphNodeProvider)
216 key := p.ProviderAddr().String()
139 217
140 // get the close provider of this type if we alread created it 218 // get the close provider of this type if we alread created it
141 closer := cpm[p.Name()] 219 closer := cpm[key]
142 220
143 if closer == nil { 221 if closer == nil {
144 // create a closer for this provider type 222 // create a closer for this provider type
145 closer = &graphNodeCloseProvider{ProviderNameValue: p.Name()} 223 closer = &graphNodeCloseProvider{Addr: p.ProviderAddr()}
146 g.Add(closer) 224 g.Add(closer)
147 cpm[p.Name()] = closer 225 cpm[key] = closer
148 } 226 }
149 227
150 // Close node depends on the provider itself 228 // Close node depends on the provider itself
@@ -164,10 +242,20 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error {
164 return err 242 return err
165} 243}
166 244
167// MissingProviderTransformer is a GraphTransformer that adds nodes for all 245// MissingProviderTransformer is a GraphTransformer that adds to the graph
168// required providers into the graph. Specifically, it creates provider 246// a node for each default provider configuration that is referenced by another
169// configuration nodes for all the providers that we support. These are pruned 247// node but not already present in the graph.
170// later during an optimization pass. 248//
249// These "default" nodes are always added to the root module, regardless of
250// where they are requested. This is important because our inheritance
251// resolution behavior in ProviderTransformer will then treat these as a
252// last-ditch fallback after walking up the tree, rather than preferring them
253// as it would if they were placed in the same module as the requester.
254//
255// This transformer may create extra nodes that are not needed in practice,
256// due to overriding provider configurations in child modules.
257// PruneProviderTransformer can then remove these once ProviderTransformer
258// has resolved all of the inheritence, etc.
171type MissingProviderTransformer struct { 259type MissingProviderTransformer struct {
172 // Providers is the list of providers we support. 260 // Providers is the list of providers we support.
173 Providers []string 261 Providers []string
@@ -192,34 +280,40 @@ func (t *MissingProviderTransformer) Transform(g *Graph) error {
192 continue 280 continue
193 } 281 }
194 282
195 p := pv.ProvidedBy() 283 // For our work here we actually care only about the provider type and
196 // this may be the resolved provider from the state, so we need to get 284 // we plan to place all default providers in the root module, and so
197 // the base provider name. 285 // it's safe for us to rely on ProvidedBy here rather than waiting for
198 parts := strings.SplitAfter(p, "provider.") 286 // the later proper resolution of provider inheritance done by
199 p = parts[len(parts)-1] 287 // ProviderTransformer.
288 p, _ := pv.ProvidedBy()
289 if p.ProviderConfig.Alias != "" {
290 // We do not create default aliased configurations.
291 log.Println("[TRACE] MissingProviderTransformer: skipping implication of aliased config", p)
292 continue
293 }
200 294
201 key := ResolveProviderName(p, nil) 295 // We're going to create an implicit _default_ configuration for the
296 // referenced provider type in the _root_ module, ignoring all other
297 // aspects of the resource's declared provider address.
298 defaultAddr := addrs.RootModuleInstance.ProviderConfigDefault(p.ProviderConfig.Type)
299 key := defaultAddr.String()
202 provider := m[key] 300 provider := m[key]
203 301
204 // we already have it
205 if provider != nil { 302 if provider != nil {
303 // There's already an explicit default configuration for this
304 // provider type in the root module, so we have nothing to do.
206 continue 305 continue
207 } 306 }
208 307
209 // we don't implicitly create aliased providers 308 log.Printf("[DEBUG] adding implicit provider configuration %s, implied first by %s", defaultAddr, dag.VertexName(v))
210 if strings.Contains(p, ".") {
211 log.Println("[DEBUG] not adding missing provider alias:", p)
212 continue
213 }
214
215 log.Println("[DEBUG] adding missing provider:", p)
216 309
217 // create the misisng top-level provider 310 // create the missing top-level provider
218 provider = t.Concrete(&NodeAbstractProvider{ 311 provider = t.Concrete(&NodeAbstractProvider{
219 NameValue: p, 312 Addr: defaultAddr,
220 }).(dag.Vertex) 313 }).(GraphNodeProvider)
221 314
222 m[key] = g.Add(provider) 315 g.Add(provider)
316 m[key] = provider
223 } 317 }
224 318
225 return err 319 return err
@@ -237,26 +331,26 @@ func (t *ParentProviderTransformer) Transform(g *Graph) error {
237 for _, v := range g.Vertices() { 331 for _, v := range g.Vertices() {
238 // Only care about providers 332 // Only care about providers
239 pn, ok := v.(GraphNodeProvider) 333 pn, ok := v.(GraphNodeProvider)
240 if !ok || pn.ProviderName() == "" { 334 if !ok {
241 continue 335 continue
242 } 336 }
243 337
244 // Also require a subpath, if there is no subpath then we 338 // Also require non-empty path, since otherwise we're in the root
245 // can't have a parent. 339 // module and so cannot have a parent.
246 if pn, ok := v.(GraphNodeSubPath); ok { 340 if len(pn.Path()) <= 1 {
247 if len(normalizeModulePath(pn.Path())) <= 1 { 341 continue
248 continue
249 }
250 } 342 }
251 343
252 // this provider may be disabled, but we can only get it's name from 344 // this provider may be disabled, but we can only get it's name from
253 // the ProviderName string 345 // the ProviderName string
254 name := ResolveProviderName(strings.SplitN(pn.ProviderName(), " ", 2)[0], nil) 346 addr := pn.ProviderAddr()
255 parent := pm[name] 347 parentAddr, ok := addr.Inherited()
256 if parent != nil { 348 if ok {
257 g.Connect(dag.BasicEdge(v, parent)) 349 parent := pm[parentAddr.String()]
350 if parent != nil {
351 g.Connect(dag.BasicEdge(v, parent))
352 }
258 } 353 }
259
260 } 354 }
261 return nil 355 return nil
262} 356}
@@ -270,20 +364,20 @@ type PruneProviderTransformer struct{}
270func (t *PruneProviderTransformer) Transform(g *Graph) error { 364func (t *PruneProviderTransformer) Transform(g *Graph) error {
271 for _, v := range g.Vertices() { 365 for _, v := range g.Vertices() {
272 // We only care about providers 366 // We only care about providers
273 pn, ok := v.(GraphNodeProvider) 367 _, ok := v.(GraphNodeProvider)
274 if !ok || pn.ProviderName() == "" { 368 if !ok {
275 continue 369 continue
276 } 370 }
277 371
278 // ProxyProviders will have up edges, but we're now done with them in the graph 372 // ProxyProviders will have up edges, but we're now done with them in the graph
279 if _, ok := v.(*graphNodeProxyProvider); ok { 373 if _, ok := v.(*graphNodeProxyProvider); ok {
280 log.Printf("[DEBUG] pruning proxy provider %s", dag.VertexName(v)) 374 log.Printf("[DEBUG] pruning proxy %s", dag.VertexName(v))
281 g.Remove(v) 375 g.Remove(v)
282 } 376 }
283 377
284 // Remove providers with no dependencies. 378 // Remove providers with no dependencies.
285 if g.UpEdges(v).Len() == 0 { 379 if g.UpEdges(v).Len() == 0 {
286 log.Printf("[DEBUG] pruning unused provider %s", dag.VertexName(v)) 380 log.Printf("[DEBUG] pruning unused %s", dag.VertexName(v))
287 g.Remove(v) 381 g.Remove(v)
288 } 382 }
289 } 383 }
@@ -291,40 +385,24 @@ func (t *PruneProviderTransformer) Transform(g *Graph) error {
291 return nil 385 return nil
292} 386}
293 387
294// providerMapKey is a helper that gives us the key to use for the 388func providerVertexMap(g *Graph) map[string]GraphNodeProvider {
295// maps returned by things such as providerVertexMap. 389 m := make(map[string]GraphNodeProvider)
296func providerMapKey(k string, v dag.Vertex) string {
297 if strings.Contains(k, "provider.") {
298 // this is already resolved
299 return k
300 }
301
302 // we create a dummy provider to
303 var path []string
304 if sp, ok := v.(GraphNodeSubPath); ok {
305 path = normalizeModulePath(sp.Path())
306 }
307 return ResolveProviderName(k, path)
308}
309
310func providerVertexMap(g *Graph) map[string]dag.Vertex {
311 m := make(map[string]dag.Vertex)
312 for _, v := range g.Vertices() { 390 for _, v := range g.Vertices() {
313 if pv, ok := v.(GraphNodeProvider); ok { 391 if pv, ok := v.(GraphNodeProvider); ok {
314 // TODO: The Name may have meta info, like " (disabled)" 392 addr := pv.ProviderAddr()
315 name := strings.SplitN(pv.Name(), " ", 2)[0] 393 m[addr.String()] = pv
316 m[name] = v
317 } 394 }
318 } 395 }
319 396
320 return m 397 return m
321} 398}
322 399
323func closeProviderVertexMap(g *Graph) map[string]dag.Vertex { 400func closeProviderVertexMap(g *Graph) map[string]GraphNodeCloseProvider {
324 m := make(map[string]dag.Vertex) 401 m := make(map[string]GraphNodeCloseProvider)
325 for _, v := range g.Vertices() { 402 for _, v := range g.Vertices() {
326 if pv, ok := v.(GraphNodeCloseProvider); ok { 403 if pv, ok := v.(GraphNodeCloseProvider); ok {
327 m[pv.CloseProviderName()] = v 404 addr := pv.CloseProviderAddr()
405 m[addr.String()] = pv
328 } 406 }
329 } 407 }
330 408
@@ -332,16 +410,25 @@ func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
332} 410}
333 411
334type graphNodeCloseProvider struct { 412type graphNodeCloseProvider struct {
335 ProviderNameValue string 413 Addr addrs.AbsProviderConfig
336} 414}
337 415
416var (
417 _ GraphNodeCloseProvider = (*graphNodeCloseProvider)(nil)
418)
419
338func (n *graphNodeCloseProvider) Name() string { 420func (n *graphNodeCloseProvider) Name() string {
339 return n.ProviderNameValue + " (close)" 421 return n.Addr.String() + " (close)"
422}
423
424// GraphNodeSubPath impl.
425func (n *graphNodeCloseProvider) Path() addrs.ModuleInstance {
426 return n.Addr.Module
340} 427}
341 428
342// GraphNodeEvalable impl. 429// GraphNodeEvalable impl.
343func (n *graphNodeCloseProvider) EvalTree() EvalNode { 430func (n *graphNodeCloseProvider) EvalTree() EvalNode {
344 return CloseProviderEvalTree(n.ProviderNameValue) 431 return CloseProviderEvalTree(n.Addr)
345} 432}
346 433
347// GraphNodeDependable impl. 434// GraphNodeDependable impl.
@@ -349,8 +436,8 @@ func (n *graphNodeCloseProvider) DependableName() []string {
349 return []string{n.Name()} 436 return []string{n.Name()}
350} 437}
351 438
352func (n *graphNodeCloseProvider) CloseProviderName() string { 439func (n *graphNodeCloseProvider) CloseProviderAddr() addrs.AbsProviderConfig {
353 return n.ProviderNameValue 440 return n.Addr
354} 441}
355 442
356// GraphNodeDotter impl. 443// GraphNodeDotter impl.
@@ -380,17 +467,24 @@ func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
380// configurations, and are removed after all the resources have been connected 467// configurations, and are removed after all the resources have been connected
381// to their providers. 468// to their providers.
382type graphNodeProxyProvider struct { 469type graphNodeProxyProvider struct {
383 nameValue string 470 addr addrs.AbsProviderConfig
384 path []string 471 target GraphNodeProvider
385 target GraphNodeProvider 472}
473
474var (
475 _ GraphNodeProvider = (*graphNodeProxyProvider)(nil)
476)
477
478func (n *graphNodeProxyProvider) ProviderAddr() addrs.AbsProviderConfig {
479 return n.addr
386} 480}
387 481
388func (n *graphNodeProxyProvider) ProviderName() string { 482func (n *graphNodeProxyProvider) Path() addrs.ModuleInstance {
389 return n.Target().ProviderName() 483 return n.addr.Module
390} 484}
391 485
392func (n *graphNodeProxyProvider) Name() string { 486func (n *graphNodeProxyProvider) Name() string {
393 return ResolveProviderName(n.nameValue, n.path) 487 return n.addr.String() + " (proxy)"
394} 488}
395 489
396// find the concrete provider instance 490// find the concrete provider instance
@@ -415,26 +509,21 @@ type ProviderConfigTransformer struct {
415 // record providers that can be overriden with a proxy 509 // record providers that can be overriden with a proxy
416 proxiable map[string]bool 510 proxiable map[string]bool
417 511
418 // Module is the module to add resources from. 512 // Config is the root node of the configuration tree to add providers from.
419 Module *module.Tree 513 Config *configs.Config
420} 514}
421 515
422func (t *ProviderConfigTransformer) Transform(g *Graph) error { 516func (t *ProviderConfigTransformer) Transform(g *Graph) error {
423 // If no module is given, we don't do anything 517 // If no configuration is given, we don't do anything
424 if t.Module == nil { 518 if t.Config == nil {
425 return nil 519 return nil
426 } 520 }
427 521
428 // If the module isn't loaded, that is simply an error
429 if !t.Module.Loaded() {
430 return errors.New("module must be loaded for ProviderConfigTransformer")
431 }
432
433 t.providers = make(map[string]GraphNodeProvider) 522 t.providers = make(map[string]GraphNodeProvider)
434 t.proxiable = make(map[string]bool) 523 t.proxiable = make(map[string]bool)
435 524
436 // Start the transformation process 525 // Start the transformation process
437 if err := t.transform(g, t.Module); err != nil { 526 if err := t.transform(g, t.Config); err != nil {
438 return err 527 return err
439 } 528 }
440 529
@@ -442,95 +531,126 @@ func (t *ProviderConfigTransformer) Transform(g *Graph) error {
442 return t.attachProviderConfigs(g) 531 return t.attachProviderConfigs(g)
443} 532}
444 533
445func (t *ProviderConfigTransformer) transform(g *Graph, m *module.Tree) error { 534func (t *ProviderConfigTransformer) transform(g *Graph, c *configs.Config) error {
446 // If no config, do nothing 535 // If no config, do nothing
447 if m == nil { 536 if c == nil {
448 return nil 537 return nil
449 } 538 }
450 539
451 // Add our resources 540 // Add our resources
452 if err := t.transformSingle(g, m); err != nil { 541 if err := t.transformSingle(g, c); err != nil {
453 return err 542 return err
454 } 543 }
455 544
456 // Transform all the children. 545 // Transform all the children.
457 for _, c := range m.Children() { 546 for _, cc := range c.Children {
458 if err := t.transform(g, c); err != nil { 547 if err := t.transform(g, cc); err != nil {
459 return err 548 return err
460 } 549 }
461 } 550 }
462 return nil 551 return nil
463} 552}
464 553
465func (t *ProviderConfigTransformer) transformSingle(g *Graph, m *module.Tree) error { 554func (t *ProviderConfigTransformer) transformSingle(g *Graph, c *configs.Config) error {
466 log.Printf("[TRACE] ProviderConfigTransformer: Starting for path: %v", m.Path()) 555 // Get the module associated with this configuration tree node
467 556 mod := c.Module
468 // Get the configuration for this module 557 staticPath := c.Path
469 conf := m.Config() 558
470 559 // We actually need a dynamic module path here, but we've not yet updated
471 // Build the path we're at 560 // our graph builders enough to support expansion of module calls with
472 path := m.Path() 561 // "count" and "for_each" set, so for now we'll shim this by converting to
473 if len(path) > 0 { 562 // a dynamic path with no keys. At the time of writing this is the only
474 path = append([]string{RootModuleName}, path...) 563 // possible kind of dynamic path anyway.
564 path := make(addrs.ModuleInstance, len(staticPath))
565 for i, name := range staticPath {
566 path[i] = addrs.ModuleInstanceStep{
567 Name: name,
568 }
475 } 569 }
476 570
477 // add all providers from the configuration 571 // add all providers from the configuration
478 for _, p := range conf.ProviderConfigs { 572 for _, p := range mod.ProviderConfigs {
479 name := p.Name 573 relAddr := p.Addr()
480 if p.Alias != "" { 574 addr := relAddr.Absolute(path)
481 name += "." + p.Alias
482 }
483 575
484 v := t.Concrete(&NodeAbstractProvider{ 576 abstract := &NodeAbstractProvider{
485 NameValue: name, 577 Addr: addr,
486 PathValue: path, 578 }
487 }) 579 var v dag.Vertex
580 if t.Concrete != nil {
581 v = t.Concrete(abstract)
582 } else {
583 v = abstract
584 }
488 585
489 // Add it to the graph 586 // Add it to the graph
490 g.Add(v) 587 g.Add(v)
491 fullName := ResolveProviderName(name, path) 588 key := addr.String()
492 t.providers[fullName] = v.(GraphNodeProvider) 589 t.providers[key] = v.(GraphNodeProvider)
493 t.proxiable[fullName] = len(p.RawConfig.RawMap()) == 0 590
591 // A provider configuration is "proxyable" if its configuration is
592 // entirely empty. This means it's standing in for a provider
593 // configuration that must be passed in from the parent module.
594 // We decide this by evaluating the config with an empty schema;
595 // if this succeeds, then we know there's nothing in the body.
596 _, diags := p.Config.Content(&hcl.BodySchema{})
597 t.proxiable[key] = !diags.HasErrors()
494 } 598 }
495 599
496 // Now replace the provider nodes with proxy nodes if a provider was being 600 // Now replace the provider nodes with proxy nodes if a provider was being
497 // passed in, and create implicit proxies if there was no config. Any extra 601 // passed in, and create implicit proxies if there was no config. Any extra
498 // proxies will be removed in the prune step. 602 // proxies will be removed in the prune step.
499 return t.addProxyProviders(g, m) 603 return t.addProxyProviders(g, c)
500} 604}
501 605
502func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree) error { 606func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, c *configs.Config) error {
503 path := m.Path() 607 path := c.Path
504 608
505 // can't add proxies at the root 609 // can't add proxies at the root
506 if len(path) == 0 { 610 if len(path) == 0 {
507 return nil 611 return nil
508 } 612 }
509 613
510 parentPath := path[:len(path)-1] 614 parentPath, callAddr := path.Call()
511 parent := t.Module.Child(parentPath) 615 parent := c.Parent
512 if parent == nil { 616 if parent == nil {
513 return nil 617 return nil
514 } 618 }
515 619
516 var parentCfg *config.Module 620 callName := callAddr.Name
517 for _, mod := range parent.Config().Modules { 621 var parentCfg *configs.ModuleCall
518 if mod.Name == m.Name() { 622 for name, mod := range parent.Module.ModuleCalls {
623 if name == callName {
519 parentCfg = mod 624 parentCfg = mod
520 break 625 break
521 } 626 }
522 } 627 }
523 628
629 // We currently don't support count/for_each for modules and so we must
630 // shim our path and parentPath into module instances here so that the
631 // rest of Terraform can behave as if we do. This shimming should be
632 // removed later as part of implementing count/for_each for modules.
633 instPath := make(addrs.ModuleInstance, len(path))
634 for i, name := range path {
635 instPath[i] = addrs.ModuleInstanceStep{Name: name}
636 }
637 parentInstPath := make(addrs.ModuleInstance, len(parentPath))
638 for i, name := range parentPath {
639 parentInstPath[i] = addrs.ModuleInstanceStep{Name: name}
640 }
641
524 if parentCfg == nil { 642 if parentCfg == nil {
525 // this can't really happen during normal execution. 643 // this can't really happen during normal execution.
526 return fmt.Errorf("parent module config not found for %s", m.Name()) 644 return fmt.Errorf("parent module config not found for %s", c.Path.String())
527 } 645 }
528 646
529 // Go through all the providers the parent is passing in, and add proxies to 647 // Go through all the providers the parent is passing in, and add proxies to
530 // the parent provider nodes. 648 // the parent provider nodes.
531 for name, parentName := range parentCfg.Providers { 649 for _, pair := range parentCfg.Providers {
532 fullName := ResolveProviderName(name, path) 650 fullAddr := pair.InChild.Addr().Absolute(instPath)
533 fullParentName := ResolveProviderName(parentName, parentPath) 651 fullParentAddr := pair.InParent.Addr().Absolute(parentInstPath)
652 fullName := fullAddr.String()
653 fullParentName := fullParentAddr.String()
534 654
535 parentProvider := t.providers[fullParentName] 655 parentProvider := t.providers[fullParentName]
536 656
@@ -539,9 +659,8 @@ func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree)
539 } 659 }
540 660
541 proxy := &graphNodeProxyProvider{ 661 proxy := &graphNodeProxyProvider{
542 nameValue: name, 662 addr: fullAddr,
543 path: path, 663 target: parentProvider,
544 target: parentProvider,
545 } 664 }
546 665
547 concreteProvider := t.providers[fullName] 666 concreteProvider := t.providers[fullName]
@@ -553,8 +672,8 @@ func (t *ProviderConfigTransformer) addProxyProviders(g *Graph, m *module.Tree)
553 continue 672 continue
554 } 673 }
555 674
556 // aliased providers can't be implicitly passed in 675 // aliased configurations can't be implicitly passed in
557 if strings.Contains(name, ".") { 676 if fullAddr.ProviderConfig.Alias != "" {
558 continue 677 continue
559 } 678 }
560 679
@@ -575,27 +694,19 @@ func (t *ProviderConfigTransformer) attachProviderConfigs(g *Graph) error {
575 } 694 }
576 695
577 // Determine what we're looking for 696 // Determine what we're looking for
578 path := normalizeModulePath(apn.Path())[1:] 697 addr := apn.ProviderAddr()
579 name := apn.ProviderName()
580 log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
581 698
582 // Get the configuration. 699 // Get the configuration.
583 tree := t.Module.Child(path) 700 mc := t.Config.DescendentForInstance(addr.Module)
584 if tree == nil { 701 if mc == nil {
702 log.Printf("[TRACE] ProviderConfigTransformer: no configuration available for %s", addr.String())
585 continue 703 continue
586 } 704 }
587 705
588 // Go through the provider configs to find the matching config 706 // Go through the provider configs to find the matching config
589 for _, p := range tree.Config().ProviderConfigs { 707 for _, p := range mc.Module.ProviderConfigs {
590 // Build the name, which is "name.alias" if an alias exists 708 if p.Name == addr.ProviderConfig.Type && p.Alias == addr.ProviderConfig.Alias {
591 current := p.Name 709 log.Printf("[TRACE] ProviderConfigTransformer: attaching to %q provider configuration from %s", dag.VertexName(v), p.DeclRange)
592 if p.Alias != "" {
593 current += "." + p.Alias
594 }
595
596 // If the configs match then attach!
597 if current == name {
598 log.Printf("[TRACE] Attaching provider config: %#v", p)
599 apn.AttachProvider(p) 710 apn.AttachProvider(p)
600 break 711 break
601 } 712 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
index f49d824..fe4cf0e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
@@ -2,6 +2,9 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log"
6
7 "github.com/hashicorp/terraform/addrs"
5 8
6 "github.com/hashicorp/go-multierror" 9 "github.com/hashicorp/go-multierror"
7 "github.com/hashicorp/terraform/dag" 10 "github.com/hashicorp/terraform/dag"
@@ -22,8 +25,8 @@ type GraphNodeCloseProvisioner interface {
22} 25}
23 26
24// GraphNodeProvisionerConsumer is an interface that nodes that require 27// GraphNodeProvisionerConsumer is an interface that nodes that require
25// a provisioner must implement. ProvisionedBy must return the name of the 28// a provisioner must implement. ProvisionedBy must return the names of the
26// provisioner to use. 29// provisioners to use.
27type GraphNodeProvisionerConsumer interface { 30type GraphNodeProvisionerConsumer interface {
28 ProvisionedBy() []string 31 ProvisionedBy() []string
29} 32}
@@ -48,6 +51,7 @@ func (t *ProvisionerTransformer) Transform(g *Graph) error {
48 continue 51 continue
49 } 52 }
50 53
54 log.Printf("[TRACE] ProvisionerTransformer: %s is provisioned by %s (%q)", dag.VertexName(v), key, dag.VertexName(m[key]))
51 g.Connect(dag.BasicEdge(v, m[key])) 55 g.Connect(dag.BasicEdge(v, m[key]))
52 } 56 }
53 } 57 }
@@ -83,12 +87,9 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
83 87
84 // If this node has a subpath, then we use that as a prefix 88 // If this node has a subpath, then we use that as a prefix
85 // into our map to check for an existing provider. 89 // into our map to check for an existing provider.
86 var path []string 90 path := addrs.RootModuleInstance
87 if sp, ok := pv.(GraphNodeSubPath); ok { 91 if sp, ok := pv.(GraphNodeSubPath); ok {
88 raw := normalizeModulePath(sp.Path()) 92 path = sp.Path()
89 if len(raw) > len(rootModulePath) {
90 path = raw
91 }
92 } 93 }
93 94
94 for _, p := range pv.ProvisionedBy() { 95 for _, p := range pv.ProvisionedBy() {
@@ -101,7 +102,7 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
101 } 102 }
102 103
103 if _, ok := supported[p]; !ok { 104 if _, ok := supported[p]; !ok {
104 // If we don't support the provisioner type, skip it. 105 // If we don't support the provisioner type, we skip it.
105 // Validation later will catch this as an error. 106 // Validation later will catch this as an error.
106 continue 107 continue
107 } 108 }
@@ -114,6 +115,7 @@ func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
114 115
115 // Add the missing provisioner node to the graph 116 // Add the missing provisioner node to the graph
116 m[key] = g.Add(newV) 117 m[key] = g.Add(newV)
118 log.Printf("[TRACE] MissingProviderTransformer: added implicit provisioner %s, first implied by %s", key, dag.VertexName(v))
117 } 119 }
118 } 120 }
119 121
@@ -156,10 +158,7 @@ func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
156func provisionerMapKey(k string, v dag.Vertex) string { 158func provisionerMapKey(k string, v dag.Vertex) string {
157 pathPrefix := "" 159 pathPrefix := ""
158 if sp, ok := v.(GraphNodeSubPath); ok { 160 if sp, ok := v.(GraphNodeSubPath); ok {
159 raw := normalizeModulePath(sp.Path()) 161 pathPrefix = sp.Path().String() + "."
160 if len(raw) > len(rootModulePath) {
161 pathPrefix = modulePrefixStr(raw) + "."
162 }
163 } 162 }
164 163
165 return pathPrefix + k 164 return pathPrefix + k
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
index be8c7f9..23bc8cd 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
@@ -3,8 +3,12 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 "log" 5 "log"
6 "strings"
7 6
7 "github.com/hashicorp/hcl2/hcl"
8 "github.com/hashicorp/terraform/configs/configschema"
9 "github.com/hashicorp/terraform/lang"
10
11 "github.com/hashicorp/terraform/addrs"
8 "github.com/hashicorp/terraform/config" 12 "github.com/hashicorp/terraform/config"
9 "github.com/hashicorp/terraform/dag" 13 "github.com/hashicorp/terraform/dag"
10) 14)
@@ -17,35 +21,46 @@ import (
17// be referenced and other methods of referencing may still be possible (such 21// be referenced and other methods of referencing may still be possible (such
18// as by path!) 22// as by path!)
19type GraphNodeReferenceable interface { 23type GraphNodeReferenceable interface {
20 // ReferenceableName is the name by which this can be referenced. 24 GraphNodeSubPath
21 // This can be either just the type, or include the field. Example: 25
22 // "aws_instance.bar" or "aws_instance.bar.id". 26 // ReferenceableAddrs returns a list of addresses through which this can be
23 ReferenceableName() []string 27 // referenced.
28 ReferenceableAddrs() []addrs.Referenceable
24} 29}
25 30
26// GraphNodeReferencer must be implemented by nodes that reference other 31// GraphNodeReferencer must be implemented by nodes that reference other
27// Terraform items and therefore depend on them. 32// Terraform items and therefore depend on them.
28type GraphNodeReferencer interface { 33type GraphNodeReferencer interface {
29 // References are the list of things that this node references. This 34 GraphNodeSubPath
30 // can include fields or just the type, just like GraphNodeReferenceable 35
31 // above. 36 // References returns a list of references made by this node, which
32 References() []string 37 // include both a referenced address and source location information for
38 // the reference.
39 References() []*addrs.Reference
33} 40}
34 41
35// GraphNodeReferenceGlobal is an interface that can optionally be 42// GraphNodeReferenceOutside is an interface that can optionally be implemented.
36// implemented. If ReferenceGlobal returns true, then the References() 43// A node that implements it can specify that its own referenceable addresses
37// and ReferenceableName() must be _fully qualified_ with "module.foo.bar" 44// and/or the addresses it references are in a different module than the
38// etc. 45// node itself.
46//
47// Any referenceable addresses returned by ReferenceableAddrs are interpreted
48// relative to the returned selfPath.
39// 49//
40// This allows a node to reference and be referenced by a specific name 50// Any references returned by References are interpreted relative to the
41// that may cross module boundaries. This can be very dangerous so use 51// returned referencePath.
42// this wisely.
43// 52//
44// The primary use case for this is module boundaries (variables coming in). 53// It is valid but not required for either of these paths to match what is
45type GraphNodeReferenceGlobal interface { 54// returned by method Path, though if both match the main Path then there
46 // Set to true to signal that references and name are fully 55// is no reason to implement this method.
47 // qualified. See the above docs for more information. 56//
48 ReferenceGlobal() bool 57// The primary use-case for this is the nodes representing module input
58// variables, since their expressions are resolved in terms of their calling
59// module, but they are still referenced from their own module.
60type GraphNodeReferenceOutside interface {
61 // ReferenceOutside returns a path in which any references from this node
62 // are resolved.
63 ReferenceOutside() (selfPath, referencePath addrs.ModuleInstance)
49} 64}
50 65
51// ReferenceTransformer is a GraphTransformer that connects all the 66// ReferenceTransformer is a GraphTransformer that connects all the
@@ -158,75 +173,91 @@ func (t *PruneUnusedValuesTransformer) Transform(g *Graph) error {
158// ReferenceMap is a structure that can be used to efficiently check 173// ReferenceMap is a structure that can be used to efficiently check
159// for references on a graph. 174// for references on a graph.
160type ReferenceMap struct { 175type ReferenceMap struct {
161 // m is the mapping of referenceable name to list of verticies that 176 // vertices is a map from internal reference keys (as produced by the
162 // implement that name. This is built on initialization. 177 // mapKey method) to one or more vertices that are identified by each key.
163 references map[string][]dag.Vertex 178 //
164 referencedBy map[string][]dag.Vertex 179 // A particular reference key might actually identify multiple vertices,
180 // e.g. in situations where one object is contained inside another.
181 vertices map[string][]dag.Vertex
182
183 // edges is a map whose keys are a subset of the internal reference keys
184 // from "vertices", and whose values are the nodes that refer to each
185 // key. The values in this map are the referrers, while values in
186 // "verticies" are the referents. The keys in both cases are referents.
187 edges map[string][]dag.Vertex
165} 188}
166 189
167// References returns the list of vertices that this vertex 190// References returns the set of vertices that the given vertex refers to,
168// references along with any missing references. 191// and any referenced addresses that do not have corresponding vertices.
169func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) { 192func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []addrs.Referenceable) {
170 rn, ok := v.(GraphNodeReferencer) 193 rn, ok := v.(GraphNodeReferencer)
171 if !ok { 194 if !ok {
172 return nil, nil 195 return nil, nil
173 } 196 }
197 if _, ok := v.(GraphNodeSubPath); !ok {
198 return nil, nil
199 }
174 200
175 var matches []dag.Vertex 201 var matches []dag.Vertex
176 var missing []string 202 var missing []addrs.Referenceable
177 prefix := m.prefix(v) 203
178 204 for _, ref := range rn.References() {
179 for _, ns := range rn.References() { 205 subject := ref.Subject
180 found := false 206
181 for _, n := range strings.Split(ns, "/") { 207 key := m.referenceMapKey(v, subject)
182 n = prefix + n 208 if _, exists := m.vertices[key]; !exists {
183 parents, ok := m.references[n] 209 // If what we were looking for was a ResourceInstance then we
184 if !ok { 210 // might be in a resource-oriented graph rather than an
185 continue 211 // instance-oriented graph, and so we'll see if we have the
212 // resource itself instead.
213 switch ri := subject.(type) {
214 case addrs.ResourceInstance:
215 subject = ri.ContainingResource()
216 case addrs.ResourceInstancePhase:
217 subject = ri.ContainingResource()
186 } 218 }
219 key = m.referenceMapKey(v, subject)
220 }
187 221
188 // Mark that we found a match 222 vertices := m.vertices[key]
189 found = true 223 for _, rv := range vertices {
190 224 // don't include self-references
191 for _, p := range parents { 225 if rv == v {
192 // don't include self-references 226 continue
193 if p == v {
194 continue
195 }
196 matches = append(matches, p)
197 } 227 }
198 228 matches = append(matches, rv)
199 break
200 } 229 }
201 230 if len(vertices) == 0 {
202 if !found { 231 missing = append(missing, ref.Subject)
203 missing = append(missing, ns)
204 } 232 }
205 } 233 }
206 234
207 return matches, missing 235 return matches, missing
208} 236}
209 237
210// ReferencedBy returns the list of vertices that reference the 238// Referrers returns the set of vertices that refer to the given vertex.
211// vertex passed in. 239func (m *ReferenceMap) Referrers(v dag.Vertex) []dag.Vertex {
212func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
213 rn, ok := v.(GraphNodeReferenceable) 240 rn, ok := v.(GraphNodeReferenceable)
214 if !ok { 241 if !ok {
215 return nil 242 return nil
216 } 243 }
244 sp, ok := v.(GraphNodeSubPath)
245 if !ok {
246 return nil
247 }
217 248
218 var matches []dag.Vertex 249 var matches []dag.Vertex
219 prefix := m.prefix(v) 250 for _, addr := range rn.ReferenceableAddrs() {
220 for _, n := range rn.ReferenceableName() { 251 key := m.mapKey(sp.Path(), addr)
221 n = prefix + n 252 referrers, ok := m.edges[key]
222 children, ok := m.referencedBy[n]
223 if !ok { 253 if !ok {
224 continue 254 continue
225 } 255 }
226 256
227 // Make sure this isn't a self reference, which isn't included 257 // If the referrer set includes our own given vertex then we skip,
258 // since we don't want to return self-references.
228 selfRef := false 259 selfRef := false
229 for _, p := range children { 260 for _, p := range referrers {
230 if p == v { 261 if p == v {
231 selfRef = true 262 selfRef = true
232 break 263 break
@@ -236,28 +267,77 @@ func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
236 continue 267 continue
237 } 268 }
238 269
239 matches = append(matches, children...) 270 matches = append(matches, referrers...)
240 } 271 }
241 272
242 return matches 273 return matches
243} 274}
244 275
245func (m *ReferenceMap) prefix(v dag.Vertex) string { 276func (m *ReferenceMap) mapKey(path addrs.ModuleInstance, addr addrs.Referenceable) string {
246 // If the node is stating it is already fully qualified then 277 return fmt.Sprintf("%s|%s", path.String(), addr.String())
247 // we don't have to create the prefix! 278}
248 if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() { 279
249 return "" 280// vertexReferenceablePath returns the path in which the given vertex can be
281// referenced. This is the path that its results from ReferenceableAddrs
282// are considered to be relative to.
283//
284// Only GraphNodeSubPath implementations can be referenced, so this method will
285// panic if the given vertex does not implement that interface.
286func (m *ReferenceMap) vertexReferenceablePath(v dag.Vertex) addrs.ModuleInstance {
287 sp, ok := v.(GraphNodeSubPath)
288 if !ok {
289 // Only nodes with paths can participate in a reference map.
290 panic(fmt.Errorf("vertexMapKey on vertex type %T which doesn't implement GraphNodeSubPath", sp))
250 } 291 }
251 292
252 // Create the prefix based on the path 293 if outside, ok := v.(GraphNodeReferenceOutside); ok {
253 var prefix string 294 // Vertex is referenced from a different module than where it was
254 if pn, ok := v.(GraphNodeSubPath); ok { 295 // declared.
255 if path := normalizeModulePath(pn.Path()); len(path) > 1 { 296 path, _ := outside.ReferenceOutside()
256 prefix = modulePrefixStr(path) + "." 297 return path
257 } 298 }
299
300 // Vertex is referenced from the same module as where it was declared.
301 return sp.Path()
302}
303
304// vertexReferencePath returns the path in which references _from_ the given
305// vertex must be interpreted.
306//
307// Only GraphNodeSubPath implementations can have references, so this method
308// will panic if the given vertex does not implement that interface.
309func vertexReferencePath(referrer dag.Vertex) addrs.ModuleInstance {
310 sp, ok := referrer.(GraphNodeSubPath)
311 if !ok {
312 // Only nodes with paths can participate in a reference map.
313 panic(fmt.Errorf("vertexReferencePath on vertex type %T which doesn't implement GraphNodeSubPath", sp))
314 }
315
316 var path addrs.ModuleInstance
317 if outside, ok := referrer.(GraphNodeReferenceOutside); ok {
318 // Vertex makes references to objects in a different module than where
319 // it was declared.
320 _, path = outside.ReferenceOutside()
321 return path
258 } 322 }
259 323
260 return prefix 324 // Vertex makes references to objects in the same module as where it
325 // was declared.
326 return sp.Path()
327}
328
329// referenceMapKey produces keys for the "edges" map. "referrer" is the vertex
330// that the reference is from, and "addr" is the address of the object being
331// referenced.
332//
333// The result is an opaque string that includes both the address of the given
334// object and the address of the module instance that object belongs to.
335//
336// Only GraphNodeSubPath implementations can be referrers, so this method will
337// panic if the given vertex does not implement that interface.
338func (m *ReferenceMap) referenceMapKey(referrer dag.Vertex, addr addrs.Referenceable) string {
339 path := vertexReferencePath(referrer)
340 return m.mapKey(path, addr)
261} 341}
262 342
263// NewReferenceMap is used to create a new reference map for the 343// NewReferenceMap is used to create a new reference map for the
@@ -266,83 +346,82 @@ func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
266 var m ReferenceMap 346 var m ReferenceMap
267 347
268 // Build the lookup table 348 // Build the lookup table
269 refMap := make(map[string][]dag.Vertex) 349 vertices := make(map[string][]dag.Vertex)
270 for _, v := range vs { 350 for _, v := range vs {
351 _, ok := v.(GraphNodeSubPath)
352 if !ok {
353 // Only nodes with paths can participate in a reference map.
354 continue
355 }
356
271 // We're only looking for referenceable nodes 357 // We're only looking for referenceable nodes
272 rn, ok := v.(GraphNodeReferenceable) 358 rn, ok := v.(GraphNodeReferenceable)
273 if !ok { 359 if !ok {
274 continue 360 continue
275 } 361 }
276 362
363 path := m.vertexReferenceablePath(v)
364
277 // Go through and cache them 365 // Go through and cache them
278 prefix := m.prefix(v) 366 for _, addr := range rn.ReferenceableAddrs() {
279 for _, n := range rn.ReferenceableName() { 367 key := m.mapKey(path, addr)
280 n = prefix + n 368 vertices[key] = append(vertices[key], v)
281 refMap[n] = append(refMap[n], v)
282 } 369 }
283 370
284 // If there is a path, it is always referenceable by that. For 371 // Any node can be referenced by the address of the module it belongs
285 // example, if this is a referenceable thing at path []string{"foo"}, 372 // to or any of that module's ancestors.
286 // then it can be referenced at "module.foo" 373 for _, addr := range path.Ancestors()[1:] {
287 if pn, ok := v.(GraphNodeSubPath); ok { 374 // Can be referenced either as the specific call instance (with
288 for _, p := range ReferenceModulePath(pn.Path()) { 375 // an instance key) or as the bare module call itself (the "module"
289 refMap[p] = append(refMap[p], v) 376 // block in the parent module that created the instance).
290 } 377 callPath, call := addr.Call()
378 callInstPath, callInst := addr.CallInstance()
379 callKey := m.mapKey(callPath, call)
380 callInstKey := m.mapKey(callInstPath, callInst)
381 vertices[callKey] = append(vertices[callKey], v)
382 vertices[callInstKey] = append(vertices[callInstKey], v)
291 } 383 }
292 } 384 }
293 385
294 // Build the lookup table for referenced by 386 // Build the lookup table for referenced by
295 refByMap := make(map[string][]dag.Vertex) 387 edges := make(map[string][]dag.Vertex)
296 for _, v := range vs { 388 for _, v := range vs {
297 // We're only looking for referenceable nodes 389 _, ok := v.(GraphNodeSubPath)
390 if !ok {
391 // Only nodes with paths can participate in a reference map.
392 continue
393 }
394
298 rn, ok := v.(GraphNodeReferencer) 395 rn, ok := v.(GraphNodeReferencer)
299 if !ok { 396 if !ok {
397 // We're only looking for referenceable nodes
300 continue 398 continue
301 } 399 }
302 400
303 // Go through and cache them 401 // Go through and cache them
304 prefix := m.prefix(v) 402 for _, ref := range rn.References() {
305 for _, n := range rn.References() { 403 if ref.Subject == nil {
306 n = prefix + n 404 // Should never happen
307 refByMap[n] = append(refByMap[n], v) 405 panic(fmt.Sprintf("%T.References returned reference with nil subject", rn))
406 }
407 key := m.referenceMapKey(v, ref.Subject)
408 edges[key] = append(edges[key], v)
308 } 409 }
309 } 410 }
310 411
311 m.references = refMap 412 m.vertices = vertices
312 m.referencedBy = refByMap 413 m.edges = edges
313 return &m 414 return &m
314} 415}
315 416
316// Returns the reference name for a module path. The path "foo" would return
317// "module.foo". If this is a deeply nested module, it will be every parent
318// as well. For example: ["foo", "bar"] would return both "module.foo" and
319// "module.foo.module.bar"
320func ReferenceModulePath(p []string) []string {
321 p = normalizeModulePath(p)
322 if len(p) == 1 {
323 // Root, no name
324 return nil
325 }
326
327 result := make([]string, 0, len(p)-1)
328 for i := len(p); i > 1; i-- {
329 result = append(result, modulePrefixStr(p[:i]))
330 }
331
332 return result
333}
334
335// ReferencesFromConfig returns the references that a configuration has 417// ReferencesFromConfig returns the references that a configuration has
336// based on the interpolated variables in a configuration. 418// based on the interpolated variables in a configuration.
337func ReferencesFromConfig(c *config.RawConfig) []string { 419func ReferencesFromConfig(body hcl.Body, schema *configschema.Block) []*addrs.Reference {
338 var result []string 420 if body == nil {
339 for _, v := range c.Variables { 421 return nil
340 if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
341 result = append(result, r...)
342 }
343 } 422 }
344 423 refs, _ := lang.ReferencesInBlock(body, schema)
345 return result 424 return refs
346} 425}
347 426
348// ReferenceFromInterpolatedVar returns the reference from this variable, 427// ReferenceFromInterpolatedVar returns the reference from this variable,
@@ -378,18 +457,31 @@ func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
378 } 457 }
379} 458}
380 459
381func modulePrefixStr(p []string) string { 460// appendResourceDestroyReferences identifies resource and resource instance
382 // strip "root" 461// references in the given slice and appends to it the "destroy-phase"
383 if len(p) > 0 && p[0] == rootModulePath[0] { 462// equivalents of those references, returning the result.
384 p = p[1:] 463//
385 } 464// This can be used in the References implementation for a node which must also
386 465// depend on the destruction of anything it references.
387 parts := make([]string, 0, len(p)*2) 466func appendResourceDestroyReferences(refs []*addrs.Reference) []*addrs.Reference {
388 for _, p := range p { 467 given := refs
389 parts = append(parts, "module", p) 468 for _, ref := range given {
469 switch tr := ref.Subject.(type) {
470 case addrs.Resource:
471 newRef := *ref // shallow copy
472 newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
473 refs = append(refs, &newRef)
474 case addrs.ResourceInstance:
475 newRef := *ref // shallow copy
476 newRef.Subject = tr.Phase(addrs.ResourceInstancePhaseDestroy)
477 refs = append(refs, &newRef)
478 }
390 } 479 }
480 return refs
481}
391 482
392 return strings.Join(parts, ".") 483func modulePrefixStr(p addrs.ModuleInstance) string {
484 return p.String()
393} 485}
394 486
395func modulePrefixList(result []string, prefix string) []string { 487func modulePrefixList(result []string, prefix string) []string {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
index 2e05edb..ee71387 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_removed_modules.go
@@ -3,14 +3,15 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/config/module" 6 "github.com/hashicorp/terraform/configs"
7 "github.com/hashicorp/terraform/states"
7) 8)
8 9
9// RemoveModuleTransformer implements GraphTransformer to add nodes indicating 10// RemovedModuleTransformer implements GraphTransformer to add nodes indicating
10// when a module was removed from the configuration. 11// when a module was removed from the configuration.
11type RemovedModuleTransformer struct { 12type RemovedModuleTransformer struct {
12 Module *module.Tree // root module 13 Config *configs.Config // root node in the config tree
13 State *State 14 State *states.State
14} 15}
15 16
16func (t *RemovedModuleTransformer) Transform(g *Graph) error { 17func (t *RemovedModuleTransformer) Transform(g *Graph) error {
@@ -20,13 +21,13 @@ func (t *RemovedModuleTransformer) Transform(g *Graph) error {
20 } 21 }
21 22
22 for _, m := range t.State.Modules { 23 for _, m := range t.State.Modules {
23 c := t.Module.Child(m.Path[1:]) 24 cc := t.Config.DescendentForInstance(m.Addr)
24 if c != nil { 25 if cc != nil {
25 continue 26 continue
26 } 27 }
27 28
28 log.Printf("[DEBUG] module %s no longer in config\n", modulePrefixStr(m.Path)) 29 log.Printf("[DEBUG] %s is no longer in configuration\n", m.Addr)
29 g.Add(&NodeModuleRemoved{PathValue: m.Path}) 30 g.Add(&NodeModuleRemoved{Addr: m.Addr})
30 } 31 }
31 return nil 32 return nil
32} 33}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
index e528b37..1123790 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
@@ -1,8 +1,8 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt" 4 "github.com/hashicorp/terraform/addrs"
5 5 "github.com/hashicorp/terraform/configs/configschema"
6 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/dag"
7) 7)
8 8
@@ -11,41 +11,44 @@ import (
11// 11//
12// This assumes that the count is already interpolated. 12// This assumes that the count is already interpolated.
13type ResourceCountTransformer struct { 13type ResourceCountTransformer struct {
14 Concrete ConcreteResourceNodeFunc 14 Concrete ConcreteResourceInstanceNodeFunc
15 Schema *configschema.Block
15 16
17 // Count is either the number of indexed instances to create, or -1 to
18 // indicate that count is not set at all and thus a no-key instance should
19 // be created.
16 Count int 20 Count int
17 Addr *ResourceAddress 21 Addr addrs.AbsResource
18} 22}
19 23
20func (t *ResourceCountTransformer) Transform(g *Graph) error { 24func (t *ResourceCountTransformer) Transform(g *Graph) error {
21 // Don't allow the count to be negative
22 if t.Count < 0 { 25 if t.Count < 0 {
23 return fmt.Errorf("negative count: %d", t.Count) 26 // Negative count indicates that count is not set at all.
27 addr := t.Addr.Instance(addrs.NoKey)
28
29 abstract := NewNodeAbstractResourceInstance(addr)
30 abstract.Schema = t.Schema
31 var node dag.Vertex = abstract
32 if f := t.Concrete; f != nil {
33 node = f(abstract)
34 }
35
36 g.Add(node)
37 return nil
24 } 38 }
25 39
26 // For each count, build and add the node 40 // For each count, build and add the node
27 for i := 0; i < t.Count; i++ { 41 for i := 0; i < t.Count; i++ {
28 // Set the index. If our count is 1 we special case it so that 42 key := addrs.IntKey(i)
29 // we handle the "resource.0" and "resource" boundary properly. 43 addr := t.Addr.Instance(key)
30 index := i
31 if t.Count == 1 {
32 index = -1
33 }
34 44
35 // Build the resource address 45 abstract := NewNodeAbstractResourceInstance(addr)
36 addr := t.Addr.Copy() 46 abstract.Schema = t.Schema
37 addr.Index = index
38
39 // Build the abstract node and the concrete one
40 abstract := &NodeAbstractResource{
41 Addr: addr,
42 }
43 var node dag.Vertex = abstract 47 var node dag.Vertex = abstract
44 if f := t.Concrete; f != nil { 48 if f := t.Concrete; f != nil {
45 node = f(abstract) 49 node = f(abstract)
46 } 50 }
47 51
48 // Add it to the graph
49 g.Add(node) 52 g.Add(node)
50 } 53 }
51 54
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
index 471cd74..0b52347 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
@@ -1,10 +1,9 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "fmt"
5 "log" 4 "log"
6 5
7 "github.com/hashicorp/terraform/dag" 6 "github.com/hashicorp/terraform/states"
8) 7)
9 8
10// StateTransformer is a GraphTransformer that adds the elements of 9// StateTransformer is a GraphTransformer that adds the elements of
@@ -13,53 +12,63 @@ import (
13// This transform is used for example by the DestroyPlanGraphBuilder to ensure 12// This transform is used for example by the DestroyPlanGraphBuilder to ensure
14// that only resources that are in the state are represented in the graph. 13// that only resources that are in the state are represented in the graph.
15type StateTransformer struct { 14type StateTransformer struct {
16 Concrete ConcreteResourceNodeFunc 15 // ConcreteCurrent and ConcreteDeposed are used to specialize the abstract
16 // resource instance nodes that this transformer will create.
17 //
18 // If either of these is nil, the objects of that type will be skipped and
19 // not added to the graph at all. It doesn't make sense to use this
20 // transformer without setting at least one of these, since that would
21 // skip everything and thus be a no-op.
22 ConcreteCurrent ConcreteResourceInstanceNodeFunc
23 ConcreteDeposed ConcreteResourceInstanceDeposedNodeFunc
17 24
18 State *State 25 State *states.State
19} 26}
20 27
21func (t *StateTransformer) Transform(g *Graph) error { 28func (t *StateTransformer) Transform(g *Graph) error {
22 // If the state is nil or empty (nil is empty) then do nothing 29 if !t.State.HasResources() {
23 if t.State.Empty() { 30 log.Printf("[TRACE] StateTransformer: state is empty, so nothing to do")
24 return nil 31 return nil
25 } 32 }
26 33
27 // Go through all the modules in the diff. 34 switch {
28 log.Printf("[TRACE] StateTransformer: starting") 35 case t.ConcreteCurrent != nil && t.ConcreteDeposed != nil:
29 var nodes []dag.Vertex 36 log.Printf("[TRACE] StateTransformer: creating nodes for both current and deposed instance objects")
37 case t.ConcreteCurrent != nil:
38 log.Printf("[TRACE] StateTransformer: creating nodes for current instance objects only")
39 case t.ConcreteDeposed != nil:
40 log.Printf("[TRACE] StateTransformer: creating nodes for deposed instance objects only")
41 default:
42 log.Printf("[TRACE] StateTransformer: pointless no-op call, creating no nodes at all")
43 }
44
30 for _, ms := range t.State.Modules { 45 for _, ms := range t.State.Modules {
31 log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path) 46 moduleAddr := ms.Addr
32 47
33 // Go through all the resources in this module. 48 for _, rs := range ms.Resources {
34 for name, rs := range ms.Resources { 49 resourceAddr := rs.Addr.Absolute(moduleAddr)
35 log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)
36 50
37 // Add the resource to the graph 51 for key, is := range rs.Instances {
38 addr, err := parseResourceAddressInternal(name) 52 addr := resourceAddr.Instance(key)
39 if err != nil {
40 panic(fmt.Sprintf(
41 "Error parsing internal name, this is a bug: %q", name))
42 }
43 53
44 // Very important: add the module path for this resource to 54 if obj := is.Current; obj != nil && t.ConcreteCurrent != nil {
45 // the address. Remove "root" from it. 55 abstract := NewNodeAbstractResourceInstance(addr)
46 addr.Path = ms.Path[1:] 56 node := t.ConcreteCurrent(abstract)
57 g.Add(node)
58 log.Printf("[TRACE] StateTransformer: added %T for %s current object", node, addr)
59 }
47 60
48 // Add the resource to the graph 61 if t.ConcreteDeposed != nil {
49 abstract := &NodeAbstractResource{Addr: addr} 62 for dk := range is.Deposed {
50 var node dag.Vertex = abstract 63 abstract := NewNodeAbstractResourceInstance(addr)
51 if f := t.Concrete; f != nil { 64 node := t.ConcreteDeposed(abstract, dk)
52 node = f(abstract) 65 g.Add(node)
66 log.Printf("[TRACE] StateTransformer: added %T for %s deposed object %s", node, addr, dk)
67 }
68 }
53 } 69 }
54
55 nodes = append(nodes, node)
56 } 70 }
57 } 71 }
58 72
59 // Add all the nodes to the graph
60 for _, n := range nodes {
61 g.Add(n)
62 }
63
64 return nil 73 return nil
65} 74}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
index af6defe..d25274e 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
@@ -3,6 +3,7 @@ package terraform
3import ( 3import (
4 "log" 4 "log"
5 5
6 "github.com/hashicorp/terraform/addrs"
6 "github.com/hashicorp/terraform/dag" 7 "github.com/hashicorp/terraform/dag"
7) 8)
8 9
@@ -12,7 +13,7 @@ import (
12// provided will contain every target provided, and each implementing graph 13// provided will contain every target provided, and each implementing graph
13// node must filter this list to targets considered relevant. 14// node must filter this list to targets considered relevant.
14type GraphNodeTargetable interface { 15type GraphNodeTargetable interface {
15 SetTargets([]ResourceAddress) 16 SetTargets([]addrs.Targetable)
16} 17}
17 18
18// GraphNodeTargetDownstream is an interface for graph nodes that need to 19// GraphNodeTargetDownstream is an interface for graph nodes that need to
@@ -35,11 +36,7 @@ type GraphNodeTargetDownstream interface {
35// their dependencies. 36// their dependencies.
36type TargetsTransformer struct { 37type TargetsTransformer struct {
37 // List of targeted resource names specified by the user 38 // List of targeted resource names specified by the user
38 Targets []string 39 Targets []addrs.Targetable
39
40 // List of parsed targets, provided by callers like ResourceCountTransform
41 // that already have the targets parsed
42 ParsedTargets []ResourceAddress
43 40
44 // If set, the index portions of resource addresses will be ignored 41 // If set, the index portions of resource addresses will be ignored
45 // for comparison. This is used when transforming a graph where 42 // for comparison. This is used when transforming a graph where
@@ -53,17 +50,8 @@ type TargetsTransformer struct {
53} 50}
54 51
55func (t *TargetsTransformer) Transform(g *Graph) error { 52func (t *TargetsTransformer) Transform(g *Graph) error {
56 if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 { 53 if len(t.Targets) > 0 {
57 addrs, err := t.parseTargetAddresses() 54 targetedNodes, err := t.selectTargetedNodes(g, t.Targets)
58 if err != nil {
59 return err
60 }
61
62 t.ParsedTargets = addrs
63 }
64
65 if len(t.ParsedTargets) > 0 {
66 targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
67 if err != nil { 55 if err != nil {
68 return err 56 return err
69 } 57 }
@@ -88,24 +76,10 @@ func (t *TargetsTransformer) Transform(g *Graph) error {
88 return nil 76 return nil
89} 77}
90 78
91func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) { 79// Returns a set of targeted nodes. A targeted node is either addressed
92 addrs := make([]ResourceAddress, len(t.Targets)) 80// directly, address indirectly via its container, or it's a dependency of a
93 for i, target := range t.Targets { 81// targeted node. Destroy mode keeps dependents instead of dependencies.
94 ta, err := ParseResourceAddress(target) 82func (t *TargetsTransformer) selectTargetedNodes(g *Graph, addrs []addrs.Targetable) (*dag.Set, error) {
95 if err != nil {
96 return nil, err
97 }
98 addrs[i] = *ta
99 }
100
101 return addrs, nil
102}
103
104// Returns the list of targeted nodes. A targeted node is either addressed
105// directly, or is an Ancestor of a targeted node. Destroy mode keeps
106// Descendents instead of Ancestors.
107func (t *TargetsTransformer) selectTargetedNodes(
108 g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
109 targetedNodes := new(dag.Set) 83 targetedNodes := new(dag.Set)
110 84
111 vertices := g.Vertices() 85 vertices := g.Vertices()
@@ -154,6 +128,12 @@ func (t *TargetsTransformer) addDependencies(targetedNodes *dag.Set, g *Graph) (
154 vertices := queue 128 vertices := queue
155 queue = nil // ready to append for next iteration if neccessary 129 queue = nil // ready to append for next iteration if neccessary
156 for _, v := range vertices { 130 for _, v := range vertices {
131 // providers don't cause transitive dependencies, so don't target
132 // downstream from them.
133 if _, ok := v.(GraphNodeProvider); ok {
134 continue
135 }
136
157 dependers := g.UpEdges(v) 137 dependers := g.UpEdges(v)
158 if dependers == nil { 138 if dependers == nil {
159 // indicates that there are no up edges for this node, so 139 // indicates that there are no up edges for this node, so
@@ -240,21 +220,34 @@ func filterPartialOutputs(v interface{}, targetedNodes *dag.Set, g *Graph) bool
240 return true 220 return true
241} 221}
242 222
243func (t *TargetsTransformer) nodeIsTarget( 223func (t *TargetsTransformer) nodeIsTarget(v dag.Vertex, targets []addrs.Targetable) bool {
244 v dag.Vertex, addrs []ResourceAddress) bool { 224 var vertexAddr addrs.Targetable
245 r, ok := v.(GraphNodeResource) 225 switch r := v.(type) {
226 case GraphNodeResourceInstance:
227 vertexAddr = r.ResourceInstanceAddr()
228 case GraphNodeResource:
229 vertexAddr = r.ResourceAddr()
230 default:
231 // Only resource and resource instance nodes can be targeted.
232 return false
233 }
234 _, ok := v.(GraphNodeResource)
246 if !ok { 235 if !ok {
247 return false 236 return false
248 } 237 }
249 238
250 addr := r.ResourceAddr() 239 for _, targetAddr := range targets {
251 for _, targetAddr := range addrs {
252 if t.IgnoreIndices { 240 if t.IgnoreIndices {
253 // targetAddr is not a pointer, so we can safely mutate it without 241 // If we're ignoring indices then we'll convert any resource instance
254 // interfering with references elsewhere. 242 // addresses into resource addresses. We don't need to convert
255 targetAddr.Index = -1 243 // vertexAddr because instance addresses are contained within
244 // their associated resources, and so .TargetContains will take
245 // care of this for us.
246 if instance, isInstance := targetAddr.(addrs.AbsResourceInstance); isInstance {
247 targetAddr = instance.ContainingResource()
248 }
256 } 249 }
257 if targetAddr.Contains(addr) { 250 if targetAddr.TargetContains(vertexAddr) {
258 return true 251 return true
259 } 252 }
260 } 253 }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
index b31e2c7..05daa51 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
@@ -1,7 +1,8 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "github.com/hashicorp/terraform/config/module" 4 "github.com/hashicorp/terraform/addrs"
5 "github.com/hashicorp/terraform/configs"
5) 6)
6 7
7// RootVariableTransformer is a GraphTransformer that adds all the root 8// RootVariableTransformer is a GraphTransformer that adds all the root
@@ -11,28 +12,27 @@ import (
11// graph since downstream things that depend on them must be able to 12// graph since downstream things that depend on them must be able to
12// reach them. 13// reach them.
13type RootVariableTransformer struct { 14type RootVariableTransformer struct {
14 Module *module.Tree 15 Config *configs.Config
15} 16}
16 17
17func (t *RootVariableTransformer) Transform(g *Graph) error { 18func (t *RootVariableTransformer) Transform(g *Graph) error {
18 // If no config, no variables 19 // We can have no variables if we have no config.
19 if t.Module == nil { 20 if t.Config == nil {
20 return nil 21 return nil
21 } 22 }
22 23
23 // If we have no vars, we're done! 24 // We're only considering root module variables here, since child
24 vars := t.Module.Config().Variables 25 // module variables are handled by ModuleVariableTransformer.
25 if len(vars) == 0 { 26 vars := t.Config.Module.Variables
26 return nil
27 }
28 27
29 // Add all variables here 28 // Add all variables here
30 for _, v := range vars { 29 for _, v := range vars {
31 node := &NodeRootVariable{ 30 node := &NodeRootVariable{
31 Addr: addrs.InputVariable{
32 Name: v.Name,
33 },
32 Config: v, 34 Config: v,
33 } 35 }
34
35 // Add it!
36 g.Add(node) 36 g.Add(node)
37 } 37 }
38 38
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
index 7c87459..f6790d9 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
@@ -1,10 +1,12 @@
1package terraform 1package terraform
2 2
3import "context"
4
3// UIInput is the interface that must be implemented to ask for input 5// UIInput is the interface that must be implemented to ask for input
4// from this user. This should forward the request to wherever the user 6// from this user. This should forward the request to wherever the user
5// inputs things to ask for values. 7// inputs things to ask for values.
6type UIInput interface { 8type UIInput interface {
7 Input(*InputOpts) (string, error) 9 Input(context.Context, *InputOpts) (string, error)
8} 10}
9 11
10// InputOpts are options for asking for input. 12// InputOpts are options for asking for input.
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
index e3a07ef..e2d9c38 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
@@ -1,5 +1,7 @@
1package terraform 1package terraform
2 2
3import "context"
4
3// MockUIInput is an implementation of UIInput that can be used for tests. 5// MockUIInput is an implementation of UIInput that can be used for tests.
4type MockUIInput struct { 6type MockUIInput struct {
5 InputCalled bool 7 InputCalled bool
@@ -10,7 +12,7 @@ type MockUIInput struct {
10 InputFn func(*InputOpts) (string, error) 12 InputFn func(*InputOpts) (string, error)
11} 13}
12 14
13func (i *MockUIInput) Input(opts *InputOpts) (string, error) { 15func (i *MockUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) {
14 i.InputCalled = true 16 i.InputCalled = true
15 i.InputOpts = opts 17 i.InputOpts = opts
16 if i.InputFn != nil { 18 if i.InputFn != nil {
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
index 2207d1d..b5d32b1 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
@@ -1,6 +1,7 @@
1package terraform 1package terraform
2 2
3import ( 3import (
4 "context"
4 "fmt" 5 "fmt"
5) 6)
6 7
@@ -12,8 +13,8 @@ type PrefixUIInput struct {
12 UIInput UIInput 13 UIInput UIInput
13} 14}
14 15
15func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) { 16func (i *PrefixUIInput) Input(ctx context.Context, opts *InputOpts) (string, error) {
16 opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id) 17 opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
17 opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query) 18 opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
18 return i.UIInput.Input(opts) 19 return i.UIInput.Input(ctx, opts)
19} 20}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
index 878a031..fff964f 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
@@ -1,15 +1,19 @@
1package terraform 1package terraform
2 2
3import (
4 "github.com/hashicorp/terraform/addrs"
5)
6
3// ProvisionerUIOutput is an implementation of UIOutput that calls a hook 7// ProvisionerUIOutput is an implementation of UIOutput that calls a hook
4// for the output so that the hooks can handle it. 8// for the output so that the hooks can handle it.
5type ProvisionerUIOutput struct { 9type ProvisionerUIOutput struct {
6 Info *InstanceInfo 10 InstanceAddr addrs.AbsResourceInstance
7 Type string 11 ProvisionerType string
8 Hooks []Hook 12 Hooks []Hook
9} 13}
10 14
11func (o *ProvisionerUIOutput) Output(msg string) { 15func (o *ProvisionerUIOutput) Output(msg string) {
12 for _, h := range o.Hooks { 16 for _, h := range o.Hooks {
13 h.ProvisionOutput(o.Info, o.Type, msg) 17 h.ProvisionOutput(o.InstanceAddr, o.ProvisionerType, msg)
14 } 18 }
15} 19}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go
new file mode 100644
index 0000000..627593d
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/terraform/valuesourcetype_string.go
@@ -0,0 +1,59 @@
1// Code generated by "stringer -type ValueSourceType"; DO NOT EDIT.
2
3package terraform
4
5import "strconv"
6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[ValueFromUnknown-0]
12 _ = x[ValueFromConfig-67]
13 _ = x[ValueFromAutoFile-70]
14 _ = x[ValueFromNamedFile-78]
15 _ = x[ValueFromCLIArg-65]
16 _ = x[ValueFromEnvVar-69]
17 _ = x[ValueFromInput-73]
18 _ = x[ValueFromPlan-80]
19 _ = x[ValueFromCaller-83]
20}
21
22const (
23 _ValueSourceType_name_0 = "ValueFromUnknown"
24 _ValueSourceType_name_1 = "ValueFromCLIArg"
25 _ValueSourceType_name_2 = "ValueFromConfig"
26 _ValueSourceType_name_3 = "ValueFromEnvVarValueFromAutoFile"
27 _ValueSourceType_name_4 = "ValueFromInput"
28 _ValueSourceType_name_5 = "ValueFromNamedFile"
29 _ValueSourceType_name_6 = "ValueFromPlan"
30 _ValueSourceType_name_7 = "ValueFromCaller"
31)
32
33var (
34 _ValueSourceType_index_3 = [...]uint8{0, 15, 32}
35)
36
37func (i ValueSourceType) String() string {
38 switch {
39 case i == 0:
40 return _ValueSourceType_name_0
41 case i == 65:
42 return _ValueSourceType_name_1
43 case i == 67:
44 return _ValueSourceType_name_2
45 case 69 <= i && i <= 70:
46 i -= 69
47 return _ValueSourceType_name_3[_ValueSourceType_index_3[i]:_ValueSourceType_index_3[i+1]]
48 case i == 73:
49 return _ValueSourceType_name_4
50 case i == 78:
51 return _ValueSourceType_name_5
52 case i == 80:
53 return _ValueSourceType_name_6
54 case i == 83:
55 return _ValueSourceType_name_7
56 default:
57 return "ValueSourceType(" + strconv.FormatInt(int64(i), 10) + ")"
58 }
59}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
index 300f2ad..75531b2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/variables.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/variables.go
@@ -2,165 +2,312 @@ package terraform
2 2
3import ( 3import (
4 "fmt" 4 "fmt"
5 "os"
6 "strings"
7 5
8 "github.com/hashicorp/terraform/config" 6 "github.com/hashicorp/hcl2/hcl"
9 "github.com/hashicorp/terraform/config/module" 7 "github.com/zclconf/go-cty/cty"
10 "github.com/hashicorp/terraform/helper/hilmapstructure" 8 "github.com/zclconf/go-cty/cty/convert"
9
10 "github.com/hashicorp/terraform/configs"
11 "github.com/hashicorp/terraform/tfdiags"
12)
13
14// InputValue represents a value for a variable in the root module, provided
15// as part of the definition of an operation.
16type InputValue struct {
17 Value cty.Value
18 SourceType ValueSourceType
19
20 // SourceRange provides source location information for values whose
21 // SourceType is either ValueFromConfig or ValueFromFile. It is not
22 // populated for other source types, and so should not be used.
23 SourceRange tfdiags.SourceRange
24}
25
26// ValueSourceType describes what broad category of source location provided
27// a particular value.
28type ValueSourceType rune
29
30const (
31 // ValueFromUnknown is the zero value of ValueSourceType and is not valid.
32 ValueFromUnknown ValueSourceType = 0
33
34 // ValueFromConfig indicates that a value came from a .tf or .tf.json file,
35 // e.g. the default value defined for a variable.
36 ValueFromConfig ValueSourceType = 'C'
37
38 // ValueFromAutoFile indicates that a value came from a "values file", like
39 // a .tfvars file, that was implicitly loaded by naming convention.
40 ValueFromAutoFile ValueSourceType = 'F'
41
42 // ValueFromNamedFile indicates that a value came from a named "values file",
43 // like a .tfvars file, that was passed explicitly on the command line (e.g.
44 // -var-file=foo.tfvars).
45 ValueFromNamedFile ValueSourceType = 'N'
46
47 // ValueFromCLIArg indicates that the value was provided directly in
48 // a CLI argument. The name of this argument is not recorded and so it must
49 // be inferred from context.
50 ValueFromCLIArg ValueSourceType = 'A'
51
52 // ValueFromEnvVar indicates that the value was provided via an environment
53 // variable. The name of the variable is not recorded and so it must be
54 // inferred from context.
55 ValueFromEnvVar ValueSourceType = 'E'
56
57 // ValueFromInput indicates that the value was provided at an interactive
58 // input prompt.
59 ValueFromInput ValueSourceType = 'I'
60
61 // ValueFromPlan indicates that the value was retrieved from a stored plan.
62 ValueFromPlan ValueSourceType = 'P'
63
64 // ValueFromCaller indicates that the value was explicitly overridden by
65 // a caller to Context.SetVariable after the context was constructed.
66 ValueFromCaller ValueSourceType = 'S'
11) 67)
12 68
13// Variables returns the fully loaded set of variables to use with 69func (v *InputValue) GoString() string {
14// ContextOpts and NewContext, loading any additional variables from 70 if (v.SourceRange != tfdiags.SourceRange{}) {
15// the environment or any other sources. 71 return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v, SourceRange: %#v}", v.Value, v.SourceType, v.SourceRange)
72 } else {
73 return fmt.Sprintf("&terraform.InputValue{Value: %#v, SourceType: %#v}", v.Value, v.SourceType)
74 }
75}
76
77func (v ValueSourceType) GoString() string {
78 return fmt.Sprintf("terraform.%s", v)
79}
80
81//go:generate stringer -type ValueSourceType
82
83// InputValues is a map of InputValue instances.
84type InputValues map[string]*InputValue
85
86// InputValuesFromCaller turns the given map of naked values into an
87// InputValues that attributes each value to "a caller", using the source
88// type ValueFromCaller. This is primarily useful for testing purposes.
16// 89//
17// The given module tree doesn't need to be loaded. 90// This should not be used as a general way to convert map[string]cty.Value
18func Variables( 91// into InputValues, since in most real cases we want to set a suitable
19 m *module.Tree, 92// other SourceType and possibly SourceRange value.
20 override map[string]interface{}) (map[string]interface{}, error) { 93func InputValuesFromCaller(vals map[string]cty.Value) InputValues {
21 result := make(map[string]interface{}) 94 ret := make(InputValues, len(vals))
22 95 for k, v := range vals {
23 // Variables are loaded in the following sequence. Each additional step 96 ret[k] = &InputValue{
24 // will override conflicting variable keys from prior steps: 97 Value: v,
25 // 98 SourceType: ValueFromCaller,
26 // * Take default values from config
27 // * Take values from TF_VAR_x env vars
28 // * Take values specified in the "override" param which is usually
29 // from -var, -var-file, etc.
30 //
31
32 // First load from the config
33 for _, v := range m.Config().Variables {
34 // If the var has no default, ignore
35 if v.Default == nil {
36 continue
37 } 99 }
100 }
101 return ret
102}
38 103
39 // If the type isn't a string, we use it as-is since it is a rich type 104// Override merges the given value maps with the receiver, overriding any
40 if v.Type() != config.VariableTypeString { 105// conflicting keys so that the latest definition wins.
41 result[v.Name] = v.Default 106func (vv InputValues) Override(others ...InputValues) InputValues {
42 continue 107 // FIXME: This should check to see if any of the values are maps and
108 // merge them if so, in order to preserve the behavior from prior to
109 // Terraform 0.12.
110 ret := make(InputValues)
111 for k, v := range vv {
112 ret[k] = v
113 }
114 for _, other := range others {
115 for k, v := range other {
116 ret[k] = v
43 } 117 }
118 }
119 return ret
120}
44 121
45 // v.Default has already been parsed as HCL but it may be an int type 122// JustValues returns a map that just includes the values, discarding the
46 switch typedDefault := v.Default.(type) { 123// source information.
47 case string: 124func (vv InputValues) JustValues() map[string]cty.Value {
48 if typedDefault == "" { 125 ret := make(map[string]cty.Value, len(vv))
49 continue 126 for k, v := range vv {
50 } 127 ret[k] = v.Value
51 result[v.Name] = typedDefault
52 case int, int64:
53 result[v.Name] = fmt.Sprintf("%d", typedDefault)
54 case float32, float64:
55 result[v.Name] = fmt.Sprintf("%f", typedDefault)
56 case bool:
57 result[v.Name] = fmt.Sprintf("%t", typedDefault)
58 default:
59 panic(fmt.Sprintf(
60 "Unknown default var type: %T\n\n"+
61 "THIS IS A BUG. Please report it.",
62 v.Default))
63 }
64 } 128 }
129 return ret
130}
65 131
66 // Load from env vars 132// DefaultVariableValues returns an InputValues map representing the default
67 for _, v := range os.Environ() { 133// values specified for variables in the given configuration map.
68 if !strings.HasPrefix(v, VarEnvPrefix) { 134func DefaultVariableValues(configs map[string]*configs.Variable) InputValues {
135 ret := make(InputValues)
136 for k, c := range configs {
137 if c.Default == cty.NilVal {
69 continue 138 continue
70 } 139 }
140 ret[k] = &InputValue{
141 Value: c.Default,
142 SourceType: ValueFromConfig,
143 SourceRange: tfdiags.SourceRangeFromHCL(c.DeclRange),
144 }
145 }
146 return ret
147}
71 148
72 // Strip off the prefix and get the value after the first "=" 149// SameValues returns true if the given InputValues has the same values as
73 idx := strings.Index(v, "=") 150// the receiever, disregarding the source types and source ranges.
74 k := v[len(VarEnvPrefix):idx] 151//
75 v = v[idx+1:] 152// Values are compared using the cty "RawEquals" method, which means that
76 153// unknown values can be considered equal to one another if they are of the
77 // Override the configuration-default values. Note that *not* finding the variable 154// same type.
78 // in configuration is OK, as we don't want to preclude people from having multiple 155func (vv InputValues) SameValues(other InputValues) bool {
79 // sets of TF_VAR_whatever in their environment even if it is a little weird. 156 if len(vv) != len(other) {
80 for _, schema := range m.Config().Variables { 157 return false
81 if schema.Name != k { 158 }
82 continue
83 }
84
85 varType := schema.Type()
86 varVal, err := parseVariableAsHCL(k, v, varType)
87 if err != nil {
88 return nil, err
89 }
90 159
91 switch varType { 160 for k, v := range vv {
92 case config.VariableTypeMap: 161 ov, exists := other[k]
93 if err := varSetMap(result, k, varVal); err != nil { 162 if !exists {
94 return nil, err 163 return false
95 } 164 }
96 default: 165 if !v.Value.RawEquals(ov.Value) {
97 result[k] = varVal 166 return false
98 }
99 } 167 }
100 } 168 }
101 169
102 // Load from overrides 170 return true
103 for k, v := range override { 171}
104 for _, schema := range m.Config().Variables {
105 if schema.Name != k {
106 continue
107 }
108 172
109 switch schema.Type() { 173// HasValues returns true if the reciever has the same values as in the given
110 case config.VariableTypeList: 174// map, disregarding the source types and source ranges.
111 result[k] = v 175//
112 case config.VariableTypeMap: 176// Values are compared using the cty "RawEquals" method, which means that
113 if err := varSetMap(result, k, v); err != nil { 177// unknown values can be considered equal to one another if they are of the
114 return nil, err 178// same type.
115 } 179func (vv InputValues) HasValues(vals map[string]cty.Value) bool {
116 case config.VariableTypeString: 180 if len(vv) != len(vals) {
117 // Convert to a string and set. We don't catch any errors 181 return false
118 // here because the validation step later should catch 182 }
119 // any type errors. 183
120 var strVal string 184 for k, v := range vv {
121 if err := hilmapstructure.WeakDecode(v, &strVal); err == nil { 185 oVal, exists := vals[k]
122 result[k] = strVal 186 if !exists {
123 } else { 187 return false
124 result[k] = v 188 }
125 } 189 if !v.Value.RawEquals(oVal) {
126 default: 190 return false
127 panic(fmt.Sprintf(
128 "Unhandled var type: %T\n\n"+
129 "THIS IS A BUG. Please report it.",
130 schema.Type()))
131 }
132 } 191 }
133 } 192 }
134 193
135 return result, nil 194 return true
136} 195}
137 196
138// varSetMap sets or merges the map in "v" with the key "k" in the 197// Identical returns true if the given InputValues has the same values,
139// "current" set of variables. This is just a private function to remove 198// source types, and source ranges as the receiver.
140// duplicate logic in Variables 199//
141func varSetMap(current map[string]interface{}, k string, v interface{}) error { 200// Values are compared using the cty "RawEquals" method, which means that
142 existing, ok := current[k] 201// unknown values can be considered equal to one another if they are of the
143 if !ok { 202// same type.
144 current[k] = v 203//
145 return nil 204// This method is primarily for testing. For most practical purposes, it's
205// better to use SameValues or HasValues.
206func (vv InputValues) Identical(other InputValues) bool {
207 if len(vv) != len(other) {
208 return false
146 } 209 }
147 210
148 existingMap, ok := existing.(map[string]interface{}) 211 for k, v := range vv {
149 if !ok { 212 ov, exists := other[k]
150 panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k)) 213 if !exists {
214 return false
215 }
216 if !v.Value.RawEquals(ov.Value) {
217 return false
218 }
219 if v.SourceType != ov.SourceType {
220 return false
221 }
222 if v.SourceRange != ov.SourceRange {
223 return false
224 }
151 } 225 }
152 226
153 switch typedV := v.(type) { 227 return true
154 case []map[string]interface{}: 228}
155 for newKey, newVal := range typedV[0] { 229
156 existingMap[newKey] = newVal 230// checkInputVariables ensures that variable values supplied at the UI conform
231// to their corresponding declarations in configuration.
232//
233// The set of values is considered valid only if the returned diagnostics
234// does not contain errors. A valid set of values may still produce warnings,
235// which should be returned to the user.
236func checkInputVariables(vcs map[string]*configs.Variable, vs InputValues) tfdiags.Diagnostics {
237 var diags tfdiags.Diagnostics
238
239 for name, vc := range vcs {
240 val, isSet := vs[name]
241 if !isSet {
242 // Always an error, since the caller should already have included
243 // default values from the configuration in the values map.
244 diags = diags.Append(tfdiags.Sourceless(
245 tfdiags.Error,
246 "Unassigned variable",
247 fmt.Sprintf("The input variable %q has not been assigned a value. This is a bug in Terraform; please report it in a GitHub issue.", name),
248 ))
249 continue
157 } 250 }
158 case map[string]interface{}: 251
159 for newKey, newVal := range typedV { 252 wantType := vc.Type
160 existingMap[newKey] = newVal 253
254 // A given value is valid if it can convert to the desired type.
255 _, err := convert.Convert(val.Value, wantType)
256 if err != nil {
257 switch val.SourceType {
258 case ValueFromConfig, ValueFromAutoFile, ValueFromNamedFile:
259 // We have source location information for these.
260 diags = diags.Append(&hcl.Diagnostic{
261 Severity: hcl.DiagError,
262 Summary: "Invalid value for input variable",
263 Detail: fmt.Sprintf("The given value is not valid for variable %q: %s.", name, err),
264 Subject: val.SourceRange.ToHCL().Ptr(),
265 })
266 case ValueFromEnvVar:
267 diags = diags.Append(tfdiags.Sourceless(
268 tfdiags.Error,
269 "Invalid value for input variable",
270 fmt.Sprintf("The environment variable TF_VAR_%s does not contain a valid value for variable %q: %s.", name, name, err),
271 ))
272 case ValueFromCLIArg:
273 diags = diags.Append(tfdiags.Sourceless(
274 tfdiags.Error,
275 "Invalid value for input variable",
276 fmt.Sprintf("The argument -var=\"%s=...\" does not contain a valid value for variable %q: %s.", name, name, err),
277 ))
278 case ValueFromInput:
279 diags = diags.Append(tfdiags.Sourceless(
280 tfdiags.Error,
281 "Invalid value for input variable",
282 fmt.Sprintf("The value entered for variable %q is not valid: %s.", name, err),
283 ))
284 default:
285 // The above gets us good coverage for the situations users
286 // are likely to encounter with their own inputs. The other
287 // cases are generally implementation bugs, so we'll just
288 // use a generic error for these.
289 diags = diags.Append(tfdiags.Sourceless(
290 tfdiags.Error,
291 "Invalid value for input variable",
292 fmt.Sprintf("The value provided for variable %q is not valid: %s.", name, err),
293 ))
294 }
161 } 295 }
162 default:
163 return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v))
164 } 296 }
165 return nil 297
298 // Check for any variables that are assigned without being configured.
299 // This is always an implementation error in the caller, because we
300 // expect undefined variables to be caught during context construction
301 // where there is better context to report it well.
302 for name := range vs {
303 if _, defined := vcs[name]; !defined {
304 diags = diags.Append(tfdiags.Sourceless(
305 tfdiags.Error,
306 "Value assigned to undeclared variable",
307 fmt.Sprintf("A value was assigned to an undeclared input variable %q.", name),
308 ))
309 }
310 }
311
312 return diags
166} 313}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
index 1f43045..61423c2 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/version_required.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
@@ -3,69 +3,60 @@ package terraform
3import ( 3import (
4 "fmt" 4 "fmt"
5 5
6 "github.com/hashicorp/go-version" 6 "github.com/hashicorp/hcl2/hcl"
7 "github.com/hashicorp/terraform/config" 7 "github.com/hashicorp/terraform/tfdiags"
8 "github.com/hashicorp/terraform/config/module" 8
9 "github.com/hashicorp/terraform/configs"
9 10
10 tfversion "github.com/hashicorp/terraform/version" 11 tfversion "github.com/hashicorp/terraform/version"
11) 12)
12 13
13// CheckRequiredVersion verifies that any version requirements specified by 14// CheckCoreVersionRequirements visits each of the modules in the given
14// the configuration are met. 15// configuration tree and verifies that any given Core version constraints
15// 16// match with the version of Terraform Core that is being used.
16// This checks the root module as well as any additional version requirements
17// from child modules.
18// 17//
19// This is tested in context_test.go. 18// The returned diagnostics will contain errors if any constraints do not match.
20func CheckRequiredVersion(m *module.Tree) error { 19// The returned diagnostics might also return warnings, which should be
21 // Check any children 20// displayed to the user.
22 for _, c := range m.Children() { 21func CheckCoreVersionRequirements(config *configs.Config) tfdiags.Diagnostics {
23 if err := CheckRequiredVersion(c); err != nil { 22 if config == nil {
24 return err
25 }
26 }
27
28 var tf *config.Terraform
29 if c := m.Config(); c != nil {
30 tf = c.Terraform
31 }
32
33 // If there is no Terraform config or the required version isn't set,
34 // we move on.
35 if tf == nil || tf.RequiredVersion == "" {
36 return nil 23 return nil
37 } 24 }
38 25
39 // Path for errors 26 var diags tfdiags.Diagnostics
40 module := "root" 27 module := config.Module
41 if path := normalizeModulePath(m.Path()); len(path) > 1 { 28
42 module = modulePrefixStr(path) 29 for _, constraint := range module.CoreVersionConstraints {
43 } 30 if !constraint.Required.Check(tfversion.SemVer) {
44 31 switch {
45 // Check this version requirement of this module 32 case len(config.Path) == 0:
46 cs, err := version.NewConstraint(tf.RequiredVersion) 33 diags = diags.Append(&hcl.Diagnostic{
47 if err != nil { 34 Severity: hcl.DiagError,
48 return fmt.Errorf( 35 Summary: "Unsupported Terraform Core version",
49 "%s: terraform.required_version %q syntax error: %s", 36 Detail: fmt.Sprintf(
50 module, 37 "This configuration does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
51 tf.RequiredVersion, err) 38 tfversion.String(),
39 ),
40 Subject: &constraint.DeclRange,
41 })
42 default:
43 diags = diags.Append(&hcl.Diagnostic{
44 Severity: hcl.DiagError,
45 Summary: "Unsupported Terraform Core version",
46 Detail: fmt.Sprintf(
47 "Module %s (from %s) does not support Terraform version %s. To proceed, either choose another supported Terraform version or update this version constraint. Version constraints are normally set for good reason, so updating the constraint may lead to other errors or unexpected behavior.",
48 config.Path, config.SourceAddr, tfversion.String(),
49 ),
50 Subject: &constraint.DeclRange,
51 })
52 }
53 }
52 } 54 }
53 55
54 if !cs.Check(tfversion.SemVer) { 56 for _, c := range config.Children {
55 return fmt.Errorf( 57 childDiags := CheckCoreVersionRequirements(c)
56 "The currently running version of Terraform doesn't meet the\n"+ 58 diags = diags.Append(childDiags)
57 "version requirements explicitly specified by the configuration.\n"+
58 "Please use the required version or update the configuration.\n"+
59 "Note that version requirements are usually set for a reason, so\n"+
60 "we recommend verifying with whoever set the version requirements\n"+
61 "prior to making any manual changes.\n\n"+
62 " Module: %s\n"+
63 " Required version: %s\n"+
64 " Current version: %s",
65 module,
66 tf.RequiredVersion,
67 tfversion.SemVer)
68 } 59 }
69 60
70 return nil 61 return diags
71} 62}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
index 4cfc528..0666aa5 100644
--- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
+++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
@@ -4,9 +4,24 @@ package terraform
4 4
5import "strconv" 5import "strconv"
6 6
7const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" 7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[walkInvalid-0]
12 _ = x[walkApply-1]
13 _ = x[walkPlan-2]
14 _ = x[walkPlanDestroy-3]
15 _ = x[walkRefresh-4]
16 _ = x[walkValidate-5]
17 _ = x[walkDestroy-6]
18 _ = x[walkImport-7]
19 _ = x[walkEval-8]
20}
21
22const _walkOperation_name = "walkInvalidwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImportwalkEval"
8 23
9var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} 24var _walkOperation_index = [...]uint8{0, 11, 20, 28, 43, 54, 66, 77, 87, 95}
10 25
11func (i walkOperation) String() string { 26func (i walkOperation) String() string {
12 if i >= walkOperation(len(_walkOperation_index)-1) { 27 if i >= walkOperation(len(_walkOperation_index)-1) {
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go
new file mode 100644
index 0000000..8e41f46
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/config_traversals.go
@@ -0,0 +1,68 @@
1package tfdiags
2
3import (
4 "bytes"
5 "fmt"
6 "strconv"
7
8 "github.com/zclconf/go-cty/cty"
9)
10
11// FormatCtyPath is a helper function to produce a user-friendly string
12// representation of a cty.Path. The result uses a syntax similar to the
13// HCL expression language in the hope of it being familiar to users.
14func FormatCtyPath(path cty.Path) string {
15 var buf bytes.Buffer
16 for _, step := range path {
17 switch ts := step.(type) {
18 case cty.GetAttrStep:
19 fmt.Fprintf(&buf, ".%s", ts.Name)
20 case cty.IndexStep:
21 buf.WriteByte('[')
22 key := ts.Key
23 keyTy := key.Type()
24 switch {
25 case key.IsNull():
26 buf.WriteString("null")
27 case !key.IsKnown():
28 buf.WriteString("(not yet known)")
29 case keyTy == cty.Number:
30 bf := key.AsBigFloat()
31 buf.WriteString(bf.Text('g', -1))
32 case keyTy == cty.String:
33 buf.WriteString(strconv.Quote(key.AsString()))
34 default:
35 buf.WriteString("...")
36 }
37 buf.WriteByte(']')
38 }
39 }
40 return buf.String()
41}
42
43// FormatError is a helper function to produce a user-friendly string
44// representation of certain special error types that we might want to
45// include in diagnostic messages.
46//
47// This currently has special behavior only for cty.PathError, where a
48// non-empty path is rendered in a HCL-like syntax as context.
49func FormatError(err error) string {
50 perr, ok := err.(cty.PathError)
51 if !ok || len(perr.Path) == 0 {
52 return err.Error()
53 }
54
55 return fmt.Sprintf("%s: %s", FormatCtyPath(perr.Path), perr.Error())
56}
57
58// FormatErrorPrefixed is like FormatError except that it presents any path
59// information after the given prefix string, which is assumed to contain
60// an HCL syntax representation of the value that errors are relative to.
61func FormatErrorPrefixed(err error, prefix string) string {
62 perr, ok := err.(cty.PathError)
63 if !ok || len(perr.Path) == 0 {
64 return fmt.Sprintf("%s: %s", prefix, err.Error())
65 }
66
67 return fmt.Sprintf("%s%s: %s", prefix, FormatCtyPath(perr.Path), perr.Error())
68}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
new file mode 100644
index 0000000..25b2140
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/contextual.go
@@ -0,0 +1,372 @@
1package tfdiags
2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5 "github.com/zclconf/go-cty/cty"
6 "github.com/zclconf/go-cty/cty/gocty"
7)
8
9// The "contextual" family of diagnostics are designed to allow separating
10// the detection of a problem from placing that problem in context. For
11// example, some code that is validating an object extracted from configuration
12// may not have access to the configuration that generated it, but can still
13// report problems within that object which the caller can then place in
14// context by calling IsConfigBody on the returned diagnostics.
15//
16// When contextual diagnostics are used, the documentation for a method must
17// be very explicit about what context is implied for any diagnostics returned,
18// to help ensure the expected result.
19
20// contextualFromConfig is an interface type implemented by diagnostic types
21// that can elaborate themselves when given information about the configuration
22// body they are embedded in.
23//
24// Usually this entails extracting source location information in order to
25// populate the "Subject" range.
26type contextualFromConfigBody interface {
27 ElaborateFromConfigBody(hcl.Body) Diagnostic
28}
29
30// InConfigBody returns a copy of the receiver with any config-contextual
31// diagnostics elaborated in the context of the given body.
32func (d Diagnostics) InConfigBody(body hcl.Body) Diagnostics {
33 if len(d) == 0 {
34 return nil
35 }
36
37 ret := make(Diagnostics, len(d))
38 for i, srcDiag := range d {
39 if cd, isCD := srcDiag.(contextualFromConfigBody); isCD {
40 ret[i] = cd.ElaborateFromConfigBody(body)
41 } else {
42 ret[i] = srcDiag
43 }
44 }
45
46 return ret
47}
48
49// AttributeValue returns a diagnostic about an attribute value in an implied current
50// configuration context. This should be returned only from functions whose
51// interface specifies a clear configuration context that this will be
52// resolved in.
53//
54// The given path is relative to the implied configuration context. To describe
55// a top-level attribute, it should be a single-element cty.Path with a
56// cty.GetAttrStep. It's assumed that the path is returning into a structure
57// that would be produced by our conventions in the configschema package; it
58// may return unexpected results for structures that can't be represented by
59// configschema.
60//
61// Since mapping attribute paths back onto configuration is an imprecise
62// operation (e.g. dynamic block generation may cause the same block to be
63// evaluated multiple times) the diagnostic detail should include the attribute
64// name and other context required to help the user understand what is being
65// referenced in case the identified source range is not unique.
66//
67// The returned attribute will not have source location information until
68// context is applied to the containing diagnostics using diags.InConfigBody.
69// After context is applied, the source location is the value assigned to the
70// named attribute, or the containing body's "missing item range" if no
71// value is present.
72func AttributeValue(severity Severity, summary, detail string, attrPath cty.Path) Diagnostic {
73 return &attributeDiagnostic{
74 diagnosticBase: diagnosticBase{
75 severity: severity,
76 summary: summary,
77 detail: detail,
78 },
79 attrPath: attrPath,
80 }
81}
82
83// GetAttribute extracts an attribute cty.Path from a diagnostic if it contains
84// one. Normally this is not accessed directly, and instead the config body is
85// added to the Diagnostic to create a more complete message for the user. In
86// some cases however, we may want to know just the name of the attribute that
87// generated the Diagnostic message.
88// This returns a nil cty.Path if it does not exist in the Diagnostic.
89func GetAttribute(d Diagnostic) cty.Path {
90 if d, ok := d.(*attributeDiagnostic); ok {
91 return d.attrPath
92 }
93 return nil
94}
95
96type attributeDiagnostic struct {
97 diagnosticBase
98 attrPath cty.Path
99 subject *SourceRange // populated only after ElaborateFromConfigBody
100}
101
102// ElaborateFromConfigBody finds the most accurate possible source location
103// for a diagnostic's attribute path within the given body.
104//
105// Backing out from a path back to a source location is not always entirely
106// possible because we lose some information in the decoding process, so
107// if an exact position cannot be found then the returned diagnostic will
108// refer to a position somewhere within the containing body, which is assumed
109// to be better than no location at all.
110//
111// If possible it is generally better to report an error at a layer where
112// source location information is still available, for more accuracy. This
113// is not always possible due to system architecture, so this serves as a
114// "best effort" fallback behavior for such situations.
115func (d *attributeDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic {
116 if len(d.attrPath) < 1 {
117 // Should never happen, but we'll allow it rather than crashing.
118 return d
119 }
120
121 if d.subject != nil {
122 // Don't modify an already-elaborated diagnostic.
123 return d
124 }
125
126 ret := *d
127
128 // This function will often end up re-decoding values that were already
129 // decoded by an earlier step. This is non-ideal but is architecturally
130 // more convenient than arranging for source location information to be
131 // propagated to every place in Terraform, and this happens only in the
132 // presence of errors where performance isn't a concern.
133
134 traverse := d.attrPath[:]
135 final := d.attrPath[len(d.attrPath)-1]
136
137 // Index should never be the first step
138 // as indexing of top blocks (such as resources & data sources)
139 // is handled elsewhere
140 if _, isIdxStep := traverse[0].(cty.IndexStep); isIdxStep {
141 subject := SourceRangeFromHCL(body.MissingItemRange())
142 ret.subject = &subject
143 return &ret
144 }
145
146 // Process index separately
147 idxStep, hasIdx := final.(cty.IndexStep)
148 if hasIdx {
149 final = d.attrPath[len(d.attrPath)-2]
150 traverse = d.attrPath[:len(d.attrPath)-1]
151 }
152
153 // If we have more than one step after removing index
154 // then we'll first try to traverse to a child body
155 // corresponding to the requested path.
156 if len(traverse) > 1 {
157 body = traversePathSteps(traverse, body)
158 }
159
160 // Default is to indicate a missing item in the deepest body we reached
161 // while traversing.
162 subject := SourceRangeFromHCL(body.MissingItemRange())
163 ret.subject = &subject
164
165 // Once we get here, "final" should be a GetAttr step that maps to an
166 // attribute in our current body.
167 finalStep, isAttr := final.(cty.GetAttrStep)
168 if !isAttr {
169 return &ret
170 }
171
172 content, _, contentDiags := body.PartialContent(&hcl.BodySchema{
173 Attributes: []hcl.AttributeSchema{
174 {
175 Name: finalStep.Name,
176 Required: true,
177 },
178 },
179 })
180 if contentDiags.HasErrors() {
181 return &ret
182 }
183
184 if attr, ok := content.Attributes[finalStep.Name]; ok {
185 hclRange := attr.Expr.Range()
186 if hasIdx {
187 // Try to be more precise by finding index range
188 hclRange = hclRangeFromIndexStepAndAttribute(idxStep, attr)
189 }
190 subject = SourceRangeFromHCL(hclRange)
191 ret.subject = &subject
192 }
193
194 return &ret
195}
196
197func traversePathSteps(traverse []cty.PathStep, body hcl.Body) hcl.Body {
198 for i := 0; i < len(traverse); i++ {
199 step := traverse[i]
200
201 switch tStep := step.(type) {
202 case cty.GetAttrStep:
203
204 var next cty.PathStep
205 if i < (len(traverse) - 1) {
206 next = traverse[i+1]
207 }
208
209 // Will be indexing into our result here?
210 var indexType cty.Type
211 var indexVal cty.Value
212 if nextIndex, ok := next.(cty.IndexStep); ok {
213 indexVal = nextIndex.Key
214 indexType = indexVal.Type()
215 i++ // skip over the index on subsequent iterations
216 }
217
218 var blockLabelNames []string
219 if indexType == cty.String {
220 // Map traversal means we expect one label for the key.
221 blockLabelNames = []string{"key"}
222 }
223
224 // For intermediate steps we expect to be referring to a child
225 // block, so we'll attempt decoding under that assumption.
226 content, _, contentDiags := body.PartialContent(&hcl.BodySchema{
227 Blocks: []hcl.BlockHeaderSchema{
228 {
229 Type: tStep.Name,
230 LabelNames: blockLabelNames,
231 },
232 },
233 })
234 if contentDiags.HasErrors() {
235 return body
236 }
237 filtered := make([]*hcl.Block, 0, len(content.Blocks))
238 for _, block := range content.Blocks {
239 if block.Type == tStep.Name {
240 filtered = append(filtered, block)
241 }
242 }
243 if len(filtered) == 0 {
244 // Step doesn't refer to a block
245 continue
246 }
247
248 switch indexType {
249 case cty.NilType: // no index at all
250 if len(filtered) != 1 {
251 return body
252 }
253 body = filtered[0].Body
254 case cty.Number:
255 var idx int
256 err := gocty.FromCtyValue(indexVal, &idx)
257 if err != nil || idx >= len(filtered) {
258 return body
259 }
260 body = filtered[idx].Body
261 case cty.String:
262 key := indexVal.AsString()
263 var block *hcl.Block
264 for _, candidate := range filtered {
265 if candidate.Labels[0] == key {
266 block = candidate
267 break
268 }
269 }
270 if block == nil {
271 // No block with this key, so we'll just indicate a
272 // missing item in the containing block.
273 return body
274 }
275 body = block.Body
276 default:
277 // Should never happen, because only string and numeric indices
278 // are supported by cty collections.
279 return body
280 }
281
282 default:
283 // For any other kind of step, we'll just return our current body
284 // as the subject and accept that this is a little inaccurate.
285 return body
286 }
287 }
288 return body
289}
290
291func hclRangeFromIndexStepAndAttribute(idxStep cty.IndexStep, attr *hcl.Attribute) hcl.Range {
292 switch idxStep.Key.Type() {
293 case cty.Number:
294 var idx int
295 err := gocty.FromCtyValue(idxStep.Key, &idx)
296 items, diags := hcl.ExprList(attr.Expr)
297 if diags.HasErrors() {
298 return attr.Expr.Range()
299 }
300 if err != nil || idx >= len(items) {
301 return attr.NameRange
302 }
303 return items[idx].Range()
304 case cty.String:
305 pairs, diags := hcl.ExprMap(attr.Expr)
306 if diags.HasErrors() {
307 return attr.Expr.Range()
308 }
309 stepKey := idxStep.Key.AsString()
310 for _, kvPair := range pairs {
311 key, err := kvPair.Key.Value(nil)
312 if err != nil {
313 return attr.Expr.Range()
314 }
315 if key.AsString() == stepKey {
316 startRng := kvPair.Value.StartRange()
317 return startRng
318 }
319 }
320 return attr.NameRange
321 }
322 return attr.Expr.Range()
323}
324
325func (d *attributeDiagnostic) Source() Source {
326 return Source{
327 Subject: d.subject,
328 }
329}
330
331// WholeContainingBody returns a diagnostic about the body that is an implied
332// current configuration context. This should be returned only from
333// functions whose interface specifies a clear configuration context that this
334// will be resolved in.
335//
336// The returned attribute will not have source location information until
337// context is applied to the containing diagnostics using diags.InConfigBody.
338// After context is applied, the source location is currently the missing item
339// range of the body. In future, this may change to some other suitable
340// part of the containing body.
341func WholeContainingBody(severity Severity, summary, detail string) Diagnostic {
342 return &wholeBodyDiagnostic{
343 diagnosticBase: diagnosticBase{
344 severity: severity,
345 summary: summary,
346 detail: detail,
347 },
348 }
349}
350
351type wholeBodyDiagnostic struct {
352 diagnosticBase
353 subject *SourceRange // populated only after ElaborateFromConfigBody
354}
355
356func (d *wholeBodyDiagnostic) ElaborateFromConfigBody(body hcl.Body) Diagnostic {
357 if d.subject != nil {
358 // Don't modify an already-elaborated diagnostic.
359 return d
360 }
361
362 ret := *d
363 rng := SourceRangeFromHCL(body.MissingItemRange())
364 ret.subject = &rng
365 return &ret
366}
367
368func (d *wholeBodyDiagnostic) Source() Source {
369 return Source{
370 Subject: d.subject,
371 }
372}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
index 2c23f76..c91ba9a 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic.go
@@ -1,9 +1,18 @@
1package tfdiags 1package tfdiags
2 2
3import (
4 "github.com/hashicorp/hcl2/hcl"
5)
6
3type Diagnostic interface { 7type Diagnostic interface {
4 Severity() Severity 8 Severity() Severity
5 Description() Description 9 Description() Description
6 Source() Source 10 Source() Source
11
12 // FromExpr returns the expression-related context for the diagnostic, if
13 // available. Returns nil if the diagnostic is not related to an
14 // expression evaluation.
15 FromExpr() *FromExpr
7} 16}
8 17
9type Severity rune 18type Severity rune
@@ -24,3 +33,8 @@ type Source struct {
24 Subject *SourceRange 33 Subject *SourceRange
25 Context *SourceRange 34 Context *SourceRange
26} 35}
36
37type FromExpr struct {
38 Expression hcl.Expression
39 EvalContext *hcl.EvalContext
40}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
new file mode 100644
index 0000000..50bf9d8
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostic_base.go
@@ -0,0 +1,31 @@
1package tfdiags
2
3// diagnosticBase can be embedded in other diagnostic structs to get
4// default implementations of Severity and Description. This type also
5// has default implementations of Source and FromExpr that return no source
6// location or expression-related information, so embedders should generally
7// override those method to return more useful results where possible.
8type diagnosticBase struct {
9 severity Severity
10 summary string
11 detail string
12}
13
14func (d diagnosticBase) Severity() Severity {
15 return d.severity
16}
17
18func (d diagnosticBase) Description() Description {
19 return Description{
20 Summary: d.summary,
21 Detail: d.detail,
22 }
23}
24
25func (d diagnosticBase) Source() Source {
26 return Source{}
27}
28
29func (d diagnosticBase) FromExpr() *FromExpr {
30 return nil
31}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
index 667ba80..465b230 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/diagnostics.go
@@ -3,6 +3,9 @@ package tfdiags
3import ( 3import (
4 "bytes" 4 "bytes"
5 "fmt" 5 "fmt"
6 "path/filepath"
7 "sort"
8 "strings"
6 9
7 "github.com/hashicorp/errwrap" 10 "github.com/hashicorp/errwrap"
8 multierror "github.com/hashicorp/go-multierror" 11 multierror "github.com/hashicorp/go-multierror"
@@ -54,6 +57,8 @@ func (diags Diagnostics) Append(new ...interface{}) Diagnostics {
54 diags = append(diags, ti...) // flatten 57 diags = append(diags, ti...) // flatten
55 case diagnosticsAsError: 58 case diagnosticsAsError:
56 diags = diags.Append(ti.Diagnostics) // unwrap 59 diags = diags.Append(ti.Diagnostics) // unwrap
60 case NonFatalError:
61 diags = diags.Append(ti.Diagnostics) // unwrap
57 case hcl.Diagnostics: 62 case hcl.Diagnostics:
58 for _, hclDiag := range ti { 63 for _, hclDiag := range ti {
59 diags = append(diags, hclDiagnostic{hclDiag}) 64 diags = append(diags, hclDiagnostic{hclDiag})
@@ -136,6 +141,54 @@ func (diags Diagnostics) Err() error {
136 return diagnosticsAsError{diags} 141 return diagnosticsAsError{diags}
137} 142}
138 143
144// ErrWithWarnings is similar to Err except that it will also return a non-nil
145// error if the receiver contains only warnings.
146//
147// In the warnings-only situation, the result is guaranteed to be of dynamic
148// type NonFatalError, allowing diagnostics-aware callers to type-assert
149// and unwrap it, treating it as non-fatal.
150//
151// This should be used only in contexts where the caller is able to recognize
152// and handle NonFatalError. For normal callers that expect a lack of errors
153// to be signaled by nil, use just Diagnostics.Err.
154func (diags Diagnostics) ErrWithWarnings() error {
155 if len(diags) == 0 {
156 return nil
157 }
158 if diags.HasErrors() {
159 return diags.Err()
160 }
161 return NonFatalError{diags}
162}
163
164// NonFatalErr is similar to Err except that it always returns either nil
165// (if there are no diagnostics at all) or NonFatalError.
166//
167// This allows diagnostics to be returned over an error return channel while
168// being explicit that the diagnostics should not halt processing.
169//
170// This should be used only in contexts where the caller is able to recognize
171// and handle NonFatalError. For normal callers that expect a lack of errors
172// to be signaled by nil, use just Diagnostics.Err.
173func (diags Diagnostics) NonFatalErr() error {
174 if len(diags) == 0 {
175 return nil
176 }
177 return NonFatalError{diags}
178}
179
180// Sort applies an ordering to the diagnostics in the receiver in-place.
181//
182// The ordering is: warnings before errors, sourceless before sourced,
183// short source paths before long source paths, and then ordering by
184// position within each file.
185//
186// Diagnostics that do not differ by any of these sortable characteristics
187// will remain in the same relative order after this method returns.
188func (diags Diagnostics) Sort() {
189 sort.Stable(sortDiagnostics(diags))
190}
191
139type diagnosticsAsError struct { 192type diagnosticsAsError struct {
140 Diagnostics 193 Diagnostics
141} 194}
@@ -179,3 +232,99 @@ func (dae diagnosticsAsError) WrappedErrors() []error {
179 } 232 }
180 return errs 233 return errs
181} 234}
235
236// NonFatalError is a special error type, returned by
237// Diagnostics.ErrWithWarnings and Diagnostics.NonFatalErr,
238// that indicates that the wrapped diagnostics should be treated as non-fatal.
239// Callers can conditionally type-assert an error to this type in order to
240// detect the non-fatal scenario and handle it in a different way.
241type NonFatalError struct {
242 Diagnostics
243}
244
245func (woe NonFatalError) Error() string {
246 diags := woe.Diagnostics
247 switch {
248 case len(diags) == 0:
249 // should never happen, since we don't create this wrapper if
250 // there are no diagnostics in the list.
251 return "no errors or warnings"
252 case len(diags) == 1:
253 desc := diags[0].Description()
254 if desc.Detail == "" {
255 return desc.Summary
256 }
257 return fmt.Sprintf("%s: %s", desc.Summary, desc.Detail)
258 default:
259 var ret bytes.Buffer
260 if diags.HasErrors() {
261 fmt.Fprintf(&ret, "%d problems:\n", len(diags))
262 } else {
263 fmt.Fprintf(&ret, "%d warnings:\n", len(diags))
264 }
265 for _, diag := range woe.Diagnostics {
266 desc := diag.Description()
267 if desc.Detail == "" {
268 fmt.Fprintf(&ret, "\n- %s", desc.Summary)
269 } else {
270 fmt.Fprintf(&ret, "\n- %s: %s", desc.Summary, desc.Detail)
271 }
272 }
273 return ret.String()
274 }
275}
276
277// sortDiagnostics is an implementation of sort.Interface
278type sortDiagnostics []Diagnostic
279
280var _ sort.Interface = sortDiagnostics(nil)
281
282func (sd sortDiagnostics) Len() int {
283 return len(sd)
284}
285
286func (sd sortDiagnostics) Less(i, j int) bool {
287 iD, jD := sd[i], sd[j]
288 iSev, jSev := iD.Severity(), jD.Severity()
289 iSrc, jSrc := iD.Source(), jD.Source()
290
291 switch {
292
293 case iSev != jSev:
294 return iSev == Warning
295
296 case (iSrc.Subject == nil) != (jSrc.Subject == nil):
297 return iSrc.Subject == nil
298
299 case iSrc.Subject != nil && *iSrc.Subject != *jSrc.Subject:
300 iSubj := iSrc.Subject
301 jSubj := jSrc.Subject
302 switch {
303 case iSubj.Filename != jSubj.Filename:
304 // Path with fewer segments goes first if they are different lengths
305 sep := string(filepath.Separator)
306 iCount := strings.Count(iSubj.Filename, sep)
307 jCount := strings.Count(jSubj.Filename, sep)
308 if iCount != jCount {
309 return iCount < jCount
310 }
311 return iSubj.Filename < jSubj.Filename
312 case iSubj.Start.Byte != jSubj.Start.Byte:
313 return iSubj.Start.Byte < jSubj.Start.Byte
314 case iSubj.End.Byte != jSubj.End.Byte:
315 return iSubj.End.Byte < jSubj.End.Byte
316 }
317 fallthrough
318
319 default:
320 // The remaining properties do not have a defined ordering, so
321 // we'll leave it unspecified. Since we use sort.Stable in
322 // the caller of this, the ordering of remaining items will
323 // be preserved.
324 return false
325 }
326}
327
328func (sd sortDiagnostics) Swap(i, j int) {
329 sd[i], sd[j] = sd[j], sd[i]
330}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/error.go b/vendor/github.com/hashicorp/terraform/tfdiags/error.go
index 35edc30..13f7a71 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/error.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/error.go
@@ -13,7 +13,7 @@ func (e nativeError) Severity() Severity {
13 13
14func (e nativeError) Description() Description { 14func (e nativeError) Description() Description {
15 return Description{ 15 return Description{
16 Summary: e.err.Error(), 16 Summary: FormatError(e.err),
17 } 17 }
18} 18}
19 19
@@ -21,3 +21,8 @@ func (e nativeError) Source() Source {
21 // No source information available for a native error 21 // No source information available for a native error
22 return Source{} 22 return Source{}
23} 23}
24
25func (e nativeError) FromExpr() *FromExpr {
26 // Native errors are not expression-related
27 return nil
28}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
index 24851f4..f9aec41 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/hcl.go
@@ -40,6 +40,16 @@ func (d hclDiagnostic) Source() Source {
40 return ret 40 return ret
41} 41}
42 42
43func (d hclDiagnostic) FromExpr() *FromExpr {
44 if d.diag.Expression == nil || d.diag.EvalContext == nil {
45 return nil
46 }
47 return &FromExpr{
48 Expression: d.diag.Expression,
49 EvalContext: d.diag.EvalContext,
50 }
51}
52
43// SourceRangeFromHCL constructs a SourceRange from the corresponding range 53// SourceRangeFromHCL constructs a SourceRange from the corresponding range
44// type within the HCL package. 54// type within the HCL package.
45func SourceRangeFromHCL(hclRange hcl.Range) SourceRange { 55func SourceRangeFromHCL(hclRange hcl.Range) SourceRange {
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
index 6cc95cc..485063b 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/rpc_friendly.go
@@ -48,6 +48,12 @@ func (d *rpcFriendlyDiag) Source() Source {
48 } 48 }
49} 49}
50 50
51func (d rpcFriendlyDiag) FromExpr() *FromExpr {
52 // RPC-friendly diagnostics cannot preserve expression information because
53 // expressions themselves are not RPC-friendly.
54 return nil
55}
56
51func init() { 57func init() {
52 gob.Register((*rpcFriendlyDiag)(nil)) 58 gob.Register((*rpcFriendlyDiag)(nil))
53} 59}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
index 0b1249b..78a7210 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go
@@ -4,6 +4,14 @@ package tfdiags
4 4
5import "strconv" 5import "strconv"
6 6
7func _() {
8 // An "invalid array index" compiler error signifies that the constant values have changed.
9 // Re-run the stringer command to generate them again.
10 var x [1]struct{}
11 _ = x[Error-69]
12 _ = x[Warning-87]
13}
14
7const ( 15const (
8 _Severity_name_0 = "Error" 16 _Severity_name_0 = "Error"
9 _Severity_name_1 = "Warning" 17 _Severity_name_1 = "Warning"
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
index fb3ac98..b0f1ecd 100644
--- a/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/simple_warning.go
@@ -20,6 +20,11 @@ func (e simpleWarning) Description() Description {
20} 20}
21 21
22func (e simpleWarning) Source() Source { 22func (e simpleWarning) Source() Source {
23 // No source information available for a native error 23 // No source information available for a simple warning
24 return Source{} 24 return Source{}
25} 25}
26
27func (e simpleWarning) FromExpr() *FromExpr {
28 // Simple warnings are not expression-related
29 return nil
30}
diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go
new file mode 100644
index 0000000..eaa2737
--- /dev/null
+++ b/vendor/github.com/hashicorp/terraform/tfdiags/sourceless.go
@@ -0,0 +1,13 @@
1package tfdiags
2
3// Sourceless creates and returns a diagnostic with no source location
4// information. This is generally used for operational-type errors that are
5// caused by or relate to the environment where Terraform is running rather
6// than to the provided configuration.
7func Sourceless(severity Severity, summary, detail string) Diagnostic {
8 return diagnosticBase{
9 severity: severity,
10 summary: summary,
11 detail: detail,
12 }
13}
diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go
index b21b297..30d7284 100644
--- a/vendor/github.com/hashicorp/terraform/version/version.go
+++ b/vendor/github.com/hashicorp/terraform/version/version.go
@@ -11,17 +11,21 @@ import (
11) 11)
12 12
13// The main version number that is being run at the moment. 13// The main version number that is being run at the moment.
14var Version = "0.11.12" 14var Version = "0.12.0"
15 15
16// A pre-release marker for the version. If this is "" (empty string) 16// A pre-release marker for the version. If this is "" (empty string)
17// then it means that it is a final release. Otherwise, this is a pre-release 17// then it means that it is a final release. Otherwise, this is a pre-release
18// such as "dev" (in development), "beta", "rc1", etc. 18// such as "dev" (in development), "beta", "rc1", etc.
19var Prerelease = "dev" 19var Prerelease = ""
20 20
21// SemVer is an instance of version.Version. This has the secondary 21// SemVer is an instance of version.Version. This has the secondary
22// benefit of verifying during tests and init time that our version is a 22// benefit of verifying during tests and init time that our version is a
23// proper semantic version, which should always be the case. 23// proper semantic version, which should always be the case.
24var SemVer = version.Must(version.NewVersion(Version)) 24var SemVer *version.Version
25
26func init() {
27 SemVer = version.Must(version.NewVersion(Version))
28}
25 29
26// Header is the header name used to send the current terraform version 30// Header is the header name used to send the current terraform version
27// in http requests. 31// in http requests.
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
index e179818..32ba02e 100644
--- a/vendor/github.com/hashicorp/yamux/session.go
+++ b/vendor/github.com/hashicorp/yamux/session.go
@@ -123,6 +123,12 @@ func (s *Session) IsClosed() bool {
123 } 123 }
124} 124}
125 125
126// CloseChan returns a read-only channel which is closed as
127// soon as the session is closed.
128func (s *Session) CloseChan() <-chan struct{} {
129 return s.shutdownCh
130}
131
126// NumStreams returns the number of currently open streams 132// NumStreams returns the number of currently open streams
127func (s *Session) NumStreams() int { 133func (s *Session) NumStreams() int {
128 s.streamLock.Lock() 134 s.streamLock.Lock()
@@ -303,8 +309,10 @@ func (s *Session) keepalive() {
303 case <-time.After(s.config.KeepAliveInterval): 309 case <-time.After(s.config.KeepAliveInterval):
304 _, err := s.Ping() 310 _, err := s.Ping()
305 if err != nil { 311 if err != nil {
306 s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) 312 if err != ErrSessionShutdown {
307 s.exitErr(ErrKeepAliveTimeout) 313 s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
314 s.exitErr(ErrKeepAliveTimeout)
315 }
308 return 316 return
309 } 317 }
310 case <-s.shutdownCh: 318 case <-s.shutdownCh:
@@ -323,8 +331,17 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error {
323// potential shutdown. Since there's the expectation that sends can happen 331// potential shutdown. Since there's the expectation that sends can happen
324// in a timely manner, we enforce the connection write timeout here. 332// in a timely manner, we enforce the connection write timeout here.
325func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { 333func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
326 timer := time.NewTimer(s.config.ConnectionWriteTimeout) 334 t := timerPool.Get()
327 defer timer.Stop() 335 timer := t.(*time.Timer)
336 timer.Reset(s.config.ConnectionWriteTimeout)
337 defer func() {
338 timer.Stop()
339 select {
340 case <-timer.C:
341 default:
342 }
343 timerPool.Put(t)
344 }()
328 345
329 ready := sendReady{Hdr: hdr, Body: body, Err: errCh} 346 ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
330 select { 347 select {
@@ -349,8 +366,17 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e
349// the send happens right here, we enforce the connection write timeout if we 366// the send happens right here, we enforce the connection write timeout if we
350// can't queue the header to be sent. 367// can't queue the header to be sent.
351func (s *Session) sendNoWait(hdr header) error { 368func (s *Session) sendNoWait(hdr header) error {
352 timer := time.NewTimer(s.config.ConnectionWriteTimeout) 369 t := timerPool.Get()
353 defer timer.Stop() 370 timer := t.(*time.Timer)
371 timer.Reset(s.config.ConnectionWriteTimeout)
372 defer func() {
373 timer.Stop()
374 select {
375 case <-timer.C:
376 default:
377 }
378 timerPool.Put(t)
379 }()
354 380
355 select { 381 select {
356 case s.sendCh <- sendReady{Hdr: hdr}: 382 case s.sendCh <- sendReady{Hdr: hdr}:
@@ -408,11 +434,20 @@ func (s *Session) recv() {
408 } 434 }
409} 435}
410 436
437// Ensure that the index of the handler (typeData/typeWindowUpdate/etc) matches the message type
438var (
439 handlers = []func(*Session, header) error{
440 typeData: (*Session).handleStreamMessage,
441 typeWindowUpdate: (*Session).handleStreamMessage,
442 typePing: (*Session).handlePing,
443 typeGoAway: (*Session).handleGoAway,
444 }
445)
446
411// recvLoop continues to receive data until a fatal error is encountered 447// recvLoop continues to receive data until a fatal error is encountered
412func (s *Session) recvLoop() error { 448func (s *Session) recvLoop() error {
413 defer close(s.recvDoneCh) 449 defer close(s.recvDoneCh)
414 hdr := header(make([]byte, headerSize)) 450 hdr := header(make([]byte, headerSize))
415 var handler func(header) error
416 for { 451 for {
417 // Read the header 452 // Read the header
418 if _, err := io.ReadFull(s.bufRead, hdr); err != nil { 453 if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
@@ -428,22 +463,12 @@ func (s *Session) recvLoop() error {
428 return ErrInvalidVersion 463 return ErrInvalidVersion
429 } 464 }
430 465
431 // Switch on the type 466 mt := hdr.MsgType()
432 switch hdr.MsgType() { 467 if mt < typeData || mt > typeGoAway {
433 case typeData:
434 handler = s.handleStreamMessage
435 case typeWindowUpdate:
436 handler = s.handleStreamMessage
437 case typeGoAway:
438 handler = s.handleGoAway
439 case typePing:
440 handler = s.handlePing
441 default:
442 return ErrInvalidMsgType 468 return ErrInvalidMsgType
443 } 469 }
444 470
445 // Invoke the handler 471 if err := handlers[mt](s, hdr); err != nil {
446 if err := handler(hdr); err != nil {
447 return err 472 return err
448 } 473 }
449 } 474 }
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
index d216e28..aa23919 100644
--- a/vendor/github.com/hashicorp/yamux/stream.go
+++ b/vendor/github.com/hashicorp/yamux/stream.go
@@ -47,8 +47,8 @@ type Stream struct {
47 recvNotifyCh chan struct{} 47 recvNotifyCh chan struct{}
48 sendNotifyCh chan struct{} 48 sendNotifyCh chan struct{}
49 49
50 readDeadline time.Time 50 readDeadline atomic.Value // time.Time
51 writeDeadline time.Time 51 writeDeadline atomic.Value // time.Time
52} 52}
53 53
54// newStream is used to construct a new stream within 54// newStream is used to construct a new stream within
@@ -67,6 +67,8 @@ func newStream(session *Session, id uint32, state streamState) *Stream {
67 recvNotifyCh: make(chan struct{}, 1), 67 recvNotifyCh: make(chan struct{}, 1),
68 sendNotifyCh: make(chan struct{}, 1), 68 sendNotifyCh: make(chan struct{}, 1),
69 } 69 }
70 s.readDeadline.Store(time.Time{})
71 s.writeDeadline.Store(time.Time{})
70 return s 72 return s
71} 73}
72 74
@@ -122,8 +124,9 @@ START:
122WAIT: 124WAIT:
123 var timeout <-chan time.Time 125 var timeout <-chan time.Time
124 var timer *time.Timer 126 var timer *time.Timer
125 if !s.readDeadline.IsZero() { 127 readDeadline := s.readDeadline.Load().(time.Time)
126 delay := s.readDeadline.Sub(time.Now()) 128 if !readDeadline.IsZero() {
129 delay := readDeadline.Sub(time.Now())
127 timer = time.NewTimer(delay) 130 timer = time.NewTimer(delay)
128 timeout = timer.C 131 timeout = timer.C
129 } 132 }
@@ -188,7 +191,7 @@ START:
188 191
189 // Send the header 192 // Send the header
190 s.sendHdr.encode(typeData, flags, s.id, max) 193 s.sendHdr.encode(typeData, flags, s.id, max)
191 if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { 194 if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
192 return 0, err 195 return 0, err
193 } 196 }
194 197
@@ -200,8 +203,9 @@ START:
200 203
201WAIT: 204WAIT:
202 var timeout <-chan time.Time 205 var timeout <-chan time.Time
203 if !s.writeDeadline.IsZero() { 206 writeDeadline := s.writeDeadline.Load().(time.Time)
204 delay := s.writeDeadline.Sub(time.Now()) 207 if !writeDeadline.IsZero() {
208 delay := writeDeadline.Sub(time.Now())
205 timeout = time.After(delay) 209 timeout = time.After(delay)
206 } 210 }
207 select { 211 select {
@@ -238,18 +242,25 @@ func (s *Stream) sendWindowUpdate() error {
238 242
239 // Determine the delta update 243 // Determine the delta update
240 max := s.session.config.MaxStreamWindowSize 244 max := s.session.config.MaxStreamWindowSize
241 delta := max - atomic.LoadUint32(&s.recvWindow) 245 var bufLen uint32
246 s.recvLock.Lock()
247 if s.recvBuf != nil {
248 bufLen = uint32(s.recvBuf.Len())
249 }
250 delta := (max - bufLen) - s.recvWindow
242 251
243 // Determine the flags if any 252 // Determine the flags if any
244 flags := s.sendFlags() 253 flags := s.sendFlags()
245 254
246 // Check if we can omit the update 255 // Check if we can omit the update
247 if delta < (max/2) && flags == 0 { 256 if delta < (max/2) && flags == 0 {
257 s.recvLock.Unlock()
248 return nil 258 return nil
249 } 259 }
250 260
251 // Update our window 261 // Update our window
252 atomic.AddUint32(&s.recvWindow, delta) 262 s.recvWindow += delta
263 s.recvLock.Unlock()
253 264
254 // Send the header 265 // Send the header
255 s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) 266 s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
@@ -392,16 +403,18 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
392 if length == 0 { 403 if length == 0 {
393 return nil 404 return nil
394 } 405 }
395 if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
396 s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
397 return ErrRecvWindowExceeded
398 }
399 406
400 // Wrap in a limited reader 407 // Wrap in a limited reader
401 conn = &io.LimitedReader{R: conn, N: int64(length)} 408 conn = &io.LimitedReader{R: conn, N: int64(length)}
402 409
403 // Copy into buffer 410 // Copy into buffer
404 s.recvLock.Lock() 411 s.recvLock.Lock()
412
413 if length > s.recvWindow {
414 s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length)
415 return ErrRecvWindowExceeded
416 }
417
405 if s.recvBuf == nil { 418 if s.recvBuf == nil {
406 // Allocate the receive buffer just-in-time to fit the full data frame. 419 // Allocate the receive buffer just-in-time to fit the full data frame.
407 // This way we can read in the whole packet without further allocations. 420 // This way we can read in the whole packet without further allocations.
@@ -414,7 +427,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
414 } 427 }
415 428
416 // Decrement the receive window 429 // Decrement the receive window
417 atomic.AddUint32(&s.recvWindow, ^uint32(length-1)) 430 s.recvWindow -= length
418 s.recvLock.Unlock() 431 s.recvLock.Unlock()
419 432
420 // Unblock any readers 433 // Unblock any readers
@@ -435,13 +448,13 @@ func (s *Stream) SetDeadline(t time.Time) error {
435 448
436// SetReadDeadline sets the deadline for future Read calls. 449// SetReadDeadline sets the deadline for future Read calls.
437func (s *Stream) SetReadDeadline(t time.Time) error { 450func (s *Stream) SetReadDeadline(t time.Time) error {
438 s.readDeadline = t 451 s.readDeadline.Store(t)
439 return nil 452 return nil
440} 453}
441 454
442// SetWriteDeadline sets the deadline for future Write calls 455// SetWriteDeadline sets the deadline for future Write calls
443func (s *Stream) SetWriteDeadline(t time.Time) error { 456func (s *Stream) SetWriteDeadline(t time.Time) error {
444 s.writeDeadline = t 457 s.writeDeadline.Store(t)
445 return nil 458 return nil
446} 459}
447 460
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
index 5fe45af..8a73e92 100644
--- a/vendor/github.com/hashicorp/yamux/util.go
+++ b/vendor/github.com/hashicorp/yamux/util.go
@@ -1,5 +1,20 @@
1package yamux 1package yamux
2 2
3import (
4 "sync"
5 "time"
6)
7
8var (
9 timerPool = &sync.Pool{
10 New: func() interface{} {
11 timer := time.NewTimer(time.Hour * 1e6)
12 timer.Stop()
13 return timer
14 },
15 }
16)
17
3// asyncSendErr is used to try an async send of an error 18// asyncSendErr is used to try an async send of an error
4func asyncSendErr(ch chan error, err error) { 19func asyncSendErr(ch chan error, err error) {
5 if ch == nil { 20 if ch == nil {