git.immae.eu Git - github/fretlink/terraform-provider-statuscake.git/commitdiff
update vendor and go.mod add_contact_groups
author Alexandre Garand <alexandre.garand@fretlink.com>
Fri, 9 Aug 2019 13:59:15 +0000 (15:59 +0200)
committer Alexandre Garand <alexandre.garand@fretlink.com>
Fri, 9 Aug 2019 14:39:21 +0000 (16:39 +0200)
201 files changed:
go.mod
go.sum
vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock
vendor/github.com/DreamItGetIT/statuscake/contactGroups.go [new file with mode: 0644]
vendor/github.com/DreamItGetIT/statuscake/responses.go
vendor/github.com/DreamItGetIT/statuscake/ssl.go [new file with mode: 0644]
vendor/github.com/DreamItGetIT/statuscake/tests.go
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go [deleted file]
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
vendor/github.com/aws/aws-sdk-go/aws/request/request.go
vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
vendor/github.com/aws/aws-sdk-go/aws/session/session.go
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
vendor/github.com/aws/aws-sdk-go/aws/types.go
vendor/github.com/aws/aws-sdk-go/aws/version.go
vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
vendor/github.com/aws/aws-sdk-go/service/s3/api.go
vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
vendor/github.com/aws/aws-sdk-go/service/sts/api.go
vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/compare.go
vendor/github.com/google/go-cmp/cmp/export_panic.go [moved from vendor/github.com/google/go-cmp/cmp/unsafe_panic.go with 60% similarity]
vendor/github.com/google/go-cmp/cmp/export_unsafe.go [moved from vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go with 64% similarity]
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/internal/function/func.go
vendor/github.com/google/go-cmp/cmp/internal/value/format.go [deleted file]
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/options.go
vendor/github.com/google/go-cmp/cmp/path.go
vendor/github.com/google/go-cmp/cmp/report.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/report_compare.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/report_reflect.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/report_slices.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/report_text.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/report_value.go [new file with mode: 0644]
vendor/github.com/google/go-cmp/cmp/reporter.go [deleted file]
vendor/github.com/google/go-querystring/LICENSE [new file with mode: 0644]
vendor/github.com/google/go-querystring/query/encode.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/checksum.go
vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
vendor/github.com/hashicorp/go-plugin/client.go
vendor/github.com/hashicorp/go-plugin/server.go
vendor/github.com/hashicorp/hcl2/ext/dynblock/README.md
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_template.go
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/parser.go
vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/spec.md
vendor/github.com/hashicorp/hcl2/hcl/json/structure.go
vendor/github.com/hashicorp/hcl2/hcl/spec.md
vendor/github.com/hashicorp/hcl2/hcl/structure.go
vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go
vendor/github.com/hashicorp/hcl2/hclwrite/format.go
vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/addrs/parse_ref.go
vendor/github.com/hashicorp/terraform/command/format/plan.go
vendor/github.com/hashicorp/terraform/command/format/state.go
vendor/github.com/hashicorp/terraform/config/config.go
vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
vendor/github.com/hashicorp/terraform/config/loader.go
vendor/github.com/hashicorp/terraform/config/module/versions.go
vendor/github.com/hashicorp/terraform/config/providers.go
vendor/github.com/hashicorp/terraform/config/raw_config.go
vendor/github.com/hashicorp/terraform/configs/config_build.go
vendor/github.com/hashicorp/terraform/configs/configload/getter.go
vendor/github.com/hashicorp/terraform/configs/configload/loader_load.go
vendor/github.com/hashicorp/terraform/configs/configschema/coerce_value.go
vendor/github.com/hashicorp/terraform/configs/configschema/decoder_spec.go
vendor/github.com/hashicorp/terraform/configs/parser_config_dir.go
vendor/github.com/hashicorp/terraform/configs/resource.go
vendor/github.com/hashicorp/terraform/configs/version_constraint.go
vendor/github.com/hashicorp/terraform/helper/plugin/grpc_provider.go
vendor/github.com/hashicorp/terraform/helper/resource/state_shim.go
vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
vendor/github.com/hashicorp/terraform/helper/schema/resource.go
vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
vendor/github.com/hashicorp/terraform/helper/schema/schema.go
vendor/github.com/hashicorp/terraform/helper/schema/shims.go
vendor/github.com/hashicorp/terraform/internal/initwd/getter.go
vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.pb.go
vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto [deleted file]
vendor/github.com/hashicorp/terraform/lang/blocktoattr/schema.go
vendor/github.com/hashicorp/terraform/lang/blocktoattr/variables.go
vendor/github.com/hashicorp/terraform/lang/data.go
vendor/github.com/hashicorp/terraform/lang/eval.go
vendor/github.com/hashicorp/terraform/lang/funcs/collection.go
vendor/github.com/hashicorp/terraform/lang/funcs/crypto.go
vendor/github.com/hashicorp/terraform/lang/funcs/filesystem.go
vendor/github.com/hashicorp/terraform/lang/functions.go
vendor/github.com/hashicorp/terraform/plans/objchange/compatible.go
vendor/github.com/hashicorp/terraform/plugin/discovery/get.go
vendor/github.com/hashicorp/terraform/plugin/grpc_provider.go
vendor/github.com/hashicorp/terraform/providers/provider.go
vendor/github.com/hashicorp/terraform/states/state_deepcopy.go
vendor/github.com/hashicorp/terraform/states/statefile/version2.go
vendor/github.com/hashicorp/terraform/states/statefile/version3_upgrade.go
vendor/github.com/hashicorp/terraform/terraform/diff.go
vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
vendor/github.com/hashicorp/terraform/terraform/eval_state.go
vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
vendor/github.com/hashicorp/terraform/terraform/evaluate.go
vendor/github.com/hashicorp/terraform/terraform/interpolate.go
vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
vendor/github.com/hashicorp/terraform/terraform/node_resource_apply_instance.go
vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
vendor/github.com/hashicorp/terraform/terraform/provider_mock.go
vendor/github.com/hashicorp/terraform/terraform/resource_address.go
vendor/github.com/hashicorp/terraform/terraform/state.go
vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
vendor/github.com/hashicorp/terraform/terraform/util.go
vendor/github.com/hashicorp/terraform/version/version.go
vendor/github.com/satori/go.uuid/.travis.yml [new file with mode: 0644]
vendor/github.com/satori/go.uuid/LICENSE [new file with mode: 0644]
vendor/github.com/satori/go.uuid/README.md [new file with mode: 0644]
vendor/github.com/satori/go.uuid/codec.go [new file with mode: 0644]
vendor/github.com/satori/go.uuid/generator.go [new file with mode: 0644]
vendor/github.com/satori/go.uuid/sql.go [new file with mode: 0644]
vendor/github.com/satori/go.uuid/uuid.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/.travis.yml [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/LICENSE [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/NOTICE [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/apic.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/converter.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/decode.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/emitterc.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/encode.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/error.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/go.mod [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/go.sum [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/implied_type.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/parserc.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/readerc.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/resolve.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/scannerc.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/writerc.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/yaml.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/yamlh.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go [new file with mode: 0644]
vendor/github.com/zclconf/go-cty/cty/path.go
vendor/golang.org/x/crypto/openpgp/keys.go
vendor/golang.org/x/crypto/openpgp/packet/private_key.go
vendor/modules.txt

diff --git a/go.mod b/go.mod
index d9992469d3071ec6bfa2409dd77e37fe253659da..e46254455bb3ac9381c1a4a937fff906c870809f 100644 (file)
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
 module github.com/terraform-providers/terraform-provider-statuscake
 
+go 1.12
+
 require (
-       github.com/DreamItGetIT/statuscake v0.0.0-20190218105717-471b24d8edfb
-       github.com/hashicorp/terraform v0.12.0
+       github.com/DreamItGetIT/statuscake v0.0.0-20190809134845-9d26ad75405b
+       github.com/hashicorp/terraform v0.12.6
 )
diff --git a/go.sum b/go.sum
index cc8f846a576f52a7718e2373a055b978471381e8..9e0daff3f38d5c00a127a47d29f539573e3b29df 100644 (file)
--- a/go.sum
+++ b/go.sum
@@ -8,26 +8,22 @@ dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBr
 dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
 dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
 git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
-github.com/Azure/azure-sdk-for-go v21.3.0+incompatible h1:YFvAka2WKAl2xnJkYV1e1b7E2z88AgFszDzWU18ejMY=
 github.com/Azure/azure-sdk-for-go v21.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-autorest v10.15.4+incompatible h1:q+DRrRdbCnkY7f2WxQBx58TwCGkEdMAK/hkZ10g0Pzk=
 github.com/Azure/go-autorest v10.15.4+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4 h1:pSm8mp0T2OH2CPmPDPtwHPr3VAQaOwVF/JbllOPP4xA=
 github.com/Azure/go-ntlmssp v0.0.0-20180810175552-4a21cbd618b4/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022 h1:y8Gs8CzNfDF5AZvjr+5UyGQvQEBL7pwo+v+wX6q9JI8=
 github.com/ChrisTrenkamp/goxpath v0.0.0-20170922090931-c385f95c6022/go.mod h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=
-github.com/DreamItGetIT/statuscake v0.0.0-20190218105717-471b24d8edfb h1:vJ3lnCyZNmSV/OKyw4d7GuZHFrUaa3FY6/NqtvRE0lw=
-github.com/DreamItGetIT/statuscake v0.0.0-20190218105717-471b24d8edfb/go.mod h1:OclNh7ZacJo61GDJablmsOZ7l8/AVtzGqP8G7baOdAs=
-github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292 h1:tuQ7w+my8a8mkwN7x2TSd7OzTjkZ7rAeSyH4xncuAMI=
+github.com/DreamItGetIT/statuscake v0.0.0-20190809134845-9d26ad75405b h1:HLzBntqyCxSeFdkdqnD1/5HYdPyafFpoWXTQNQkldso=
+github.com/DreamItGetIT/statuscake v0.0.0-20190809134845-9d26ad75405b/go.mod h1:OclNh7ZacJo61GDJablmsOZ7l8/AVtzGqP8G7baOdAs=
 github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292/go.mod h1:KYCjqMOeHpNuTOiFQU6WEcTG7poCJrUs0YgyHNtn1no=
 github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw=
-github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8=
 github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE=
 github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
-github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6 h1:LoeFxdq5zUCBQPhbQKE6zvoGwHMxCBlqwbH9+9kHoHA=
 github.com/agl/ed25519 v0.0.0-20150830182803-278e1ec8e8a6/go.mod h1:WPjqKcmVOxf0XSf3YxCJs6N6AOSrOx3obionmG7T0y0=
+github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190329064014-6e358769c32a/go.mod h1:T9M45xf79ahXVelWoOBmH0y4aC1t5kXO5BxwyakgIGA=
+github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190103054945-8205d1f41e70/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/aliyun/aliyun-tablestore-go-sdk v4.1.2+incompatible/go.mod h1:LDQHRZylxvcg8H7wBIDfvO5g/cy4/sz1iucBlc2l3Jw=
 github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
 github.com/antchfx/xpath v0.0.0-20190129040759-c8489ed3251e/go.mod h1:Yee4kTMuNiPYJ7nSNorELQMr1J33uOpXDMByNYhvtNk=
 github.com/antchfx/xquery v0.0.0-20180515051857-ad5b8c7a47b0/go.mod h1:LzD22aAzDP8/dyiCKFp31He4m2GPjl0AFyzDtZzUu9M=
@@ -38,7 +34,6 @@ github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0 h1:MzVXffFU
 github.com/apparentlymart/go-dump v0.0.0-20190214190832-042adf3cf4a0/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
 github.com/apparentlymart/go-textseg v1.0.0 h1:rRmlIsPEEhUTIKQb7T++Nz/A5Q6C9IuX2wFoYVvnCs0=
 github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
-github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs=
 github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
@@ -46,8 +41,9 @@ github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM=
 github.com/aws/aws-sdk-go v1.16.36/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.19.18 h1:Hb3+b9HCqrOrbAtFstUWg7H5TQ+/EcklJtE8VShVs8o=
-github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.21.7 h1:ml+k7szyVaq4YD+3LhqOGl9tgMTqgMbpnuUSkB6UJvQ=
+github.com/aws/aws-sdk-go v1.21.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas=
 github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
@@ -60,28 +56,21 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT
 github.com/bsm/go-vlq v0.0.0-20150828105119-ec6e8d4f5f4e/go.mod h1:N+BjUcTjSxc2mtRGSCPsat1kze3CUtvJN3/jTXlp29k=
 github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d h1:aG5FcWiZTOhPQzYIxwxSR1zEOxzL32fwr1CsaCfhO6w=
 github.com/chzyer/readline v0.0.0-20161106042343-c914be64f07d/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
-github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dimchansky/utfbom v1.0.0 h1:fGC2kkf4qOoKqZ4q7iIh+Vef4ubC1c38UDsEyZynZPc=
 github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dnaeon/go-vcr v0.0.0-20180920040454-5637cf3d8a31/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dylanmei/iso8601 v0.1.0 h1:812NGQDBcqquTfH5Yeo7lwR0nzx/cKdsmf3qMjPURUI=
 github.com/dylanmei/iso8601 v0.1.0/go.mod h1:w9KhXSgIyROl1DefbMYIE7UVSIvELTbMrCfx+QkYnoQ=
 github.com/dylanmei/winrmtest v0.0.0-20190225150635-99b7fe2fddf1/go.mod h1:lcy9/2gH1jn/VCLouHA6tOEwLoNVd4GW6zhuKLmHC2Y=
 github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
@@ -93,24 +82,24 @@ github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aev
 github.com/go-test/deep v1.0.1 h1:UQhStjbkDClarlmv0am7OXXO4/GaPdCGiUiMTvi28sg=
 github.com/go-test/deep v1.0.1/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI=
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
 github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk=
 github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -121,11 +110,8 @@ github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK
 github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gax-go/v2 v2.0.3 h1:siORttZ36U2R/WjiJuDz8znElWBiAlO9rVt+mqJt0Cc=
 github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
-github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968 h1:Pu+HW4kcQozw0QyrTTgLE+3RXNqFhQNNzhbnoLFL83c=
 github.com/gophercloud/gophercloud v0.0.0-20190208042652-bc37892e1968/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
-github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01 h1:OgCNGSnEalfkRpn//WGJHhpo7fkP+LhTpvEITZ7CkK4=
 github.com/gophercloud/utils v0.0.0-20190128072930-fbb6ab446f01/go.mod h1:wjDF8z83zTeg5eMLml5EBSlAhbF7G8DobyI1YsMuyzw=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
@@ -133,21 +119,17 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmg
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
 github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
-github.com/hashicorp/aws-sdk-go-base v0.2.0 h1:5bjZnWCvQg9Im5CHZr9t90IaFC4uvVlMl2fTh23IoCk=
 github.com/hashicorp/aws-sdk-go-base v0.2.0/go.mod h1:ZIWACGGi0N7a4DZbf15yuE1JQORmWLtBcVM6F5SXNFU=
-github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089 h1:1eDpXAxTh0iPv+1kc9/gfSI2pxRERDsTk/lNGolwHn8=
 github.com/hashicorp/consul v0.0.0-20171026175957-610f3c86a089/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
 github.com/hashicorp/errwrap v0.0.0-20180715044906-d6c0cd880357/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2 h1:VBRx+yPYUZaobnn5ANBcOUf4hhWpTHSQgftG4TcDkhI=
 github.com/hashicorp/go-azure-helpers v0.0.0-20190129193224-166dfd221bb2/go.mod h1:lu62V//auUow6k0IykxLK2DCNW8qTmpm8KqhYVWattA=
-github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU=
 github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg=
 github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-getter v1.3.0 h1:pFMSFlI9l5NaeuzkpE3L7BYk9qQ9juTAgXW/H0cqxcU=
-github.com/hashicorp/go-getter v1.3.0/go.mod h1:/O1k/AizTN0QmfEKknCYGvICeyKUDqCYA8vvWtGWDeQ=
+github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e h1:6krcdHPiS+aIP9XKzJzSahfjD7jG7Z+4+opm0z39V1M=
+github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e/go.mod h1:/O1k/AizTN0QmfEKknCYGvICeyKUDqCYA8vvWtGWDeQ=
 github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI=
 github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f h1:Yv9YzBlAETjy6AOX9eLBZ3nshNVRREgerT/3nvxlGho=
 github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
@@ -156,20 +138,15 @@ github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iP
 github.com/hashicorp/go-multierror v0.0.0-20180717150148-3d5d8f294aa0/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
 github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-plugin v1.0.1-0.20190430211030-5692942914bb h1:Zg2pmmk0lrLFL85lQGt08bOUBpIBaVs6/psiAyx0c4w=
-github.com/hashicorp/go-plugin v1.0.1-0.20190430211030-5692942914bb/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
-github.com/hashicorp/go-retryablehttp v0.5.2 h1:AoISa4P4IsW0/m4T6St8Yw38gTl5GtBAgfkhYh1xAz4=
+github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26 h1:hRho44SAoNu1CBtn5r8Q9J3rCs4ZverWZ4R+UeeNuWM=
+github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26/go.mod h1:++UyYGoz3o5w9ZzAdZxtQKrWWP+iqPBn3cQptSMzBuY=
 github.com/hashicorp/go-retryablehttp v0.5.2/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo=
 github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
-github.com/hashicorp/go-slug v0.3.0 h1:L0c+AvH/J64iMNF4VqRaRku2DMTEuHioPVS7kMjWIU8=
 github.com/hashicorp/go-slug v0.3.0/go.mod h1:I5tq5Lv0E2xcNXNkmx7BSfzi1PsJ2cNjs3cC3LwyhK8=
 github.com/hashicorp/go-sockaddr v0.0.0-20180320115054-6d291a969b86/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-tfe v0.3.16 h1:GS2yv580p0co4j3FBVaC6Zahd9mxdCGehhJ0qqzFMH0=
 github.com/hashicorp/go-tfe v0.3.16/go.mod h1:SuPHR+OcxvzBZNye7nGPfwZTEyd3rWPfLVbCgyZPezM=
-github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
@@ -179,16 +156,16 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
 github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws=
 github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
 github.com/hashicorp/hcl2 v0.0.0-20181208003705-670926858200/go.mod h1:ShfpTh661oAaxo7VcNxg0zcZW6jvMa7Moy2oFx7e5dE=
-github.com/hashicorp/hcl2 v0.0.0-20190515223218-4b22149b7cef h1:xZRvbcwHY8zhaxDwgkmpAp2emwZkVn7p3gat0zhq2X0=
-github.com/hashicorp/hcl2 v0.0.0-20190515223218-4b22149b7cef/go.mod h1:4oI94iqF3GB10QScn46WqbG0kgTUpha97SAzzg2+2ec=
+github.com/hashicorp/hcl2 v0.0.0-20190725010614-0c3fe388e450 h1:wpa0vOXOnSEuwZ++eVk1gQNm3Jy2+Envn0cQRgsl8K8=
+github.com/hashicorp/hcl2 v0.0.0-20190725010614-0c3fe388e450/go.mod h1:FSQTwDi9qesxGBsII2VqhIzKQ4r0bHvBkOczWfD7llg=
 github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590 h1:2yzhWGdgQUWZUCNK+AoO35V+HTsgEmcM4J9IkArh7PI=
 github.com/hashicorp/hil v0.0.0-20190212112733-ab17b08d6590/go.mod h1:n2TSygSNwsLJ76m8qFXTSc7beTb+auJxYdqrnoqwZWE=
 github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/memberlist v0.1.0/go.mod h1:ncdBp14cuox2iFOq3kDiquKU6fqsTBc3W6JvZwjxxsE=
 github.com/hashicorp/serf v0.0.0-20160124182025-e4ec8cc423bb/go.mod h1:h/Ru6tmZazX7WO/GDmwdpS975F019L4t5ng5IgwbNrE=
-github.com/hashicorp/terraform v0.12.0 h1:It2vmod2dBMB4+r+aUW2Afx0HlftyUwzNsNH3I2vrJ8=
-github.com/hashicorp/terraform v0.12.0/go.mod h1:Ke0ig9gGZ8rhV6OddAhBYt5nXmpvXsuNQQ8w9qYBZfU=
+github.com/hashicorp/terraform v0.12.6 h1:mWItQdLZQ7f3kBYBu2Kgdg+E5iZb1KtCq73V10Hmu48=
+github.com/hashicorp/terraform v0.12.6/go.mod h1:udmq5rU8CO9pEIh/A/Xrs3zb3yYU/W9ce1pp8K1ysHA=
 github.com/hashicorp/terraform-config-inspect v0.0.0-20190327195015-8022a2663a70 h1:oZm5nE11yhzsTRz/YrUyDMSvixePqjoZihwn8ipuOYI=
 github.com/hashicorp/terraform-config-inspect v0.0.0-20190327195015-8022a2663a70/go.mod h1:ItvqtvbC3K23FFET62ZwnkwtpbKZm8t8eMcWjmVVjD8=
 github.com/hashicorp/vault v0.10.4/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0=
@@ -201,14 +178,11 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926 h1:kie3qOosvRKqwij2HGzXWffwpXvcqfPPXRUw8I4F/mg=
 github.com/joyent/triton-go v0.0.0-20180313100802-d8f9c0314926/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA=
-github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE=
+github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/kardianos/osext v0.0.0-20170510131534-ae77be60afb1/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
-github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
-github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba h1:NARVGAAgEXvoMeNPHhPFt1SBt1VMznA3Gnz9d0qj+co=
 github.com/keybase/go-crypto v0.0.0-20161004153544-93f5b35093ba/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -220,15 +194,10 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
 github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
-github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82 h1:wnfcqULT+N2seWf6y4yHzmi7GD2kNx4Ute0qArktD48=
 github.com/lusis/go-artifactory v0.0.0-20160115162124-7e4ce345df82/go.mod h1:y54tfGmO3NKssKveTEFFzH8C/akrSOy/iW9qEAUDV84=
-github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
 github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
-github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9 h1:SmVbOZFWAlyQshuMfOkiAx1f5oUTsOGG5IXplAEYeeM=
 github.com/masterzen/simplexml v0.0.0-20160608183007-4572e39b1ab9/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=
-github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b h1:/1RFh2SLCJ+tEnT73+Fh5R2AO89sQqs8ba7o+hx1G0Y=
 github.com/masterzen/winrm v0.0.0-20190223112901-5e5c9a7fe54b/go.mod h1:wr1VqkwW0AB5JS0QLy5GpVMS9E3VtRoSYXUYyVk46KY=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
@@ -238,7 +207,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
 github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw=
 github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.4 h1:xmZZyxuP+bYKAKkA9ABYXVNJ+G/Wf3R8d8vAP3LDJJk=
 github.com/mattn/go-shellwords v1.0.4/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
@@ -251,12 +219,10 @@ github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMK
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
 github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb h1:GRiLv4rgyqjqzxbhJke65IYUf4NCOOvrPOJbV/sPxkM=
 github.com/mitchellh/go-linereader v0.0.0-20190213213312-1b945b3263eb/go.mod h1:OaY7UOoTkkrX3wRwjpYRKafIkkyeD0UtweSHAWWiqQM=
 github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
 github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
 github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
@@ -264,15 +230,14 @@ github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9
 github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
 github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4 h1:jw9tsdJ1FQmUkyTXdIF/nByTX+mMnnp16glnvGZMsC4=
 github.com/mitchellh/panicwrap v0.0.0-20190213213626-17011010aaa4/go.mod h1:YYMf4xtQnR8LRC0vKi3afvQ5QwRPQ17zjcpkBCufb+I=
-github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51 h1:eD92Am0Qf3rqhsOeA1zwBHSfRkoHrt4o6uORamdmJP8=
 github.com/mitchellh/prefixedio v0.0.0-20190213213902-5733675afd51/go.mod h1:kB1naBgV9ORnkiTVeyJOI1DavaJkG4oNIq0Af6ZVKUo=
 github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY=
 github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
 github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
-github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
 github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
 github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
@@ -280,10 +245,8 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
-github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58 h1:m3CEgv3ah1Rhy82L+c0QG/U3VyY1UsvsIdkh0/rU97Y=
 github.com/packer-community/winrmcp v0.0.0-20180102160824-81144009af58/go.mod h1:f6Izs6JvFTdnRbziASagjZ2vmf55NSIkC/weStxCHqk=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17 h1:chPfVn+gpAM5CTpTyVU9j8J+xgRGwmoDlNDLjKnJiYo=
 github.com/pkg/errors v0.0.0-20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -323,7 +286,6 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.
 github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
 github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
 github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
@@ -336,28 +298,25 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d h1:Z4EH+5EffvBEhh37F0C0DnpklTMh00JOkjW5zK3ofBI=
 github.com/svanharmelen/jsonapi v0.0.0-20180618144545-0c0828c3f16d/go.mod h1:BSTlc8jOjh0niykqEGVXOLXdi9o0r0kR8tCYiMvjFgw=
 github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
-github.com/terraform-providers/terraform-provider-openstack v1.15.0 h1:adpjqej+F8BAX9dHmuPF47sUIkgifeqBu6p7iCsyj0Y=
 github.com/terraform-providers/terraform-provider-openstack v1.15.0/go.mod h1:2aQ6n/BtChAl1y2S60vebhyJyZXBsuAI5G4+lHrT1Ew=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5 h1:cMjKdf4PxEBN9K5HaD9UMW8gkTbM0kMzkTa9SJe0WNQ=
 github.com/ugorji/go v0.0.0-20180813092308-00b869d2f4a5/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
 github.com/ulikunitz/xz v0.5.5 h1:pFrO0lVpTBXLpYw+pnLj6TbvHuyjXMfjGeCwSqCVwok=
 github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
 github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
 github.com/vmihailenco/msgpack v4.0.1+incompatible h1:RMF1enSPeKTlXrXdOcqjFUElywVZjjC6pqse21bKbEU=
 github.com/vmihailenco/msgpack v4.0.1+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
-github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
 github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
 github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557 h1:Jpn2j6wHkC9wJv5iMfJhKqrZJx3TahFx+7sbZ7zQdxs=
 github.com/xlab/treeprint v0.0.0-20161029104018-1d6e34225557/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
 github.com/zclconf/go-cty v0.0.0-20181129180422-88fbe721e0f8/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
-github.com/zclconf/go-cty v0.0.0-20190426224007-b18a157db9e2/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
-github.com/zclconf/go-cty v0.0.0-20190516203816-4fecf87372ec h1:MSeYjmyjucsFbecMTxg63ASg23lcSARP/kr9sClTFfk=
-github.com/zclconf/go-cty v0.0.0-20190516203816-4fecf87372ec/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
+github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
+github.com/zclconf/go-cty v1.0.1-0.20190708163926-19588f92a98f h1:sq2p8SN6ji66CFEQFIWLlD/gFmGtr5hBrOzv5nLlGfA=
+github.com/zclconf/go-cty v1.0.1-0.20190708163926-19588f92a98f/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
+github.com/zclconf/go-cty-yaml v1.0.1 h1:up11wlgAaDvlAGENcFDnZgkn0qUJurso7k6EpURKNF8=
+github.com/zclconf/go-cty-yaml v1.0.1/go.mod h1:IP3Ylp0wQpYm50IHK8OZWKMu6sPJIUgKa8XhiVHura0=
 go.opencensus.io v0.18.0 h1:Mk5rgZcggtbvtAun5aJzAtjKKN/t0R3jJPlWILlv938=
 go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@@ -372,8 +331,9 @@ golang.org/x/crypto v0.0.0-20181112202954-3d3f9f413869/go.mod h1:6SG95UA2DQfeDnf
 golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190222235706-ffb98f73852f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo=
 golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -387,6 +347,7 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20181129055619-fae4c4e3ad76/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190502183928-7f726cade0ab h1:9RfW3ktsOZxgo9YNbBAjq1FWzc/igwEcUzZz8IXgSbk=
 golang.org/x/net v0.0.0-20190502183928-7f726cade0ab/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -398,7 +359,6 @@ golang.org/x/oauth2 v0.0.0-20190220154721-9b3c75971fc9/go.mod h1:gOpvHmFTYa4Iltr
 golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -415,19 +375,17 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 h1:vsphBvatvfbhlb4PO1BYSr9dzugGxJ/SQHoNufZJq1w=
 golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
 google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
 google.golang.org/api v0.1.0 h1:K6z2u68e86TPdSdefXdzvXgR1zEMa+459vBSfWYAZkI=
@@ -454,9 +412,9 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock b/vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock
index c5b189e9c52289be4d3f46f4c1a142b844b631ab..b433daf4ab58eda21a63c4f786964e90f87cd5cd 100644 (file)
--- a/vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock
+++ b/vendor/github.com/DreamItGetIT/statuscake/Gopkg.lock
   revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
   version = "v1.1.1"
 
+[[projects]]
+  digest = "1:a63cff6b5d8b95638bfe300385d93b2a6d9d687734b863da8e09dc834510a690"
+  name = "github.com/google/go-querystring"
+  packages = ["query"]
+  pruneopts = "UT"
+  revision = "44c6ddd0a2342c386950e880b658017258da92fc"
+  version = "v1.0.0"
+
 [[projects]]
   digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
   name = "github.com/pmezard/go-difflib"
@@ -30,7 +38,7 @@
   name = "github.com/stretchr/testify"
   packages = [
     "assert",
-    "require",
+    "require"
   ]
   pruneopts = "UT"
   revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
@@ -41,8 +49,9 @@
   analyzer-version = 1
   input-imports = [
     "github.com/DreamItGetIT/statuscake",
+    "github.com/google/go-querystring/query",
     "github.com/stretchr/testify/assert",
-    "github.com/stretchr/testify/require",
+    "github.com/stretchr/testify/require"
   ]
   solver-name = "gps-cdcl"
   solver-version = 1
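
The [[projects]] block above pins github.com/google/go-querystring, which the vendored StatusCake client below uses to turn tagged structs into url.Values for its form-encoded requests. A minimal sketch of that encoding; the opts struct here is illustrative, not part of this commit:

    package main

    import (
    	"fmt"

    	"github.com/google/go-querystring/query"
    )

    // opts is a hypothetical struct; only the `url` tags matter to the encoder.
    type opts struct {
    	GroupName string `url:"GroupName,omitempty"`
    	Email     string `url:"Email,omitempty"`
    	ContactID int    `url:"ContactID,omitempty"`
    }

    func main() {
    	// query.Values reflects over the struct and builds url.Values;
    	// omitempty drops zero-valued fields, so the unset ContactID is not sent.
    	v, err := query.Values(opts{GroupName: "on-call", Email: "ops@example.com"})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(v.Encode()) // Email=ops%40example.com&GroupName=on-call
    }

This is the same mechanism contactGroups.go relies on when it calls query.Values(*cg) before PUTting to /ContactGroups/Update.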
diff --git a/vendor/github.com/DreamItGetIT/statuscake/contactGroups.go b/vendor/github.com/DreamItGetIT/statuscake/contactGroups.go
new file mode 100644 (file)
index 0000000..437fe37
--- /dev/null
+++ b/vendor/github.com/DreamItGetIT/statuscake/contactGroups.go
@@ -0,0 +1,149 @@
+package statuscake
+
+import (
+       "encoding/json"
+       "fmt"
+       "net/url"
+       "strings"
+       "github.com/google/go-querystring/query"
+)
+
+// ContactGroup represents the data received from the API with GET
+type ContactGroup struct {
+       GroupName    string   `json:"GroupName"    url:"GroupName,omitempty"`
+       Emails       []string `json:"Emails"`
+       EmailsPut    string   `url:"Email,omitempty"`
+       Mobiles      string   `json:"Mobiles"      url:"Mobile,omitempty"`
+       Boxcar       string   `json:"Boxcar"       url:"Boxcar,omitempty"`
+       Pushover     string   `json:"Pushover"     url:"Pushover,omitempty"`
+       ContactID    int      `json:"ContactID"    url:"ContactID,omitempty"`
+       DesktopAlert string   `json:"DesktopAlert" url:"DesktopAlert,omitempty"`
+       PingURL      string   `json:"PingURL"      url:"PingURL,omitempty"`
+}
+
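+// Response is the generic payload the API returns on writes; InsertID carries the ID of a newly created group.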
+type Response struct {
+       Success  bool   `json:"Success"`
+       Message  string `json:"Message"`
+       InsertID int    `json:"InsertID"`
+}
+
+// ContactGroups represents the actions that can be done with the API
+type ContactGroups interface {
+       All() ([]*ContactGroup, error)
+       Detail(int) (*ContactGroup, error)
+       Update(*ContactGroup) (*ContactGroup, error)
+       Delete(int) error
+       Create(*ContactGroup) (*ContactGroup, error)
+}
+
+func findContactGroup(responses []*ContactGroup, id int) (*ContactGroup, error) {
+       var response *ContactGroup
+       for _, elem := range responses {
+               if (*elem).ContactID == id {
+                       return elem, nil
+               }
+       }
+       return response, fmt.Errorf("%d Not found", id)
+}
+
+type contactGroups struct {
+       client apiClient
+}
+
+// NewContactGroups returns a new ContactGroups implementation
+func NewContactGroups(c apiClient) ContactGroups {
+       return &contactGroups{
+               client: c,
+       }
+}
+
+// All returns a list of all the ContactGroups from the API
+func (tt *contactGroups) All() ([]*ContactGroup, error) {
+       rawResponse, err := tt.client.get("/ContactGroups", nil)
+       if err != nil {
+               return nil, fmt.Errorf("Error getting StatusCake contactGroups: %s", err.Error())
+       }
+       var getResponse []*ContactGroup
+       err = json.NewDecoder(rawResponse.Body).Decode(&getResponse)
+       if err != nil {
+               return nil, err
+       }
+       return getResponse, err
+}
+
+// Detail returns the ContactGroup corresponding to the given id
+func (tt *contactGroups) Detail(id int) (*ContactGroup, error) {
+       responses, err := tt.All()
+       if err != nil {
+               return nil, err
+       }
+       myContactGroup, errF := findContactGroup(responses, id)
+       if errF != nil {
+               return nil, errF
+       }
+       return myContactGroup, nil
+}
+
+//Update updates the API with cg, creating it first if cg.ContactID is 0, and
+//returns the corresponding ContactGroup.
+func (tt *contactGroups) Update(cg *ContactGroup) (*ContactGroup, error) {
+       if cg.ContactID == 0 {
+               return tt.Create(cg)
+       }
+       cg.EmailsPut = strings.Join(cg.Emails, ",")
+       v, _ := query.Values(*cg)
+
+       rawResponse, err := tt.client.put("/ContactGroups/Update", v)
+       if err != nil {
+               return nil, fmt.Errorf("Error updating StatusCake ContactGroup: %s", err.Error())
+       }
+
+       var response Response
+       err = json.NewDecoder(rawResponse.Body).Decode(&response)
+       if err != nil {
+               return nil, err
+       }
+
+       if !response.Success {
+               return nil, fmt.Errorf("%s", response.Message)
+       }
+
+       return cg, nil
+}
+
+//Delete deletes the ContactGroup whose ID is id
+func (tt *contactGroups) Delete(id int) error {
+       _, err := tt.client.delete("/ContactGroups/Update", url.Values{"ContactID": {fmt.Sprint(id)}})
+       return err
+}
+
+//Create creates a ContactGroup from the data in cg and returns it with the
+//new ContactID filled in.
+func (tt *contactGroups) Create(cg *ContactGroup) (*ContactGroup, error) {
+       cg.ContactID = 0
+       cg.EmailsPut = strings.Join(cg.Emails, ",")
+       v, _ := query.Values(*cg)
+
+       rawResponse, err := tt.client.put("/ContactGroups/Update", v)
+       if err != nil {
+               return nil, fmt.Errorf("Error creating StatusCake ContactGroup: %s", err.Error())
+       }
+
+       var response Response
+       err = json.NewDecoder(rawResponse.Body).Decode(&response)
+       if err != nil {
+               return nil, err
+       }
+
+       if !response.Success {
+               return nil, fmt.Errorf("%s", response.Message)
+       }
+
+       cg.ContactID = response.InsertID
+
+       return cg, nil
+}
+
index ec74a63853ce24924bfbee2081d98028053f46cc..553cb95443efa2a11901880d588a403d6ef0d240 100644 (file)
@@ -64,6 +64,7 @@ type detailResponse struct {
        EnableSSLWarning  bool                       `json:"EnableSSLWarning"`
        FollowRedirect  bool                         `json:"FollowRedirect"`
        StatusCodes     []string                     `json:"StatusCodes"`
+       Tags            []string                     `json:"Tags"`
 }
 
 func (d *detailResponse) test() *Test {
@@ -100,5 +101,6 @@ func (d *detailResponse) test() *Test {
                EnableSSLAlert: d.EnableSSLWarning,
                FollowRedirect: d.FollowRedirect,
                StatusCodes:    strings.Join(d.StatusCodes[:], ","),
+               TestTags:       d.Tags,
        }
 }
diff --git a/vendor/github.com/DreamItGetIT/statuscake/ssl.go b/vendor/github.com/DreamItGetIT/statuscake/ssl.go
new file mode 100644 (file)
index 0000000..3f73d8d
--- /dev/null
@@ -0,0 +1,273 @@
+package statuscake
+
+import (
+       "encoding/json"
+       "fmt"
+       "net/url"
+       "strings"
+       "strconv"
+       
+       "github.com/google/go-querystring/query"
+)
+
+//Ssl represents the SSL-test data received from the API with GET
+type Ssl struct {
+       ID             string              `json:"id"                 url:"id,omitempty"`
+       Domain         string              `json:"domain"             url:"domain,omitempty"`
+       Checkrate      int                 `json:"checkrate"          url:"checkrate,omitempty"`
+       ContactGroupsC string              `                          url:"contact_groups,omitempty"`
+       AlertAt        string              `json:"alert_at"           url:"alert_at,omitempty"`
+       AlertReminder  bool                `json:"alert_reminder"     url:"alert_reminder,omitempty"`
+       AlertExpiry    bool                `json:"alert_expiry"       url:"alert_expiry,omitempty"`
+       AlertBroken    bool                `json:"alert_broken"       url:"alert_broken,omitempty"`
+       AlertMixed     bool                `json:"alert_mixed"        url:"alert_mixed,omitempty"`
+       Paused         bool                `json:"paused"`
+       IssuerCn       string              `json:"issuer_cn"`
+       CertScore      string              `json:"cert_score"`
+       CipherScore    string              `json:"cipher_score"`
+       CertStatus     string              `json:"cert_status"`
+       Cipher         string              `json:"cipher"`
+       ValidFromUtc   string              `json:"valid_from_utc"`
+       ValidUntilUtc  string              `json:"valid_until_utc"`
+       MixedContent   []map[string]string `json:"mixed_content"`
+       Flags          map[string]bool     `json:"flags"`
+       ContactGroups  []string            `json:"contact_groups"`
+       LastReminder   int                 `json:"last_reminder"`
+       LastUpdatedUtc string              `json:"last_updated_utc"`
+}
+
+//PartialSsl represents an SSL test creation or modification
+type PartialSsl struct {
+       ID             int
+       Domain         string
+       Checkrate      string
+       ContactGroupsC string
+       AlertAt        string
+       AlertExpiry    bool
+       AlertReminder  bool
+       AlertBroken    bool
+       AlertMixed     bool
+}
+
+type createSsl struct {
+       ID             int    `url:"id,omitempty"`
+       Domain         string `url:"domain"         json:"domain"`
+       Checkrate      string `url:"checkrate"      json:"checkrate"`
+       ContactGroupsC string `url:"contact_groups" json:"contact_groups"`
+       AlertAt        string `url:"alert_at"       json:"alert_at"`
+       AlertExpiry    bool   `url:"alert_expiry"   json:"alert_expiry"`
+       AlertReminder  bool   `url:"alert_reminder" json:"alert_reminder"`
+       AlertBroken    bool   `url:"alert_broken"   json:"alert_broken"`
+       AlertMixed     bool   `url:"alert_mixed"    json:"alert_mixed"`
+}
+
+type updateSsl struct {
+       ID             int    `url:"id"`
+       Domain         string `url:"domain"         json:"domain"`
+       Checkrate      string `url:"checkrate"      json:"checkrate"`
+       ContactGroupsC string `url:"contact_groups" json:"contact_groups"`
+       AlertAt        string `url:"alert_at"       json:"alert_at"`
+       AlertExpiry    bool   `url:"alert_expiry"   json:"alert_expiry"`
+       AlertReminder  bool   `url:"alert_reminder" json:"alert_reminder"`
+       AlertBroken    bool   `url:"alert_broken"   json:"alert_broken"`
+       AlertMixed     bool   `url:"alert_mixed"    json:"alert_mixed"`
+}
+
+type sslUpdateResponse struct {
+       Success bool        `json:"Success"`
+       Message interface{} `json:"Message"`
+}
+
+type sslCreateResponse struct {
+       Success bool        `json:"Success"`
+       Message interface{} `json:"Message"`
+       Input   createSsl   `json:"Input"`
+}
+
+//Ssls represents the actions available on the SSL-test API
+type Ssls interface {
+       All() ([]*Ssl, error)
+       completeSsl(*PartialSsl) (*Ssl, error)
+       Detail(string) (*Ssl, error)
+       Update(*PartialSsl) (*Ssl, error)
+       UpdatePartial(*PartialSsl) (*PartialSsl, error)
+       Delete(ID string) error
+       CreatePartial(*PartialSsl) (*PartialSsl, error)
+       Create(*PartialSsl) (*Ssl, error)
+}
+
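+// Illustrative usage sketch for the interface above; the apiClient value c
+// is an assumption, constructed elsewhere in this package:
+//
+//     ssls := NewSsls(c)
+//     created, err := ssls.Create(&PartialSsl{
+//             Domain:    "https://example.com",
+//             Checkrate: "86400",
+//             AlertAt:   "7,14,30",
+//     })
+//     if err != nil {
+//             // handle the API error
+//     }
+//     _ = created
+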
+//consolidateSsl flattens ContactGroups into the comma-separated
+//ContactGroupsC form expected by the API
+func consolidateSsl(s *Ssl) {
+       s.ContactGroupsC = strings.Join(s.ContactGroups, ",")
+}
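+
+// For example (illustrative): a Ssl whose ContactGroups is
+// []string{"12", "34"} ends up with ContactGroupsC == "12,34".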
+
+func findSsl(responses []*Ssl, id string) (*Ssl, error) {
+       for _, elem := range responses {
+               if elem.ID == id {
+                       return elem, nil
+               }
+       }
+       return nil, fmt.Errorf("%s Not found", id)
+}
+
+//completeSsl fetches the full Ssl matching s and restores its contact-group
+//list from s.ContactGroupsC
+func (tt *ssls) completeSsl(s *PartialSsl) (*Ssl, error) {
+       full, err := tt.Detail(strconv.Itoa(s.ID))
+       if err != nil {
+               return nil, err
+       }
+       full.ContactGroups = strings.Split(s.ContactGroupsC, ",")
+       return full, nil
+}
+
+//Partial returns the PartialSsl corresponding to the given Ssl.
+func Partial(s *Ssl) (*PartialSsl, error) {
+       if s == nil {
+               return nil, fmt.Errorf("s is nil")
+       }
+       id, err := strconv.Atoi(s.ID)
+       if err != nil {
+               return nil, err
+       }
+       return &PartialSsl{
+               ID:             id,
+               Domain:         s.Domain,
+               Checkrate:      strconv.Itoa(s.Checkrate),
+               ContactGroupsC: s.ContactGroupsC,
+               AlertReminder:  s.AlertReminder,
+               AlertExpiry:    s.AlertExpiry,
+               AlertBroken:    s.AlertBroken,
+               AlertMixed:     s.AlertMixed,
+               AlertAt:        s.AlertAt,
+       }, nil
+}
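+
+// Illustrative round-trip sketch; the id is hypothetical and ssls is assumed
+// to come from NewSsls as above:
+//
+//     full, _ := ssls.Detail("12345")
+//     p, err := Partial(full)
+//     if err == nil {
+//             p.Checkrate = "3600"
+//             _, err = ssls.Update(p)
+//     }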
+
+type ssls struct {
+       client apiClient
+}
+
+//NewSsls returns a new Ssls client
+func NewSsls(c apiClient) Ssls {
+       return &ssls{
+               client: c,
+       }
+}
+
+//All returns the list of all SSL tests from the API
+func (tt *ssls) All() ([]*Ssl, error) {
+       rawResponse, err := tt.client.get("/SSL", nil)
+       if err != nil {
+               return nil, fmt.Errorf("Error getting StatusCake Ssl: %s", err.Error())
+       }
+       var getResponse []*Ssl
+       err = json.NewDecoder(rawResponse.Body).Decode(&getResponse)
+       if err != nil {
+               return nil, err
+       }
+
+       for i := range getResponse {
+               consolidateSsl(getResponse[i])
+       }
+
+       return getResponse, nil
+}
+
+//Detail returns the SSL test corresponding to the given id
+func (tt *ssls) Detail(id string) (*Ssl, error) {
+       responses, err := tt.All()
+       if err != nil {
+               return nil, err
+       }
+       mySsl, errF := findSsl(responses, id)
+       if errF != nil {
+               return nil, errF
+       }
+       return mySsl, nil
+}
+
+//Update updates the API with s, creating a new SSL test if s.ID is 0, and
+//returns the corresponding Ssl.
+func (tt *ssls) Update(s *PartialSsl) (*Ssl, error) {
+       s, err := tt.UpdatePartial(s)
+       if err != nil {
+               return nil, err
+       }
+       return tt.completeSsl(s)
+}
+
+//UpdatePartial updates the API with s, creating a new SSL test if s.ID is 0,
+//and returns the corresponding PartialSsl.
+func (tt *ssls) UpdatePartial(s *PartialSsl) (*PartialSsl, error) {
+       if s.ID == 0 {
+               return tt.CreatePartial(s)
+       }
+       v, _ := query.Values(updateSsl(*s))
+
+       rawResponse, err := tt.client.put("/SSL/Update", v)
+       if err != nil {
+               return nil, fmt.Errorf("Error updating StatusCake Ssl: %s", err.Error())
+       }
+
+       var updateResponse sslUpdateResponse
+       err = json.NewDecoder(rawResponse.Body).Decode(&updateResponse)
+       if err != nil {
+               return nil, err
+       }
+
+       if !updateResponse.Success {
+               return nil, fmt.Errorf("%s", updateResponse.Message.(string))
+       }
+
+       return s, nil
+}
+
+//Delete deletes the SSL test whose ID is id
+func (tt *ssls) Delete(id string) error {
+       _, err := tt.client.delete("/SSL/Update", url.Values{"id": {id}})
+       return err
+}
+
+//Create creates an SSL test with the data in s and returns the created Ssl
+func (tt *ssls) Create(s *PartialSsl) (*Ssl, error) {
+       s, err := tt.CreatePartial(s)
+       if err != nil {
+               return nil, err
+       }
+       return tt.completeSsl(s)
+}
+
+//CreatePartial creates an SSL test with the data in s and returns the created
+//PartialSsl.
+func (tt *ssls) CreatePartial(s *PartialSsl) (*PartialSsl, error) {
+       s.ID = 0
+       v, _ := query.Values(createSsl(*s))
+
+       rawResponse, err := tt.client.put("/SSL/Update", v)
+       if err != nil {
+               return nil, fmt.Errorf("Error creating StatusCake Ssl: %s", err.Error())
+       }
+
+       var createResponse sslCreateResponse
+       err = json.NewDecoder(rawResponse.Body).Decode(&createResponse)
+       if err != nil {
+               return nil, err
+       }
+
+       if !createResponse.Success {
+               return nil, fmt.Errorf("%s", createResponse.Message.(string))
+       }
+
+       // On success the API returns the new test's ID in the Message field.
+       *s = PartialSsl(createResponse.Input)
+       s.ID = int(createResponse.Message.(float64))
+
+       return s, nil
+}
+
index 2a2383d550be493609a5c9f5d5da0b2177ccdaed..f92d29fed27dc6e1c6181efe8428037f4dc07f57 100644 (file)
@@ -99,7 +99,7 @@ type Test struct {
        TestTags []string `json:"TestTags" querystring:"TestTags"`
 
        // Comma Separated List of StatusCodes to Trigger Error on (on Update will replace, so send full list each time)
-       StatusCodes string `json:"StatusCodes" querystring:"StatusCodes"`
+       StatusCodes string `json:"StatusCodes" querystring:"StatusCodes" querystringoptions:"omitempty"`
 
        // Set to 1 to enable the Cookie Jar. Required for some redirects.
        UseJar int `json:"UseJar" querystring:"UseJar"`
index 56fdfc2bfc76c9715a861810bee26b80e03734cd..99849c0e19c002b434530c0bbee7c563943fee08 100644 (file)
@@ -138,8 +138,27 @@ type RequestFailure interface {
        RequestID() string
 }
 
-// NewRequestFailure returns a new request error wrapper for the given Error
-// provided.
+// NewRequestFailure returns a wrapped error with additional information for
+// the request status code and service request ID.
+//
+// It should be used to wrap all errors that involve service requests, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
 func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
        return newRequestError(err, statusCode, reqID)
 }
+
+// UnmarshalError provides the interface for the SDK failing to unmarshal data.
+type UnmarshalError interface {
+       awsError
+       Bytes() []byte
+}
+
+// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
+// the bytes that fail to unmarshal to the error.
+func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
+       return &unmarshalError{
+               awsError: New("UnmarshalError", msg, err),
+               bytes:    bytes,
+       }
+}
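+
+// Illustrative sketch of the intended use; queryOutput and body are
+// hypothetical stand-ins for a response type and its raw payload:
+//
+//     var out queryOutput
+//     if err := json.Unmarshal(body, &out); err != nil {
+//             return awserr.NewUnmarshalError(err, "failed decoding response", body)
+//     }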
index 0202a008f5d7dec3c76a1e5bebe813a8b8b2593b..9cf7eaf4007f544fdbbb060ac3d1a60f94623219 100644 (file)
@@ -1,6 +1,9 @@
 package awserr
 
-import "fmt"
+import (
+       "encoding/hex"
+       "fmt"
+)
 
 // SprintError returns a string of the formatted error code.
 //
@@ -119,6 +122,7 @@ type requestError struct {
        awsError
        statusCode int
        requestID  string
+       bytes      []byte
 }
 
 // newRequestError returns a wrapped error with additional information for
@@ -170,6 +174,29 @@ func (r requestError) OrigErrs() []error {
        return []error{r.OrigErr()}
 }
 
+type unmarshalError struct {
+       awsError
+       bytes []byte
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (e unmarshalError) Error() string {
+       extra := hex.Dump(e.bytes)
+       return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (e unmarshalError) String() string {
+       return e.Error()
+}
+
+// Bytes returns the bytes that failed to unmarshal.
+func (e unmarshalError) Bytes() []byte {
+       return e.bytes
+}
+
 // An error list that satisfies the golang interface
 type errorList []error
 
@@ -181,7 +208,7 @@ func (e errorList) Error() string {
        // How do we want to handle the array size being zero
        if size := len(e); size > 0 {
                for i := 0; i < size; i++ {
-                       msg += fmt.Sprintf("%s", e[i].Error())
+                       msg += e[i].Error()
                        // We check the next index to see if it is within the slice.
                        // If it is, then we append a newline. We do this, because unit tests
                        // could be broken with the additional '\n'
index 11c52c38968743fce74d8d08271a2f8c97e651f3..285e54d67993d5c7670b7b34959d7a6ddd7f32e6 100644 (file)
@@ -185,13 +185,12 @@ func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
 // SetValueAtPath sets a value at the case insensitive lexical path inside
 // of a structure.
 func SetValueAtPath(i interface{}, path string, v interface{}) {
-       if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
-               for _, rval := range rvals {
-                       if rval.Kind() == reflect.Ptr && rval.IsNil() {
-                               continue
-                       }
-                       setValue(rval, v)
+       rvals := rValuesAtPath(i, path, true, false, v == nil)
+       for _, rval := range rvals {
+               if rval.Kind() == reflect.Ptr && rval.IsNil() {
+                       continue
                }
+               setValue(rval, v)
        }
 }
 
index 7b5e1276acfe18dd9a587b7a0d84d362b33c0492..8958c32d4e9fbbe1ee7842e96da1a27c9254a1b2 100644 (file)
@@ -67,10 +67,14 @@ func logRequest(r *request.Request) {
                if !bodySeekable {
                        r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
                }
-               // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
-               // Body as a NoOpCloser and will not be reset after read by the HTTP
-               // client reader.
-               r.ResetBody()
+               // Reset the request body because dumpRequest will re-wrap the
+               // r.HTTPRequest's Body as a NoOpCloser and will not be reset after
+               // read by the HTTP client reader.
+               if err := r.Error; err != nil {
+                       r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
+                               r.ClientInfo.ServiceName, r.Operation.Name, err))
+                       return
+               }
        }
 
        r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
index 894bbc7f82c3411174bfff3dcf2c9457fadc2845..4af592158144c12bcaa16745de5e87e91a7be7d1 100644 (file)
@@ -50,9 +50,10 @@ package credentials
 
 import (
        "fmt"
-       "github.com/aws/aws-sdk-go/aws/awserr"
        "sync"
        "time"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
 )
 
 // AnonymousCredentials is an empty Credential object that can be used as
@@ -83,6 +84,12 @@ type Value struct {
        ProviderName string
 }
 
+// HasKeys reports whether the credentials Value has both AccessKeyID and
+// SecretAccessKey set.
+func (v Value) HasKeys() bool {
+       return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
+}
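+
+// For example (illustrative):
+//
+//     v := Value{AccessKeyID: "AKID", SecretAccessKey: "SECRET"}
+//     _ = v.HasKeys() // true; false if either field were empty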
+
 // A Provider is the interface for any component which will provide credentials
 // Value. A provider is required to manage its own Expired state, and what to
 // be expired means.
index 0ed791be641a227b65e251a9b6cd47c5e2bb2f00..43d4ed386ab85fbfc19f813ea03dc2b6b0c61ce2 100644 (file)
@@ -11,6 +11,7 @@ import (
        "github.com/aws/aws-sdk-go/aws/client"
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/ec2metadata"
+       "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/internal/sdkuri"
 )
 
@@ -142,7 +143,8 @@ func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
        }
 
        if err := s.Err(); err != nil {
-               return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+               return nil, awserr.New(request.ErrCodeSerialization,
+                       "failed to read EC2 instance role from metadata service", err)
        }
 
        return credsList, nil
@@ -164,7 +166,7 @@ func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCred
        respCreds := ec2RoleCredRespBody{}
        if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
                return ec2RoleCredRespBody{},
-                       awserr.New("SerializationError",
+                       awserr.New(request.ErrCodeSerialization,
                                fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
                                err)
        }
index ace51313820ae591722ef68de5e9276c8b14892c..c2b2c5d65c39274fa4136c34bbf7c283dcd904d0 100644 (file)
@@ -39,6 +39,7 @@ import (
        "github.com/aws/aws-sdk-go/aws/client/metadata"
        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
 )
 
 // ProviderName is the name of the credentials provider.
@@ -174,7 +175,7 @@ func unmarshalHandler(r *request.Request) {
 
        out := r.Data.(*getCredentialsOutput)
        if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
-               r.Error = awserr.New("SerializationError",
+               r.Error = awserr.New(request.ErrCodeSerialization,
                        "failed to decode endpoint credentials",
                        err,
                )
@@ -185,11 +186,15 @@ func unmarshalError(r *request.Request) {
        defer r.HTTPResponse.Body.Close()
 
        var errOut errorOutput
-       if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
-               r.Error = awserr.New("SerializationError",
-                       "failed to decode endpoint credentials",
-                       err,
+       err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
+       if err != nil {
+               r.Error = awserr.NewRequestFailure(
+                       awserr.New(request.ErrCodeSerialization,
+                               "failed to decode error message", err),
+                       r.HTTPResponse.StatusCode,
+                       r.RequestID,
                )
+               return
        }
 
        // Response body format is not consistent between metadata endpoints.
index b6dbfd2467de49d4c826a96a059423b0117b9d80..2e528d130d49c0bbf532b5ca35b42f78f5f9c529 100644 (file)
@@ -200,7 +200,7 @@ type AssumeRoleProvider struct {
        // by a random percentage between 0 and MaxJitterFrac. MaxJitterFrac must
        // have a value between 0 and 1. Any other value may lead to unexpected behavior.
        // With a MaxJitterFrac value of 0 (the default), no jitter will be used.
-       // 
+       //
        // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
        // AssumeRole call will be made with an arbitrary Duration between 27m and
        // 30m.
@@ -258,7 +258,6 @@ func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*
 
 // Retrieve generates a new set of temporary credentials using STS.
 func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
-
        // Apply defaults where parameters are not set.
        if p.RoleSessionName == "" {
                // Try to work out a role name that will hopefully end up unique.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
new file mode 100644 (file)
index 0000000..20510d9
--- /dev/null
@@ -0,0 +1,97 @@
+package stscreds
+
+import (
+       "fmt"
+       "io/ioutil"
+       "strconv"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/client"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/service/sts"
+       "github.com/aws/aws-sdk-go/service/sts/stsiface"
+)
+
+const (
+       // ErrCodeWebIdentity will be used as an error code when constructing
+       // a new error to be returned during session creation or retrieval.
+       ErrCodeWebIdentity = "WebIdentityErr"
+
+       // WebIdentityProviderName is the web identity provider name
+       WebIdentityProviderName = "WebIdentityCredentials"
+)
+
+// now is used to return a time.Time object representing
+// the current time. It can be swapped out in tests to make time-dependent
+// values deterministic and comparable.
+var now = time.Now
+
+// WebIdentityRoleProvider is used to retrieve credentials using
+// an OIDC token.
+type WebIdentityRoleProvider struct {
+       credentials.Expiry
+
+       client       stsiface.STSAPI
+       ExpiryWindow time.Duration
+
+       tokenFilePath   string
+       roleARN         string
+       roleSessionName string
+}
+
+// NewWebIdentityCredentials will return a new set of credentials with a given
+// configuration, role arn, and token file path.
+func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
+       svc := sts.New(c)
+       p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
+       return credentials.NewCredentials(p)
+}
+
+// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
+// provided stsiface.STSAPI
+func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
+       return &WebIdentityRoleProvider{
+               client:          svc,
+               tokenFilePath:   path,
+               roleARN:         roleARN,
+               roleSessionName: roleSessionName,
+       }
+}
+
+// Retrieve will attempt to assume a role using the token located at the
+// 'WebIdentityTokenFilePath' specified destination; if the token file cannot
+// be read an error will be returned.
+func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
+       b, err := ioutil.ReadFile(p.tokenFilePath)
+       if err != nil {
+               errMsg := fmt.Sprintf("unable to read file at %s", p.tokenFilePath)
+               return credentials.Value{}, awserr.New(ErrCodeWebIdentity, errMsg, err)
+       }
+
+       sessionName := p.roleSessionName
+       if len(sessionName) == 0 {
+               // session name is used to uniquely identify a session. This simply
+               // uses unix time in nanoseconds to uniquely identify sessions.
+               sessionName = strconv.FormatInt(now().UnixNano(), 10)
+       }
+       resp, err := p.client.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+               RoleArn:          &p.roleARN,
+               RoleSessionName:  &sessionName,
+               WebIdentityToken: aws.String(string(b)),
+       })
+       if err != nil {
+               return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
+       }
+
+       p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
+
+       value := credentials.Value{
+               AccessKeyID:     aws.StringValue(resp.Credentials.AccessKeyId),
+               SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
+               SessionToken:    aws.StringValue(resp.Credentials.SessionToken),
+               ProviderName:    WebIdentityProviderName,
+       }
+       return value, nil
+}
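+
+// Illustrative usage sketch; the session setup, role ARN, and token path are
+// assumptions, not part of this file:
+//
+//     sess := session.Must(session.NewSession())
+//     creds := NewWebIdentityCredentials(sess,
+//             "arn:aws:iam::123456789012:role/example-role",
+//             "example-session",
+//             "/var/run/secrets/eks.amazonaws.com/serviceaccount/token")
+//     cfg := aws.Config{Credentials: creds}
+//     _ = cfg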
index 152d785b362bdacf49e3be5727f9da2c32db0a89..25a66d1dda22d8ec869f65c7b2410ad32579fae5 100644 (file)
@@ -1,30 +1,61 @@
-// Package csm provides Client Side Monitoring (CSM) which enables sending metrics
-// via UDP connection. Using the Start function will enable the reporting of
-// metrics on a given port. If Start is called, with different parameters, again,
-// a panic will occur.
+// Package csm provides the Client Side Monitoring (CSM) client which enables
+// sending metrics via UDP connection to the CSM agent. This package provides
+// control options and configuration for the CSM client. The client can be
+// controlled manually, or automatically via the SDK's Session configuration.
 //
-// Pause can be called to pause any metrics publishing on a given port. Sessions
-// that have had their handlers modified via InjectHandlers may still be used.
-// However, the handlers will act as a no-op meaning no metrics will be published.
+// Enabling CSM client via SDK's Session configuration
+//
+// The CSM client can be enabled automatically via SDK's Session configuration.
+// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
+// environment variable is set to a non-empty value.
+//
+// The configuration options for the CSM client via the SDK's session
+// configuration are:
+//
+//     * AWS_CSM_PORT=<port number>
+//       The port number the CSM agent will receive metrics on.
+//
+//     * AWS_CSM_HOST=<hostname or ip>
+//       The hostname, or IP address the CSM agent will receive metrics on.
+//       Without port number.
+//
+// Manually enabling the CSM client
+//
+// The CSM client can be started, paused, and resumed manually. The Start
+// function will enable the CSM client to publish metrics to the CSM agent. It
+// is safe to call Start concurrently, but if Start is called additional times
+// with a different ClientID or address, it will panic.
 //
-//     Example:
 //             r, err := csm.Start("clientID", ":31000")
 //             if err != nil {
 //                     panic(fmt.Errorf("failed starting CSM:  %v", err))
 //             }
 //
+// When controlling the CSM client manually, you must also inject its request
+// handlers into the SDK's Session configuration for the SDK's API clients to
+// publish metrics.
+//
 //             sess, err := session.NewSession(&aws.Config{})
 //             if err != nil {
 //                     panic(fmt.Errorf("failed loading session: %v", err))
 //             }
 //
+//             // Add CSM client's metric publishing request handlers to the SDK's
+//             // Session Configuration.
 //             r.InjectHandlers(&sess.Handlers)
 //
-//             client := s3.New(sess)
-//             resp, err := client.GetObject(&s3.GetObjectInput{
-//                     Bucket: aws.String("bucket"),
-//                     Key: aws.String("key"),
-//             })
+// Controlling CSM client
+//
+// Once the CSM client has been enabled the Get function will return a Reporter
+// value that you can use to pause and resume the metrics published to the CSM
+// agent. If the Get function is called before the reporter is enabled with
+// the Start function or via the SDK's Session configuration, nil will be
+// returned.
+//
+// The Pause method can be called to stop the CSM client publishing metrics to
+// the CSM agent. The Continue method will resume metric publishing.
+//
+//             // Get the CSM client Reporter.
+//             r := csm.Get()
 //
 //             // Will pause monitoring
 //             r.Pause()
 //
 //             // Resume monitoring
 //             r.Continue()
-//
-// Start returns a Reporter that is used to enable or disable monitoring. If
-// access to the Reporter is required later, calling Get will return the Reporter
-// singleton.
-//
-//     Example:
-//             r := csm.Get()
-//             r.Continue()
 package csm
index 2f0c6eac9a80d31f1134ec39314fdebd30c595e4..4b19e2800e3c836997b6797d378aff563e113ebc 100644 (file)
@@ -2,6 +2,7 @@ package csm
 
 import (
        "fmt"
+       "strings"
        "sync"
 )
 
@@ -9,19 +10,40 @@ var (
        lock sync.Mutex
 )
 
-// Client side metric handler names
 const (
-       APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
-       APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+       // DefaultPort is used when no port is specified.
+       DefaultPort = "31000"
+
+       // DefaultHost is the host that will be used when none is specified.
+       DefaultHost = "127.0.0.1"
 )
 
-// Start will start the a long running go routine to capture
+// AddressWithDefaults returns a CSM address built from the host and port
+// values. If the host or port is not set, default values will be used
+// instead. If host is "localhost" it will be replaced with "127.0.0.1".
+func AddressWithDefaults(host, port string) string {
+       if len(host) == 0 || strings.EqualFold(host, "localhost") {
+               host = DefaultHost
+       }
+
+       if len(port) == 0 {
+               port = DefaultPort
+       }
+
+       // Only an IPv6 host can contain a colon
+       if strings.Contains(host, ":") {
+               return "[" + host + "]:" + port
+       }
+
+       return host + ":" + port
+}
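+
+// For example (illustrative):
+//
+//     AddressWithDefaults("", "")          // "127.0.0.1:31000"
+//     AddressWithDefaults("localhost", "") // "127.0.0.1:31000"
+//     AddressWithDefaults("::1", "4000")   // "[::1]:4000"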
+
+// Start will start a long-running goroutine to capture
 // client side metrics. Calling Start multiple times will only
 // start the metric listener once and will panic if a different
 // client ID or port is passed in.
 //
-//     Example:
-//             r, err := csm.Start("clientID", "127.0.0.1:8094")
+//             r, err := csm.Start("clientID", "127.0.0.1:31000")
 //             if err != nil {
 //                     panic(fmt.Errorf("expected no error, but received %v", err))
 //             }
index 0b5571acfbf1b282da78f82e83b9a86acc0ca98c..c7008d8c3fc41c6f754a2e8883c0965798eab154 100644 (file)
@@ -10,11 +10,6 @@ import (
        "github.com/aws/aws-sdk-go/aws/request"
 )
 
-const (
-       // DefaultPort is used when no port is specified
-       DefaultPort = "31000"
-)
-
 // Reporter will gather metrics of API requests made and
 // send those metrics to the CSM endpoint.
 type Reporter struct {
@@ -96,7 +91,7 @@ func getMetricException(err awserr.Error) metricException {
 
        switch code {
        case "RequestError",
-               "SerializationError",
+               request.ErrCodeSerialization,
                request.CanceledErrorCode:
                return sdkException{
                        requestException{exception: code, message: msg},
@@ -123,7 +118,7 @@ func (rep *Reporter) sendAPICallMetric(r *request.Request) {
                Type:               aws.String("ApiCall"),
                AttemptCount:       aws.Int(r.RetryCount + 1),
                Region:             r.Config.Region,
-               Latency:            aws.Int(int(time.Now().Sub(r.Time) / time.Millisecond)),
+               Latency:            aws.Int(int(time.Since(r.Time) / time.Millisecond)),
                XAmzRequestID:      aws.String(r.RequestID),
                MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
        }
@@ -190,8 +185,9 @@ func (rep *Reporter) start() {
        }
 }
 
-// Pause will pause the metric channel preventing any new metrics from
-// being added.
+// Pause will pause the metric channel preventing any new metrics from being
+// added. It is safe to call concurrently with other calls to Pause, but
+// calling it concurrently with Continue can lead to unexpected state.
 func (rep *Reporter) Pause() {
        lock.Lock()
        defer lock.Unlock()
@@ -203,8 +199,9 @@ func (rep *Reporter) Pause() {
        rep.close()
 }
 
-// Continue will reopen the metric channel and allow for monitoring
-// to be resumed.
+// Continue will reopen the metric channel and allow for monitoring to be
+// resumed. It is safe to call concurrently with other calls to Continue, but
+// calling it concurrently with Pause can lead to unexpected state.
 func (rep *Reporter) Continue() {
        lock.Lock()
        defer lock.Unlock()
@@ -219,10 +216,18 @@ func (rep *Reporter) Continue() {
        rep.metricsCh.Continue()
 }
 
+// Client side metric handler names
+const (
+       APICallMetricHandlerName        = "awscsm.SendAPICallMetric"
+       APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
+)
+
 // InjectHandlers will enable client side metrics and inject the proper
 // handlers to handle how metrics are sent.
 //
-//     Example:
+// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
+// multiple times may lead to unexpected behavior (e.g. duplicate metrics).
+//
 //             // Start must be called in order to inject the correct handlers
 //             r, err := csm.Start("clientID", "127.0.0.1:8094")
 //             if err != nil {
index d57a1af5992067c75012fb7930c1564df7ee4e64..2c8d5f56d0e231501fcb4b0364d70ca5d1ac958d 100644 (file)
@@ -82,7 +82,7 @@ func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument
        doc := EC2InstanceIdentityDocument{}
        if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
                return EC2InstanceIdentityDocument{},
-                       awserr.New("SerializationError",
+                       awserr.New(request.ErrCodeSerialization,
                                "failed to decode EC2 instance identity document", err)
        }
 
@@ -101,7 +101,7 @@ func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
        info := EC2IAMInfo{}
        if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
                return EC2IAMInfo{},
-                       awserr.New("SerializationError",
+                       awserr.New(request.ErrCodeSerialization,
                                "failed to decode EC2 IAM info", err)
        }
 
index f4438eae9c91d5f30b431bdf72c6de19faa399db..f0c1d31e756a2c661997db930a2d009291ee3ed1 100644 (file)
@@ -123,7 +123,7 @@ func unmarshalHandler(r *request.Request) {
        defer r.HTTPResponse.Body.Close()
        b := &bytes.Buffer{}
        if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-               r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err)
+               r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata response", err)
                return
        }
 
@@ -136,7 +136,7 @@ func unmarshalError(r *request.Request) {
        defer r.HTTPResponse.Body.Close()
        b := &bytes.Buffer{}
        if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
-               r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err)
+               r.Error = awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err)
                return
        }
 
index 50e170eee2c5d0ee8b2440484debc62e86f07956..2e7bd7a0e7c6634f33395373d6f27f5bcdaa0959 100644 (file)
@@ -27,6 +27,7 @@ const (
        EuWest1RegionID      = "eu-west-1"      // EU (Ireland).
        EuWest2RegionID      = "eu-west-2"      // EU (London).
        EuWest3RegionID      = "eu-west-3"      // EU (Paris).
+       MeSouth1RegionID     = "me-south-1"     // Middle East (Bahrain).
        SaEast1RegionID      = "sa-east-1"      // South America (Sao Paulo).
        UsEast1RegionID      = "us-east-1"      // US East (N. Virginia).
        UsEast2RegionID      = "us-east-2"      // US East (Ohio).
@@ -128,6 +129,9 @@ var awsPartition = partition{
                "eu-west-3": region{
                        Description: "EU (Paris)",
                },
+               "me-south-1": region{
+                       Description: "Middle East (Bahrain)",
+               },
                "sa-east-1": region{
                        Description: "South America (Sao Paulo)",
                },
@@ -166,6 +170,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -178,6 +183,7 @@ var awsPartition = partition{
                                Protocols: []string{"https"},
                        },
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
@@ -270,6 +276,12 @@ var awsPartition = partition{
                                                Region: "eu-west-3",
                                        },
                                },
+                               "me-south-1": endpoint{
+                                       Hostname: "api.ecr.me-south-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "me-south-1",
+                                       },
+                               },
                                "sa-east-1": endpoint{
                                        Hostname: "api.ecr.sa-east-1.amazonaws.com",
                                        CredentialScope: credentialScope{
@@ -381,6 +393,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -409,6 +422,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -416,6 +430,24 @@ var awsPartition = partition{
                                "us-west-2":      endpoint{},
                        },
                },
+               "appmesh": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
                "appstream2": service{
                        Defaults: endpoint{
                                Protocols: []string{"https"},
@@ -460,6 +492,7 @@ var awsPartition = partition{
                                "ap-southeast-2": endpoint{},
                                "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "us-east-1":      endpoint{},
@@ -484,6 +517,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -515,9 +549,27 @@ var awsPartition = partition{
                                "us-west-2":      endpoint{},
                        },
                },
+               "backup": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
                "batch": service{
 
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
@@ -584,6 +636,7 @@ var awsPartition = partition{
                        Endpoints: endpoints{
                                "ap-northeast-1": endpoint{},
                                "ap-southeast-1": endpoint{},
+                               "eu-central-1":   endpoint{},
                                "eu-west-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -619,6 +672,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -662,6 +716,7 @@ var awsPartition = partition{
                                },
                        },
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
@@ -709,6 +764,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -726,6 +782,7 @@ var awsPartition = partition{
                                "ap-southeast-2": endpoint{},
                                "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
@@ -789,6 +846,7 @@ var awsPartition = partition{
                "codedeploy": service{
 
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
@@ -800,6 +858,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-1-fips": endpoint{
@@ -937,10 +996,13 @@ var awsPartition = partition{
                "comprehendmedical": service{
 
                        Endpoints: endpoints{
-                               "eu-west-1": endpoint{},
-                               "us-east-1": endpoint{},
-                               "us-east-2": endpoint{},
-                               "us-west-2": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
                        },
                },
                "config": service{
@@ -958,6 +1020,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -971,6 +1034,19 @@ var awsPartition = partition{
                                "us-east-1": endpoint{},
                        },
                },
+               "data.mediastore": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
                "datapipeline": service{
 
                        Endpoints: endpoints{
@@ -1032,6 +1108,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1060,6 +1137,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1070,6 +1148,24 @@ var awsPartition = partition{
                "docdb": service{
 
                        Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{
+                                       Hostname: "rds.ap-northeast-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "ap-northeast-1",
+                                       },
+                               },
+                               "ap-northeast-2": endpoint{
+                                       Hostname: "rds.ap-northeast-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "ap-northeast-2",
+                                       },
+                               },
+                               "ap-southeast-2": endpoint{
+                                       Hostname: "rds.ap-southeast-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "ap-southeast-2",
+                                       },
+                               },
                                "eu-central-1": endpoint{
                                        Hostname: "rds.eu-central-1.amazonaws.com",
                                        CredentialScope: credentialScope{
@@ -1082,6 +1178,12 @@ var awsPartition = partition{
                                                Region: "eu-west-1",
                                        },
                                },
+                               "eu-west-2": endpoint{
+                                       Hostname: "rds.eu-west-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "eu-west-2",
+                                       },
+                               },
                                "us-east-1": endpoint{
                                        Hostname: "rds.us-east-1.amazonaws.com",
                                        CredentialScope: credentialScope{
@@ -1112,6 +1214,7 @@ var awsPartition = partition{
                                "ap-southeast-2": endpoint{},
                                "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "sa-east-1":      endpoint{},
@@ -1133,11 +1236,17 @@ var awsPartition = partition{
                                "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
                                "ca-central-1":   endpoint{},
-                               "eu-central-1":   endpoint{},
-                               "eu-north-1":     endpoint{},
-                               "eu-west-1":      endpoint{},
-                               "eu-west-2":      endpoint{},
-                               "eu-west-3":      endpoint{},
+                               "ca-central-1-fips": endpoint{
+                                       Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "ca-central-1",
+                                       },
+                               },
+                               "eu-central-1": endpoint{},
+                               "eu-north-1":   endpoint{},
+                               "eu-west-1":    endpoint{},
+                               "eu-west-2":    endpoint{},
+                               "eu-west-3":    endpoint{},
                                "local": endpoint{
                                        Hostname:  "localhost:8000",
                                        Protocols: []string{"http"},
@@ -1145,11 +1254,36 @@ var awsPartition = partition{
                                                Region: "us-east-1",
                                        },
                                },
-                               "sa-east-1": endpoint{},
-                               "us-east-1": endpoint{},
+                               "me-south-1": endpoint{},
+                               "sa-east-1":  endpoint{},
+                               "us-east-1":  endpoint{},
+                               "us-east-1-fips": endpoint{
+                                       Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
                                "us-east-2": endpoint{},
+                               "us-east-2-fips": endpoint{
+                                       Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-2",
+                                       },
+                               },
                                "us-west-1": endpoint{},
+                               "us-west-1-fips": endpoint{
+                                       Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-west-1",
+                                       },
+                               },
                                "us-west-2": endpoint{},
+                               "us-west-2-fips": endpoint{
+                                       Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-west-2",
+                                       },
+                               },
                        },
                },
                "ec2": service{
@@ -1169,6 +1303,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1202,6 +1337,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1230,16 +1366,18 @@ var awsPartition = partition{
                                                Region: "us-west-1",
                                        },
                                },
-                               "sa-east-1": endpoint{},
-                               "us-east-1": endpoint{},
-                               "us-east-2": endpoint{},
-                               "us-west-1": endpoint{},
-                               "us-west-2": endpoint{},
+                               "me-south-1": endpoint{},
+                               "sa-east-1":  endpoint{},
+                               "us-east-1":  endpoint{},
+                               "us-east-2":  endpoint{},
+                               "us-west-1":  endpoint{},
+                               "us-west-2":  endpoint{},
                        },
                },
                "elasticbeanstalk": service{
 
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
@@ -1251,6 +1389,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1263,11 +1402,14 @@ var awsPartition = partition{
                        Endpoints: endpoints{
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
                                "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
+                               "eu-west-3":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
                                "us-west-1":      endpoint{},
@@ -1291,6 +1433,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1318,6 +1461,7 @@ var awsPartition = partition{
                                "eu-west-1":  endpoint{},
                                "eu-west-2":  endpoint{},
                                "eu-west-3":  endpoint{},
+                               "me-south-1": endpoint{},
                                "sa-east-1":  endpoint{},
                                "us-east-1": endpoint{
                                        SSLCommonName: "{service}.{region}.{dnsSuffix}",
@@ -1343,10 +1487,12 @@ var awsPartition = partition{
                "email": service{
 
                        Endpoints: endpoints{
-                               "eu-central-1": endpoint{},
-                               "eu-west-1":    endpoint{},
-                               "us-east-1":    endpoint{},
-                               "us-west-2":    endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
                        },
                },
                "entitlement.marketplace": service{
@@ -1402,6 +1548,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1419,6 +1566,7 @@ var awsPartition = partition{
                                "ap-southeast-2": endpoint{},
                                "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
@@ -1435,11 +1583,15 @@ var awsPartition = partition{
                        },
                        Endpoints: endpoints{
                                "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
                                "eu-central-1":   endpoint{},
                                "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
                                "us-west-2":      endpoint{},
                        },
                },
@@ -1447,10 +1599,14 @@ var awsPartition = partition{
 
                        Endpoints: endpoints{
                                "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
                                "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
                                "us-west-2":      endpoint{},
                        },
                },
@@ -1490,6 +1646,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1500,6 +1657,7 @@ var awsPartition = partition{
                "glue": service{
 
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
@@ -1507,9 +1665,11 @@ var awsPartition = partition{
                                "ap-southeast-2": endpoint{},
                                "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
                                "us-west-1":      endpoint{},
@@ -1523,19 +1683,32 @@ var awsPartition = partition{
                        },
                        Endpoints: endpoints{
                                "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
                                "eu-central-1":   endpoint{},
                                "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
                                "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
                                "us-west-2":      endpoint{},
                        },
                },
+               "groundstation": service{
+
+                       Endpoints: endpoints{
+                               "us-east-2": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
                "guardduty": service{
                        IsRegionalized: boxedTrue,
                        Defaults: endpoint{
                                Protocols: []string{"https"},
                        },
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
@@ -1543,6 +1716,7 @@ var awsPartition = partition{
                                "ap-southeast-2": endpoint{},
                                "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
@@ -1595,7 +1769,9 @@ var awsPartition = partition{
                                "ap-south-1":     endpoint{},
                                "ap-southeast-2": endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
                                "us-west-1":      endpoint{},
@@ -1614,11 +1790,16 @@ var awsPartition = partition{
                                "ap-south-1":     endpoint{},
                                "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
+                               "eu-west-3":      endpoint{},
+                               "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
                                "us-west-2":      endpoint{},
                        },
                },
@@ -1633,6 +1814,95 @@ var awsPartition = partition{
                                "us-west-2":      endpoint{},
                        },
                },
+               "iotevents": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "ioteventsdata": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{
+                                       Hostname: "data.iotevents.ap-northeast-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "ap-northeast-1",
+                                       },
+                               },
+                               "ap-southeast-2": endpoint{
+                                       Hostname: "data.iotevents.ap-southeast-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "ap-southeast-2",
+                                       },
+                               },
+                               "eu-central-1": endpoint{
+                                       Hostname: "data.iotevents.eu-central-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "eu-central-1",
+                                       },
+                               },
+                               "eu-west-1": endpoint{
+                                       Hostname: "data.iotevents.eu-west-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "eu-west-1",
+                                       },
+                               },
+                               "us-east-1": endpoint{
+                                       Hostname: "data.iotevents.us-east-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                               "us-east-2": endpoint{
+                                       Hostname: "data.iotevents.us-east-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-2",
+                                       },
+                               },
+                               "us-west-2": endpoint{
+                                       Hostname: "data.iotevents.us-west-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-west-2",
+                                       },
+                               },
+                       },
+               },
+               "iotthingsgraph": service{
+                       Defaults: endpoint{
+                               CredentialScope: credentialScope{
+                                       Service: "iotthingsgraph",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "kafka": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "eu-west-3":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
                "kinesis": service{
 
                        Endpoints: endpoints{
@@ -1648,6 +1918,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1658,11 +1929,16 @@ var awsPartition = partition{
                "kinesisanalytics": service{
 
                        Endpoints: endpoints{
-                               "eu-central-1": endpoint{},
-                               "eu-west-1":    endpoint{},
-                               "us-east-1":    endpoint{},
-                               "us-east-2":    endpoint{},
-                               "us-west-2":    endpoint{},
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
                        },
                },
                "kinesisvideo": service{
@@ -1679,12 +1955,6 @@ var awsPartition = partition{
                "kms": service{
 
                        Endpoints: endpoints{
-                               "ProdFips": endpoint{
-                                       Hostname: "kms-fips.ca-central-1.amazonaws.com",
-                                       CredentialScope: credentialScope{
-                                               Region: "ca-central-1",
-                                       },
-                               },
                                "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
@@ -1697,6 +1967,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1719,6 +1990,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1729,16 +2001,22 @@ var awsPartition = partition{
                "license-manager": service{
 
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
                                "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
+                               "eu-west-3":      endpoint{},
+                               "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
                                "us-west-2":      endpoint{},
                        },
                },
@@ -1775,6 +2053,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1843,6 +2122,7 @@ var awsPartition = partition{
                                "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
@@ -1873,6 +2153,7 @@ var awsPartition = partition{
                                "ap-northeast-2": endpoint{},
                                "ap-southeast-2": endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-west-2":      endpoint{},
@@ -1945,6 +2226,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -1957,11 +2239,14 @@ var awsPartition = partition{
                        Endpoints: endpoints{
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
                                "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
+                               "eu-west-3":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
                                "us-west-1":      endpoint{},
@@ -1987,6 +2272,12 @@ var awsPartition = partition{
                                                Region: "ap-northeast-1",
                                        },
                                },
+                               "ap-northeast-2": endpoint{
+                                       Hostname: "rds.ap-northeast-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "ap-northeast-2",
+                                       },
+                               },
                                "ap-south-1": endpoint{
                                        Hostname: "rds.ap-south-1.amazonaws.com",
                                        CredentialScope: credentialScope{
@@ -2008,7 +2299,13 @@ var awsPartition = partition{
                                "eu-central-1": endpoint{
                                        Hostname: "rds.eu-central-1.amazonaws.com",
                                        CredentialScope: credentialScope{
-                                               Region: "eu-central-1",
+                                               Region: "eu-central-1",
+                                       },
+                               },
+                               "eu-north-1": endpoint{
+                                       Hostname: "rds.eu-north-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "eu-north-1",
                                        },
                                },
                                "eu-west-1": endpoint{
@@ -2126,6 +2423,38 @@ var awsPartition = partition{
                                "us-west-2":      endpoint{},
                        },
                },
+               "projects.iot1click": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "ram": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "eu-west-3":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
                "rds": service{
 
                        Endpoints: endpoints{
@@ -2165,6 +2494,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -2178,10 +2508,14 @@ var awsPartition = partition{
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
                                "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
                                "us-west-2":      endpoint{},
                        },
                },
@@ -2200,6 +2534,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -2211,8 +2546,11 @@ var awsPartition = partition{
 
                        Endpoints: endpoints{
                                "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "eu-central-1":   endpoint{},
                                "eu-west-1":      endpoint{},
                                "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
                                "us-west-2":      endpoint{},
                        },
                },
@@ -2281,9 +2619,33 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "us-east-1":      endpoint{},
-                               "us-east-2":      endpoint{},
-                               "us-west-1":      endpoint{},
-                               "us-west-2":      endpoint{},
+                               "us-east-1-fips": endpoint{
+                                       Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                               "us-east-2": endpoint{},
+                               "us-east-2-fips": endpoint{
+                                       Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-2",
+                                       },
+                               },
+                               "us-west-1": endpoint{},
+                               "us-west-1-fips": endpoint{
+                                       Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-west-1",
+                                       },
+                               },
+                               "us-west-2": endpoint{},
+                               "us-west-2-fips": endpoint{
+                                       Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-west-2",
+                                       },
+                               },
                        },
                },
                "s3": service{
@@ -2319,8 +2681,9 @@ var awsPartition = partition{
                                        Hostname:          "s3.eu-west-1.amazonaws.com",
                                        SignatureVersions: []string{"s3", "s3v4"},
                                },
-                               "eu-west-2": endpoint{},
-                               "eu-west-3": endpoint{},
+                               "eu-west-2":  endpoint{},
+                               "eu-west-3":  endpoint{},
+                               "me-south-1": endpoint{},
                                "s3-external-1": endpoint{
                                        Hostname:          "s3-external-1.amazonaws.com",
                                        SignatureVersions: []string{"s3", "s3v4"},
@@ -2571,6 +2934,7 @@ var awsPartition = partition{
                                "ap-southeast-2": endpoint{},
                                "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
+                               "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
@@ -2714,6 +3078,7 @@ var awsPartition = partition{
                "sms": service{
 
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
@@ -2736,6 +3101,7 @@ var awsPartition = partition{
 
                        Endpoints: endpoints{
                                "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
                                "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
@@ -2768,6 +3134,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -2817,7 +3184,8 @@ var awsPartition = partition{
                                                Region: "us-west-2",
                                        },
                                },
-                               "sa-east-1": endpoint{},
+                               "me-south-1": endpoint{},
+                               "sa-east-1":  endpoint{},
                                "us-east-1": endpoint{
                                        SSLCommonName: "queue.{dnsSuffix}",
                                },
@@ -2841,6 +3209,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -2863,6 +3232,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -2884,6 +3254,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -2905,11 +3276,17 @@ var awsPartition = partition{
                                "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
                                "ca-central-1":   endpoint{},
-                               "eu-central-1":   endpoint{},
-                               "eu-north-1":     endpoint{},
-                               "eu-west-1":      endpoint{},
-                               "eu-west-2":      endpoint{},
-                               "eu-west-3":      endpoint{},
+                               "ca-central-1-fips": endpoint{
+                                       Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "ca-central-1",
+                                       },
+                               },
+                               "eu-central-1": endpoint{},
+                               "eu-north-1":   endpoint{},
+                               "eu-west-1":    endpoint{},
+                               "eu-west-2":    endpoint{},
+                               "eu-west-3":    endpoint{},
                                "local": endpoint{
                                        Hostname:  "localhost:8000",
                                        Protocols: []string{"http"},
@@ -2917,11 +3294,36 @@ var awsPartition = partition{
                                                Region: "us-east-1",
                                        },
                                },
-                               "sa-east-1": endpoint{},
-                               "us-east-1": endpoint{},
+                               "me-south-1": endpoint{},
+                               "sa-east-1":  endpoint{},
+                               "us-east-1":  endpoint{},
+                               "us-east-1-fips": endpoint{
+                                       Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
                                "us-east-2": endpoint{},
+                               "us-east-2-fips": endpoint{
+                                       Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-2",
+                                       },
+                               },
                                "us-west-1": endpoint{},
+                               "us-west-1-fips": endpoint{
+                                       Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-west-1",
+                                       },
+                               },
                                "us-west-2": endpoint{},
+                               "us-west-2-fips": endpoint{
+                                       Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-west-2",
+                                       },
+                               },
                        },
                },
                "sts": service{
@@ -2956,8 +3358,14 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
-                               "sa-east-1":      endpoint{},
-                               "us-east-1":      endpoint{},
+                               "me-south-1": endpoint{
+                                       Hostname: "sts.me-south-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "me-south-1",
+                                       },
+                               },
+                               "sa-east-1": endpoint{},
+                               "us-east-1": endpoint{},
                                "us-east-1-fips": endpoint{
                                        Hostname: "sts-fips.us-east-1.amazonaws.com",
                                        CredentialScope: credentialScope{
@@ -2988,9 +3396,15 @@ var awsPartition = partition{
                        },
                },
                "support": service{
+                       PartitionEndpoint: "aws-global",
 
                        Endpoints: endpoints{
-                               "us-east-1": endpoint{},
+                               "aws-global": endpoint{
+                                       Hostname: "support.us-east-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
                        },
                },
                "swf": service{
@@ -3008,6 +3422,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -3030,6 +3445,7 @@ var awsPartition = partition{
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
                                "eu-west-3":      endpoint{},
+                               "me-south-1":     endpoint{},
                                "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
@@ -3061,7 +3477,11 @@ var awsPartition = partition{
                                Protocols: []string{"https"},
                        },
                        Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
                                "eu-west-1":      endpoint{},
                                "us-east-1":      endpoint{},
@@ -3105,12 +3525,16 @@ var awsPartition = partition{
                        Endpoints: endpoints{
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
                                "ap-southeast-1": endpoint{},
                                "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
                                "eu-central-1":   endpoint{},
                                "eu-north-1":     endpoint{},
                                "eu-west-1":      endpoint{},
                                "eu-west-2":      endpoint{},
+                               "eu-west-3":      endpoint{},
+                               "sa-east-1":      endpoint{},
                                "us-east-1":      endpoint{},
                                "us-east-2":      endpoint{},
                                "us-west-1":      endpoint{},
@@ -3157,6 +3581,7 @@ var awsPartition = partition{
                "xray": service{
 
                        Endpoints: endpoints{
+                               "ap-east-1":      endpoint{},
                                "ap-northeast-1": endpoint{},
                                "ap-northeast-2": endpoint{},
                                "ap-south-1":     endpoint{},
@@ -3433,6 +3858,15 @@ var awscnPartition = partition{
                                "cn-northwest-1": endpoint{},
                        },
                },
+               "greengrass": service{
+                       IsRegionalized: boxedTrue,
+                       Defaults: endpoint{
+                               Protocols: []string{"https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
                "iam": service{
                        PartitionEndpoint: "aws-cn-global",
                        IsRegionalized:    boxedFalse,
@@ -3463,6 +3897,13 @@ var awscnPartition = partition{
                                "cn-northwest-1": endpoint{},
                        },
                },
+               "kms": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1":     endpoint{},
+                               "cn-northwest-1": endpoint{},
+                       },
+               },
                "lambda": service{
 
                        Endpoints: endpoints{
@@ -3470,6 +3911,13 @@ var awscnPartition = partition{
                                "cn-northwest-1": endpoint{},
                        },
                },
+               "license-manager": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1":     endpoint{},
+                               "cn-northwest-1": endpoint{},
+                       },
+               },
                "logs": service{
 
                        Endpoints: endpoints{
@@ -3480,7 +3928,12 @@ var awscnPartition = partition{
                "mediaconvert": service{
 
                        Endpoints: endpoints{
-                               "cn-northwest-1": endpoint{},
+                               "cn-northwest-1": endpoint{
+                                       Hostname: "subscribe.mediaconvert.cn-northwest-1.amazonaws.com.cn",
+                                       CredentialScope: credentialScope{
+                                               Region: "cn-northwest-1",
+                                       },
+                               },
                        },
                },
                "monitoring": service{
@@ -3615,6 +4068,18 @@ var awscnPartition = partition{
                                "cn-northwest-1": endpoint{},
                        },
                },
+               "support": service{
+                       PartitionEndpoint: "aws-cn-global",
+
+                       Endpoints: endpoints{
+                               "aws-cn-global": endpoint{
+                                       Hostname: "support.cn-north-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "cn-north-1",
+                                       },
+                               },
+                       },
+               },
                "swf": service{
 
                        Endpoints: endpoints{
@@ -3668,6 +4133,15 @@ var awsusgovPartition = partition{
                                "us-gov-west-1": endpoint{},
                        },
                },
+               "acm-pca": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"https"},
+                       },
+                       Endpoints: endpoints{
+                               "us-gov-east-1": endpoint{},
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
                "api.ecr": service{
 
                        Endpoints: endpoints{
@@ -3713,6 +4187,7 @@ var awsusgovPartition = partition{
                "athena": service{
 
                        Endpoints: endpoints{
+                               "us-gov-east-1": endpoint{},
                                "us-gov-west-1": endpoint{},
                        },
                },
@@ -3762,9 +4237,17 @@ var awsusgovPartition = partition{
                                "us-gov-west-1": endpoint{},
                        },
                },
+               "codebuild": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-east-1": endpoint{},
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
                "codecommit": service{
 
                        Endpoints: endpoints{
+                               "us-gov-east-1": endpoint{},
                                "us-gov-west-1": endpoint{},
                        },
                },
@@ -3802,6 +4285,12 @@ var awsusgovPartition = partition{
                                "us-gov-west-1": endpoint{},
                        },
                },
+               "datasync": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
                "directconnect": service{
 
                        Endpoints: endpoints{
@@ -3819,6 +4308,7 @@ var awsusgovPartition = partition{
                "ds": service{
 
                        Endpoints: endpoints{
+                               "us-gov-east-1": endpoint{},
                                "us-gov-west-1": endpoint{},
                        },
                },
@@ -3826,6 +4316,12 @@ var awsusgovPartition = partition{
 
                        Endpoints: endpoints{
                                "us-gov-east-1": endpoint{},
+                               "us-gov-east-1-fips": endpoint{
+                                       Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-gov-east-1",
+                                       },
+                               },
                                "us-gov-west-1": endpoint{},
                                "us-gov-west-1-fips": endpoint{
                                        Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
@@ -3927,6 +4423,7 @@ var awsusgovPartition = partition{
                "firehose": service{
 
                        Endpoints: endpoints{
+                               "us-gov-east-1": endpoint{},
                                "us-gov-west-1": endpoint{},
                        },
                },
@@ -3941,6 +4438,16 @@ var awsusgovPartition = partition{
                },
                "glue": service{
 
+                       Endpoints: endpoints{
+                               "us-gov-east-1": endpoint{},
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "greengrass": service{
+                       IsRegionalized: boxedTrue,
+                       Defaults: endpoint{
+                               Protocols: []string{"https"},
+                       },
                        Endpoints: endpoints{
                                "us-gov-west-1": endpoint{},
                        },
@@ -4048,12 +4555,31 @@ var awsusgovPartition = partition{
                                "us-gov-west-1": endpoint{},
                        },
                },
+               "organizations": service{
+                       PartitionEndpoint: "aws-us-gov-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-us-gov-global": endpoint{
+                                       Hostname: "organizations.us-gov-west-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-gov-west-1",
+                                       },
+                               },
+                       },
+               },
                "polly": service{
 
                        Endpoints: endpoints{
                                "us-gov-west-1": endpoint{},
                        },
                },
+               "ram": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
                "rds": service{
 
                        Endpoints: endpoints{
@@ -4137,6 +4663,28 @@ var awsusgovPartition = partition{
                                },
                        },
                },
+               "secretsmanager": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                               "us-gov-west-1-fips": endpoint{
+                                       Hostname: "secretsmanager-fips.us-gov-west-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-gov-west-1",
+                                       },
+                               },
+                       },
+               },
+               "serverlessrepo": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"https"},
+                       },
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{
+                                       Protocols: []string{"https"},
+                               },
+                       },
+               },
                "sms": service{
 
                        Endpoints: endpoints{
@@ -4198,6 +4746,12 @@ var awsusgovPartition = partition{
                        },
                        Endpoints: endpoints{
                                "us-gov-east-1": endpoint{},
+                               "us-gov-east-1-fips": endpoint{
+                                       Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-gov-east-1",
+                                       },
+                               },
                                "us-gov-west-1": endpoint{},
                                "us-gov-west-1-fips": endpoint{
                                        Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
index 000dd79eec47b2382f4cf7dab55e6ecb3a4fc4a2..ca8fc828e15986a6fec6629dc1a6cebb4a9eae25 100644 (file)
@@ -2,7 +2,7 @@ package endpoints
 
 // Service identifiers
 //
-// Deprecated: Use client package's EndpointID value instead of these
+// Deprecated: Use client package's EndpointsID value instead of these
 // ServiceIDs. These IDs are not maintained, and are out of date.
 const (
        A4bServiceID                          = "a4b"                          // A4b.
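The corrected deprecation note points at the EndpointsID constant each generated client package maintains. A short sketch of the two lookups, using the vendored s3 package purely as an example:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/endpoints"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        fmt.Println(endpoints.S3ServiceID) // deprecated: may drift out of date
        fmt.Println(s3.EndpointsID)        // maintained alongside the client
    }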
index 271da432ce177c56264f173b0150836267d73e8a..d9b37f4d32ad08be83974d38db738771c7ee8019 100644 (file)
@@ -1,18 +1,17 @@
-// +build !appengine,!plan9
-
 package request
 
 import (
-       "net"
-       "os"
-       "syscall"
+       "strings"
 )
 
 func isErrConnectionReset(err error) bool {
-       if opErr, ok := err.(*net.OpError); ok {
-               if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
-                       return sysErr.Err == syscall.ECONNRESET
-               }
+       if strings.Contains(err.Error(), "read: connection reset") {
+               return false
+       }
+
+       if strings.Contains(err.Error(), "connection reset") ||
+               strings.Contains(err.Error(), "broken pipe") {
+               return true
        }
 
        return false
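The platform-specific syscall inspection is replaced by string matching: read-side resets are deliberately reported as non-retryable, while other resets and broken pipes are. A standalone copy of the new matcher to illustrate (isErrConnectionReset itself is unexported; isReset is a hypothetical stand-in):

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    // isReset mirrors the new isErrConnectionReset logic.
    func isReset(err error) bool {
        if strings.Contains(err.Error(), "read: connection reset") {
            return false // a reset seen on read is not blindly retried
        }
        if strings.Contains(err.Error(), "connection reset") ||
            strings.Contains(err.Error(), "broken pipe") {
            return true
        }
        return false
    }

    func main() {
        fmt.Println(isReset(errors.New("read tcp: read: connection reset by peer"))) // false
        fmt.Println(isReset(errors.New("write tcp: connection reset by peer")))      // true
        fmt.Println(isReset(errors.New("write tcp: broken pipe")))                   // true
    }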
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go
deleted file mode 100644 (file)
index daf9eca..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build appengine plan9
-
-package request
-
-import (
-       "strings"
-)
-
-func isErrConnectionReset(err error) bool {
-       return strings.Contains(err.Error(), "connection reset")
-}
index 8ef8548a96d8cfb97321e841a7c26dade232b588..627ec722c051824ce128ea826d84abc7ae7e3b58 100644 (file)
@@ -59,6 +59,51 @@ func (h *Handlers) Clear() {
        h.Complete.Clear()
 }
 
+// IsEmpty reports whether there are no handlers in any of the handler lists.
+func (h *Handlers) IsEmpty() bool {
+       if h.Validate.Len() != 0 {
+               return false
+       }
+       if h.Build.Len() != 0 {
+               return false
+       }
+       if h.Send.Len() != 0 {
+               return false
+       }
+       if h.Sign.Len() != 0 {
+               return false
+       }
+       if h.Unmarshal.Len() != 0 {
+               return false
+       }
+       if h.UnmarshalStream.Len() != 0 {
+               return false
+       }
+       if h.UnmarshalMeta.Len() != 0 {
+               return false
+       }
+       if h.UnmarshalError.Len() != 0 {
+               return false
+       }
+       if h.ValidateResponse.Len() != 0 {
+               return false
+       }
+       if h.Retry.Len() != 0 {
+               return false
+       }
+       if h.AfterRetry.Len() != 0 {
+               return false
+       }
+       if h.CompleteAttempt.Len() != 0 {
+               return false
+       }
+       if h.Complete.Len() != 0 {
+               return false
+       }
+
+       return true
+}
+
 // A HandlerListRunItem represents an entry in the HandlerList which
 // is being run.
 type HandlerListRunItem struct {
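IsEmpty exists so the session changes later in this diff can tell whether a caller populated Options.Handlers or the defaults should be used. A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        var h request.Handlers
        fmt.Println(h.IsEmpty()) // true: the zero value has no handlers

        h.Send.PushBack(func(r *request.Request) { /* custom send hook */ })
        fmt.Println(h.IsEmpty()) // false: one handler list is now non-empty
    }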
index b0c2ef4fe677f9162b06d659367eeb74443eed6e..9370fa50c3827d6d7325ada6b3b9c4da619f0fe0 100644 (file)
@@ -15,12 +15,15 @@ type offsetReader struct {
        closed bool
 }
 
-func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) {
        reader := &offsetReader{}
-       buf.Seek(offset, sdkio.SeekStart)
+       _, err := buf.Seek(offset, sdkio.SeekStart)
+       if err != nil {
+               return nil, err
+       }
 
        reader.buf = buf
-       return reader
+       return reader, nil
 }
 
 // Close will close the instance of the offset reader's access to
@@ -54,7 +57,9 @@ func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
 
 // CloseAndCopy will return a new offsetReader with a copy of the old buffer
 // and close the old buffer.
-func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
-       o.Close()
+func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) {
+       if err := o.Close(); err != nil {
+               return nil, err
+       }
        return newOffsetReader(o.buf, offset)
 }
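newOffsetReader and CloseAndCopy now surface Seek failures instead of silently reading from the wrong offset. A small sketch of the pattern, with a hypothetical failingSeeker standing in for a reader whose Seek can fail:

    package main

    import (
        "errors"
        "fmt"
        "io"
        "strings"
    )

    // failingSeeker is a hypothetical ReadSeeker whose Seek always errors.
    type failingSeeker struct{ io.ReadSeeker }

    func (failingSeeker) Seek(offset int64, whence int) (int64, error) {
        return 0, errors.New("seek failed")
    }

    // seekTo propagates the Seek error, as the new constructor does.
    func seekTo(buf io.ReadSeeker, offset int64) (io.ReadSeeker, error) {
        if _, err := buf.Seek(offset, io.SeekStart); err != nil {
            return nil, err
        }
        return buf, nil
    }

    func main() {
        if _, err := seekTo(strings.NewReader("body"), 0); err == nil {
            fmt.Println("seekable body: ok")
        }
        if _, err := seekTo(failingSeeker{strings.NewReader("body")}, 0); err != nil {
            fmt.Println("propagated:", err) // propagated: seek failed
        }
    }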
index 8f2eb3e43c57a98199587d462293328796aed9dd..e7c9b2b61af7a7597a465b5d9a42bada4aa8e16d 100644 (file)
@@ -4,6 +4,7 @@ import (
        "bytes"
        "fmt"
        "io"
+       "net"
        "net/http"
        "net/url"
        "reflect"
@@ -231,6 +232,10 @@ func (r *Request) WillRetry() bool {
        return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
 }
 
+func fmtAttemptCount(retryCount, maxRetries int) string {
+       return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
+}
+
 // ParamsFilled returns if the request's parameters have been populated
 // and the parameters are valid. False is returned if no parameters are
 // provided or invalid.
@@ -259,7 +264,18 @@ func (r *Request) SetStringBody(s string) {
 // SetReaderBody will set the request's body reader.
 func (r *Request) SetReaderBody(reader io.ReadSeeker) {
        r.Body = reader
-       r.BodyStart, _ = reader.Seek(0, sdkio.SeekCurrent) // Get the Bodies current offset.
+
+       if aws.IsReaderSeekable(reader) {
+               var err error
+               // Get the Body's current offset so retries will start from the same
+               // initial position.
+               r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent)
+               if err != nil {
+                       r.Error = awserr.New(ErrCodeSerialization,
+                               "failed to determine start of request body", err)
+                       return
+               }
+       }
        r.ResetBody()
 }
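The guard relies on aws.IsReaderSeekable, which this vendored SDK exposes. A sketch of how seekability decides whether the initial offset is recorded:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "strings"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        fmt.Println(aws.IsReaderSeekable(bytes.NewReader([]byte("body")))) // true

        // ReadSeekCloser can wrap a non-seekable stream; IsReaderSeekable
        // then reports false and SetReaderBody skips the initial Seek.
        stream := aws.ReadSeekCloser(io.LimitReader(strings.NewReader("body"), 4))
        fmt.Println(aws.IsReaderSeekable(stream)) // false
    }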
 
@@ -330,16 +346,15 @@ func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, err
        return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
 }
 
-func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+const (
+       notRetrying = "not retrying"
+)
+
+func debugLogReqError(r *Request, stage, retryStr string, err error) {
        if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
                return
        }
 
-       retryStr := "not retrying"
-       if retrying {
-               retryStr = "will retry"
-       }
-
        r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
                stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
 }
@@ -358,12 +373,12 @@ func (r *Request) Build() error {
        if !r.built {
                r.Handlers.Validate.Run(r)
                if r.Error != nil {
-                       debugLogReqError(r, "Validate Request", false, r.Error)
+                       debugLogReqError(r, "Validate Request", notRetrying, r.Error)
                        return r.Error
                }
                r.Handlers.Build.Run(r)
                if r.Error != nil {
-                       debugLogReqError(r, "Build Request", false, r.Error)
+                       debugLogReqError(r, "Build Request", notRetrying, r.Error)
                        return r.Error
                }
                r.built = true
@@ -379,7 +394,7 @@ func (r *Request) Build() error {
 func (r *Request) Sign() error {
        r.Build()
        if r.Error != nil {
-               debugLogReqError(r, "Build Request", false, r.Error)
+               debugLogReqError(r, "Build Request", notRetrying, r.Error)
                return r.Error
        }
 
@@ -387,12 +402,16 @@ func (r *Request) Sign() error {
        return r.Error
 }
 
-func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
+func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) {
        if r.safeBody != nil {
                r.safeBody.Close()
        }
 
-       r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+       r.safeBody, err = newOffsetReader(r.Body, r.BodyStart)
+       if err != nil {
+               return nil, awserr.New(ErrCodeSerialization,
+                       "failed to get next request body reader", err)
+       }
 
        // Go 1.8 tightened and clarified the rules code needs to use when building
        // requests with the http package. Go 1.8 removed the automatic detection
@@ -409,10 +428,10 @@ func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
        // Related golang/go#18257
        l, err := aws.SeekerLen(r.Body)
        if err != nil {
-               return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
+               return nil, awserr.New(ErrCodeSerialization,
+                       "failed to compute request body size", err)
        }
 
-       var body io.ReadCloser
        if l == 0 {
                body = NoBody
        } else if l > 0 {
@@ -473,13 +492,13 @@ func (r *Request) Send() error {
                r.AttemptTime = time.Now()
 
                if err := r.Sign(); err != nil {
-                       debugLogReqError(r, "Sign Request", false, err)
+                       debugLogReqError(r, "Sign Request", notRetrying, err)
                        return err
                }
 
                if err := r.sendRequest(); err == nil {
                        return nil
-               } else if !shouldRetryCancel(r.Error) {
+               } else if !shouldRetryError(r.Error) {
                        return err
                } else {
                        r.Handlers.Retry.Run(r)
@@ -489,13 +508,16 @@ func (r *Request) Send() error {
                                return r.Error
                        }
 
-                       r.prepareRetry()
+                       if err := r.prepareRetry(); err != nil {
+                               r.Error = err
+                               return err
+                       }
                        continue
                }
        }
 }
 
-func (r *Request) prepareRetry() {
+func (r *Request) prepareRetry() error {
        if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
                r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
                        r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
@@ -506,12 +528,19 @@ func (r *Request) prepareRetry() {
        // the request's body even though the Client's Do returned.
        r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
        r.ResetBody()
+       if err := r.Error; err != nil {
+               return awserr.New(ErrCodeSerialization,
+                       "failed to prepare body for retry", err)
+
+       }
 
        // Closing response body to ensure that no response body is leaked
        // between retry attempts.
        if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
                r.HTTPResponse.Body.Close()
        }
+
+       return nil
 }
 
 func (r *Request) sendRequest() (sendErr error) {
@@ -520,7 +549,9 @@ func (r *Request) sendRequest() (sendErr error) {
        r.Retryable = nil
        r.Handlers.Send.Run(r)
        if r.Error != nil {
-               debugLogReqError(r, "Send Request", r.WillRetry(), r.Error)
+               debugLogReqError(r, "Send Request",
+                       fmtAttemptCount(r.RetryCount, r.MaxRetries()),
+                       r.Error)
                return r.Error
        }
 
@@ -528,13 +559,17 @@ func (r *Request) sendRequest() (sendErr error) {
        r.Handlers.ValidateResponse.Run(r)
        if r.Error != nil {
                r.Handlers.UnmarshalError.Run(r)
-               debugLogReqError(r, "Validate Response", r.WillRetry(), r.Error)
+               debugLogReqError(r, "Validate Response",
+                       fmtAttemptCount(r.RetryCount, r.MaxRetries()),
+                       r.Error)
                return r.Error
        }
 
        r.Handlers.Unmarshal.Run(r)
        if r.Error != nil {
-               debugLogReqError(r, "Unmarshal Response", r.WillRetry(), r.Error)
+               debugLogReqError(r, "Unmarshal Response",
+                       fmtAttemptCount(r.RetryCount, r.MaxRetries()),
+                       r.Error)
                return r.Error
        }
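With fmtAttemptCount in place, debug logs carry an explicit attempt counter instead of the old will retry / not retrying wording. A sketch of turning those logs on; the service client is left out since any vendored client will do:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Failed sends now log lines shaped like:
        //   DEBUG: Send Request <service>/<operation> failed, attempt 0/3, error ...
        sess := session.Must(session.NewSession(&aws.Config{
            LogLevel: aws.LogLevel(aws.LogDebugWithRequestErrors |
                aws.LogDebugWithRequestRetries),
            MaxRetries: aws.Int(3),
        }))
        _ = sess // pass to a service client, e.g. s3.New(sess)
    }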
 
@@ -565,13 +600,13 @@ type temporary interface {
        Temporary() bool
 }
 
-func shouldRetryCancel(err error) bool {
-       switch err := err.(type) {
+func shouldRetryError(origErr error) bool {
+       switch err := origErr.(type) {
        case awserr.Error:
                if err.Code() == CanceledErrorCode {
                        return false
                }
-               return shouldRetryCancel(err.OrigErr())
+               return shouldRetryError(err.OrigErr())
        case *url.Error:
                if strings.Contains(err.Error(), "connection refused") {
                        // Refused connections should be retried as the service may not yet
@@ -581,14 +616,17 @@ func shouldRetryCancel(err error) bool {
                }
                // *url.Error only implements Temporary after golang 1.6 but since
                // url.Error only wraps the error:
-               return shouldRetryCancel(err.Err)
+               return shouldRetryError(err.Err)
        case temporary:
+               if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
+                       return true
+               }
                // If the error is temporary, we want to allow continuation of the
                // retry process
-               return err.Temporary()
+               return err.Temporary() || isErrConnectionReset(origErr)
        case nil:
                // `awserr.Error.OrigErr()` can be nil, meaning there was an error but
-               // because we don't know the cause, it is marked as retriable. See
+               // because we don't know the cause, it is marked as retryable. See
                // TestRequest4xxUnretryable for an example.
                return true
        default:
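The renamed shouldRetryError additionally retries dial-phase net.OpErrors and connection resets, while explicit cancellation stays terminal. A simplified, hypothetical mirror for illustration; the hunk truncates at the default case, so the final return true is an assumption:

    package main

    import (
        "fmt"
        "net"
        "net/url"
        "strings"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/request"
    )

    // retryable loosely mirrors the unexported shouldRetryError.
    func retryable(origErr error) bool {
        switch err := origErr.(type) {
        case awserr.Error:
            if err.Code() == request.CanceledErrorCode {
                return false // explicit cancellation is never retried
            }
            return retryable(err.OrigErr())
        case *url.Error:
            if strings.Contains(err.Error(), "connection refused") {
                return true // the service may not be up yet
            }
            return retryable(err.Err)
        case *net.OpError:
            // New in this change: dial failures are retried outright.
            return err.Op == "dial" || err.Temporary()
        case nil:
            return true // unknown cause: marked retryable
        default:
            return true // assumption: the default branch is truncated above
        }
    }

    func main() {
        fmt.Println(retryable(awserr.New(request.CanceledErrorCode, "canceled", nil))) // false
        fmt.Println(retryable(&net.OpError{Op: "dial", Net: "tcp",
            Err: fmt.Errorf("i/o timeout")})) // true
    }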
index 7c6a8000f6751244fedc0997c1edb61d54bf2602..de1292f45a23d6fe7c23725c809f97b66a0eb952 100644 (file)
@@ -4,6 +4,8 @@ package request
 
 import (
        "net/http"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
 )
 
 // NoBody is a http.NoBody reader instructing Go HTTP client to not include
@@ -24,7 +26,8 @@ var NoBody = http.NoBody
 func (r *Request) ResetBody() {
        body, err := r.getNextRequestBody()
        if err != nil {
-               r.Error = err
+               r.Error = awserr.New(ErrCodeSerialization,
+                       "failed to reset request body", err)
                return
        }
 
index a633ed5acfa3ec1f8dab7ad1d41b24775d1c7c09..f093fc542df0d4bc4f7f4bdbe5dde20286c14db6 100644 (file)
@@ -146,7 +146,7 @@ func (r *Request) nextPageTokens() []interface{} {
                                return nil
                        }
                case bool:
-                       if v == false {
+                       if !v {
                                return nil
                        }
                }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
new file mode 100644 (file)
index 0000000..ce41518
--- /dev/null
@@ -0,0 +1,258 @@
+package session
+
+import (
+       "fmt"
+       "os"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
+       "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+       "github.com/aws/aws-sdk-go/aws/defaults"
+       "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+func resolveCredentials(cfg *aws.Config,
+       envCfg envConfig, sharedCfg sharedConfig,
+       handlers request.Handlers,
+       sessOpts Options,
+) (*credentials.Credentials, error) {
+
+       switch {
+       case len(envCfg.Profile) != 0:
+               // User explicitly provided a Profile, so load from shared config
+               // first.
+               return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+
+       case envCfg.Creds.HasKeys():
+               // Environment credentials
+               return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil
+
+       case len(envCfg.WebIdentityTokenFilePath) != 0:
+               // Web identity token from the environment; RoleARN is required to
+               // also be set.
+               return assumeWebIdentity(cfg, handlers,
+                       envCfg.WebIdentityTokenFilePath,
+                       envCfg.RoleARN,
+                       envCfg.RoleSessionName,
+               )
+
+       default:
+               // Fallback to the "default" credential resolution chain.
+               return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
+       }
+}
+
+// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but
+// 'AWS_ROLE_ARN' was not set.
+var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
+
+// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
+// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
+var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
+
+func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
+       filepath string,
+       roleARN, sessionName string,
+) (*credentials.Credentials, error) {
+
+       if len(filepath) == 0 {
+               return nil, WebIdentityEmptyTokenFilePathErr
+       }
+
+       if len(roleARN) == 0 {
+               return nil, WebIdentityEmptyRoleARNErr
+       }
+
+       creds := stscreds.NewWebIdentityCredentials(
+               &Session{
+                       Config:   cfg,
+                       Handlers: handlers.Copy(),
+               },
+               roleARN,
+               sessionName,
+               filepath,
+       )
+
+       return creds, nil
+}
+
+func resolveCredsFromProfile(cfg *aws.Config,
+       envCfg envConfig, sharedCfg sharedConfig,
+       handlers request.Handlers,
+       sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+       switch {
+       case sharedCfg.SourceProfile != nil:
+               // Assume IAM role with credentials source from a different profile.
+               creds, err = resolveCredsFromProfile(cfg, envCfg,
+                       *sharedCfg.SourceProfile, handlers, sessOpts,
+               )
+
+       case sharedCfg.Creds.HasKeys():
+               // Static Credentials from Shared Config/Credentials file.
+               creds = credentials.NewStaticCredentialsFromCreds(
+                       sharedCfg.Creds,
+               )
+
+       case len(sharedCfg.CredentialProcess) != 0:
+               // Get credentials from CredentialProcess
+               creds = processcreds.NewCredentials(sharedCfg.CredentialProcess)
+
+       case len(sharedCfg.CredentialSource) != 0:
+               creds, err = resolveCredsFromSource(cfg, envCfg,
+                       sharedCfg, handlers, sessOpts,
+               )
+
+       case len(sharedCfg.WebIdentityTokenFile) != 0:
+               // Credentials from Assume Web Identity token require an IAM Role, and
+               // that roll will be assumed. May be wrapped with another assume role
+               // via SourceProfile.
+               return assumeWebIdentity(cfg, handlers,
+                       sharedCfg.WebIdentityTokenFile,
+                       sharedCfg.RoleARN,
+                       sharedCfg.RoleSessionName,
+               )
+
+       default:
+               // Fallback to default credentials provider, include mock errors for
+               // the credential chain so user can identify why credentials failed to
+               // be retrieved.
+               creds = credentials.NewCredentials(&credentials.ChainProvider{
+                       VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+                       Providers: []credentials.Provider{
+                               &credProviderError{
+                                       Err: awserr.New("EnvAccessKeyNotFound",
+                                               "failed to find credentials in the environment.", nil),
+                               },
+                               &credProviderError{
+                                       Err: awserr.New("SharedCredsLoad",
+                                               fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
+                               },
+                               defaults.RemoteCredProvider(*cfg, handlers),
+                       },
+               })
+       }
+       if err != nil {
+               return nil, err
+       }
+
+       if len(sharedCfg.RoleARN) > 0 {
+               cfgCp := *cfg
+               cfgCp.Credentials = creds
+               return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
+       }
+
+       return creds, nil
+}
+
+// valid credential source values
+const (
+       credSourceEc2Metadata  = "Ec2InstanceMetadata"
+       credSourceEnvironment  = "Environment"
+       credSourceECSContainer = "EcsContainer"
+)
+
+func resolveCredsFromSource(cfg *aws.Config,
+       envCfg envConfig, sharedCfg sharedConfig,
+       handlers request.Handlers,
+       sessOpts Options,
+) (creds *credentials.Credentials, err error) {
+
+       switch sharedCfg.CredentialSource {
+       case credSourceEc2Metadata:
+               p := defaults.RemoteCredProvider(*cfg, handlers)
+               creds = credentials.NewCredentials(p)
+
+       case credSourceEnvironment:
+               creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
+
+       case credSourceECSContainer:
+               if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
+                       return nil, ErrSharedConfigECSContainerEnvVarEmpty
+               }
+
+               p := defaults.RemoteCredProvider(*cfg, handlers)
+               creds = credentials.NewCredentials(p)
+
+       default:
+               return nil, ErrSharedConfigInvalidCredSource
+       }
+
+       return creds, nil
+}
+
+func credsFromAssumeRole(cfg aws.Config,
+       handlers request.Handlers,
+       sharedCfg sharedConfig,
+       sessOpts Options,
+) (*credentials.Credentials, error) {
+
+       if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil {
+               // AssumeRole Token provider is required if doing Assume Role
+               // with MFA.
+               return nil, AssumeRoleTokenProviderNotSetError{}
+       }
+
+       return stscreds.NewCredentials(
+               &Session{
+                       Config:   &cfg,
+                       Handlers: handlers.Copy(),
+               },
+               sharedCfg.RoleARN,
+               func(opt *stscreds.AssumeRoleProvider) {
+                       opt.RoleSessionName = sharedCfg.RoleSessionName
+                       opt.Duration = sessOpts.AssumeRoleDuration
+
+                       // Assume role with external ID
+                       if len(sharedCfg.ExternalID) > 0 {
+                               opt.ExternalID = aws.String(sharedCfg.ExternalID)
+                       }
+
+                       // Assume role with MFA
+                       if len(sharedCfg.MFASerial) > 0 {
+                               opt.SerialNumber = aws.String(sharedCfg.MFASerial)
+                               opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+                       }
+               },
+       ), nil
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session when the MFAToken option is not set and the shared config is
+// configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+       return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+       return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+       return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+       return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
+type credProviderError struct {
+       Err error
+}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+       return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+       return true
+}
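The resolution order is now: explicit profile, static environment keys, web identity token from the environment, then the shared-profile chain. A sketch of the web-identity path driven purely by environment variables; the token path and ARN are placeholders:

    package main

    import (
        "fmt"
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        os.Setenv("AWS_WEB_IDENTITY_TOKEN_FILE", "/var/run/secrets/token") // placeholder
        os.Setenv("AWS_ROLE_ARN", "arn:aws:iam::123456789012:role/demo")   // placeholder
        os.Setenv("AWS_ROLE_SESSION_NAME", "demo-session")                 // optional

        // Omitting AWS_ROLE_ARN here would fail with WebIdentityEmptyRoleARNErr.
        sess, err := session.NewSession()
        if err != nil {
            panic(err)
        }
        // STS AssumeRoleWithWebIdentity is called lazily on first use.
        fmt.Printf("%T\n", sess.Config.Credentials)
    }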
index e3959b959ef936f6b39d2fa5ee3fa31ebe840609..3a998d5bd626a36d2693859a14798fabee59296b 100644 (file)
@@ -102,18 +102,38 @@ type envConfig struct {
        CSMEnabled  bool
        CSMPort     string
        CSMClientID string
+       CSMHost     string
 
-       enableEndpointDiscovery string
        // Enables endpoint discovery via environment variables.
        //
        //      AWS_ENABLE_ENDPOINT_DISCOVERY=true
        EnableEndpointDiscovery *bool
+       enableEndpointDiscovery string
+
+       // Specifies the WebIdentity token the SDK should use to assume a role
+       // with.
+       //
+       //  AWS_WEB_IDENTITY_TOKEN_FILE=file_path
+       WebIdentityTokenFilePath string
+
+       // Specifies the IAM role ARN to use when assuming a role.
+       //
+       //  AWS_ROLE_ARN=role_arn
+       RoleARN string
+
+       // Specifies the IAM role session name to use when assuming a role.
+       //
+       //  AWS_ROLE_SESSION_NAME=session_name
+       RoleSessionName string
 }
 
 var (
        csmEnabledEnvKey = []string{
                "AWS_CSM_ENABLED",
        }
+       csmHostEnvKey = []string{
+               "AWS_CSM_HOST",
+       }
        csmPortEnvKey = []string{
                "AWS_CSM_PORT",
        }
@@ -150,6 +170,15 @@ var (
        sharedConfigFileEnvKey = []string{
                "AWS_CONFIG_FILE",
        }
+       webIdentityTokenFilePathEnvKey = []string{
+               "AWS_WEB_IDENTITY_TOKEN_FILE",
+       }
+       roleARNEnvKey = []string{
+               "AWS_ROLE_ARN",
+       }
+       roleSessionNameEnvKey = []string{
+               "AWS_ROLE_SESSION_NAME",
+       }
 )
 
 // loadEnvConfig retrieves the SDK's environment configuration.
@@ -178,23 +207,31 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
 
        cfg.EnableSharedConfig = enableSharedConfig
 
-       setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
-       setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
-       setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
+       // Static environment credentials
+       var creds credentials.Value
+       setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey)
+       setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey)
+       setFromEnvVal(&creds.SessionToken, credSessionEnvKey)
+       if creds.HasKeys() {
+               // Require logical grouping of credentials
+               creds.ProviderName = EnvProviderName
+               cfg.Creds = creds
+       }
+
+       // Role Metadata
+       setFromEnvVal(&cfg.RoleARN, roleARNEnvKey)
+       setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey)
+
+       // Web identity environment variables
+       setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey)
 
        // CSM environment variables
        setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
+       setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
        setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
        setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
        cfg.CSMEnabled = len(cfg.csmEnabled) > 0
 
-       // Require logical grouping of credentials
-       if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
-               cfg.Creds = credentials.Value{}
-       } else {
-               cfg.Creds.ProviderName = EnvProviderName
-       }
-
        regionKeys := regionEnvKeys
        profileKeys := profileEnvKeys
        if !cfg.EnableSharedConfig {
index be4b5f077722aedebfe91e9ca1de38a8bd31bf03..3a28da5a8a57176cb72e169db1ad49aa5a6e3551 100644 (file)
@@ -8,19 +8,17 @@ import (
        "io/ioutil"
        "net/http"
        "os"
+       "time"
 
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/client"
        "github.com/aws/aws-sdk-go/aws/corehandlers"
        "github.com/aws/aws-sdk-go/aws/credentials"
-       "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
-       "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/csm"
        "github.com/aws/aws-sdk-go/aws/defaults"
        "github.com/aws/aws-sdk-go/aws/endpoints"
        "github.com/aws/aws-sdk-go/aws/request"
-       "github.com/aws/aws-sdk-go/internal/shareddefaults"
 )
 
 const (
@@ -107,7 +105,15 @@ func New(cfgs ...*aws.Config) *Session {
 
        s := deprecatedNewSession(cfgs...)
        if envCfg.CSMEnabled {
-               enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+               err := enableCSM(&s.Handlers, envCfg.CSMClientID,
+                       envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
+               if err != nil {
+                       err = fmt.Errorf("failed to enable CSM, %v", err)
+                       s.Config.Logger.Log("ERROR:", err.Error())
+                       s.Handlers.Validate.PushBack(func(r *request.Request) {
+                               r.Error = err
+                       })
+               }
        }
 
        return s
@@ -210,6 +216,12 @@ type Options struct {
        // the config enables assume role with MFA via the mfa_serial field.
        AssumeRoleTokenProvider func() (string, error)
 
+       // When the SDK's shared config is configured to assume a role this option
+       // may be provided to set the expiry duration of the STS credentials.
+       // Defaults to 15 minutes if not set as documented in the
+       // stscreds.AssumeRoleProvider.
+       AssumeRoleDuration time.Duration
+
        // Reader for a custom Certificate Authority (CA) bundle in PEM format that
        // the SDK will use instead of the default system's root CA bundle. Use this
        // only if you want to replace the CA bundle the SDK uses for TLS requests.
@@ -224,6 +236,12 @@ type Options struct {
        // to also enable this feature. CustomCABundle session option field has priority
        // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
        CustomCABundle io.Reader
+
+       // The handlers that the session and all API clients will be created with.
+       // This must be a complete set of handlers. Use the defaults.Handlers()
+       // function to initialize this value before changing the handlers to be
+       // used by the SDK.
+       Handlers request.Handlers
 }
 
 // NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@@ -329,27 +347,36 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
        return s
 }
 
-func enableCSM(handlers *request.Handlers, clientID string, port string, logger aws.Logger) {
-       logger.Log("Enabling CSM")
-       if len(port) == 0 {
-               port = csm.DefaultPort
+func enableCSM(handlers *request.Handlers,
+       clientID, host, port string,
+       logger aws.Logger,
+) error {
+       if logger != nil {
+               logger.Log("Enabling CSM")
        }
 
-       r, err := csm.Start(clientID, "127.0.0.1:"+port)
+       r, err := csm.Start(clientID, csm.AddressWithDefaults(host, port))
        if err != nil {
-               return
+               return err
        }
        r.InjectHandlers(handlers)
+
+       return nil
 }
 
 func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
        cfg := defaults.Config()
-       handlers := defaults.Handlers()
+
+       handlers := opts.Handlers
+       if handlers.IsEmpty() {
+               handlers = defaults.Handlers()
+       }
 
        // Get a merged version of the user provided config to determine if
        // credentials were set.
        userCfg := &aws.Config{}
        userCfg.MergeIn(cfgs...)
+       cfg.MergeIn(userCfg)
 
        // Ordered config files will be loaded in with later files overwriting
        // previous config file values.
@@ -366,9 +393,11 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
        }
 
        // Load additional config from file(s)
-       sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+       sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
        if err != nil {
-               return nil, err
+               if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
+                       return nil, err
+               }
        }
 
        if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
@@ -382,7 +411,11 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 
        initHandlers(s)
        if envCfg.CSMEnabled {
-               enableCSM(&s.Handlers, envCfg.CSMClientID, envCfg.CSMPort, s.Config.Logger)
+               err := enableCSM(&s.Handlers, envCfg.CSMClientID,
+                       envCfg.CSMHost, envCfg.CSMPort, s.Config.Logger)
+               if err != nil {
+                       return nil, err
+               }
        }
 
        // Setup HTTP client with custom cert bundle if enabled
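AWS_CSM_HOST joins the existing enable/port variables, with csm.AddressWithDefaults (added in the csm package elsewhere in this change) building the listener address. A sketch of enabling client-side metrics through the environment; note that a reporter that fails to start is now a hard error here:

    package main

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        os.Setenv("AWS_CSM_ENABLED", "true")
        os.Setenv("AWS_CSM_HOST", "127.0.0.1") // new; the host was fixed before
        os.Setenv("AWS_CSM_PORT", "31000")     // defaults apply when unset

        sess, err := session.NewSession()
        if err != nil {
            panic(err) // e.g. the CSM reporter could not be started
        }
        _ = sess
    }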
@@ -443,9 +476,11 @@ func loadCertPool(r io.Reader) (*x509.CertPool, error) {
        return p, nil
 }
 
-func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
-       // Merge in user provided configuration
-       cfg.MergeIn(userCfg)
+func mergeConfigSrcs(cfg, userCfg *aws.Config,
+       envCfg envConfig, sharedCfg sharedConfig,
+       handlers request.Handlers,
+       sessOpts Options,
+) error {
 
        // Region if not already set by user
        if len(aws.StringValue(cfg.Region)) == 0 {
@@ -464,164 +499,19 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
                }
        }
 
-       // Configure credentials if not already set
+       // Configure credentials if not already set by the user when creating the
+       // Session.
        if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
-
-               // inspect the profile to see if a credential source has been specified.
-               if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.CredentialSource) > 0 {
-
-                       // if both credential_source and source_profile have been set, return an error
-                       // as this is undefined behavior.
-                       if len(sharedCfg.AssumeRole.SourceProfile) > 0 {
-                               return ErrSharedConfigSourceCollision
-                       }
-
-                       // valid credential source values
-                       const (
-                               credSourceEc2Metadata  = "Ec2InstanceMetadata"
-                               credSourceEnvironment  = "Environment"
-                               credSourceECSContainer = "EcsContainer"
-                       )
-
-                       switch sharedCfg.AssumeRole.CredentialSource {
-                       case credSourceEc2Metadata:
-                               cfgCp := *cfg
-                               p := defaults.RemoteCredProvider(cfgCp, handlers)
-                               cfgCp.Credentials = credentials.NewCredentials(p)
-
-                               if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
-                                       // AssumeRole Token provider is required if doing Assume Role
-                                       // with MFA.
-                                       return AssumeRoleTokenProviderNotSetError{}
-                               }
-
-                               cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
-                       case credSourceEnvironment:
-                               cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
-                                       envCfg.Creds,
-                               )
-                       case credSourceECSContainer:
-                               if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
-                                       return ErrSharedConfigECSContainerEnvVarEmpty
-                               }
-
-                               cfgCp := *cfg
-                               p := defaults.RemoteCredProvider(cfgCp, handlers)
-                               creds := credentials.NewCredentials(p)
-
-                               cfg.Credentials = creds
-                       default:
-                               return ErrSharedConfigInvalidCredSource
-                       }
-
-                       return nil
-               }
-
-               if len(envCfg.Creds.AccessKeyID) > 0 {
-                       cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
-                               envCfg.Creds,
-                       )
-               } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
-                       cfgCp := *cfg
-                       cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
-                               sharedCfg.AssumeRoleSource.Creds,
-                       )
-
-                       if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
-                               // AssumeRole Token provider is required if doing Assume Role
-                               // with MFA.
-                               return AssumeRoleTokenProviderNotSetError{}
-                       }
-
-                       cfg.Credentials = assumeRoleCredentials(cfgCp, handlers, sharedCfg, sessOpts)
-               } else if len(sharedCfg.Creds.AccessKeyID) > 0 {
-                       cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
-                               sharedCfg.Creds,
-                       )
-               } else if len(sharedCfg.CredentialProcess) > 0 {
-                       cfg.Credentials = processcreds.NewCredentials(
-                               sharedCfg.CredentialProcess,
-                       )
-               } else {
-                       // Fallback to default credentials provider, include mock errors
-                       // for the credential chain so user can identify why credentials
-                       // failed to be retrieved.
-                       cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
-                               VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
-                               Providers: []credentials.Provider{
-                                       &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
-                                       &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
-                                       defaults.RemoteCredProvider(*cfg, handlers),
-                               },
-                       })
+               creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
+               if err != nil {
+                       return err
                }
+               cfg.Credentials = creds
        }
 
        return nil
 }
 
-func assumeRoleCredentials(cfg aws.Config, handlers request.Handlers, sharedCfg sharedConfig, sessOpts Options) *credentials.Credentials {
-       return stscreds.NewCredentials(
-               &Session{
-                       Config:   &cfg,
-                       Handlers: handlers.Copy(),
-               },
-               sharedCfg.AssumeRole.RoleARN,
-               func(opt *stscreds.AssumeRoleProvider) {
-                       opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
-
-                       // Assume role with external ID
-                       if len(sharedCfg.AssumeRole.ExternalID) > 0 {
-                               opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
-                       }
-
-                       // Assume role with MFA
-                       if len(sharedCfg.AssumeRole.MFASerial) > 0 {
-                               opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
-                               opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
-                       }
-               },
-       )
-}
-
-// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the
-// MFAToken option is not set when shared config is configured load assume a
-// role with an MFA token.
-type AssumeRoleTokenProviderNotSetError struct{}
-
-// Code is the short id of the error.
-func (e AssumeRoleTokenProviderNotSetError) Code() string {
-       return "AssumeRoleTokenProviderNotSetError"
-}
-
-// Message is the description of the error
-func (e AssumeRoleTokenProviderNotSetError) Message() string {
-       return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
-       return nil
-}
-
-// Error satisfies the error interface.
-func (e AssumeRoleTokenProviderNotSetError) Error() string {
-       return awserr.SprintError(e.Code(), e.Message(), "", nil)
-}
-
-type credProviderError struct {
-       Err error
-}
-
-var emptyCreds = credentials.Value{}
-
-func (c credProviderError) Retrieve() (credentials.Value, error) {
-       return credentials.Value{}, c.Err
-}
-func (c credProviderError) IsExpired() bool {
-       return true
-}
-
 func initHandlers(s *Session) {
        // Add the Validate parameter handler if it is not disabled.
        s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
index 7cb44021b3fce230726f0ddb6d98acb52a42ae5a..5170b4982e068a714a9a4410b0761caee1f02f8c 100644 (file)
@@ -5,7 +5,6 @@ import (
 
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/credentials"
-
        "github.com/aws/aws-sdk-go/internal/ini"
 )
 
@@ -28,8 +27,12 @@ const (
 
        // endpoint discovery group
        enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
+
        // External Credential Process
-       credentialProcessKey = `credential_process`
+       credentialProcessKey = `credential_process` // optional
+
+       // Web Identity Token File
+       webIdentityTokenFileKey = `web_identity_token_file` // optional
 
        // DefaultSharedConfigProfile is the default profile to be used when
        // loading configuration from the config files if another profile name
@@ -37,36 +40,33 @@ const (
        DefaultSharedConfigProfile = `default`
 )
 
-type assumeRoleConfig struct {
-       RoleARN          string
-       SourceProfile    string
-       CredentialSource string
-       ExternalID       string
-       MFASerial        string
-       RoleSessionName  string
-}
-
 // sharedConfig represents the configuration fields of the SDK config files.
 type sharedConfig struct {
-       // Credentials values from the config file. Both aws_access_key_id
-       // and aws_secret_access_key must be provided together in the same file
-       // to be considered valid. The values will be ignored if not a complete group.
-       // aws_session_token is an optional field that can be provided if both of the
-       // other two fields are also provided.
+       // Credentials values from the config file. Both aws_access_key_id and
+       // aws_secret_access_key must be provided together in the same file to be
+       // considered valid. The values will be ignored if not a complete group.
+       // aws_session_token is an optional field that can be provided if both of
+       // the other two fields are also provided.
        //
        //      aws_access_key_id
        //      aws_secret_access_key
        //      aws_session_token
        Creds credentials.Value
 
-       AssumeRole       assumeRoleConfig
-       AssumeRoleSource *sharedConfig
+       CredentialSource     string
+       CredentialProcess    string
+       WebIdentityTokenFile string
+
+       RoleARN         string
+       RoleSessionName string
+       ExternalID      string
+       MFASerial       string
 
-       // An external process to request credentials
-       CredentialProcess string
+       SourceProfileName string
+       SourceProfile     *sharedConfig
 
-       // Region is the region the SDK should use for looking up AWS service endpoints
-       // and signing requests.
+       // Region is the region the SDK should use for looking up AWS service
+       // endpoints and signing requests.
        //
        //      region
        Region string
@@ -83,17 +83,18 @@ type sharedConfigFile struct {
        IniData  ini.Sections
 }
 
-// loadSharedConfig retrieves the configuration from the list of files
-// using the profile provided. The order the files are listed will determine
+// loadSharedConfig retrieves the configuration from the list of files using
+// the profile provided. The order the files are listed will determine
 // precedence. Values in subsequent files will overwrite values defined in
 // earlier files.
 //
 // For example, given two files A and B. Both define credentials. If the order
-// of the files are A then B, B's credential values will be used instead of A's.
+// of the files are A then B, B's credential values will be used instead of
+// A's.
 //
 // See sharedConfig.setFromFile for information how the config files
 // will be loaded.
-func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
        if len(profile) == 0 {
                profile = DefaultSharedConfigProfile
        }
@@ -104,16 +105,11 @@ func loadSharedConfig(profile string, filenames []string) (sharedConfig, error)
        }
 
        cfg := sharedConfig{}
-       if err = cfg.setFromIniFiles(profile, files); err != nil {
+       profiles := map[string]struct{}{}
+       if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
                return sharedConfig{}, err
        }
 
-       if len(cfg.AssumeRole.SourceProfile) > 0 {
-               if err := cfg.setAssumeRoleSource(profile, files); err != nil {
-                       return sharedConfig{}, err
-               }
-       }
-
        return cfg, nil
 }
 
@@ -137,60 +133,88 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
        return files, nil
 }
 
-func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
-       var assumeRoleSrc sharedConfig
-
-       if len(cfg.AssumeRole.CredentialSource) > 0 {
-               // setAssumeRoleSource is only called when source_profile is found.
-               // If both source_profile and credential_source are set, then
-               // ErrSharedConfigSourceCollision will be returned
-               return ErrSharedConfigSourceCollision
+func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
+       // Apply each file in order, skipping files where the profile is not defined.
+       var skippedFiles int
+       var profileNotFoundErr error
+       for _, f := range files {
+               if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
+                       if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+                               // Ignore profiles not defined in individual files.
+                               profileNotFoundErr = err
+                               skippedFiles++
+                               continue
+                       }
+                       return err
+               }
+       }
+       if skippedFiles == len(files) {
+               // If all files were skipped because the profile is not found, return
+               // the original profile not found error.
+               return profileNotFoundErr
        }
 
-       // Multiple level assume role chains are not support
-       if cfg.AssumeRole.SourceProfile == origProfile {
-               assumeRoleSrc = *cfg
-               assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+       if _, ok := profiles[profile]; ok {
+               // If this is the second instance of the profile, the Assume Role
+               // options must be cleared because they are only valid for the
+               // first reference of a profile. The self-linked instance of the
+               // profile only has credential provider options.
+               cfg.clearAssumeRoleOptions()
        } else {
-               err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
-               if err != nil {
+               // The first time a profile is seen it must either be an assume role
+               // or contain credentials. Assert that if the credential type requires
+               // a role ARN, the ARN is also set.
+               if err := cfg.validateCredentialsRequireARN(profile); err != nil {
                        return err
                }
        }
+       profiles[profile] = struct{}{}
 
-       if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
-               return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+       if err := cfg.validateCredentialType(); err != nil {
+               return err
        }
 
-       cfg.AssumeRoleSource = &assumeRoleSrc
-
-       return nil
-}
+       // Link source profiles for assume roles
+       if len(cfg.SourceProfileName) != 0 {
+               // A profile linked via source_profile ignores credential provider
+               // options; the source profile must provide the credentials.
+               cfg.clearCredentialOptions()
 
-func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
-       // Trim files from the list that don't exist.
-       for _, f := range files {
-               if err := cfg.setFromIniFile(profile, f); err != nil {
+               srcCfg := &sharedConfig{}
+               err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
+               if err != nil {
+                       // SourceProfile that doesn't exist is an error in configuration.
                        if _, ok := err.(SharedConfigProfileNotExistsError); ok {
-                               // Ignore proviles missings
-                               continue
+                               err = SharedConfigAssumeRoleError{
+                                       RoleARN:       cfg.RoleARN,
+                                       SourceProfile: cfg.SourceProfileName,
+                               }
                        }
                        return err
                }
+
+               if !srcCfg.hasCredentials() {
+                       return SharedConfigAssumeRoleError{
+                               RoleARN:       cfg.RoleARN,
+                               SourceProfile: cfg.SourceProfileName,
+                       }
+               }
+
+               cfg.SourceProfile = srcCfg
        }
 
        return nil
 }
 
-// setFromFile loads the configuration from the file using
-// the profile provided. A sharedConfig pointer type value is used so that
-// multiple config file loadings can be chained.
+// setFromFile loads the configuration from the file using the profile
+// provided. A sharedConfig pointer type value is used so that multiple config
+// file loadings can be chained.
 //
 // Only loads complete logically grouped values, and will not set fields in cfg
-// for incomplete grouped values in the config. Such as credentials. For example
-// if a config file only includes aws_access_key_id but no aws_secret_access_key
-// the aws_access_key_id will be ignored.
-func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+// for incomplete grouped values in the config. Such as credentials. For
+// example if a config file only includes aws_access_key_id but no
+// aws_secret_access_key the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
        section, ok := file.IniData.GetSection(profile)
        if !ok {
                // Fall back to the alternate profile name: profile <name>
@@ -200,42 +224,30 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e
                }
        }
 
-       // Shared Credentials
-       akid := section.String(accessKeyIDKey)
-       secret := section.String(secretAccessKey)
-       if len(akid) > 0 && len(secret) > 0 {
-               cfg.Creds = credentials.Value{
-                       AccessKeyID:     akid,
-                       SecretAccessKey: secret,
-                       SessionToken:    section.String(sessionTokenKey),
-                       ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
-               }
-       }
+       if exOpts {
+               // Assume Role Parameters
+               updateString(&cfg.RoleARN, section, roleArnKey)
+               updateString(&cfg.ExternalID, section, externalIDKey)
+               updateString(&cfg.MFASerial, section, mfaSerialKey)
+               updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
+               updateString(&cfg.SourceProfileName, section, sourceProfileKey)
+               updateString(&cfg.CredentialSource, section, credentialSourceKey)
 
-       // Assume Role
-       roleArn := section.String(roleArnKey)
-       srcProfile := section.String(sourceProfileKey)
-       credentialSource := section.String(credentialSourceKey)
-       hasSource := len(srcProfile) > 0 || len(credentialSource) > 0
-       if len(roleArn) > 0 && hasSource {
-               cfg.AssumeRole = assumeRoleConfig{
-                       RoleARN:          roleArn,
-                       SourceProfile:    srcProfile,
-                       CredentialSource: credentialSource,
-                       ExternalID:       section.String(externalIDKey),
-                       MFASerial:        section.String(mfaSerialKey),
-                       RoleSessionName:  section.String(roleSessionNameKey),
-               }
+               updateString(&cfg.Region, section, regionKey)
        }
 
-       // `credential_process`
-       if credProc := section.String(credentialProcessKey); len(credProc) > 0 {
-               cfg.CredentialProcess = credProc
-       }
+       updateString(&cfg.CredentialProcess, section, credentialProcessKey)
+       updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey)
 
-       // Region
-       if v := section.String(regionKey); len(v) > 0 {
-               cfg.Region = v
+       // Shared Credentials
+       creds := credentials.Value{
+               AccessKeyID:     section.String(accessKeyIDKey),
+               SecretAccessKey: section.String(secretAccessKey),
+               SessionToken:    section.String(sessionTokenKey),
+               ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+       }
+       if creds.HasKeys() {
+               cfg.Creds = creds
        }
 
        // Endpoint discovery
@@ -247,6 +259,95 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) e
        return nil
 }
 
+func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
+       var credSource string
+
+       switch {
+       case len(cfg.SourceProfileName) != 0:
+               credSource = sourceProfileKey
+       case len(cfg.CredentialSource) != 0:
+               credSource = credentialSourceKey
+       case len(cfg.WebIdentityTokenFile) != 0:
+               credSource = webIdentityTokenFileKey
+       }
+
+       if len(credSource) != 0 && len(cfg.RoleARN) == 0 {
+               return CredentialRequiresARNError{
+                       Type:    credSource,
+                       Profile: profile,
+               }
+       }
+
+       return nil
+}
+
+func (cfg *sharedConfig) validateCredentialType() error {
+       // Only one or no credential type can be defined.
+       if !oneOrNone(
+               len(cfg.SourceProfileName) != 0,
+               len(cfg.CredentialSource) != 0,
+               len(cfg.CredentialProcess) != 0,
+               len(cfg.WebIdentityTokenFile) != 0,
+       ) {
+               return ErrSharedConfigSourceCollision
+       }
+
+       return nil
+}
+
+func (cfg *sharedConfig) hasCredentials() bool {
+       switch {
+       case len(cfg.SourceProfileName) != 0:
+       case len(cfg.CredentialSource) != 0:
+       case len(cfg.CredentialProcess) != 0:
+       case len(cfg.WebIdentityTokenFile) != 0:
+       case cfg.Creds.HasKeys():
+       default:
+               return false
+       }
+
+       return true
+}
+
+func (cfg *sharedConfig) clearCredentialOptions() {
+       cfg.CredentialSource = ""
+       cfg.CredentialProcess = ""
+       cfg.WebIdentityTokenFile = ""
+       cfg.Creds = credentials.Value{}
+}
+
+func (cfg *sharedConfig) clearAssumeRoleOptions() {
+       cfg.RoleARN = ""
+       cfg.ExternalID = ""
+       cfg.MFASerial = ""
+       cfg.RoleSessionName = ""
+       cfg.SourceProfileName = ""
+}
+
+func oneOrNone(bs ...bool) bool {
+       var count int
+
+       for _, b := range bs {
+               if b {
+                       count++
+                       if count > 1 {
+                               return false
+                       }
+               }
+       }
+
+       return true
+}
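
oneOrNone reports whether at most one of its flags is set; validateCredentialType uses it to reject profiles that define more than one credential source. A runnable check of the expected behavior (the helper is copied from the hunk above):

    package main

    import "fmt"

    // oneOrNone reports whether at most one of bs is true.
    func oneOrNone(bs ...bool) bool {
            var count int
            for _, b := range bs {
                    if b {
                            count++
                            if count > 1 {
                                    return false
                            }
                    }
            }
            return true
    }

    func main() {
            fmt.Println(oneOrNone())           // true: no credential type set
            fmt.Println(oneOrNone(true))       // true: exactly one type set
            fmt.Println(oneOrNone(true, true)) // false: the ErrSharedConfigSourceCollision case
    }
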
+
+// updateString updates dst with the value of the section key, but only if the
+// key is present in the section.
+func updateString(dst *string, section ini.Section, key string) {
+       if !section.Has(key) {
+               return
+       }
+       *dst = section.String(key)
+}
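
The point of updateString is that dst is left untouched when the key is absent, which is what lets values loaded from an earlier config file survive later files that omit the key. An analogous sketch using a plain map instead of the SDK's internal ini.Section type:

    package main

    import "fmt"

    // updateFromMap mirrors updateString's contract: dst is only overwritten
    // when the key is present in the section.
    func updateFromMap(dst *string, section map[string]string, key string) {
            if v, ok := section[key]; ok {
                    *dst = v
            }
    }

    func main() {
            region := "us-west-2" // set while loading an earlier file
            updateFromMap(&region, map[string]string{}, "region")
            fmt.Println(region) // still "us-west-2"
    }
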
+
 // SharedConfigLoadError is an error for the shared config file failed to load.
 type SharedConfigLoadError struct {
        Filename string
@@ -304,7 +405,8 @@ func (e SharedConfigProfileNotExistsError) Error() string {
 // profile contains assume role information, but that information is invalid
 // or not complete.
 type SharedConfigAssumeRoleError struct {
-       RoleARN string
+       RoleARN       string
+       SourceProfile string
 }
 
 // Code is the short id of the error.
@@ -314,8 +416,10 @@ func (e SharedConfigAssumeRoleError) Code() string {
 
 // Message is the description of the error
 func (e SharedConfigAssumeRoleError) Message() string {
-       return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
-               e.RoleARN)
+       return fmt.Sprintf(
+               "failed to load assume role for %s, source profile %s has no shared credentials",
+               e.RoleARN, e.SourceProfile,
+       )
 }
 
 // OrigErr is the underlying error that caused the failure.
@@ -327,3 +431,36 @@ func (e SharedConfigAssumeRoleError) OrigErr() error {
 func (e SharedConfigAssumeRoleError) Error() string {
        return awserr.SprintError(e.Code(), e.Message(), "", nil)
 }
+
+// CredentialRequiresARNError provides the error for shared config credentials
+// that are incorrectly configured in the shared config or credentials file.
+type CredentialRequiresARNError struct {
+       // Type is the type of credentials that were configured.
+       Type string
+
+       // Profile name the credentials were in.
+       Profile string
+}
+
+// Code is the short id of the error.
+func (e CredentialRequiresARNError) Code() string {
+       return "CredentialRequiresARNError"
+}
+
+// Message is the description of the error
+func (e CredentialRequiresARNError) Message() string {
+       return fmt.Sprintf(
+               "credential type %s requires role_arn, profile %s",
+               e.Type, e.Profile,
+       )
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e CredentialRequiresARNError) OrigErr() error {
+       return nil
+}
+
+// Error satisfies the error interface.
+func (e CredentialRequiresARNError) Error() string {
+       return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
index 523db79f8d2acd018359c66615070684f7bb2244..8104793aa5bc4564c1e1a915b607c91534cc405e 100644 (file)
@@ -687,7 +687,11 @@ func (ctx *signingCtx) buildBodyDigest() error {
                        if !aws.IsReaderSeekable(ctx.Body) {
                                return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
                        }
-                       hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
+                       hashBytes, err := makeSha256Reader(ctx.Body)
+                       if err != nil {
+                               return err
+                       }
+                       hash = hex.EncodeToString(hashBytes)
                }
 
                if includeSHA256Header {
@@ -734,10 +738,16 @@ func makeSha256(data []byte) []byte {
        return hash.Sum(nil)
 }
 
-func makeSha256Reader(reader io.ReadSeeker) []byte {
+func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
        hash := sha256.New()
-       start, _ := reader.Seek(0, sdkio.SeekCurrent)
-       defer reader.Seek(start, sdkio.SeekStart)
+       start, err := reader.Seek(0, sdkio.SeekCurrent)
+       if err != nil {
+               return nil, err
+       }
+       defer func() {
+               // Ensure the error is returned if we are unable to seek back
+               // to the start of the payload.
+               _, err = reader.Seek(start, sdkio.SeekStart)
+       }()
 
        // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
        // smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
@@ -748,7 +758,7 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
                io.CopyN(hash, reader, size)
        }
 
-       return hash.Sum(nil)
+       return hash.Sum(nil), nil
 }
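
The named hashBytes/err returns matter here: the deferred closure assigns to err, so a failure while seeking back to the start of the payload is surfaced to the caller even though the function body already returned. A minimal sketch of the same pattern:

    package main

    import (
            "errors"
            "fmt"
    )

    // work returns nil from its body, but the deferred closure overwrites the
    // named return value, so the caller sees the cleanup error.
    func work() (err error) {
            defer func() {
                    err = errors.New("cleanup failed")
            }()
            return nil
    }

    func main() {
            fmt.Println(work()) // cleanup failed
    }
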
 
 const doubleSpace = "  "
index 8b6f23425a66da2926e6a46aa8f6b77083e60f17..455091540fdf2da2595af4886a1482d14f89f0b6 100644 (file)
@@ -7,13 +7,18 @@ import (
        "github.com/aws/aws-sdk-go/internal/sdkio"
 )
 
-// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should
-// only be used with an io.Reader that is also an io.Seeker. Doing so may
-// cause request signature errors, or request body's not sent for GET, HEAD
-// and DELETE HTTP methods.
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser. It
+// allows the SDK to accept an io.Reader that is not also an io.Seeker for
+// unsigned streaming payload API operations.
 //
-// Deprecated: Should only be used with io.ReadSeeker. If using for
-// S3 PutObject to stream content use s3manager.Uploader instead.
+// A ReadSeekCloser wrapping a nonseekable io.Reader used in an API
+// operation's input will prevent that operation from being retried in the
+// case of network errors, and will cause operation requests to fail if the
+// operation requires payload signing.
+//
+// Note: If using S3 PutObject to stream an object upload, the SDK's S3 Upload
+// manager (s3manager.Uploader) provides support for streaming with the
+// ability to retry network errors.
 func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
        return ReaderSeekerCloser{r}
 }
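
For reference, a minimal sketch of wrapping a nonseekable stream with the function above; os.Stdin stands in for any streaming source:

    package main

    import (
            "fmt"
            "os"

            "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
            // os.Stdin is not seekable; ReadSeekCloser adapts it so it can be
            // used as an API operation's Body. Per the doc above, such requests
            // cannot be retried and fail if payload signing is required.
            body := aws.ReadSeekCloser(os.Stdin)
            fmt.Printf("%T\n", body)
    }
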
@@ -43,7 +48,8 @@ func IsReaderSeekable(r io.Reader) bool {
 // Read reads from the reader up to size of p. The number of bytes read, and
 // error if it occurred will be returned.
 //
-// If the reader is not an io.Reader zero bytes read, and nil error will be returned.
+// If the reader is not an io.Reader, zero bytes will be read and a nil error
+// will be returned.
 //
 // Performs the same functionality as io.Reader Read
 func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
index 15ad9cfe463c0366ebc0b004f6c1d0f97d6473a4..23aae7d9eca4e7e1aa2aa774acce295f8a39e6e3 100644 (file)
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.19.18"
+const SDKVersion = "1.21.7"
index f99703372c4e2657160e9fead7a3056d3d7f8c21..e56dcee2f8e54e23345805e666f9463df536fb40 100644 (file)
@@ -304,7 +304,9 @@ loop:
                        stmt := newCommentStatement(tok)
                        stack.Push(stmt)
                default:
-                       return nil, NewParseError(fmt.Sprintf("invalid state with ASTKind %v and TokenType %v", k, tok))
+                       return nil, NewParseError(
+                               fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
+                                       k, tok.Type()))
                }
 
                if len(tokens) > 0 {
@@ -314,7 +316,7 @@ loop:
 
        // this occurs when a statement has not been completed
        if stack.top > 1 {
-               return nil, NewParseError(fmt.Sprintf("incomplete expression: %v", stack.container))
+               return nil, NewParseError("incomplete ini expression")
        }
 
        // returns a sublist which excludes the start symbol
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
new file mode 100644 (file)
index 0000000..864fb67
--- /dev/null
@@ -0,0 +1,296 @@
+// Package jsonutil provides JSON serialization of AWS requests and responses.
+package jsonutil
+
+import (
+       "bytes"
+       "encoding/base64"
+       "encoding/json"
+       "fmt"
+       "math"
+       "reflect"
+       "sort"
+       "strconv"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+var timeType = reflect.ValueOf(time.Time{}).Type()
+var byteSliceType = reflect.ValueOf([]byte{}).Type()
+
+// BuildJSON builds a JSON string for a given object v.
+func BuildJSON(v interface{}) ([]byte, error) {
+       var buf bytes.Buffer
+
+       err := buildAny(reflect.ValueOf(v), &buf, "")
+       return buf.Bytes(), err
+}
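
jsonutil lives under private/ and is not a stable public API, but a small sketch shows the shape BuildJSON expects: structures tagged the way the SDK's generated types are. The input type here is hypothetical:

    package main

    import (
            "fmt"

            "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
    )

    type input struct {
            _    struct{} `type:"structure"`
            Name *string  `locationName:"name" type:"string"`
    }

    func main() {
            n := "example"
            b, err := jsonutil.BuildJSON(&input{Name: &n})
            if err != nil {
                    panic(err)
            }
            fmt.Println(string(b)) // {"name":"example"}
    }
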
+
+func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+       origVal := value
+       value = reflect.Indirect(value)
+       if !value.IsValid() {
+               return nil
+       }
+
+       vtype := value.Type()
+
+       t := tag.Get("type")
+       if t == "" {
+               switch vtype.Kind() {
+               case reflect.Struct:
+                       // also it can't be a time object
+                       if value.Type() != timeType {
+                               t = "structure"
+                       }
+               case reflect.Slice:
+                       // also it can't be a byte slice
+                       if _, ok := value.Interface().([]byte); !ok {
+                               t = "list"
+                       }
+               case reflect.Map:
+                       // cannot be a JSONValue map
+                       if _, ok := value.Interface().(aws.JSONValue); !ok {
+                               t = "map"
+                       }
+               }
+       }
+
+       switch t {
+       case "structure":
+               if field, ok := vtype.FieldByName("_"); ok {
+                       tag = field.Tag
+               }
+               return buildStruct(value, buf, tag)
+       case "list":
+               return buildList(value, buf, tag)
+       case "map":
+               return buildMap(value, buf, tag)
+       default:
+               return buildScalar(origVal, buf, tag)
+       }
+}
+
+func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+       if !value.IsValid() {
+               return nil
+       }
+
+       // unwrap payloads
+       if payload := tag.Get("payload"); payload != "" {
+               field, _ := value.Type().FieldByName(payload)
+               tag = field.Tag
+               value = elemOf(value.FieldByName(payload))
+
+               if !value.IsValid() {
+                       return nil
+               }
+       }
+
+       buf.WriteByte('{')
+
+       t := value.Type()
+       first := true
+       for i := 0; i < t.NumField(); i++ {
+               member := value.Field(i)
+
+               // This allocates the most memory.
+               // Additionally, we cannot skip nil fields due to
+               // idempotency auto filling.
+               field := t.Field(i)
+
+               if field.PkgPath != "" {
+                       continue // ignore unexported fields
+               }
+               if field.Tag.Get("json") == "-" {
+                       continue
+               }
+               if field.Tag.Get("location") != "" {
+                       continue // ignore non-body elements
+               }
+               if field.Tag.Get("ignore") != "" {
+                       continue
+               }
+
+               if protocol.CanSetIdempotencyToken(member, field) {
+                       token := protocol.GetIdempotencyToken()
+                       member = reflect.ValueOf(&token)
+               }
+
+               if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
+                       continue // ignore unset fields
+               }
+
+               if first {
+                       first = false
+               } else {
+                       buf.WriteByte(',')
+               }
+
+               // figure out what this field is called
+               name := field.Name
+               if locName := field.Tag.Get("locationName"); locName != "" {
+                       name = locName
+               }
+
+               writeString(name, buf)
+               buf.WriteString(`:`)
+
+               err := buildAny(member, buf, field.Tag)
+               if err != nil {
+                       return err
+               }
+
+       }
+
+       buf.WriteString("}")
+
+       return nil
+}
+
+func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+       buf.WriteString("[")
+
+       for i := 0; i < value.Len(); i++ {
+               buildAny(value.Index(i), buf, "")
+
+               if i < value.Len()-1 {
+                       buf.WriteString(",")
+               }
+       }
+
+       buf.WriteString("]")
+
+       return nil
+}
+
+type sortedValues []reflect.Value
+
+func (sv sortedValues) Len() int           { return len(sv) }
+func (sv sortedValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
+func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }
+
+func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+       buf.WriteString("{")
+
+       sv := sortedValues(value.MapKeys())
+       sort.Sort(sv)
+
+       for i, k := range sv {
+               if i > 0 {
+                       buf.WriteByte(',')
+               }
+
+               writeString(k.String(), buf)
+               buf.WriteString(`:`)
+
+               buildAny(value.MapIndex(k), buf, "")
+       }
+
+       buf.WriteString("}")
+
+       return nil
+}
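
buildMap sorts the map keys before writing, so the emitted JSON is deterministic across runs. The same idea with just the standard library:

    package main

    import (
            "fmt"
            "reflect"
            "sort"
    )

    func main() {
            m := map[string]int{"b": 2, "a": 1}
            keys := reflect.ValueOf(m).MapKeys()
            sort.Slice(keys, func(i, j int) bool {
                    return keys[i].String() < keys[j].String()
            })
            for _, k := range keys {
                    fmt.Println(k.String()) // a, then b: stable output order
            }
    }
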
+
+func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
+       // prevents allocation on the heap.
+       scratch := [64]byte{}
+       switch value := reflect.Indirect(v); value.Kind() {
+       case reflect.String:
+               writeString(value.String(), buf)
+       case reflect.Bool:
+               if value.Bool() {
+                       buf.WriteString("true")
+               } else {
+                       buf.WriteString("false")
+               }
+       case reflect.Int64:
+               buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
+       case reflect.Float64:
+               f := value.Float()
+               if math.IsInf(f, 0) || math.IsNaN(f) {
+                       return &json.UnsupportedValueError{Value: v, Str: strconv.FormatFloat(f, 'f', -1, 64)}
+               }
+               buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
+       default:
+               switch converted := value.Interface().(type) {
+               case time.Time:
+                       format := tag.Get("timestampFormat")
+                       if len(format) == 0 {
+                               format = protocol.UnixTimeFormatName
+                       }
+
+                       ts := protocol.FormatTime(format, converted)
+                       if format != protocol.UnixTimeFormatName {
+                               ts = `"` + ts + `"`
+                       }
+
+                       buf.WriteString(ts)
+               case []byte:
+                       if !value.IsNil() {
+                               buf.WriteByte('"')
+                               if len(converted) < 1024 {
+                                       // for small buffers, using Encode directly is much faster.
+                                       dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
+                                       base64.StdEncoding.Encode(dst, converted)
+                                       buf.Write(dst)
+                               } else {
+                                       // for large buffers, avoid unnecessary extra temporary
+                                       // buffer space.
+                                       enc := base64.NewEncoder(base64.StdEncoding, buf)
+                                       enc.Write(converted)
+                                       enc.Close()
+                               }
+                               buf.WriteByte('"')
+                       }
+               case aws.JSONValue:
+                       str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
+                       if err != nil {
+                               return fmt.Errorf("unable to encode JSONValue, %v", err)
+                       }
+                       buf.WriteString(str)
+               default:
+                       return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
+               }
+       }
+       return nil
+}
+
+var hex = "0123456789abcdef"
+
+func writeString(s string, buf *bytes.Buffer) {
+       buf.WriteByte('"')
+       for i := 0; i < len(s); i++ {
+               if s[i] == '"' {
+                       buf.WriteString(`\"`)
+               } else if s[i] == '\\' {
+                       buf.WriteString(`\\`)
+               } else if s[i] == '\b' {
+                       buf.WriteString(`\b`)
+               } else if s[i] == '\f' {
+                       buf.WriteString(`\f`)
+               } else if s[i] == '\r' {
+                       buf.WriteString(`\r`)
+               } else if s[i] == '\t' {
+                       buf.WriteString(`\t`)
+               } else if s[i] == '\n' {
+                       buf.WriteString(`\n`)
+               } else if s[i] < 32 {
+                       buf.WriteString("\\u00")
+                       buf.WriteByte(hex[s[i]>>4])
+                       buf.WriteByte(hex[s[i]&0xF])
+               } else {
+                       buf.WriteByte(s[i])
+               }
+       }
+       buf.WriteByte('"')
+}
+
+// elemOf returns the reflection element of a value, dereferencing pointers
+// until a non-pointer value is reached.
+func elemOf(value reflect.Value) reflect.Value {
+       for value.Kind() == reflect.Ptr {
+               value = value.Elem()
+       }
+       return value
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
new file mode 100644 (file)
index 0000000..ea0da79
--- /dev/null
@@ -0,0 +1,250 @@
+package jsonutil
+
+import (
+       "bytes"
+       "encoding/base64"
+       "encoding/json"
+       "fmt"
+       "io"
+       "reflect"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// UnmarshalJSONError unmarshals the reader's JSON document into the passed-in
+// type. The value to unmarshal the JSON document into must be a pointer to
+// the type.
+func UnmarshalJSONError(v interface{}, stream io.Reader) error {
+       var errBuf bytes.Buffer
+       body := io.TeeReader(stream, &errBuf)
+
+       err := json.NewDecoder(body).Decode(v)
+       if err != nil {
+               msg := "failed decoding error message"
+               if err == io.EOF {
+                       msg = "error message missing"
+                       err = nil
+               }
+               return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
+       }
+
+       return nil
+}
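
The io.TeeReader here mirrors every byte the JSON decoder consumes into errBuf, so the raw, already-consumed payload can still be attached to the returned awserr.UnmarshalError. A minimal sketch of that capture, using a bogus payload:

    package main

    import (
            "bytes"
            "encoding/json"
            "fmt"
            "io"
            "strings"
    )

    func main() {
            var errBuf bytes.Buffer
            body := io.TeeReader(strings.NewReader("not-json"), &errBuf)

            var v interface{}
            err := json.NewDecoder(body).Decode(&v)
            // The decode fails, but errBuf still holds the raw bytes read.
            fmt.Println(err != nil, errBuf.String())
    }
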
+
+// UnmarshalJSON reads a stream and unmarshals the results in object v.
+func UnmarshalJSON(v interface{}, stream io.Reader) error {
+       var out interface{}
+
+       err := json.NewDecoder(stream).Decode(&out)
+       if err == io.EOF {
+               return nil
+       } else if err != nil {
+               return err
+       }
+
+       return unmarshalAny(reflect.ValueOf(v), out, "")
+}
+
+func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+       vtype := value.Type()
+       if vtype.Kind() == reflect.Ptr {
+               vtype = vtype.Elem() // check kind of actual element type
+       }
+
+       t := tag.Get("type")
+       if t == "" {
+               switch vtype.Kind() {
+               case reflect.Struct:
+                       // also it can't be a time object
+                       if _, ok := value.Interface().(*time.Time); !ok {
+                               t = "structure"
+                       }
+               case reflect.Slice:
+                       // also it can't be a byte slice
+                       if _, ok := value.Interface().([]byte); !ok {
+                               t = "list"
+                       }
+               case reflect.Map:
+                       // cannot be a JSONValue map
+                       if _, ok := value.Interface().(aws.JSONValue); !ok {
+                               t = "map"
+                       }
+               }
+       }
+
+       switch t {
+       case "structure":
+               if field, ok := vtype.FieldByName("_"); ok {
+                       tag = field.Tag
+               }
+               return unmarshalStruct(value, data, tag)
+       case "list":
+               return unmarshalList(value, data, tag)
+       case "map":
+               return unmarshalMap(value, data, tag)
+       default:
+               return unmarshalScalar(value, data, tag)
+       }
+}
+
+func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+       if data == nil {
+               return nil
+       }
+       mapData, ok := data.(map[string]interface{})
+       if !ok {
+               return fmt.Errorf("JSON value is not a structure (%#v)", data)
+       }
+
+       t := value.Type()
+       if value.Kind() == reflect.Ptr {
+               if value.IsNil() { // create the structure if it's nil
+                       s := reflect.New(value.Type().Elem())
+                       value.Set(s)
+                       value = s
+               }
+
+               value = value.Elem()
+               t = t.Elem()
+       }
+
+       // unwrap any payloads
+       if payload := tag.Get("payload"); payload != "" {
+               field, _ := t.FieldByName(payload)
+               return unmarshalAny(value.FieldByName(payload), data, field.Tag)
+       }
+
+       for i := 0; i < t.NumField(); i++ {
+               field := t.Field(i)
+               if field.PkgPath != "" {
+                       continue // ignore unexported fields
+               }
+
+               // figure out what this field is called
+               name := field.Name
+               if locName := field.Tag.Get("locationName"); locName != "" {
+                       name = locName
+               }
+
+               member := value.FieldByIndex(field.Index)
+               err := unmarshalAny(member, mapData[name], field.Tag)
+               if err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+       if data == nil {
+               return nil
+       }
+       listData, ok := data.([]interface{})
+       if !ok {
+               return fmt.Errorf("JSON value is not a list (%#v)", data)
+       }
+
+       if value.IsNil() {
+               l := len(listData)
+               value.Set(reflect.MakeSlice(value.Type(), l, l))
+       }
+
+       for i, c := range listData {
+               err := unmarshalAny(value.Index(i), c, "")
+               if err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+func unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+       if data == nil {
+               return nil
+       }
+       mapData, ok := data.(map[string]interface{})
+       if !ok {
+               return fmt.Errorf("JSON value is not a map (%#v)", data)
+       }
+
+       if value.IsNil() {
+               value.Set(reflect.MakeMap(value.Type()))
+       }
+
+       for k, v := range mapData {
+               kvalue := reflect.ValueOf(k)
+               vvalue := reflect.New(value.Type().Elem()).Elem()
+
+               unmarshalAny(vvalue, v, "")
+               value.SetMapIndex(kvalue, vvalue)
+       }
+
+       return nil
+}
+
+func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
+
+       switch d := data.(type) {
+       case nil:
+               return nil // nothing to do here
+       case string:
+               switch value.Interface().(type) {
+               case *string:
+                       value.Set(reflect.ValueOf(&d))
+               case []byte:
+                       b, err := base64.StdEncoding.DecodeString(d)
+                       if err != nil {
+                               return err
+                       }
+                       value.Set(reflect.ValueOf(b))
+               case *time.Time:
+                       format := tag.Get("timestampFormat")
+                       if len(format) == 0 {
+                               format = protocol.ISO8601TimeFormatName
+                       }
+
+                       t, err := protocol.ParseTime(format, d)
+                       if err != nil {
+                               return err
+                       }
+                       value.Set(reflect.ValueOf(&t))
+               case aws.JSONValue:
+                       // No need to use escaping as the value is a non-quoted string.
+                       v, err := protocol.DecodeJSONValue(d, protocol.NoEscape)
+                       if err != nil {
+                               return err
+                       }
+                       value.Set(reflect.ValueOf(v))
+               default:
+                       return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+               }
+       case float64:
+               switch value.Interface().(type) {
+               case *int64:
+                       di := int64(d)
+                       value.Set(reflect.ValueOf(&di))
+               case *float64:
+                       value.Set(reflect.ValueOf(&d))
+               case *time.Time:
+                       // Time unmarshaled from a float64 can only be epoch seconds
+                       t := time.Unix(int64(d), 0).UTC()
+                       value.Set(reflect.ValueOf(&t))
+               default:
+                       return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+               }
+       case bool:
+               switch value.Interface().(type) {
+               case *bool:
+                       value.Set(reflect.ValueOf(&d))
+               default:
+                       return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
+               }
+       default:
+               return fmt.Errorf("unsupported JSON value (%v)", data)
+       }
+       return nil
+}
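
A note on the float64 case above: decoding into interface{} with encoding/json yields float64 for every JSON number, which is why unmarshalScalar converts from float64 when filling *int64 fields and epoch-second *time.Time values. For example:

    package main

    import (
            "encoding/json"
            "fmt"
            "time"
    )

    func main() {
            var out interface{}
            if err := json.Unmarshal([]byte("1565359155"), &out); err != nil {
                    panic(err)
            }
            f := out.(float64) // JSON numbers arrive as float64
            fmt.Println(int64(f), time.Unix(int64(f), 0).UTC())
    }
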
index 60e5b09d548ccef16ceb57a1791941dcf0a81455..0cb99eb579682773ec5622debf34f355c8073cb9 100644 (file)
@@ -21,7 +21,7 @@ func Build(r *request.Request) {
                "Version": {r.ClientInfo.APIVersion},
        }
        if err := queryutil.Parse(body, r.Params, false); err != nil {
-               r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
+               r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err)
                return
        }
 
index 3495c73070bb9100c4b7fafd3cec3bc83b473963..f69c1efc93ad2f2cdec174aefafae0dd07f5f379 100644 (file)
@@ -24,7 +24,7 @@ func Unmarshal(r *request.Request) {
                err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
                if err != nil {
                        r.Error = awserr.NewRequestFailure(
-                               awserr.New("SerializationError", "failed decoding Query response", err),
+                               awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err),
                                r.HTTPResponse.StatusCode,
                                r.RequestID,
                        )
index 46d354e826f45a3d1378ff288922c4a05164f53a..831b0110c54bd71b27b12a5504157fb6631954b5 100644 (file)
@@ -2,73 +2,68 @@ package query
 
 import (
        "encoding/xml"
-       "io/ioutil"
+       "fmt"
 
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
 )
 
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
 type xmlErrorResponse struct {
-       XMLName   xml.Name `xml:"ErrorResponse"`
-       Code      string   `xml:"Error>Code"`
-       Message   string   `xml:"Error>Message"`
-       RequestID string   `xml:"RequestId"`
+       Code      string `xml:"Error>Code"`
+       Message   string `xml:"Error>Message"`
+       RequestID string `xml:"RequestId"`
 }
 
-type xmlServiceUnavailableResponse struct {
-       XMLName xml.Name `xml:"ServiceUnavailableException"`
+type xmlResponseError struct {
+       xmlErrorResponse
 }
 
-// UnmarshalErrorHandler is a name request handler to unmarshal request errors
-var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
+       const svcUnavailableTagName = "ServiceUnavailableException"
+       const errorResponseTagName = "ErrorResponse"
+
+       switch start.Name.Local {
+       case svcUnavailableTagName:
+               e.Code = svcUnavailableTagName
+               e.Message = "service is unavailable"
+               return d.Skip()
+
+       case errorResponseTagName:
+               return d.DecodeElement(&e.xmlErrorResponse, &start)
+
+       default:
+               return fmt.Errorf("unknown error response tag, %v", start)
+       }
+}
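
The custom UnmarshalXML above dispatches on the document's root tag, accepting both the <ErrorResponse> and bare <ServiceUnavailableException> shapes. A runnable sketch of the nested Error>Code field paths, with an illustrative payload:

    package main

    import (
            "encoding/xml"
            "fmt"
            "strings"
    )

    // errorResponse mirrors xmlErrorResponse above.
    type errorResponse struct {
            Code      string `xml:"Error>Code"`
            Message   string `xml:"Error>Message"`
            RequestID string `xml:"RequestId"`
    }

    func main() {
            // Illustrative document; real services fill in their own values.
            doc := `<ErrorResponse>
              <Error><Code>Throttling</Code><Message>Rate exceeded</Message></Error>
              <RequestId>abc-123</RequestId>
            </ErrorResponse>`

            var e errorResponse
            if err := xml.NewDecoder(strings.NewReader(doc)).Decode(&e); err != nil {
                    panic(err)
            }
            fmt.Println(e.Code, e.Message, e.RequestID) // Throttling Rate exceeded abc-123
    }
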
 
 // UnmarshalError unmarshals an error response for an AWS Query service.
 func UnmarshalError(r *request.Request) {
        defer r.HTTPResponse.Body.Close()
 
-       bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+       var respErr xmlResponseError
+       err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
        if err != nil {
                r.Error = awserr.NewRequestFailure(
-                       awserr.New("SerializationError", "failed to read from query HTTP response body", err),
+                       awserr.New(request.ErrCodeSerialization,
+                               "failed to unmarshal error message", err),
                        r.HTTPResponse.StatusCode,
                        r.RequestID,
                )
                return
        }
 
-       // First check for specific error
-       resp := xmlErrorResponse{}
-       decodeErr := xml.Unmarshal(bodyBytes, &resp)
-       if decodeErr == nil {
-               reqID := resp.RequestID
-               if reqID == "" {
-                       reqID = r.RequestID
-               }
-               r.Error = awserr.NewRequestFailure(
-                       awserr.New(resp.Code, resp.Message, nil),
-                       r.HTTPResponse.StatusCode,
-                       reqID,
-               )
-               return
-       }
-
-       // Check for unhandled error
-       servUnavailResp := xmlServiceUnavailableResponse{}
-       unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
-       if unavailErr == nil {
-               r.Error = awserr.NewRequestFailure(
-                       awserr.New("ServiceUnavailableException", "service is unavailable", nil),
-                       r.HTTPResponse.StatusCode,
-                       r.RequestID,
-               )
-               return
+       reqID := respErr.RequestID
+       if len(reqID) == 0 {
+               reqID = r.RequestID
        }
 
-       // Failed to retrieve any error message from the response body
        r.Error = awserr.NewRequestFailure(
-               awserr.New("SerializationError",
-                       "failed to decode query XML error response", decodeErr),
+               awserr.New(respErr.Code, respErr.Message, nil),
                r.HTTPResponse.StatusCode,
-               r.RequestID,
+               reqID,
        )
 }
index b80f84fbb86492b760745e74d61924d1f35b0801..1301b149d35e8a175d99a7d757ca9832f4276c14 100644 (file)
@@ -25,6 +25,8 @@ var noEscape [256]bool
 
 var errValueNotSet = fmt.Errorf("value not set")
 
+var byteSliceType = reflect.TypeOf([]byte{})
+
 func init() {
        for i := 0; i < len(noEscape); i++ {
                // AWS expects every character except these to be escaped
@@ -94,6 +96,14 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo
                                continue
                        }
 
+                       // Support the ability to customize values to be marshaled as a
+                       // blob even though they were modeled as a string. Required for S3
+                       // API fields like SSECustomerKey, which is modeled as a string
+                       // but must be base64 encoded in the request.
+                       if field.Tag.Get("marshal-as") == "blob" {
+                               m = m.Convert(byteSliceType)
+                       }
+
                        var err error
                        switch field.Tag.Get("location") {
                        case "headers": // header maps
@@ -137,7 +147,7 @@ func buildBody(r *request.Request, v reflect.Value) {
                                        case string:
                                                r.SetStringBody(reader)
                                        default:
-                                               r.Error = awserr.New("SerializationError",
+                                               r.Error = awserr.New(request.ErrCodeSerialization,
                                                        "failed to encode REST request",
                                                        fmt.Errorf("unknown payload type %s", payload.Type()))
                                        }
@@ -152,7 +162,7 @@ func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.
        if err == errValueNotSet {
                return nil
        } else if err != nil {
-               return awserr.New("SerializationError", "failed to encode REST request", err)
+               return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
        }
 
        name = strings.TrimSpace(name)
@@ -170,7 +180,7 @@ func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag)
                if err == errValueNotSet {
                        continue
                } else if err != nil {
-                       return awserr.New("SerializationError", "failed to encode REST request", err)
+                       return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
 
                }
                keyStr := strings.TrimSpace(key.String())
@@ -186,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) e
        if err == errValueNotSet {
                return nil
        } else if err != nil {
-               return awserr.New("SerializationError", "failed to encode REST request", err)
+               return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
        }
 
        u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
@@ -219,7 +229,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string, tag reflec
                if err == errValueNotSet {
                        return nil
                } else if err != nil {
-                       return awserr.New("SerializationError", "failed to encode REST request", err)
+                       return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
                }
                query.Set(name, str)
        }
index 33fd53b126a136bd20184fa5232d9ee34e06e85a..de021367da24f80b3c8b0dfd8cce5f35ffa06fe3 100644 (file)
@@ -57,7 +57,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
                                                defer r.HTTPResponse.Body.Close()
                                                b, err := ioutil.ReadAll(r.HTTPResponse.Body)
                                                if err != nil {
-                                                       r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+                                                       r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
                                                } else {
                                                        payload.Set(reflect.ValueOf(b))
                                                }
@@ -65,7 +65,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
                                                defer r.HTTPResponse.Body.Close()
                                                b, err := ioutil.ReadAll(r.HTTPResponse.Body)
                                                if err != nil {
-                                                       r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+                                                       r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
                                                } else {
                                                        str := string(b)
                                                        payload.Set(reflect.ValueOf(&str))
@@ -77,7 +77,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
                                                case "io.ReadSeeker":
                                                        b, err := ioutil.ReadAll(r.HTTPResponse.Body)
                                                        if err != nil {
-                                                               r.Error = awserr.New("SerializationError",
+                                                               r.Error = awserr.New(request.ErrCodeSerialization,
                                                                        "failed to read response body", err)
                                                                return
                                                        }
@@ -85,7 +85,7 @@ func unmarshalBody(r *request.Request, v reflect.Value) {
                                                default:
                                                        io.Copy(ioutil.Discard, r.HTTPResponse.Body)
                                                        defer r.HTTPResponse.Body.Close()
-                                                       r.Error = awserr.New("SerializationError",
+                                                       r.Error = awserr.New(request.ErrCodeSerialization,
                                                                "failed to decode REST response",
                                                                fmt.Errorf("unknown payload type %s", payload.Type()))
                                                }
@@ -115,14 +115,14 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) {
                        case "header":
                                err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
                                if err != nil {
-                                       r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+                                       r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
                                        break
                                }
                        case "headers":
                                prefix := field.Tag.Get("locationName")
                                err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
                                if err != nil {
-                                       r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+                                       r.Error = awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
                                        break
                                }
                        }
index b0f4e245661d03c6afaef660f905e93425e2d4b0..cf569645dc22276c985e5fc46c60f4a6be9dff7f 100644 (file)
@@ -37,7 +37,8 @@ func Build(r *request.Request) {
                err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
                if err != nil {
                        r.Error = awserr.NewRequestFailure(
-                               awserr.New("SerializationError", "failed to encode rest XML request", err),
+                               awserr.New(request.ErrCodeSerialization,
+                                       "failed to encode rest XML request", err),
                                r.HTTPResponse.StatusCode,
                                r.RequestID,
                        )
@@ -55,7 +56,8 @@ func Unmarshal(r *request.Request) {
                err := xmlutil.UnmarshalXML(r.Data, decoder, "")
                if err != nil {
                        r.Error = awserr.NewRequestFailure(
-                               awserr.New("SerializationError", "failed to decode REST XML response", err),
+                               awserr.New(request.ErrCodeSerialization,
+                                       "failed to decode REST XML response", err),
                                r.HTTPResponse.StatusCode,
                                r.RequestID,
                        )
index ff1ef6830b93d1ee107e3dc017113b1b8fc14921..7108d3800937131b398713735628740aa173d0e9 100644 (file)
@@ -1,6 +1,7 @@
 package xmlutil
 
 import (
+       "bytes"
        "encoding/base64"
        "encoding/xml"
        "fmt"
@@ -10,9 +11,27 @@ import (
        "strings"
        "time"
 
+       "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/private/protocol"
 )
 
+// UnmarshalXMLError unmarshals the XML error from the stream into the value
+// type specified. The value must be a pointer. If the message fails to
+// unmarshal, the message content will be included in the returned error as an
+// awserr.UnmarshalError.
+func UnmarshalXMLError(v interface{}, stream io.Reader) error {
+       var errBuf bytes.Buffer
+       body := io.TeeReader(stream, &errBuf)
+
+       err := xml.NewDecoder(body).Decode(v)
+       if err != nil && err != io.EOF {
+               return awserr.NewUnmarshalError(err,
+                       "failed to unmarshal error message", errBuf.Bytes())
+       }
+
+       return nil
+}
+
 // UnmarshalXML deserializes an xml.Decoder into the container v. V
 // needs to match the shape of the XML expected to be decoded.
 // If the shape doesn't match unmarshaling will fail.
index 83a42d249b4a460dd6efeadb916c803dd0bc692e..139c27d14c1cd312861b80e08b577382bc4d4dcc 100644 (file)
@@ -545,6 +545,10 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
 // Deletes an analytics configuration for the bucket (specified by the analytics
 // configuration ID).
 //
+// To use this operation, you must have permissions to perform the s3:PutAnalyticsConfiguration
+// action. The bucket owner has this permission by default. The bucket owner
+// can grant this permission to others.
+//
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
 // the error.
@@ -1071,7 +1075,7 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
 // DeleteBucketReplication API operation for Amazon Simple Storage Service.
 //
 // Deletes the replication configuration from the bucket. For information about
-// replication configuration, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// replication configuration, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
 // in the Amazon S3 Developer Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -3335,8 +3339,8 @@ func (c *S3) GetObjectLockConfigurationRequest(input *GetObjectLockConfiguration
 
 // GetObjectLockConfiguration API operation for Amazon Simple Storage Service.
 //
-// Gets the Object Lock configuration for a bucket. The rule specified in the
-// Object Lock configuration will be applied by default to every new object
+// Gets the object lock configuration for a bucket. The rule specified in the
+// object lock configuration will be applied by default to every new object
 // placed in the specified bucket.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -4210,7 +4214,7 @@ func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipa
 //    // Example iterating over at most 3 pages of a ListMultipartUploads operation.
 //    pageNum := 0
 //    err := client.ListMultipartUploadsPages(params,
-//        func(page *ListMultipartUploadsOutput, lastPage bool) bool {
+//        func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -4340,7 +4344,7 @@ func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVer
 //    // Example iterating over at most 3 pages of a ListObjectVersions operation.
 //    pageNum := 0
 //    err := client.ListObjectVersionsPages(params,
-//        func(page *ListObjectVersionsOutput, lastPage bool) bool {
+//        func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -4477,7 +4481,7 @@ func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, op
 //    // Example iterating over at most 3 pages of a ListObjects operation.
 //    pageNum := 0
 //    err := client.ListObjectsPages(params,
-//        func(page *ListObjectsOutput, lastPage bool) bool {
+//        func(page *s3.ListObjectsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -4615,7 +4619,7 @@ func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input
 //    // Example iterating over at most 3 pages of a ListObjectsV2 operation.
 //    pageNum := 0
 //    err := client.ListObjectsV2Pages(params,
-//        func(page *ListObjectsV2Output, lastPage bool) bool {
+//        func(page *s3.ListObjectsV2Output, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -4745,7 +4749,7 @@ func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts .
 //    // Example iterating over at most 3 pages of a ListParts operation.
 //    pageNum := 0
 //    err := client.ListPartsPages(params,
-//        func(page *ListPartsOutput, lastPage bool) bool {
+//        func(page *s3.ListPartsOutput, lastPage bool) bool {
 //            pageNum++
 //            fmt.Println(page)
 //            return pageNum <= 3
@@ -5754,8 +5758,7 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
 
 // PutBucketPolicy API operation for Amazon Simple Storage Service.
 //
-// Replaces a policy on a bucket. If the bucket already has a policy, the one
-// in this request completely replaces it.
+// Applies an Amazon S3 bucket policy to an Amazon S3 bucket.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -5831,7 +5834,7 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
 // PutBucketReplication API operation for Amazon Simple Storage Service.
 //
 // Creates a replication configuration or replaces an existing one. For more
-// information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+// information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
 // in the Amazon S3 Developer Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -6439,8 +6442,8 @@ func (c *S3) PutObjectLockConfigurationRequest(input *PutObjectLockConfiguration
 
 // PutObjectLockConfiguration API operation for Amazon Simple Storage Service.
 //
-// Places an Object Lock configuration on the specified bucket. The rule specified
-// in the Object Lock configuration will be applied by default to every new
+// Places an object lock configuration on the specified bucket. The rule specified
+// in the object lock configuration will be applied by default to every new
 // object placed in the specified bucket.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -7010,13 +7013,16 @@ func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInp
        return out, req.Send()
 }
 
-// Specifies the days since the initiation of an Incomplete Multipart Upload
-// that Lifecycle will wait before permanently removing all parts of the upload.
+// Specifies the days since the initiation of an incomplete multipart upload
+// that Amazon S3 will wait before permanently removing all parts of the upload.
+// For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+// Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+// in the Amazon Simple Storage Service Developer Guide.
 type AbortIncompleteMultipartUpload struct {
        _ struct{} `type:"structure"`
 
-       // Indicates the number of days that must pass since initiation for Lifecycle
-       // to abort an Incomplete Multipart Upload.
+       // Specifies the number of days after which Amazon S3 aborts an incomplete multipart
+       // upload.
        DaysAfterInitiation *int64 `type:"integer"`
 }
 
@@ -7039,9 +7045,13 @@ func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortI
 type AbortMultipartUploadInput struct {
        _ struct{} `type:"structure"`
 
+       // Name of the bucket to which the multipart upload was initiated.
+       //
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
+       // Key of the object for which the multipart upload was initiated.
+       //
        // Key is a required field
        Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
 
@@ -7051,6 +7061,8 @@ type AbortMultipartUploadInput struct {
        // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
        RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
 
+       // Upload ID that identifies the multipart upload.
+       //
        // UploadId is a required field
        UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
 }
@@ -7145,10 +7157,13 @@ func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipart
        return s
 }
 
+// Configures the transfer acceleration state for an Amazon S3 bucket. For more
+// information, see Amazon S3 Transfer Acceleration (https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html)
+// in the Amazon Simple Storage Service Developer Guide.
 type AccelerateConfiguration struct {
        _ struct{} `type:"structure"`
 
-       // The accelerate configuration of the bucket.
+       // Specifies the transfer acceleration status of the bucket.
        Status *string `type:"string" enum:"BucketAccelerateStatus"`
 }
 
@@ -7168,12 +7183,14 @@ func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration {
        return s
 }
 
+// Contains the elements that set the ACL permissions for an object per grantee.
 type AccessControlPolicy struct {
        _ struct{} `type:"structure"`
 
        // A list of grants.
        Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
 
+       // Container for the bucket owner's display name and ID.
        Owner *Owner `type:"structure"`
 }
 
@@ -7223,7 +7240,9 @@ func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
 type AccessControlTranslation struct {
        _ struct{} `type:"structure"`
 
-       // The override value for the owner of the replica object.
+       // Specifies the replica ownership. For default and valid values, see PUT bucket
+       // replication (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+       // in the Amazon Simple Storage Service API Reference.
        //
        // Owner is a required field
        Owner *string `type:"string" required:"true" enum:"OwnerOverride"`
@@ -7258,10 +7277,14 @@ func (s *AccessControlTranslation) SetOwner(v string) *AccessControlTranslation
        return s
 }
 
+// A conjunction (logical AND) of predicates, which is used in evaluating an
+// analytics filter. The operator must have at least two predicates in any
+// combination, and an object must match all of the predicates for the filter
+// to apply.
 type AnalyticsAndOperator struct {
        _ struct{} `type:"structure"`
 
-       // The prefix to use when evaluating an AND predicate.
+       // The prefix to use when evaluating an AND predicate: the prefix that an object
+       // must have to be included in the analytics results.
        Prefix *string `type:"string"`
 
        // The list of tags to use when evaluating an AND predicate.
@@ -7310,6 +7333,11 @@ func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
        return s
 }
 
+// Specifies the configuration and any analyses for the analytics filter of
+// an Amazon S3 bucket.
+//
+// For more information, see GET Bucket analytics (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETAnalyticsConfig.html)
+// in the Amazon Simple Storage Service API Reference.
 type AnalyticsConfiguration struct {
        _ struct{} `type:"structure"`
 
@@ -7318,13 +7346,13 @@ type AnalyticsConfiguration struct {
        // If no filter is provided, all objects will be considered in any analysis.
        Filter *AnalyticsFilter `type:"structure"`
 
-       // The identifier used to represent an analytics configuration.
+       // The ID that identifies the analytics configuration.
        //
        // Id is a required field
        Id *string `type:"string" required:"true"`
 
-       // If present, it indicates that data related to access patterns will be collected
-       // and made available to analyze the tradeoffs between different storage classes.
+       // Contains data related to access patterns to be collected and made available
+       // to analyze the tradeoffs between different storage classes.
        //
        // StorageClassAnalysis is a required field
        StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"`
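
A sketch of registering such a configuration; the bucket name, configuration ID, and export bucket ARN are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Id:     aws.String("report-1"),       // placeholder
		AnalyticsConfiguration: &s3.AnalyticsConfiguration{
			Id: aws.String("report-1"), // must match the request Id
			StorageClassAnalysis: &s3.StorageClassAnalysis{
				DataExport: &s3.StorageClassAnalysisDataExport{
					OutputSchemaVersion: aws.String(s3.StorageClassAnalysisSchemaVersionV1),
					Destination: &s3.AnalyticsExportDestination{
						S3BucketDestination: &s3.AnalyticsS3BucketDestination{
							Bucket: aws.String("arn:aws:s3:::example-results"), // placeholder ARN
							Format: aws.String(s3.AnalyticsS3ExportFileFormatCsv),
						},
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
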
@@ -7384,6 +7412,7 @@ func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis
        return s
 }
 
+// Where to publish the analytics results.
 type AnalyticsExportDestination struct {
        _ struct{} `type:"structure"`
 
@@ -7492,7 +7521,7 @@ func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter {
 type AnalyticsS3BucketDestination struct {
        _ struct{} `type:"structure"`
 
-       // The Amazon resource name (ARN) of the bucket to which data is exported.
+       // The Amazon Resource Name (ARN) of the bucket to which data is exported.
        //
        // Bucket is a required field
        Bucket *string `type:"string" required:"true"`
@@ -7501,13 +7530,12 @@ type AnalyticsS3BucketDestination struct {
        // the owner will not be validated prior to exporting data.
        BucketAccountId *string `type:"string"`
 
-       // The file format used when exporting data to Amazon S3.
+       // Specifies the file format used when exporting data to Amazon S3.
        //
        // Format is a required field
        Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"`
 
-       // The prefix to use when exporting data. The exported data begins with this
-       // prefix.
+       // The prefix to use when exporting data. The prefix is prepended to all results.
        Prefix *string `type:"string"`
 }
 
@@ -7600,9 +7628,14 @@ func (s *Bucket) SetName(v string) *Bucket {
        return s
 }
 
+// Specifies the lifecycle configuration for objects in an Amazon S3 bucket.
+// For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+// in the Amazon Simple Storage Service Developer Guide.
 type BucketLifecycleConfiguration struct {
        _ struct{} `type:"structure"`
 
+       // A lifecycle rule for individual objects in an Amazon S3 bucket.
+       //
        // Rules is a required field
        Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
 }
@@ -7649,9 +7682,10 @@ func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifec
 type BucketLoggingStatus struct {
        _ struct{} `type:"structure"`
 
-       // Container for logging information. Presence of this element indicates that
-       // logging is enabled. Parameters TargetBucket and TargetPrefix are required
-       // in this case.
+       // Describes where logs are stored and the prefix that Amazon S3 assigns to
+       // all log object keys for a bucket. For more information, see PUT Bucket logging
+       // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+       // in the Amazon Simple Storage Service API Reference.
        LoggingEnabled *LoggingEnabled `type:"structure"`
 }
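
For illustration, enabling logging with this element might look like the following; both bucket names and the prefix are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Store access logs for the source bucket under a per-bucket prefix.
	_, err := svc.PutBucketLogging(&s3.PutBucketLoggingInput{
		Bucket: aws.String("example-source-bucket"), // placeholder
		BucketLoggingStatus: &s3.BucketLoggingStatus{
			LoggingEnabled: &s3.LoggingEnabled{
				TargetBucket: aws.String("example-log-bucket"),     // placeholder
				TargetPrefix: aws.String("example-source-bucket/"), // placeholder
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
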
 
@@ -7686,9 +7720,15 @@ func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggin
        return s
 }
 
+// Describes the cross-origin access configuration for objects in an Amazon
+// S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
+// (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+// Simple Storage Service Developer Guide.
 type CORSConfiguration struct {
        _ struct{} `type:"structure"`
 
+       // A set of allowed origins and methods.
+       //
        // CORSRules is a required field
        CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
 }
@@ -7732,14 +7772,18 @@ func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration {
        return s
 }
 
+// Specifies a cross-origin access rule for an Amazon S3 bucket.
 type CORSRule struct {
        _ struct{} `type:"structure"`
 
-       // Specifies which headers are allowed in a pre-flight OPTIONS request.
+       // Headers that are specified in the Access-Control-Request-Headers header.
+       // These headers are allowed in a preflight OPTIONS request. In response to
+       // any preflight OPTIONS request, Amazon S3 returns any requested headers that
+       // are allowed.
        AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
 
-       // Identifies HTTP methods that the domain/origin specified in the rule is allowed
-       // to execute.
+       // An HTTP method that you allow the origin to execute. Valid values are GET,
+       // PUT, HEAD, POST, and DELETE.
        //
        // AllowedMethods is a required field
        AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"`
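
A sketch of putting one such rule on a bucket; the origin, methods, and max age are illustrative:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Allow GET and PUT from one origin, with any request header.
	_, err := svc.PutBucketCors(&s3.PutBucketCorsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		CORSConfiguration: &s3.CORSConfiguration{
			CORSRules: []*s3.CORSRule{{
				AllowedHeaders: []*string{aws.String("*")},
				AllowedMethods: []*string{aws.String("GET"), aws.String("PUT")},
				AllowedOrigins: []*string{aws.String("https://www.example.com")}, // placeholder
				MaxAgeSeconds:  aws.Int64(3000),
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
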
@@ -8290,6 +8334,7 @@ func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
        return s
 }
 
+// Specifies a condition that must be met for a redirect to apply.
 type Condition struct {
        _ struct{} `type:"structure"`
 
@@ -8409,7 +8454,7 @@ type CopyObjectInput struct {
        // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
        // the source object. The encryption key provided in this header must be one
        // that was used when the source object was created.
-       CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
        // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
        // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -8444,10 +8489,10 @@ type CopyObjectInput struct {
        // Specifies whether you want to apply a Legal Hold to the copied object.
        ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
 
-       // The Object Lock mode that you want to apply to the copied object.
+       // The object lock mode that you want to apply to the copied object.
        ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
 
-       // The date and time when you want the copied object's Object Lock to expire.
+       // The date and time when you want the copied object's object lock to expire.
        ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
 
        // Confirms that the requester knows that she or he will be charged for the
@@ -8464,13 +8509,18 @@ type CopyObjectInput struct {
        // does not store the encryption key. The key must be appropriate for use with
        // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
        // header.
-       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
        // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
        // Amazon S3 uses this header for a message integrity check to ensure the encryption
        // key was transmitted without error.
        SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 
+       // Specifies the AWS KMS Encryption Context to use for object encryption. The
+       // value of this header is a base64-encoded UTF-8 string holding JSON with the
+       // encryption context key-value pairs.
+       SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
        // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
        // requests for an object protected by AWS KMS will fail if not made via SSL
        // or using SigV4. Documentation on configuring any of the officially supported
@@ -8735,6 +8785,12 @@ func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
        return s
 }
 
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CopyObjectInput) SetSSEKMSEncryptionContext(v string) *CopyObjectInput {
+       s.SSEKMSEncryptionContext = &v
+       return s
+}
+
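
A sketch of supplying the new header on a copy, assuming hypothetical context key-value pairs; as the doc comment above describes, the header value is the base64-encoded JSON of those pairs:

package main

import (
	"encoding/base64"
	"encoding/json"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Encryption context as JSON, then base64-encoded for the header.
	ctx, err := json.Marshal(map[string]string{"Department": "Finance"}) // hypothetical pairs
	if err != nil {
		log.Fatal(err)
	}

	input := &s3.CopyObjectInput{
		Bucket:               aws.String("example-destination"),           // placeholder
		Key:                  aws.String("example-object"),                // placeholder
		CopySource:           aws.String("example-source/example-object"), // placeholder
		ServerSideEncryption: aws.String(s3.ServerSideEncryptionAwsKms),
	}
	input.SetSSEKMSEncryptionContext(base64.StdEncoding.EncodeToString(ctx))

	if _, err := svc.CopyObject(input); err != nil {
		log.Fatal(err)
	}
}
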
 // SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
 func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput {
        s.SSEKMSKeyId = &v
@@ -8795,6 +8851,11 @@ type CopyObjectOutput struct {
        // verification of the customer-provided encryption key.
        SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 
+       // If present, specifies the AWS KMS Encryption Context to use for object encryption.
+       // The value of this header is a base64-encoded UTF-8 string holding JSON with
+       // the encryption context key-value pairs.
+       SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
        // If present, specifies the ID of the AWS Key Management Service (KMS) master
        // encryption key that was used for the object.
        SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
@@ -8853,6 +8914,12 @@ func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput {
        return s
 }
 
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CopyObjectOutput) SetSSEKMSEncryptionContext(v string) *CopyObjectOutput {
+       s.SSEKMSEncryptionContext = &v
+       return s
+}
+
 // SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
 func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput {
        s.SSEKMSKeyId = &v
@@ -8984,7 +9051,8 @@ type CreateBucketInput struct {
        // Allows grantee to write the ACL for the applicable bucket.
        GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
 
-       // Specifies whether you want S3 Object Lock to be enabled for the new bucket.
+       // Specifies whether you want Amazon S3 object lock to be enabled for the new
+       // bucket.
        ObjectLockEnabledForBucket *bool `location:"header" locationName:"x-amz-bucket-object-lock-enabled" type:"boolean"`
 }
 
@@ -9147,10 +9215,10 @@ type CreateMultipartUploadInput struct {
        // Specifies whether you want to apply a Legal Hold to the uploaded object.
        ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
 
-       // Specifies the Object Lock mode that you want to apply to the uploaded object.
+       // Specifies the object lock mode that you want to apply to the uploaded object.
        ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
 
-       // Specifies the date and time when you want the Object Lock to expire.
+       // Specifies the date and time when you want the object lock to expire.
        ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
 
        // Confirms that the requester knows that she or he will be charged for the
@@ -9167,13 +9235,18 @@ type CreateMultipartUploadInput struct {
        // does not store the encryption key. The key must be appropriate for use with
        // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
        // header.
-       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
        // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
        // Amazon S3 uses this header for a message integrity check to ensure the encryption
        // key was transmitted without error.
        SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 
+       // Specifies the AWS KMS Encryption Context to use for object encryption. The
+       // value of this header is a base64-encoded UTF-8 string holding JSON with the
+       // encryption context key-value pairs.
+       SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
        // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
        // requests for an object protected by AWS KMS will fail if not made via SSL
        // or using SigV4. Documentation on configuring any of the officially supported
@@ -9368,6 +9441,12 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMulti
        return s
 }
 
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CreateMultipartUploadInput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadInput {
+       s.SSEKMSEncryptionContext = &v
+       return s
+}
+
 // SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
 func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
        s.SSEKMSKeyId = &v
@@ -9428,6 +9507,11 @@ type CreateMultipartUploadOutput struct {
        // verification of the customer-provided encryption key.
        SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 
+       // If present, specifies the AWS KMS Encryption Context to use for object encryption.
+       // The value of this header is a base64-encoded UTF-8 string holding JSON with
+       // the encryption context key-value pairs.
+       SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
        // If present, specifies the ID of the AWS Key Management Service (KMS) master
        // encryption key that was used for the object.
        SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
@@ -9499,6 +9583,12 @@ func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMult
        return s
 }
 
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *CreateMultipartUploadOutput) SetSSEKMSEncryptionContext(v string) *CreateMultipartUploadOutput {
+       s.SSEKMSEncryptionContext = &v
+       return s
+}
+
 // SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
 func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput {
        s.SSEKMSKeyId = &v
@@ -9517,7 +9607,7 @@ func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUplo
        return s
 }
 
-// The container element for specifying the default Object Lock retention settings
+// The container element for specifying the default object lock retention settings
 // for new objects placed in the specified bucket.
 type DefaultRetention struct {
        _ struct{} `type:"structure"`
@@ -9525,7 +9615,7 @@ type DefaultRetention struct {
        // The number of days that you want to specify for the default retention period.
        Days *int64 `type:"integer"`
 
-       // The default Object Lock retention mode you want to apply to new objects placed
+       // The default object lock retention mode you want to apply to new objects placed
        // in the specified bucket.
        Mode *string `type:"string" enum:"ObjectLockRetentionMode"`
 
@@ -9625,7 +9715,7 @@ type DeleteBucketAnalyticsConfigurationInput struct {
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-       // The identifier used to represent an analytics configuration.
+       // The ID that identifies the analytics configuration.
        //
        // Id is a required field
        Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
@@ -10425,7 +10515,7 @@ type DeleteObjectInput struct {
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-       // Indicates whether S3 Object Lock should bypass Governance-mode restrictions
+       // Indicates whether Amazon S3 object lock should bypass governance-mode restrictions
        // to process this operation.
        BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
 
@@ -10665,7 +10755,7 @@ type DeleteObjectsInput struct {
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
        // Specifies whether you want to delete this object even if it has a Governance-type
-       // Object Lock in place. You must have sufficient permissions to perform this
+       // object lock in place. You must have sufficient permissions to perform this
        // operation.
        BypassGovernanceRetention *bool `location:"header" locationName:"x-amz-bypass-governance-retention" type:"boolean"`
 
@@ -10902,33 +10992,33 @@ func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
        return s
 }
 
-// A container for information about the replication destination.
+// Specifies information about where to publish analysis or configuration results
+// for an Amazon S3 bucket.
 type Destination struct {
        _ struct{} `type:"structure"`
 
-       // A container for information about access control for replicas.
-       //
-       // Use this element only in a cross-account scenario where source and destination
-       // bucket owners are not the same to change replica ownership to the AWS account
-       // that owns the destination bucket. If you don't add this element to the replication
-       // configuration, the replicas are owned by same AWS account that owns the source
-       // object.
+       // Specify this only in a cross-account scenario (where the source and destination
+       // bucket owners are not the same) when you want to change replica ownership
+       // to the AWS account that owns the destination bucket. If this is not specified
+       // in the replication configuration, the replicas are owned by the same AWS
+       // account that owns the source object.
        AccessControlTranslation *AccessControlTranslation `type:"structure"`
 
-       // The account ID of the destination bucket. Currently, Amazon S3 verifies this
-       // value only if Access Control Translation is enabled.
-       //
-       // In a cross-account scenario, if you change replica ownership to the AWS account
-       // that owns the destination bucket by adding the AccessControlTranslation element,
-       // this is the account ID of the owner of the destination bucket.
+       // Destination bucket owner account ID. In a cross-account scenario, if you
+       // direct Amazon S3 to change replica ownership to the AWS account that owns
+       // the destination bucket by specifying the AccessControlTranslation property,
+       // this is the account ID of the destination bucket owner. For more information,
+       // see Cross-Region Replication Additional Configuration: Change Replica Owner
+       // (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-change-owner.html) in
+       // the Amazon Simple Storage Service Developer Guide.
        Account *string `type:"string"`
 
        // The Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to
        // store replicas of the object identified by the rule.
        //
-       // If there are multiple rules in your replication configuration, all rules
-       // must specify the same bucket as the destination. A replication configuration
-       // can replicate objects to only one destination bucket.
+       // A replication configuration can replicate objects to only one destination
+       // bucket. If there are multiple rules in your replication configuration, all
+       // rules must specify the same destination bucket.
        //
        // Bucket is a required field
        Bucket *string `type:"string" required:"true"`
@@ -10937,8 +11027,13 @@ type Destination struct {
        // is specified, you must specify this element.
        EncryptionConfiguration *EncryptionConfiguration `type:"structure"`
 
-       // The class of storage used to store the object. By default Amazon S3 uses
-       // storage class of the source object when creating a replica.
+       // The storage class to use when replicating objects, such as standard or reduced
+       // redundancy. By default, Amazon S3 uses the storage class of the source object
+       // to create the object replica.
+       //
+       // For valid values, see the StorageClass element of the PUT Bucket replication
+       // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html)
+       // action in the Amazon Simple Storage Service API Reference.
        StorageClass *string `type:"string" enum:"StorageClass"`
 }
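
A sketch of a replication configuration built around this Destination shape; the role ARN, bucket names, and account ID are placeholders, and DeleteMarkerReplication is included because filter-based rules generally require it:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
		Bucket: aws.String("example-source-bucket"), // placeholder
		ReplicationConfiguration: &s3.ReplicationConfiguration{
			Role: aws.String("arn:aws:iam::111122223333:role/example-replication-role"), // placeholder
			Rules: []*s3.ReplicationRule{{
				Status:   aws.String(s3.ReplicationRuleStatusEnabled),
				Priority: aws.Int64(1),
				Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("")}, // whole bucket
				DeleteMarkerReplication: &s3.DeleteMarkerReplication{
					Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
				},
				Destination: &s3.Destination{
					Bucket:       aws.String("arn:aws:s3:::example-destination-bucket"), // placeholder
					Account:      aws.String("444455556666"),                            // placeholder owner
					StorageClass: aws.String(s3.StorageClassStandardIa),
					AccessControlTranslation: &s3.AccessControlTranslation{
						Owner: aws.String(s3.OwnerOverrideDestination),
					},
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
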
 
@@ -11068,13 +11163,13 @@ func (s *Encryption) SetKMSKeyId(v string) *Encryption {
        return s
 }
 
-// A container for information about the encryption-based configuration for
-// replicas.
+// Specifies encryption-related information for an Amazon S3 bucket that is
+// a destination for replicated objects.
 type EncryptionConfiguration struct {
        _ struct{} `type:"structure"`
 
-       // The ID of the AWS KMS key for the AWS Region where the destination bucket
-       // resides. Amazon S3 uses this key to encrypt the replica object.
+       // Specifies the AWS KMS Key ID (Key ARN or Alias ARN) for the destination bucket.
+       // Amazon S3 uses this key to encrypt replica objects.
        ReplicaKmsKeyID *string `type:"string"`
 }
 
@@ -11207,18 +11302,19 @@ func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
        return s
 }
 
-// A container for a key value pair that defines the criteria for the filter
-// rule.
+// Specifies the Amazon S3 object key name to filter on and whether to filter
+// on the suffix or prefix of the key name.
 type FilterRule struct {
        _ struct{} `type:"structure"`
 
        // The object key name prefix or suffix identifying one or more objects to which
-       // the filtering rule applies. The maximum prefix length is 1,024 characters.
-       // Overlapping prefixes and suffixes are not supported. For more information,
-       // see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+       // the filtering rule applies. The maximum length is 1,024 characters. Overlapping
+       // prefixes and suffixes are not supported. For more information, see Configuring
+       // Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
        // in the Amazon Simple Storage Service Developer Guide.
        Name *string `type:"string" enum:"FilterRuleName"`
 
+       // The value that the filter searches for in object key names.
        Value *string `type:"string"`
 }
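
A small sketch constructing such a filter; the prefix and suffix values are illustrative, and the result would be attached to a Lambda, queue, or topic notification configuration:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Match only .jpg objects under images/ -- one prefix rule and one suffix rule.
	filter := &s3.NotificationConfigurationFilter{
		Key: &s3.KeyFilter{
			FilterRules: []*s3.FilterRule{
				{Name: aws.String(s3.FilterRuleNamePrefix), Value: aws.String("images/")},
				{Name: aws.String(s3.FilterRuleNameSuffix), Value: aws.String(".jpg")},
			},
		},
	}
	fmt.Println(filter) // SDK shape types implement String() for inspection
}
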
 
@@ -11400,7 +11496,7 @@ type GetBucketAnalyticsConfigurationInput struct {
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-       // The identifier used to represent an analytics configuration.
+       // The ID that identifies the analytics configuration.
        //
        // Id is a required field
        Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
@@ -11597,8 +11693,7 @@ func (s *GetBucketEncryptionInput) getBucket() (v string) {
 type GetBucketEncryptionOutput struct {
        _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
 
-       // Container for server-side encryption configuration rules. Currently S3 supports
-       // one rule only.
+       // Specifies the default server-side-encryption configuration.
        ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `type:"structure"`
 }
 
@@ -11956,9 +12051,10 @@ func (s *GetBucketLoggingInput) getBucket() (v string) {
 type GetBucketLoggingOutput struct {
        _ struct{} `type:"structure"`
 
-       // Container for logging information. Presence of this element indicates that
-       // logging is enabled. Parameters TargetBucket and TargetPrefix are required
-       // in this case.
+       // Describes where logs are stored and the prefix that Amazon S3 assigns to
+       // all log object keys for a bucket. For more information, see PUT Bucket logging
+       // (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+       // in the Amazon Simple Storage Service API Reference.
        LoggingEnabled *LoggingEnabled `type:"structure"`
 }
 
@@ -12592,6 +12688,8 @@ type GetBucketWebsiteOutput struct {
 
        IndexDocument *IndexDocument `type:"structure"`
 
+       // Specifies the redirect behavior of all requests to a website endpoint of
+       // an Amazon S3 bucket.
        RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
 
        RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
@@ -12820,7 +12918,7 @@ type GetObjectInput struct {
        // does not store the encryption key. The key must be appropriate for use with
        // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
        // header.
-       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
        // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
        // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -13103,7 +13201,7 @@ func (s *GetObjectLegalHoldOutput) SetLegalHold(v *ObjectLockLegalHold) *GetObje
 type GetObjectLockConfigurationInput struct {
        _ struct{} `type:"structure"`
 
-       // The bucket whose Object Lock configuration you want to retrieve.
+       // The bucket whose object lock configuration you want to retrieve.
        //
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -13151,7 +13249,7 @@ func (s *GetObjectLockConfigurationInput) getBucket() (v string) {
 type GetObjectLockConfigurationOutput struct {
        _ struct{} `type:"structure" payload:"ObjectLockConfiguration"`
 
-       // The specified bucket's Object Lock configuration.
+       // The specified bucket's object lock configuration.
        ObjectLockConfiguration *ObjectLockConfiguration `type:"structure"`
 }
 
@@ -13235,10 +13333,10 @@ type GetObjectOutput struct {
        // returned if you have permission to view an object's legal hold status.
        ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
 
-       // The Object Lock mode currently in place for this object.
+       // The object lock mode currently in place for this object.
        ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
 
-       // The date and time when this object's Object Lock will expire.
+       // The date and time when this object's object lock will expire.
        ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
 
        // The count of parts this object has.
@@ -14136,7 +14234,7 @@ type HeadObjectInput struct {
        // does not store the encryption key. The key must be appropriate for use with
        // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
        // header.
-       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
        // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
        // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -14328,10 +14426,10 @@ type HeadObjectOutput struct {
        // The Legal Hold status for the specified object.
        ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
 
-       // The Object Lock mode currently in place for this object.
+       // The object lock mode currently in place for this object.
        ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
 
-       // The date and time when this object's Object Lock will expire.
+       // The date and time when this object's object lock expires.
        ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
 
        // The count of parts this object has.
@@ -14680,6 +14778,9 @@ func (s *InputSerialization) SetParquet(v *ParquetInput) *InputSerialization {
        return s
 }
 
+// Specifies the inventory configuration for an Amazon S3 bucket. For more information,
+// see GET Bucket inventory (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html)
+// in the Amazon Simple Storage Service API Reference.
 type InventoryConfiguration struct {
        _ struct{} `type:"structure"`
 
@@ -14697,12 +14798,16 @@ type InventoryConfiguration struct {
        // Id is a required field
        Id *string `type:"string" required:"true"`
 
-       // Specifies which object version(s) to included in the inventory results.
+       // Object versions to include in the inventory list. If set to All, the list
+       // includes all the object versions, which adds the version-related fields VersionId,
+       // IsLatest, and DeleteMarker to the list. If set to Current, the list does
+       // not contain these version-related fields.
        //
        // IncludedObjectVersions is a required field
        IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
 
-       // Specifies whether the inventory is enabled or disabled.
+       // Specifies whether the inventory is enabled or disabled. If set to True, an
+       // inventory list is generated. If set to False, no inventory list is generated.
        //
        // IsEnabled is a required field
        IsEnabled *bool `type:"boolean" required:"true"`
@@ -15145,11 +15250,15 @@ func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter {
 type LambdaFunctionConfiguration struct {
        _ struct{} `type:"structure"`
 
+       // The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
+       // more information, see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+       // in the Amazon Simple Storage Service Developer Guide.
+       //
        // Events is a required field
        Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
 
-       // A container for object key name filtering rules. For information about key
-       // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+       // Specifies object key name filtering rules. For information about key name
+       // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
        // in the Amazon Simple Storage Service Developer Guide.
        Filter *NotificationConfigurationFilter `type:"structure"`
 
@@ -15157,8 +15266,8 @@ type LambdaFunctionConfiguration struct {
        // If you don't provide one, Amazon S3 will assign an ID.
        Id *string `type:"string"`
 
-       // The Amazon Resource Name (ARN) of the Lambda cloud function that Amazon S3
-       // can invoke when it detects events of the specified type.
+       // The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
+       // invokes when the specified event type occurs.
        //
        // LambdaFunctionArn is a required field
        LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
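
A sketch of wiring a function into bucket notifications with this configuration; the bucket name and Lambda ARN are placeholders:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{{
				// Invoke the function for every object-created event.
				Events:            []*string{aws.String(s3.EventS3ObjectCreated)},
				LambdaFunctionArn: aws.String("arn:aws:lambda:us-east-1:111122223333:function:example-fn"), // placeholder
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
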
@@ -15309,8 +15418,11 @@ func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExp
 type LifecycleRule struct {
        _ struct{} `type:"structure"`
 
-       // Specifies the days since the initiation of an Incomplete Multipart Upload
-       // that Lifecycle will wait before permanently removing all parts of the upload.
+       // Specifies the days since the initiation of an incomplete multipart upload
+       // that Amazon S3 will wait before permanently removing all parts of the upload.
+       // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+       // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+       // in the Amazon Simple Storage Service Developer Guide.
        AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
 
        Expiration *LifecycleExpiration `type:"structure"`
@@ -17267,9 +17379,10 @@ func (s *Location) SetUserMetadata(v []*MetadataEntry) *Location {
        return s
 }
 
-// Container for logging information. Presence of this element indicates that
-// logging is enabled. Parameters TargetBucket and TargetPrefix are required
-// in this case.
+// Describes where logs are stored and the prefix that Amazon S3 assigns to
+// all log object keys for a bucket. For more information, see PUT Bucket logging
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html)
+// in the Amazon Simple Storage Service API Reference.
 type LoggingEnabled struct {
        _ struct{} `type:"structure"`
 
@@ -17285,8 +17398,9 @@ type LoggingEnabled struct {
 
        TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
 
-       // This element lets you specify a prefix for the keys that the log files will
-       // be stored under.
+       // A prefix for all log object keys. If you store log files from multiple Amazon
+       // S3 buckets in a single bucket, you can use a prefix to distinguish which
+       // log files came from which bucket.
        //
        // TargetPrefix is a required field
        TargetPrefix *string `type:"string" required:"true"`
@@ -17429,6 +17543,13 @@ func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator {
        return s
 }
 
+// Specifies a metrics configuration for the CloudWatch request metrics (specified
+// by the metrics configuration ID) from an Amazon S3 bucket. If you're updating
+// an existing metrics configuration, note that this is a full replacement of
+// the existing metrics configuration. If you don't include the elements you
+// want to keep, they are erased. For more information, see PUT Bucket metrics
+// (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html)
+// in the Amazon Simple Storage Service API Reference.
 type MetricsConfiguration struct {
        _ struct{} `type:"structure"`
 
@@ -17624,7 +17745,7 @@ type NoncurrentVersionExpiration struct {
        // Specifies the number of days an object is noncurrent before Amazon S3 can
        // perform the associated action. For information about the noncurrent days
        // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
-       // (https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html)
+       // (https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations)
        // in the Amazon Simple Storage Service Developer Guide.
        NoncurrentDays *int64 `type:"integer"`
 }
@@ -17646,11 +17767,11 @@ func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVers
 }
 
 // Container for the transition rule that describes when noncurrent objects
-// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or
-// DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
+// transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER,
+// or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
 // is suspended), you can set this action to request that Amazon S3 transition
 // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
-// GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's
+// GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's
 // lifetime.
 type NoncurrentVersionTransition struct {
        _ struct{} `type:"structure"`
@@ -17693,10 +17814,16 @@ func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersi
 type NotificationConfiguration struct {
        _ struct{} `type:"structure"`
 
+       // Describes the AWS Lambda functions to invoke and the events for which to
+       // invoke them.
        LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"`
 
+       // The Amazon Simple Queue Service queues to publish messages to and the events
+       // for which to publish messages.
        QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"`
 
+       // The topic to which notifications are sent and the events for which notifications
+       // are generated.
        TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"`
 }
 
@@ -17806,8 +17933,8 @@ func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConf
        return s
 }
 
-// A container for object key name filtering rules. For information about key
-// name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// Specifies object key name filtering rules. For information about key name
+// filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
 // in the Amazon Simple Storage Service Developer Guide.
 type NotificationConfigurationFilter struct {
        _ struct{} `type:"structure"`
@@ -17945,14 +18072,14 @@ func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
        return s
 }
 
-// The container element for Object Lock configuration parameters.
+// The container element for object lock configuration parameters.
 type ObjectLockConfiguration struct {
        _ struct{} `type:"structure"`
 
-       // Indicates whether this bucket has an Object Lock configuration enabled.
+       // Indicates whether this bucket has an object lock configuration enabled.
        ObjectLockEnabled *string `type:"string" enum:"ObjectLockEnabled"`
 
-       // The Object Lock rule in place for the specified object.
+       // The object lock rule in place for the specified object.
        Rule *ObjectLockRule `type:"structure"`
 }
 
@@ -18009,7 +18136,7 @@ type ObjectLockRetention struct {
        // Indicates the Retention mode for the specified object.
        Mode *string `type:"string" enum:"ObjectLockRetentionMode"`
 
-       // The date on which this Object Lock Retention will expire.
+       // The date on which this object lock retention expires.
        RetainUntilDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
 }
 
@@ -18035,7 +18162,7 @@ func (s *ObjectLockRetention) SetRetainUntilDate(v time.Time) *ObjectLockRetenti
        return s
 }
 
-// The container element for an Object Lock rule.
+// The container element for an object lock rule.
 type ObjectLockRule struct {
        _ struct{} `type:"structure"`
 
@@ -18418,6 +18545,7 @@ func (s *ProgressEvent) UnmarshalEvent(
        return nil
 }
 
+// Specifies the Block Public Access configuration for an Amazon S3 bucket.
 type PublicAccessBlockConfiguration struct {
        _ struct{} `type:"structure"`
 
@@ -18575,6 +18703,7 @@ type PutBucketAclInput struct {
        // The canned ACL to apply to the bucket.
        ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
 
+       // Contains the elements that set the ACL permissions for an object per grantee.
        AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 
        // Bucket is a required field
@@ -18710,7 +18839,7 @@ type PutBucketAnalyticsConfigurationInput struct {
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-       // The identifier used to represent an analytics configuration.
+       // The ID that identifies the analytics configuration.
        //
        // Id is a required field
        Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
@@ -18798,6 +18927,11 @@ type PutBucketCorsInput struct {
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
+       // Describes the cross-origin access configuration for objects in an Amazon
+       // S3 bucket. For more information, see Enabling Cross-Origin Resource Sharing
+       // (https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html) in the Amazon
+       // Simple Storage Service Developer Guide.
+       //
        // CORSConfiguration is a required field
        CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }
@@ -18872,14 +19006,16 @@ func (s PutBucketCorsOutput) GoString() string {
 type PutBucketEncryptionInput struct {
        _ struct{} `type:"structure" payload:"ServerSideEncryptionConfiguration"`
 
-       // The name of the bucket for which the server-side encryption configuration
-       // is set.
+       // Specifies default encryption for a bucket using server-side encryption with
+       // Amazon S3-managed keys (SSE-S3) or AWS KMS-managed keys (SSE-KMS). For information
+       // about the Amazon S3 default encryption feature, see Amazon S3 Default Bucket
+       // Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html)
+       // in the Amazon Simple Storage Service Developer Guide.
        //
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-       // Container for server-side encryption configuration rules. Currently S3 supports
-       // one rule only.
+       // Specifies the default server-side-encryption configuration.
        //
        // ServerSideEncryptionConfiguration is a required field
        ServerSideEncryptionConfiguration *ServerSideEncryptionConfiguration `locationName:"ServerSideEncryptionConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
@@ -19053,6 +19189,9 @@ type PutBucketLifecycleConfigurationInput struct {
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
+       // Specifies the lifecycle configuration for objects in an Amazon S3 bucket.
+       // For more information, see Object Lifecycle Management (https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html)
+       // in the Amazon Simple Storage Service Developer Guide.
        LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }
 
@@ -19612,6 +19751,9 @@ type PutBucketReplicationInput struct {
        //
        // ReplicationConfiguration is a required field
        ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
+
+       // A token that allows Amazon S3 object lock to be enabled for an existing bucket.
+       Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
 }
 
 // String returns the string representation
@@ -19667,6 +19809,12 @@ func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationCo
        return s
 }
 
+// SetToken sets the Token field's value.
+func (s *PutBucketReplicationInput) SetToken(v string) *PutBucketReplicationInput {
+       s.Token = &v
+       return s
+}
+
 type PutBucketReplicationOutput struct {
        _ struct{} `type:"structure"`
 }
@@ -19845,6 +19993,10 @@ type PutBucketVersioningInput struct {
        // and the value that is displayed on your authentication device.
        MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
 
+       // Describes the versioning state of an Amazon S3 bucket. For more information,
+       // see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
+       // in the Amazon Simple Storage Service API Reference.
+       //
        // VersioningConfiguration is a required field
        VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }
@@ -19923,6 +20075,8 @@ type PutBucketWebsiteInput struct {
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
+       // Specifies website configuration parameters for an Amazon S3 bucket.
+       //
        // WebsiteConfiguration is a required field
        WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 }
@@ -20000,6 +20154,7 @@ type PutObjectAclInput struct {
        // The canned ACL to apply to the object.
        ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
 
+       // Contains the elements that set the ACL permissions for an object per grantee.
        AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 
        // Bucket is a required field
@@ -20201,7 +20356,8 @@ type PutObjectInput struct {
        ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
 
        // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
-       // auto-populated when using the command from the CLI
+       // auto-populated when using the command from the CLI. This parameter is required
+       // if object lock parameters are specified.
        ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
 
        // A standard MIME type describing the format of the object data.
@@ -20233,10 +20389,10 @@ type PutObjectInput struct {
        // The Legal Hold status that you want to apply to the specified object.
        ObjectLockLegalHoldStatus *string `location:"header" locationName:"x-amz-object-lock-legal-hold" type:"string" enum:"ObjectLockLegalHoldStatus"`
 
-       // The Object Lock mode that you want to apply to this object.
+       // The object lock mode that you want to apply to this object.
        ObjectLockMode *string `location:"header" locationName:"x-amz-object-lock-mode" type:"string" enum:"ObjectLockMode"`
 
-       // The date and time when you want this object's Object Lock to expire.
+       // The date and time when you want this object's object lock to expire.
        ObjectLockRetainUntilDate *time.Time `location:"header" locationName:"x-amz-object-lock-retain-until-date" type:"timestamp" timestampFormat:"iso8601"`
 
        // Confirms that the requester knows that she or he will be charged for the
@@ -20253,13 +20409,18 @@ type PutObjectInput struct {
        // does not store the encryption key. The key must be appropriate for use with
        // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
        // header.
-       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
        // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
        // Amazon S3 uses this header for a message integrity check to ensure the encryption
        // key was transmitted without error.
        SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 
+       // Specifies the AWS KMS Encryption Context to use for object encryption. The
+       // value of this header is a base64-encoded UTF-8 string holding JSON with the
+       // encryption context key-value pairs.
+       SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
        // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
        // requests for an object protected by AWS KMS will fail if not made via SSL
        // or using SigV4. Documentation on configuring any of the officially supported
@@ -20473,6 +20634,12 @@ func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
        return s
 }
 
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *PutObjectInput) SetSSEKMSEncryptionContext(v string) *PutObjectInput {
+       s.SSEKMSEncryptionContext = &v
+       return s
+}
+
 // SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
 func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
        s.SSEKMSKeyId = &v
@@ -20626,12 +20793,12 @@ func (s *PutObjectLegalHoldOutput) SetRequestCharged(v string) *PutObjectLegalHo
 type PutObjectLockConfigurationInput struct {
        _ struct{} `type:"structure" payload:"ObjectLockConfiguration"`
 
-       // The bucket whose Object Lock configuration you want to create or replace.
+       // The bucket whose object lock configuration you want to create or replace.
        //
        // Bucket is a required field
        Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
 
-       // The Object Lock configuration that you want to apply to the specified bucket.
+       // The object lock configuration that you want to apply to the specified bucket.
        ObjectLockConfiguration *ObjectLockConfiguration `locationName:"ObjectLockConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
 
        // Confirms that the requester knows that she or he will be charged for the
@@ -20640,7 +20807,7 @@ type PutObjectLockConfigurationInput struct {
        // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
        RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
 
-       // A token to allow Object Lock to be enabled for an existing bucket.
+       // A token to allow Amazon S3 object lock to be enabled for an existing bucket.
        Token *string `location:"header" locationName:"x-amz-bucket-object-lock-token" type:"string"`
 }
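
A sketch of applying a default retention rule through this input; the 30-day governance window is illustrative, and the bucket is a placeholder that must have been created with object lock enabled (or carry the token above):

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	_, err := svc.PutObjectLockConfiguration(&s3.PutObjectLockConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ObjectLockConfiguration: &s3.ObjectLockConfiguration{
			ObjectLockEnabled: aws.String(s3.ObjectLockEnabledEnabled),
			Rule: &s3.ObjectLockRule{
				DefaultRetention: &s3.DefaultRetention{
					Mode: aws.String(s3.ObjectLockRetentionModeGovernance),
					Days: aws.Int64(30), // illustrative retention period
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
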
 
@@ -20749,6 +20916,11 @@ type PutObjectOutput struct {
        // verification of the customer-provided encryption key.
        SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 
+       // If present, specifies the AWS KMS Encryption Context to use for object encryption.
+       // The value of this header is a base64-encoded UTF-8 string holding JSON with
+       // the encryption context key-value pairs.
+       SSEKMSEncryptionContext *string `location:"header" locationName:"x-amz-server-side-encryption-context" type:"string" sensitive:"true"`
+
        // If present, specifies the ID of the AWS Key Management Service (KMS) master
        // encryption key that was used for the object.
        SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
@@ -20801,6 +20973,12 @@ func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
        return s
 }
 
+// SetSSEKMSEncryptionContext sets the SSEKMSEncryptionContext field's value.
+func (s *PutObjectOutput) SetSSEKMSEncryptionContext(v string) *PutObjectOutput {
+       s.SSEKMSEncryptionContext = &v
+       return s
+}
+
 // SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
 func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
        s.SSEKMSKeyId = &v
@@ -21139,17 +21317,16 @@ func (s PutPublicAccessBlockOutput) GoString() string {
        return s.String()
 }
 
-// A container for specifying the configuration for publication of messages
-// to an Amazon Simple Queue Service (Amazon SQS) queue.when Amazon S3 detects
-// specified events.
+// Specifies the configuration for publishing messages to an Amazon Simple Queue
+// Service (Amazon SQS) queue when Amazon S3 detects specified events.
 type QueueConfiguration struct {
        _ struct{} `type:"structure"`
 
        // Events is a required field
        Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
 
-       // A container for object key name filtering rules. For information about key
-       // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+       // Specifies object key name filtering rules. For information about key name
+       // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
        // in the Amazon Simple Storage Service Developer Guide.
        Filter *NotificationConfigurationFilter `type:"structure"`
 
@@ -21158,7 +21335,7 @@ type QueueConfiguration struct {
        Id *string `type:"string"`
 
        // The Amazon Resource Name (ARN) of the Amazon SQS queue to which Amazon S3
-       // will publish a message when it detects events of the specified type.
+       // publishes a message when it detects events of the specified type.
        //
        // QueueArn is a required field
        QueueArn *string `locationName:"Queue" type:"string" required:"true"`
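
For context, a hedged sketch of what a QueueConfiguration value looks like in practice; the event name and queue ARN are placeholders, and the snippet is not part of this change:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    // objectCreatedToSQS asks Amazon S3 to publish all object-created events
    // to a (placeholder) SQS queue.
    func objectCreatedToSQS() *s3.QueueConfiguration {
        return &s3.QueueConfiguration{
            Events:   []*string{aws.String("s3:ObjectCreated:*")},
            QueueArn: aws.String("arn:aws:sqs:us-east-1:123456789012:example-queue"),
        }
    }
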
@@ -21304,6 +21481,8 @@ func (s *RecordsEvent) UnmarshalEvent(
        return nil
 }
 
+// Specifies how requests are redirected. In the event of an error, you can
+// specify a different error code to return.
 type Redirect struct {
        _ struct{} `type:"structure"`
 
@@ -21314,8 +21493,8 @@ type Redirect struct {
        // siblings is present.
        HttpRedirectCode *string `type:"string"`
 
-       // Protocol to use (http, https) when redirecting requests. The default is the
-       // protocol that is used in the original request.
+       // Protocol to use when redirecting requests. The default is the protocol that
+       // is used in the original request.
        Protocol *string `type:"string" enum:"Protocol"`
 
        // The object key prefix to use in the redirect request. For example, to redirect
@@ -21327,7 +21506,7 @@ type Redirect struct {
        ReplaceKeyPrefixWith *string `type:"string"`
 
        // The specific object key to use in the redirect request. For example, redirect
-       // request to error.html. Not required if one of the sibling is present. Can
+       // request to error.html. Not required if one of the siblings is present. Can
        // be present only if ReplaceKeyPrefixWith is not provided.
        ReplaceKeyWith *string `type:"string"`
 }
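
A hedged sketch mirroring the ReplaceKeyPrefixWith example in the comments above, pairing a Redirect with a Condition inside a RoutingRule (values are illustrative, and the Condition type with KeyPrefixEquals is assumed from the SDK):

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    // docsToDocumentsRule redirects requests for docs/... to documents/...
    func docsToDocumentsRule() *s3.RoutingRule {
        return &s3.RoutingRule{
            Condition: &s3.Condition{KeyPrefixEquals: aws.String("docs/")},
            Redirect:  &s3.Redirect{ReplaceKeyPrefixWith: aws.String("documents/")},
        }
    }
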
@@ -21372,16 +21551,18 @@ func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
        return s
 }
 
+// Specifies the redirect behavior of all requests to a website endpoint of
+// an Amazon S3 bucket.
 type RedirectAllRequestsTo struct {
        _ struct{} `type:"structure"`
 
-       // Name of the host where requests will be redirected.
+       // Name of the host where requests are redirected.
        //
        // HostName is a required field
        HostName *string `type:"string" required:"true"`
 
-       // Protocol to use (http, https) when redirecting requests. The default is the
-       // protocol that is used in the original request.
+       // Protocol to use when redirecting requests. The default is the protocol that
+       // is used in the original request.
        Protocol *string `type:"string" enum:"Protocol"`
 }
 
@@ -21426,7 +21607,9 @@ type ReplicationConfiguration struct {
        _ struct{} `type:"structure"`
 
        // The Amazon Resource Name (ARN) of the AWS Identity and Access Management
-       // (IAM) role that Amazon S3 can assume when replicating the objects.
+       // (IAM) role that Amazon S3 assumes when replicating objects. For more information,
+       // see How to Set Up Cross-Region Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr-how-setup.html)
+       // in the Amazon Simple Storage Service Developer Guide.
        //
        // Role is a required field
        Role *string `type:"string" required:"true"`
@@ -21486,7 +21669,7 @@ func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationCo
        return s
 }
 
-// A container for information about a specific replication rule.
+// Specifies which Amazon S3 objects to replicate and where to store the replicas.
 type ReplicationRule struct {
        _ struct{} `type:"structure"`
 
@@ -21506,7 +21689,8 @@ type ReplicationRule struct {
        ID *string `type:"string"`
 
        // An object keyname prefix that identifies the object or objects to which the
-       // rule applies. The maximum prefix length is 1,024 characters.
+       // rule applies. The maximum prefix length is 1,024 characters. To include all
+       // objects in a bucket, specify an empty string.
        //
        // Deprecated: Prefix has been deprecated
        Prefix *string `deprecated:"true" type:"string"`
@@ -21522,7 +21706,7 @@ type ReplicationRule struct {
        //    * Same object qualify tag based filter criteria specified in multiple
        //    rules
        //
-       // For more information, see Cross-Region Replication (CRR) ( https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
+       // For more information, see Cross-Region Replication (CRR) (https://docs.aws.amazon.com/AmazonS3/latest/dev/crr.html)
        // in the Amazon S3 Developer Guide.
        Priority *int64 `type:"integer"`
 
@@ -21531,12 +21715,9 @@ type ReplicationRule struct {
        // replication of these objects. Currently, Amazon S3 supports only the filter
        // that you can specify for objects created with server-side encryption using
        // an AWS KMS-Managed Key (SSE-KMS).
-       //
-       // If you want Amazon S3 to replicate objects created with server-side encryption
-       // using AWS KMS-Managed Keys.
        SourceSelectionCriteria *SourceSelectionCriteria `type:"structure"`
 
-       // If status isn't enabled, the rule is ignored.
+       // Specifies whether the rule is enabled.
        //
        // Status is a required field
        Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
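
A hedged sketch tying these fields together: a replication configuration with one enabled rule whose empty prefix covers the whole bucket, as the updated comment describes. The role ARN and destination bucket are placeholders, and the Destination type is assumed from the SDK:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func wholeBucketReplication() *s3.ReplicationConfiguration {
        return &s3.ReplicationConfiguration{
            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
            Rules: []*s3.ReplicationRule{{
                Prefix: aws.String(""), // empty string includes all objects
                Status: aws.String(s3.ReplicationRuleStatusEnabled),
                Destination: &s3.Destination{
                    Bucket: aws.String("arn:aws:s3:::destination-bucket"),
                },
            }},
        }
    }
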
@@ -22051,6 +22232,7 @@ func (s *RestoreRequest) SetType(v string) *RestoreRequest {
        return s
 }
 
+// Specifies the redirect behavior and when a redirect is applied.
 type RoutingRule struct {
        _ struct{} `type:"structure"`
 
@@ -22103,16 +22285,22 @@ func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
        return s
 }
 
+// Specifies lifecycle rules for an Amazon S3 bucket. For more information,
+// see PUT Bucket lifecycle (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html)
+// in the Amazon Simple Storage Service API Reference.
 type Rule struct {
        _ struct{} `type:"structure"`
 
-       // Specifies the days since the initiation of an Incomplete Multipart Upload
-       // that Lifecycle will wait before permanently removing all parts of the upload.
+       // Specifies the days since the initiation of an incomplete multipart upload
+       // that Amazon S3 will wait before permanently removing all parts of the upload.
+       // For more information, see Aborting Incomplete Multipart Uploads Using a Bucket
+       // Lifecycle Policy (https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config)
+       // in the Amazon Simple Storage Service Developer Guide.
        AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
 
        Expiration *LifecycleExpiration `type:"structure"`
 
-       // Unique identifier for the rule. The value cannot be longer than 255 characters.
+       // Unique identifier for the rule. The value can't be longer than 255 characters.
        ID *string `type:"string"`
 
        // Specifies when noncurrent object versions expire. Upon expiration, Amazon
@@ -22123,25 +22311,27 @@ type Rule struct {
        NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
 
        // Container for the transition rule that describes when noncurrent objects
-       // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER or
-       // DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
+       // transition to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, GLACIER,
+       // or DEEP_ARCHIVE storage class. If your bucket is versioning-enabled (or versioning
        // is suspended), you can set this action to request that Amazon S3 transition
        // noncurrent object versions to the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,
-       // GLACIER or DEEP_ARCHIVE storage class at a specific period in the object's
+       // GLACIER, or DEEP_ARCHIVE storage class at a specific period in the object's
        // lifetime.
        NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
 
-       // Prefix identifying one or more objects to which the rule applies.
+       // Object key prefix that identifies one or more objects to which this rule
+       // applies.
        //
        // Prefix is a required field
        Prefix *string `type:"string" required:"true"`
 
-       // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
-       // is not currently being applied.
+       // If Enabled, the rule is currently being applied. If Disabled, the rule is
+       // not currently being applied.
        //
        // Status is a required field
        Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
 
+       // Specifies when an object transitions to a specified storage class.
        Transition *Transition `type:"structure"`
 }
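
A hedged sketch of a lifecycle Rule using the fields documented above, transitioning objects under a prefix to GLACIER after 30 days; all names and values are placeholders, not part of this change:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func archiveLogsRule() *s3.Rule {
        return &s3.Rule{
            ID:     aws.String("archive-logs"),
            Prefix: aws.String("logs/"),
            Status: aws.String(s3.ExpirationStatusEnabled),
            // Move objects to GLACIER 30 days after creation.
            Transition: &s3.Transition{
                Days:         aws.Int64(30),
                StorageClass: aws.String(s3.TransitionStorageClassGlacier),
            },
        }
    }
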
 
@@ -22537,15 +22727,15 @@ type SelectObjectContentInput struct {
        // Specifies if periodic request progress information should be enabled.
        RequestProgress *RequestProgress `type:"structure"`
 
-       // The SSE Algorithm used to encrypt the object. For more information, see
-       // Server-Side Encryption (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
+       // The SSE Algorithm used to encrypt the object. For more information, see Server-Side
+       // Encryption (Using Customer-Provided Encryption Keys) (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
        SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
 
-       // The SSE Customer Key. For more information, see  Server-Side Encryption (Using
+       // The SSE Customer Key. For more information, see Server-Side Encryption (Using
        // Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
-       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
-       // The SSE Customer Key MD5. For more information, see  Server-Side Encryption
+       // The SSE Customer Key MD5. For more information, see Server-Side Encryption
        // (Using Customer-Provided Encryption Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html).
        SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
 }
@@ -22792,13 +22982,15 @@ func (s *SelectParameters) SetOutputSerialization(v *OutputSerialization) *Selec
 }
 
 // Describes the default server-side encryption to apply to new objects in the
-// bucket. If Put Object request does not specify any server-side encryption,
-// this default encryption will be applied.
+// bucket. If a PUT Object request doesn't specify any server-side encryption,
+// this default encryption will be applied. For more information, see PUT Bucket
+// encryption (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html)
+// in the Amazon Simple Storage Service API Reference.
 type ServerSideEncryptionByDefault struct {
        _ struct{} `type:"structure"`
 
        // KMS master key ID to use for the default encryption. This parameter is allowed
-       // if SSEAlgorithm is aws:kms.
+       // if and only if SSEAlgorithm is set to aws:kms.
        KMSMasterKeyID *string `type:"string" sensitive:"true"`
 
        // Server-side encryption algorithm to use for the default encryption.
@@ -22842,8 +23034,7 @@ func (s *ServerSideEncryptionByDefault) SetSSEAlgorithm(v string) *ServerSideEnc
        return s
 }
 
-// Container for server-side encryption configuration rules. Currently S3 supports
-// one rule only.
+// Specifies the default server-side-encryption configuration.
 type ServerSideEncryptionConfiguration struct {
        _ struct{} `type:"structure"`
 
@@ -22893,13 +23084,12 @@ func (s *ServerSideEncryptionConfiguration) SetRules(v []*ServerSideEncryptionRu
        return s
 }
 
-// Container for information about a particular server-side encryption configuration
-// rule.
+// Specifies the default server-side encryption configuration.
 type ServerSideEncryptionRule struct {
        _ struct{} `type:"structure"`
 
-       // Describes the default server-side encryption to apply to new objects in the
-       // bucket. If Put Object request does not specify any server-side encryption,
+       // Specifies the default server-side encryption to apply to new objects in the
+       // bucket. If a PUT Object request doesn't specify any server-side encryption,
        // this default encryption will be applied.
        ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"`
 }
@@ -22935,13 +23125,17 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv
        return s
 }
 
-// A container for filters that define which source objects should be replicated.
+// A container that describes additional filters for identifying the source
+// objects that you want to replicate. You can choose to enable or disable the
+// replication of these objects. Currently, Amazon S3 supports only the filter
+// that you can specify for objects created with server-side encryption using
+// an AWS KMS-Managed Key (SSE-KMS).
 type SourceSelectionCriteria struct {
        _ struct{} `type:"structure"`
 
-       // A container for filter information for the selection of S3 objects encrypted
-       // with AWS KMS. If you include SourceSelectionCriteria in the replication configuration,
-       // this element is required.
+       // A container for filter information for the selection of Amazon S3 objects
+       // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication
+       // configuration, this element is required.
        SseKmsEncryptedObjects *SseKmsEncryptedObjects `type:"structure"`
 }
 
@@ -22981,8 +23175,8 @@ func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedOb
 type SseKmsEncryptedObjects struct {
        _ struct{} `type:"structure"`
 
-       // If the status is not Enabled, replication for S3 objects encrypted with AWS
-       // KMS is disabled.
+       // Specifies whether Amazon S3 replicates objects created with server-side encryption
+       // using an AWS KMS-managed key.
        //
        // Status is a required field
        Status *string `type:"string" required:"true" enum:"SseKmsEncryptedObjectsStatus"`
@@ -23098,11 +23292,14 @@ func (s *StatsEvent) UnmarshalEvent(
        return nil
 }
 
+// Specifies data related to access patterns to be collected and made available
+// to analyze the tradeoffs between different storage classes for an Amazon
+// S3 bucket.
 type StorageClassAnalysis struct {
        _ struct{} `type:"structure"`
 
-       // A container used to describe how data related to the storage class analysis
-       // should be exported.
+       // Specifies how data related to the storage class analysis for an Amazon S3
+       // bucket should be exported.
        DataExport *StorageClassAnalysisDataExport `type:"structure"`
 }
 
@@ -23342,16 +23539,20 @@ func (s *TargetGrant) SetPermission(v string) *TargetGrant {
 }
 
 // A container for specifying the configuration for publication of messages
-// to an Amazon Simple Notification Service (Amazon SNS) topic.when Amazon S3
+// to an Amazon Simple Notification Service (Amazon SNS) topic when Amazon S3
 // detects specified events.
 type TopicConfiguration struct {
        _ struct{} `type:"structure"`
 
+       // The Amazon S3 bucket event about which to send notifications. For more information,
+       // see Supported Event Types (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+       // in the Amazon Simple Storage Service Developer Guide.
+       //
        // Events is a required field
        Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
 
-       // A container for object key name filtering rules. For information about key
-       // name filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+       // Specifies object key name filtering rules. For information about key name
+       // filtering, see Configuring Event Notifications (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
        // in the Amazon Simple Storage Service Developer Guide.
        Filter *NotificationConfigurationFilter `type:"structure"`
 
@@ -23360,7 +23561,7 @@ type TopicConfiguration struct {
        Id *string `type:"string"`
 
        // The Amazon Resource Name (ARN) of the Amazon SNS topic to which Amazon S3
-       // will publish a message when it detects events of the specified type.
+       // publishes a message when it detects events of the specified type.
        //
        // TopicArn is a required field
        TopicArn *string `locationName:"Topic" type:"string" required:"true"`
@@ -23469,18 +23670,19 @@ func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDep
        return s
 }
 
+// Specifies when an object transitions to a specified storage class.
 type Transition struct {
        _ struct{} `type:"structure"`
 
-       // Indicates at what date the object is to be moved or deleted. Should be in
-       // GMT ISO 8601 Format.
+       // Indicates when objects are transitioned to the specified storage class. The
+       // date value must be in ISO 8601 format. The time is always midnight UTC.
        Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
 
-       // Indicates the lifetime, in days, of the objects that are subject to the rule.
-       // The value must be a non-zero positive integer.
+       // Indicates the number of days after creation when objects are transitioned
+       // to the specified storage class. The value must be a positive integer.
        Days *int64 `type:"integer"`
 
-       // The class of storage used to store the object.
+       // The storage class to which you want the object to transition.
        StorageClass *string `type:"string" enum:"TransitionStorageClass"`
 }
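
Complementing the Days-based example earlier, a sketch of the date-based form: per the comment above, the Date value is midnight UTC on the transition day, which time.Date gives directly (the date and storage class here are illustrative):

    package example

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func transitionOnDate() *s3.Transition {
        return &s3.Transition{
            Date:         aws.Time(time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)),
            StorageClass: aws.String(s3.TransitionStorageClassDeepArchive),
        }
    }
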
 
@@ -23550,7 +23752,7 @@ type UploadPartCopyInput struct {
        // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
        // the source object. The encryption key provided in this header must be one
        // that was used when the source object was created.
-       CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       CopySourceSSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
        // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
        // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -23581,7 +23783,7 @@ type UploadPartCopyInput struct {
        // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
        // header. This must be the same encryption key specified in the initiate multipart
        // upload request.
-       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
        // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
        // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -23857,7 +24059,9 @@ type UploadPartInput struct {
        // body cannot be determined automatically.
        ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
 
-       // The base64-encoded 128-bit MD5 digest of the part data.
+       // The base64-encoded 128-bit MD5 digest of the part data. This parameter is
+       // auto-populated when using the command from the CLI. This parameter is
+       // required if object lock parameters are specified.
        ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
 
        // Object key for which the multipart upload was initiated.
@@ -23886,7 +24090,7 @@ type UploadPartInput struct {
        // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
        // header. This must be the same encryption key specified in the initiate multipart
        // upload request.
-       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
+       SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
 
        // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
        // Amazon S3 uses this header for a message integrity check to ensure the encryption
@@ -24092,6 +24296,9 @@ func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
        return s
 }
 
+// Describes the versioning state of an Amazon S3 bucket. For more information,
+// see PUT Bucket versioning (https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html)
+// in the Amazon Simple Storage Service API Reference.
 type VersioningConfiguration struct {
        _ struct{} `type:"structure"`
 
@@ -24126,15 +24333,22 @@ func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
        return s
 }
 
+// Specifies website configuration parameters for an Amazon S3 bucket.
 type WebsiteConfiguration struct {
        _ struct{} `type:"structure"`
 
+       // The name of the error document for the website.
        ErrorDocument *ErrorDocument `type:"structure"`
 
+       // The name of the index document for the website.
        IndexDocument *IndexDocument `type:"structure"`
 
+       // The redirect behavior for every request to this bucket's website endpoint.
+       //
+       // If you specify this property, you can't specify any other property.
        RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
 
+       // Rules that define when a redirect is applied and the redirect behavior.
        RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
 }
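
A hedged sketch combining the website fields above; per the comments, a configuration either sets RedirectAllRequestsTo or sets documents plus optional routing rules, not both. The document names are placeholders, and the Suffix/Key fields are assumed from the SDK's IndexDocument and ErrorDocument types:

    package example

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func staticSiteConfig() *s3.WebsiteConfiguration {
        return &s3.WebsiteConfiguration{
            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
        }
    }
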
 
index bc68a46acfa445bdf95d0d7fe004dced9e8868d6..9ba8a78872082205c8931e7374c6a269beea5f53 100644 (file)
@@ -80,7 +80,8 @@ func buildGetBucketLocation(r *request.Request) {
                out := r.Data.(*GetBucketLocationOutput)
                b, err := ioutil.ReadAll(r.HTTPResponse.Body)
                if err != nil {
-                       r.Error = awserr.New("SerializationError", "failed reading response body", err)
+                       r.Error = awserr.New(request.ErrCodeSerialization,
+                               "failed reading response body", err)
                        return
                }
 
index 95f2456363ea8429f63d86b028d459db9d3bf100..23d386b16c8145e581c8bd9933be46faab207baa 100644 (file)
@@ -17,7 +17,8 @@ func defaultInitClientFn(c *client.Client) {
 
        // Require SSL when using SSE keys
        c.Handlers.Validate.PushBack(validateSSERequiresSSL)
-       c.Handlers.Build.PushBack(computeSSEKeys)
+       c.Handlers.Build.PushBack(computeSSEKeyMD5)
+       c.Handlers.Build.PushBack(computeCopySourceSSEKeyMD5)
 
        // S3 uses custom error unmarshaling logic
        c.Handlers.UnmarshalError.Clear()
index 8010c4fa1960bcfab85d94955016f12ba113a1a1..b71c835deef90cdcafb78b0756820d40e1b43a42 100644 (file)
@@ -3,6 +3,7 @@ package s3
 import (
        "crypto/md5"
        "encoding/base64"
+       "net/http"
 
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/request"
@@ -30,25 +31,54 @@ func validateSSERequiresSSL(r *request.Request) {
        }
 }
 
-func computeSSEKeys(r *request.Request) {
-       headers := []string{
-               "x-amz-server-side-encryption-customer-key",
-               "x-amz-copy-source-server-side-encryption-customer-key",
+const (
+       sseKeyHeader    = "x-amz-server-side-encryption-customer-key"
+       sseKeyMD5Header = sseKeyHeader + "-md5"
+)
+
+func computeSSEKeyMD5(r *request.Request) {
+       var key string
+       if g, ok := r.Params.(sseCustomerKeyGetter); ok {
+               key = g.getSSECustomerKey()
+       }
+
+       computeKeyMD5(sseKeyHeader, sseKeyMD5Header, key, r.HTTPRequest)
+}
+
+const (
+       copySrcSSEKeyHeader    = "x-amz-copy-source-server-side-encryption-customer-key"
+       copySrcSSEKeyMD5Header = copySrcSSEKeyHeader + "-md5"
+)
+
+func computeCopySourceSSEKeyMD5(r *request.Request) {
+       var key string
+       if g, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+               key = g.getCopySourceSSECustomerKey()
        }
 
-       for _, h := range headers {
-               md5h := h + "-md5"
-               if key := r.HTTPRequest.Header.Get(h); key != "" {
-                       // Base64-encode the value
-                       b64v := base64.StdEncoding.EncodeToString([]byte(key))
-                       r.HTTPRequest.Header.Set(h, b64v)
-
-                       // Add MD5 if it wasn't computed
-                       if r.HTTPRequest.Header.Get(md5h) == "" {
-                               sum := md5.Sum([]byte(key))
-                               b64sum := base64.StdEncoding.EncodeToString(sum[:])
-                               r.HTTPRequest.Header.Set(md5h, b64sum)
-                       }
+       computeKeyMD5(copySrcSSEKeyHeader, copySrcSSEKeyMD5Header, key, r.HTTPRequest)
+}
+
+func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
+       if len(key) == 0 {
+               // Backwards compatibility where the user just set the header value
+               // instead of using the API parameter, or set the header value for an
+               // operation without the parameters modeled.
+               key = r.Header.Get(keyHeader)
+               if len(key) == 0 {
+                       return
                }
+
+               // In the backwards-compatible case, the header's value is not base64 encoded,
+               // and needs to be encoded and updated by the SDK's customizations.
+               b64Key := base64.StdEncoding.EncodeToString([]byte(key))
+               r.Header.Set(keyHeader, b64Key)
+       }
+
+       // Only update the key's MD5 if it is not already set.
+       if len(r.Header.Get(keyMD5Header)) == 0 {
+               sum := md5.Sum([]byte(key))
+               keyMD5 := base64.StdEncoding.EncodeToString(sum[:])
+               r.Header.Set(keyMD5Header, keyMD5)
        }
 }
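
A standalone sketch of the same derivation computeKeyMD5 performs: given a raw SSE-C key, the request carries the base64-encoded key and the base64-encoded MD5 digest of the raw key bytes. The all-zero key below is a placeholder only; a real key must be random:

    package main

    import (
        "crypto/md5"
        "encoding/base64"
        "fmt"
    )

    func main() {
        rawKey := make([]byte, 32) // placeholder 256-bit key
        b64Key := base64.StdEncoding.EncodeToString(rawKey)
        sum := md5.Sum(rawKey) // digest of the raw key bytes, not the base64 form
        b64MD5 := base64.StdEncoding.EncodeToString(sum[:])
        fmt.Println("x-amz-server-side-encryption-customer-key:", b64Key)
        fmt.Println("x-amz-server-side-encryption-customer-key-md5:", b64MD5)
    }
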
index fde3050f95b67a1e057e666ad5be7e26754d13aa..f6a69aed11b56dad6cd49e3a7b2ae84872a24973 100644 (file)
@@ -14,7 +14,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
        b, err := ioutil.ReadAll(r.HTTPResponse.Body)
        if err != nil {
                r.Error = awserr.NewRequestFailure(
-                       awserr.New("SerializationError", "unable to read response body", err),
+                       awserr.New(request.ErrCodeSerialization, "unable to read response body", err),
                        r.HTTPResponse.StatusCode,
                        r.RequestID,
                )
@@ -31,7 +31,7 @@ func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
 
        unmarshalError(r)
        if err, ok := r.Error.(awserr.Error); ok && err != nil {
-               if err.Code() == "SerializationError" {
+               if err.Code() == request.ErrCodeSerialization {
                        r.Error = nil
                        return
                }
index 1db7e133bafbbb8dd32f1f8a8978f67e7236be6c..5b63fac72fff1bbf03865972737fc82ed99ba035 100644 (file)
@@ -11,6 +11,7 @@ import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
 )
 
 type xmlErrorResponse struct {
@@ -42,29 +43,34 @@ func unmarshalError(r *request.Request) {
                return
        }
 
-       var errCode, errMsg string
-
        // Attempt to parse error from body if it is known
-       resp := &xmlErrorResponse{}
-       err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
-       if err != nil && err != io.EOF {
-               errCode = "SerializationError"
-               errMsg = "failed to decode S3 XML error response"
-       } else {
-               errCode = resp.Code
-               errMsg = resp.Message
+       var errResp xmlErrorResponse
+       err := xmlutil.UnmarshalXMLError(&errResp, r.HTTPResponse.Body)
+       if err == io.EOF {
+               // Only capture the error if an unmarshal error occurs that is not EOF,
+               // because S3 might send an error without an error message, which causes
+               // the XML unmarshal to fail with EOF.
                err = nil
        }
+       if err != nil {
+               r.Error = awserr.NewRequestFailure(
+                       awserr.New(request.ErrCodeSerialization,
+                               "failed to unmarshal error message", err),
+                       r.HTTPResponse.StatusCode,
+                       r.RequestID,
+               )
+               return
+       }
 
        // Fallback to status code converted to message if still no error code
-       if len(errCode) == 0 {
+       if len(errResp.Code) == 0 {
                statusText := http.StatusText(r.HTTPResponse.StatusCode)
-               errCode = strings.Replace(statusText, " ", "", -1)
-               errMsg = statusText
+               errResp.Code = strings.Replace(statusText, " ", "", -1)
+               errResp.Message = statusText
        }
 
        r.Error = awserr.NewRequestFailure(
-               awserr.New(errCode, errMsg, err),
+               awserr.New(errResp.Code, errResp.Message, err),
                r.HTTPResponse.StatusCode,
                r.RequestID,
        )
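
A tiny sketch of the fallback above: when the XML body yields no error code, the HTTP status text serves as both the code (with spaces removed) and the message:

    package main

    import (
        "fmt"
        "net/http"
        "strings"
    )

    func main() {
        statusText := http.StatusText(http.StatusServiceUnavailable)
        code := strings.Replace(statusText, " ", "", -1)
        fmt.Println(code, "/", statusText) // ServiceUnavailable / Service Unavailable
    }
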
index 8113089649146eac965afaca9fa7f3ebbd61ec4d..d22c38b53f49834283dd5e1d1cfc7b4ff2de0494 100644 (file)
@@ -3,6 +3,7 @@
 package sts
 
 import (
+       "fmt"
        "time"
 
        "github.com/aws/aws-sdk-go/aws"
@@ -55,38 +56,26 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 
 // AssumeRole API operation for AWS Security Token Service.
 //
-// Returns a set of temporary security credentials (consisting of an access
-// key ID, a secret access key, and a security token) that you can use to access
-// AWS resources that you might not normally have access to. Typically, you
-// use AssumeRole for cross-account access or federation. For a comparison of
-// AssumeRole with the other APIs that produce temporary credentials, see Requesting
-// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// Returns a set of temporary security credentials that you can use to access
+// AWS resources that you might not normally have access to. These temporary
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use AssumeRole within your account or for cross-account
+// access. For a comparison of AssumeRole with other API operations that produce
+// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
-// Important: You cannot call AssumeRole by using AWS root account credentials;
-// access is denied. You must use credentials for an IAM user or an IAM role
-// to call AssumeRole.
+// You cannot use AWS account root user credentials to call AssumeRole. You
+// must use credentials for an IAM user or an IAM role to call AssumeRole.
 //
 // For cross-account access, imagine that you own multiple accounts and need
 // to access resources in each account. You could create long-term credentials
 // in each account to access those resources. However, managing all those credentials
 // and remembering which one can access which account can be time consuming.
-// Instead, you can create one set of long-term credentials in one account and
-// then use temporary security credentials to access all the other accounts
+// Instead, you can create one set of long-term credentials in one account.
+// Then use temporary security credentials to access all the other accounts
 // by assuming roles in those accounts. For more information about roles, see
-// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
-// in the IAM User Guide.
-//
-// For federation, you can, for example, grant single sign-on access to the
-// AWS Management Console. If you already have an identity and authentication
-// system in your corporate network, you don't have to recreate user identities
-// in AWS in order to grant those user identities access to AWS. Instead, after
-// a user has been authenticated, you call AssumeRole (and specify the role
-// with the appropriate permissions) to get temporary security credentials for
-// that user. With those temporary security credentials, you construct a sign-in
-// URL that users can use to access the console. For more information, see Common
-// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
+// IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html)
 // in the IAM User Guide.
 //
 // By default, the temporary security credentials created by AssumeRole last
@@ -95,69 +84,73 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 // seconds (15 minutes) up to the maximum session duration setting for the role.
 // This setting can have a value from 1 hour to 12 hours. To learn how to view
 // the maximum value for your role, see View the Maximum Session Duration Setting
-// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 // in the IAM User Guide. The maximum session duration limit applies when you
-// use the AssumeRole* API operations or the assume-role* CLI operations but
-// does not apply when you use those operations to create a console URL. For
-// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
 // in the IAM User Guide.
 //
 // The temporary security credentials created by AssumeRole can be used to make
-// API calls to any AWS service with the following exception: you cannot call
-// the STS service's GetFederationToken or GetSessionToken APIs.
-//
-// Optionally, you can pass an IAM access policy to this operation. If you choose
-// not to pass a policy, the temporary security credentials that are returned
-// by the operation have the permissions that are defined in the access policy
-// of the role that is being assumed. If you pass a policy to this operation,
-// the temporary security credentials that are returned by the operation have
-// the permissions that are allowed by both the access policy of the role that
-// is being assumed, and the policy that you pass. This gives you a way to further
-// restrict the permissions for the resulting temporary security credentials.
-// You cannot use the passed policy to grant permissions that are in excess
-// of those allowed by the access policy of the role that is being assumed.
-// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
-// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// API calls to any AWS service with the following exception: You cannot call
+// the AWS STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // in the IAM User Guide.
 //
-// To assume a role, your AWS account must be trusted by the role. The trust
-// relationship is defined in the role's trust policy when the role is created.
-// That trust policy states which accounts are allowed to delegate access to
-// this account's role.
-//
-// The user who wants to access the role must also have permissions delegated
-// from the role's administrator. If the user is in a different account than
-// the role, then the user's administrator must attach a policy that allows
-// the user to call AssumeRole on the ARN of the role in the other account.
-// If the user is in the same account as the role, then you can either attach
-// a policy to the user (identical to the previous different account user),
-// or you can add the user as a principal directly in the role's trust policy.
-// In this case, the trust policy acts as the only resource-based policy in
-// IAM, and users in the same account as the role do not need explicit permission
-// to assume the role. For more information about trust policies and resource-based
-// policies, see IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
+// To assume a role from a different account, your AWS account must be trusted
+// by the role. The trust relationship is defined in the role's trust policy
+// when the role is created. That trust policy states which accounts are allowed
+// to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have permissions
+// that are delegated from the user account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN
+// of the role in the other account. If the user is in the same account as the
+// role, then you can do either of the following:
+//
+//    * Attach a policy to the user (identical to the previous user in a different
+//    account).
+//
+//    * Add the user as a principal directly in the role's trust policy.
+//
+// In this case, the trust policy acts as an IAM resource-based policy. Users
+// in the same account as the role do not need explicit permission to assume
+// the role. For more information about trust policies and resource-based policies,
+// see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
 // in the IAM User Guide.
 //
 // Using MFA with AssumeRole
 //
-// You can optionally include multi-factor authentication (MFA) information
-// when you call AssumeRole. This is useful for cross-account scenarios in which
-// you want to make sure that the user who is assuming the role has been authenticated
-// using an AWS MFA device. In that scenario, the trust policy of the role being
-// assumed includes a condition that tests for MFA authentication; if the caller
-// does not include valid MFA information, the request to assume the role is
-// denied. The condition in a trust policy that tests for MFA authentication
-// might look like the following example.
+// (Optional) You can include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios to ensure
+// that the user that assumes the role has been authenticated with an AWS MFA
+// device. In that scenario, the trust policy of the role being assumed includes
+// a condition that tests for MFA authentication. If the caller does not include
+// valid MFA information, the request to assume the role is denied. The condition
+// in a trust policy that tests for MFA authentication might look like the following
+// example.
 //
 // "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
 //
-// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
 // in the IAM User Guide.
 //
 // To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
 // parameters. The SerialNumber value identifies the user's hardware or virtual
 // MFA device. The TokenCode is the time-based one-time password (TOTP) that
-// the MFA devices produces.
+// the MFA device produces.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -180,7 +173,7 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
 //   STS is not activated in the requested region for the account that is being
 //   asked to generate credentials. The account administrator must use the IAM
 //   console to activate STS in that region. For more information, see Activating
-//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 //   in the IAM User Guide.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
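
A hedged sketch of an AssumeRole call using the MFA parameters discussed above; the ARNs, session name, and token code are placeholders, not part of this change:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        svc := sts.New(session.Must(session.NewSession()))
        out, err := svc.AssumeRole(&sts.AssumeRoleInput{
            RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
            RoleSessionName: aws.String("example-session"),
            DurationSeconds: aws.Int64(3600),
            // MFA parameters: the device ARN and its current TOTP code.
            SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/example-user"),
            TokenCode:    aws.String("123456"),
        })
        if err != nil {
            fmt.Println("AssumeRole failed:", err)
            return
        }
        fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
    }
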
@@ -254,9 +247,9 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 // via a SAML authentication response. This operation provides a mechanism for
 // tying an enterprise identity store or directory to role-based AWS access
 // without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
-// with the other APIs that produce temporary credentials, see Requesting Temporary
-// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// with the other API operations that produce temporary credentials, see Requesting
+// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
 // The temporary security credentials returned by this operation consist of
@@ -271,37 +264,36 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 // a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
 // duration setting for the role. This setting can have a value from 1 hour
 // to 12 hours. To learn how to view the maximum value for your role, see View
-// the Maximum Session Duration Setting for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 // in the IAM User Guide. The maximum session duration limit applies when you
-// use the AssumeRole* API operations or the assume-role* CLI operations but
-// does not apply when you use those operations to create a console URL. For
-// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
 // in the IAM User Guide.
 //
 // The temporary security credentials created by AssumeRoleWithSAML can be used
 // to make API calls to any AWS service with the following exception: you cannot
-// call the STS service's GetFederationToken or GetSessionToken APIs.
-//
-// Optionally, you can pass an IAM access policy to this operation. If you choose
-// not to pass a policy, the temporary security credentials that are returned
-// by the operation have the permissions that are defined in the access policy
-// of the role that is being assumed. If you pass a policy to this operation,
-// the temporary security credentials that are returned by the operation have
-// the permissions that are allowed by the intersection of both the access policy
-// of the role that is being assumed, and the policy that you pass. This means
-// that both policies must grant the permission for the action to be allowed.
-// This gives you a way to further restrict the permissions for the resulting
-// temporary security credentials. You cannot use the passed policy to grant
-// permissions that are in excess of those allowed by the access policy of the
-// role that is being assumed. For more information, see Permissions for AssumeRole,
-// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // in the IAM User Guide.
 //
 // Before your application can call AssumeRoleWithSAML, you must configure your
 // SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
 // you must use AWS Identity and Access Management (IAM) to create a SAML provider
-// entity in your AWS account that represents your identity provider, and create
-// an IAM role that specifies this SAML provider in its trust policy.
+// entity in your AWS account that represents your identity provider. You must
+// also create an IAM role that specifies this SAML provider in its trust policy.
 //
 // Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
 // The identity of the caller is validated by using keys in the metadata document
@@ -315,16 +307,16 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 //
 // For more information, see the following resources:
 //
-//    * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+//    * About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
 //    in the IAM User Guide.
 //
-//    * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+//    * Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
 //    in the IAM User Guide.
 //
-//    * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+//    * Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
 //    in the IAM User Guide.
 //
-//    * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+//    * Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
 //    in the IAM User Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -363,7 +355,7 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
 //   STS is not activated in the requested region for the account that is being
 //   asked to generate credentials. The account administrator must use the IAM
 //   console to activate STS in that region. For more information, see Activating
-//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 //   in the IAM User Guide.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
@@ -434,35 +426,35 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 // AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
 //
 // Returns a set of temporary security credentials for users who have been authenticated
-// in a mobile or web application with a web identity provider, such as Amazon
-// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
-// identity provider.
+// in a mobile or web application with a web identity provider. Example providers
+// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID
+// Connect-compatible identity provider.
 //
 // For mobile applications, we recommend that you use Amazon Cognito. You can
-// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
-// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
-// identify a user and supply the user with a consistent identity throughout
-// the lifetime of an application.
-//
-// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
-// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview
-// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// use Amazon Cognito with the AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/)
+// to uniquely identify a user. You can also supply the user with a consistent
+// identity throughout the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in AWS SDK for Android Developer Guide and Amazon Cognito Overview (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
 // in the AWS SDK for iOS Developer Guide.
 //
 // Calling AssumeRoleWithWebIdentity does not require the use of AWS security
 // credentials. Therefore, you can distribute an application (for example, on
 // mobile devices) that requests temporary security credentials without including
-// long-term AWS credentials in the application, and without deploying server-based
-// proxy services that use long-term AWS credentials. Instead, the identity
-// of the caller is validated by using a token from the web identity provider.
-// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
-// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// long-term AWS credentials in the application. You also don't need to deploy
+// server-based proxy services that use long-term AWS credentials. Instead,
+// the identity of the caller is validated by using a token from the web identity
+// provider. For a comparison of AssumeRoleWithWebIdentity with the other API
+// operations that produce temporary credentials, see Requesting Temporary Security
+// Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
 // The temporary security credentials returned by this API consist of an access
 // key ID, a secret access key, and a security token. Applications can use these
-// temporary security credentials to sign calls to AWS service APIs.
+// temporary security credentials to sign calls to AWS service API operations.
 //
 // By default, the temporary security credentials created by AssumeRoleWithWebIdentity
 // last for one hour. However, you can use the optional DurationSeconds parameter
@@ -470,29 +462,29 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 // seconds (15 minutes) up to the maximum session duration setting for the role.
 // This setting can have a value from 1 hour to 12 hours. To learn how to view
 // the maximum value for your role, see View the Maximum Session Duration Setting
-// for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
 // in the IAM User Guide. The maximum session duration limit applies when you
-// use the AssumeRole* API operations or the assume-role* CLI operations but
-// does not apply when you use those operations to create a console URL. For
-// more information, see Using IAM Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
+// use the AssumeRole* API operations or the assume-role* CLI commands. However,
+// the limit does not apply when you use those operations to create a console
+// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
 // in the IAM User Guide.
 //
 // The temporary security credentials created by AssumeRoleWithWebIdentity can
 // be used to make API calls to any AWS service with the following exception:
-// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
-//
-// Optionally, you can pass an IAM access policy to this operation. If you choose
-// not to pass a policy, the temporary security credentials that are returned
-// by the operation have the permissions that are defined in the access policy
-// of the role that is being assumed. If you pass a policy to this operation,
-// the temporary security credentials that are returned by the operation have
-// the permissions that are allowed by both the access policy of the role that
-// is being assumed, and the policy that you pass. This gives you a way to further
-// restrict the permissions for the resulting temporary security credentials.
-// You cannot use the passed policy to grant permissions that are in excess
-// of those allowed by the access policy of the role that is being assumed.
-// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
-// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters. Passing policies
+// to this operation returns new temporary credentials. The resulting session's
+// permissions are the intersection of the role's identity-based policy and
+// the session policies. You can use the role's temporary credentials in subsequent
+// AWS API calls to access resources in the account that owns the role. You
+// cannot use session policies to grant more permissions than those allowed
+// by the identity-based policy of the role that is being assumed. For more
+// information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
 // in the IAM User Guide.
 //
 // Before your application can call AssumeRoleWithWebIdentity, you must have
@@ -511,21 +503,19 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 // For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
 // API, see the following resources:
 //
-//    * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
-//    and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//    * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+//    and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
 //
+//    * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+//    Walk through the process of authenticating through Login with Amazon,
+//    Facebook, or Google, getting temporary security credentials, and then
+//    using those credentials to make a request to AWS.
 //
-//    *  Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
-//    This interactive website lets you walk through the process of authenticating
-//    via Login with Amazon, Facebook, or Google, getting temporary security
-//    credentials, and then using those credentials to make a request to AWS.
-//
-//
-//    * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
-//    (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
-//    apps that show how to invoke the identity providers, and then how to use
-//    the information from these providers to get and use temporary security
-//    credentials.
+//    * AWS SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and
+//    AWS SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
+//    These toolkits contain sample apps that show how to invoke the identity
+//    providers, and then how to use the information from these providers to
+//    get and use temporary security credentials.
 //
 //    * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
 //    This article discusses web identity federation and shows an example of
@@ -575,7 +565,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 //   STS is not activated in the requested region for the account that is being
 //   asked to generate credentials. The account administrator must use the IAM
 //   console to activate STS in that region. For more information, see Activating
-//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 //   in the IAM User Guide.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
@@ -647,17 +637,17 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
 // Decodes additional information about the authorization status of a request
 // from an encoded message returned in response to an AWS request.
 //
-// For example, if a user is not authorized to perform an action that he or
-// she has requested, the request returns a Client.UnauthorizedOperation response
-// (an HTTP 403 response). Some AWS actions additionally return an encoded message
-// that can provide details about this authorization failure.
+// For example, if a user is not authorized to perform an operation that he
+// or she has requested, the request returns a Client.UnauthorizedOperation
+// response (an HTTP 403 response). Some AWS operations additionally return
+// an encoded message that can provide details about this authorization failure.
 //
-// Only certain AWS actions return an encoded authorization message. The documentation
-// for an individual action indicates whether that action returns an encoded
-// message in addition to returning an HTTP code.
+// Only certain AWS operations return an encoded authorization message. The
+// documentation for an individual operation indicates whether that operation
+// returns an encoded message in addition to returning an HTTP code.
 //
 // The message is encoded because the details of the authorization status can
-// constitute privileged information that the user who requested the action
+// constitute privileged information that the user who requested the operation
 // should not see. To decode an authorization status message, a user must be
 // granted permissions via an IAM policy to request the DecodeAuthorizationMessage
 // (sts:DecodeAuthorizationMessage) action.
@@ -666,7 +656,7 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
 //
 //    * Whether the request was denied due to an explicit deny or due to the
 //    absence of an explicit allow. For more information, see Determining Whether
-//    a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+//    a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
 //    in the IAM User Guide.
 //
 //    * The principal who made the request.
@@ -712,6 +702,102 @@ func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *Deco
        return out, req.Send()
 }
 
+const opGetAccessKeyInfo = "GetAccessKeyInfo"
+
+// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
+// client's request for the GetAccessKeyInfo operation. The "output" return
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
+//
+// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
+//
+//
+//    // Example sending a request using the GetAccessKeyInfoRequest method.
+//    req, resp := client.GetAccessKeyInfoRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
+       op := &request.Operation{
+               Name:       opGetAccessKeyInfo,
+               HTTPMethod: "POST",
+               HTTPPath:   "/",
+       }
+
+       if input == nil {
+               input = &GetAccessKeyInfoInput{}
+       }
+
+       output = &GetAccessKeyInfoOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetAccessKeyInfo API operation for AWS Security Token Service.
+//
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
+// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
+// For more information about access keys, see Managing Access Keys for IAM
+// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
+// in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// AWS account to which the keys belong. Access key IDs beginning with AKIA
+// are long-term credentials for an IAM user or the AWS account root user. Access
+// key IDs beginning with ASIA are temporary credentials that are created using
+// STS operations. If the account in the response belongs to you, you can sign
+// in as the root user and review your root user access keys. Then, you can
+// pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report)
+// to learn which IAM user owns the keys. To learn who requested the temporary
+// credentials for an ASIA access key, view the STS events in your CloudTrail
+// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration).
+//
+// This operation does not indicate the state of the access key. The key might
+// be active, inactive, or deleted. Active keys might not have permissions to
+// perform an operation. Providing a deleted key might return an error that
+// the key doesn't exist.
+//
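+// As a rough sketch (illustrative only; the access key ID below is the
+// documentation placeholder value), looking up the owning account:
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
+//        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Account))
+//    }
+//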
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetAccessKeyInfo for usage and error information.
+// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
+func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
+       req, out := c.GetAccessKeyInfoRequest(input)
+       return out, req.Send()
+}
+
+// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetAccessKeyInfo for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future, the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
+       req, out := c.GetAccessKeyInfoRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
 const opGetCallerIdentity = "GetCallerIdentity"
 
 // GetCallerIdentityRequest generates a "aws/request.Request" representing the
@@ -834,81 +920,65 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 // Returns a set of temporary security credentials (consisting of an access
 // key ID, a secret access key, and a security token) for a federated user.
 // A typical use is in a proxy application that gets temporary security credentials
-// on behalf of distributed applications inside a corporate network. Because
-// you must call the GetFederationToken action using the long-term security
-// credentials of an IAM user, this call is appropriate in contexts where those
+// on behalf of distributed applications inside a corporate network. You must
+// call the GetFederationToken operation using the long-term security credentials
+// of an IAM user. As a result, this call is appropriate in contexts where those
 // credentials can be safely stored, usually in a server-based application.
-// For a comparison of GetFederationToken with the other APIs that produce temporary
-// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// For a comparison of GetFederationToken with the other API operations that
+// produce temporary credentials, see Requesting Temporary Security Credentials
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
-// If you are creating a mobile-based or browser-based app that can authenticate
+// You can create a mobile-based or browser-based app that can authenticate
 // users using a web identity provider like Login with Amazon, Facebook, Google,
-// or an OpenID Connect-compatible identity provider, we recommend that you
-// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// or an OpenID Connect-compatible identity provider. In this case, we recommend
+// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
 // For more information, see Federation Through a Web-based Identity Provider
-// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
-//
-// The GetFederationToken action must be called by using the long-term AWS security
-// credentials of an IAM user. You can also call GetFederationToken using the
-// security credentials of an AWS root account, but we do not recommended it.
-// Instead, we recommend that you create an IAM user for the purpose of the
-// proxy application and then attach a policy to the IAM user that limits federated
-// users to only the actions and resources that they need access to. For more
-// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// You can also call GetFederationToken using the security credentials of an
+// AWS account root user, but we do not recommend it. Instead, we recommend
+// that you create an IAM user for the purpose of the proxy application. Then
+// attach a policy to the IAM user that limits federated users to only the actions
+// and resources that they need to access. For more information, see IAM Best
+// Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
 // in the IAM User Guide.
 //
-// The temporary security credentials that are obtained by using the long-term
-// credentials of an IAM user are valid for the specified duration, from 900
-// seconds (15 minutes) up to a maximium of 129600 seconds (36 hours). The default
-// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
-// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
+// is 43,200 seconds (12 hours). Temporary credentials that are obtained by
+// using AWS account root user credentials have a maximum duration of 3,600
+// seconds (1 hour).
 //
 // The temporary security credentials created by GetFederationToken can be used
 // to make API calls to any AWS service with the following exceptions:
 //
-//    * You cannot use these credentials to call any IAM APIs.
+//    * You cannot use these credentials to call any IAM API operations.
 //
-//    * You cannot call any STS APIs except GetCallerIdentity.
+//    * You cannot call any STS API operations except GetCallerIdentity.
 //
 // Permissions
 //
-// The permissions for the temporary security credentials returned by GetFederationToken
-// are determined by a combination of the following:
-//
-//    * The policy or policies that are attached to the IAM user whose credentials
-//    are used to call GetFederationToken.
-//
-//    * The policy that is passed as a parameter in the call.
-//
-// The passed policy is attached to the temporary security credentials that
-// result from the GetFederationToken API call--that is, to the federated user.
-// When the federated user makes an AWS request, AWS evaluates the policy attached
-// to the federated user in combination with the policy or policies attached
-// to the IAM user whose credentials were used to call GetFederationToken. AWS
-// allows the federated user's request only when both the federated user and
-// the IAM user are explicitly allowed to perform the requested action. The
-// passed policy cannot grant more permissions than those that are defined in
-// the IAM user policy.
-//
-// A typical use case is that the permissions of the IAM user whose credentials
-// are used to call GetFederationToken are designed to allow access to all the
-// actions and resources that any federated user will need. Then, for individual
-// users, you pass a policy to the operation that scopes down the permissions
-// to a level that's appropriate to that individual user, using a policy that
-// allows only a subset of permissions that are granted to the IAM user.
-//
-// If you do not pass a policy, the resulting temporary security credentials
-// have no effective permissions. The only exception is when the temporary security
-// credentials are used to access a resource that has a resource-based policy
-// that specifically allows the federated user to access the resource.
-//
-// For more information about how permissions work, see Permissions for GetFederationToken
-// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
-// For information about using GetFederationToken to create temporary security
-// credentials, see GetFederationToken—Federation Through a Custom Identity
-// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// to this operation. You can pass a single JSON policy document to use as an
+// inline session policy. You can also specify up to 10 managed policies to
+// use as managed session policies. The plain text that you use for both inline
+// and managed session policies shouldn't exceed 2048 characters.
+//
+// Though the session policy parameters are optional, if you do not pass a policy,
+// then the resulting federated user session has no permissions. The only exception
+// is when the credentials are used to access a resource that has a resource-based
+// policy that specifically references the federated user session in the Principal
+// element of the policy. When you pass session policies, the session permissions
+// are the intersection of the IAM user policies and the session policies that
+// you pass. This gives you a way to further restrict the permissions for a
+// federated user. You cannot use session policies to grant more permissions
+// than those that are defined in the permissions policy of the IAM user. For
+// more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+// in the IAM User Guide. For information about using GetFederationToken to
+// create temporary security credentials, see GetFederationToken—Federation
+// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
 //
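+// As a minimal sketch (illustrative only; the user name and policy document
+// are placeholder values), a proxy application might vend federated
+// credentials like this:
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:   aws.String("federated-user"),
+//        Policy: aws.String(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:ListBucket","Resource":"*"}]}`),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials)
+//    }
+//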
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
 // with awserr.Error's Code and Message methods to get detailed information about
@@ -931,7 +1001,7 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
 //   STS is not activated in the requested region for the account that is being
 //   asked to generate credentials. The account administrator must use the IAM
 //   console to activate STS in that region. For more information, see Activating
-//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 //   in the IAM User Guide.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
@@ -1003,48 +1073,47 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 // Returns a set of temporary credentials for an AWS account or IAM user. The
 // credentials consist of an access key ID, a secret access key, and a security
 // token. Typically, you use GetSessionToken if you want to use MFA to protect
-// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
-// IAM users would need to call GetSessionToken and submit an MFA code that
-// is associated with their MFA device. Using the temporary security credentials
-// that are returned from the call, IAM users can then make programmatic calls
-// to APIs that require MFA authentication. If you do not supply a correct MFA
-// code, then the API returns an access denied error. For a comparison of GetSessionToken
-// with the other APIs that produce temporary credentials, see Requesting Temporary
-// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// programmatic calls to specific AWS API operations like Amazon EC2 StopInstances.
+// MFA-enabled IAM users would need to call GetSessionToken and submit an MFA
+// code that is associated with their MFA device. Using the temporary security
+// credentials that are returned from the call, IAM users can then make programmatic
+// calls to API operations that require MFA authentication. If you do not supply
+// a correct MFA code, then the API returns an access denied error. For a comparison
+// of GetSessionToken with the other API operations that produce temporary credentials,
+// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
 // in the IAM User Guide.
 //
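+// As an illustrative sketch (the MFA device ARN and code are placeholders),
+// an MFA-protected call might look like this:
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
+//        SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/alice"),
+//        TokenCode:    aws.String("123456"),
+//    })
+//    if err == nil {
+//        fmt.Println(out.Credentials)
+//    }
+//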
-// The GetSessionToken action must be called by using the long-term AWS security
-// credentials of the AWS account or an IAM user. Credentials that are created
-// by IAM users are valid for the duration that you specify, from 900 seconds
-// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
-// of 43200 seconds (12 hours); credentials that are created by using account
-// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
-// seconds (1 hour), with a default of 1 hour.
+// The GetSessionToken operation must be called by using the long-term AWS security
+// credentials of the AWS account root user or an IAM user. Credentials that
+// are created by IAM users are valid for the duration that you specify. This
+// duration can range from 900 seconds (15 minutes) up to a maximum of 129,600
+// seconds (36 hours), with a default of 43,200 seconds (12 hours). Credentials
+// based on account credentials can range from 900 seconds (15 minutes) up to
+// 3,600 seconds (1 hour), with a default of 1 hour.
 //
 // The temporary security credentials created by GetSessionToken can be used
 // to make API calls to any AWS service with the following exceptions:
 //
-//    * You cannot call any IAM APIs unless MFA authentication information is
-//    included in the request.
+//    * You cannot call any IAM API operations unless MFA authentication information
+//    is included in the request.
 //
-//    * You cannot call any STS API exceptAssumeRole or GetCallerIdentity.
+//    * You cannot call any STS API operations except AssumeRole or GetCallerIdentity.
 //
-// We recommend that you do not call GetSessionToken with root account credentials.
-// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// We recommend that you do not call GetSessionToken with AWS account root user
+// credentials. Instead, follow our best practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
 // by creating one or more IAM users, giving them the necessary permissions,
 // and using IAM users for everyday interaction with AWS.
 //
-// The permissions associated with the temporary security credentials returned
-// by GetSessionToken are based on the permissions associated with account or
-// IAM user whose credentials are used to call the action. If GetSessionToken
-// is called using root account credentials, the temporary credentials have
-// root account permissions. Similarly, if GetSessionToken is called using the
-// credentials of an IAM user, the temporary credentials have the same permissions
-// as the IAM user.
+// The credentials that are returned by GetSessionToken are based on permissions
+// associated with the user whose credentials were used to call the operation.
+// If GetSessionToken is called using AWS account root user credentials, the
+// temporary credentials have root user permissions. Similarly, if GetSessionToken
+// is called using the credentials of an IAM user, the temporary credentials
+// have the same permissions as the IAM user.
 //
 // For more information about using GetSessionToken to create temporary credentials,
-// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// go to Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
 // in the IAM User Guide.
 //
 // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1059,7 +1128,7 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
 //   STS is not activated in the requested region for the account that is being
 //   asked to generate credentials. The account administrator must use the IAM
 //   console to activate STS in that region. For more information, see Activating
-//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
 //   in the IAM User Guide.
 //
 // See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
@@ -1094,7 +1163,7 @@ type AssumeRoleInput struct {
        // a session duration of 12 hours, but your administrator set the maximum session
        // duration to 6 hours, your operation fails. To learn how to view the maximum
        // value for your role, see View the Maximum Session Duration Setting for a
-       // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+       // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
        // in the IAM User Guide.
        //
        // By default, the value is set to 3600 seconds.
@@ -1104,51 +1173,77 @@ type AssumeRoleInput struct {
        // to the federation endpoint for a console sign-in token takes a SessionDuration
        // parameter that specifies the maximum length of the console session. For more
        // information, see Creating a URL that Enables Federated Users to Access the
-       // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+       // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
        // in the IAM User Guide.
        DurationSeconds *int64 `min:"900" type:"integer"`
 
-       // A unique identifier that is used by third parties when assuming roles in
-       // their customers' accounts. For each role that the third party can assume,
-       // they should instruct their customers to ensure the role's trust policy checks
-       // for the external ID that the third party generated. Each time the third party
-       // assumes the role, they should pass the customer's external ID. The external
-       // ID is useful in order to help third parties bind a role to the customer who
-       // created it. For more information about the external ID, see How to Use an
-       // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+       // A unique identifier that might be required when you assume a role in another
+       // account. If the administrator of the account to which the role belongs provided
+       // you with an external ID, then provide that value in the ExternalId parameter.
+       // This value can be any string, such as a passphrase or account number. A cross-account
+       // role is usually set up to trust everyone in an account. Therefore, the administrator
+       // of the trusting account might send an external ID to the administrator of
+       // the trusted account. That way, only someone with the ID can assume the role,
+       // rather than everyone in the account. For more information about the external
+       // ID, see How to Use an External ID When Granting Access to Your AWS Resources
+       // to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
        // in the IAM User Guide.
        //
-       // The regex used to validated this parameter is a string of characters consisting
+       // The regex used to validate this parameter is a string of characters consisting
        // of upper- and lower-case alphanumeric characters with no spaces. You can
        // also include underscores or any of the following characters: =,.@:/-
        ExternalId *string `min:"2" type:"string"`
 
-       // An IAM policy in JSON format.
-       //
-       // This parameter is optional. If you pass a policy, the temporary security
-       // credentials that are returned by the operation have the permissions that
-       // are allowed by both (the intersection of) the access policy of the role that
-       // is being assumed, and the policy that you pass. This gives you a way to further
-       // restrict the permissions for the resulting temporary security credentials.
-       // You cannot use the passed policy to grant permissions that are in excess
-       // of those allowed by the access policy of the role that is being assumed.
-       // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
-       // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+       // An IAM policy in JSON format that you want to use as an inline session policy.
+       //
+       // This parameter is optional. Passing policies to this operation returns new
+       // temporary credentials. The resulting session's permissions are the intersection
+       // of the role's identity-based policy and the session policies. You can use
+       // the role's temporary credentials in subsequent AWS API calls to access resources
+       // in the account that owns the role. You cannot use session policies to grant
+       // more permissions than those allowed by the identity-based policy of the role
+       // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
        // in the IAM User Guide.
        //
-       // The format for this parameter, as described by its regex pattern, is a string
-       // of characters up to 2048 characters in length. The characters can be any
-       // ASCII character from the space character to the end of the valid character
-       // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+       // The plain text that you use for both inline and managed session policies
+       // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+       // character from the space character to the end of the valid character list
+       // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
        // and carriage return (\u000D) characters.
        //
-       // The policy plain text must be 2048 bytes or shorter. However, an internal
-       // conversion compresses it into a packed binary format with a separate limit.
-       // The PackedPolicySize response element indicates by percentage how close to
-       // the upper size limit the policy is, with 100% equaling the maximum allowed
-       // size.
+       // The characters in this parameter count towards the 2048 character session
+       // policy guideline. However, an AWS conversion compresses the session policies
+       // into a packed binary format that has a separate limit. This is the enforced
+       // limit. The PackedPolicySize response element indicates by percentage how
+       // close the policy is to the upper size limit.
        Policy *string `min:"1" type:"string"`
 
+       // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+       // to use as managed session policies. The policies must exist in the same account
+       // as the role.
+       //
+       // This parameter is optional. You can provide up to 10 managed policy ARNs.
+       // However, the plain text that you use for both inline and managed session
+       // policies shouldn't exceed 2048 characters. For more information about ARNs,
+       // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+       // in the AWS General Reference.
+       //
+       // The characters in this parameter count towards the 2048 character session
+       // policy guideline. However, an AWS conversion compresses the session policies
+       // into a packed binary format that has a separate limit. This is the enforced
+       // limit. The PackedPolicySize response element indicates by percentage how
+       // close the policy is to the upper size limit.
+       //
+       // Passing policies to this operation returns new temporary credentials. The
+       // resulting session's permissions are the intersection of the role's identity-based
+       // policy and the session policies. You can use the role's temporary credentials
+       // in subsequent AWS API calls to access resources in the account that owns
+       // the role. You cannot use session policies to grant more permissions than
+       // those allowed by the identity-based policy of the role that is being assumed.
+       // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+       // in the IAM User Guide.
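+       //
+       // As a rough sketch (the ARN shown is an example AWS managed policy, and
+       // input is an *AssumeRoleInput), attaching a managed session policy:
+       //
+       //    input.PolicyArns = []*sts.PolicyDescriptorType{
+       //        {Arn: aws.String("arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess")},
+       //    }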
+       PolicyArns []*PolicyDescriptorType `type:"list"`
+
        // The Amazon Resource Name (ARN) of the role to assume.
        //
        // RoleArn is a required field
@@ -1161,8 +1256,8 @@ type AssumeRoleInput struct {
        // scenarios, the role session name is visible to, and can be logged by the
        // account that owns the role. The role session name is also used in the ARN
        // of the assumed role principal. This means that subsequent cross-account API
-       // requests using the temporary security credentials will expose the role session
-       // name to the external account in their CloudTrail logs.
+       // requests that use the temporary security credentials will expose the role
+       // session name to the external account in their AWS CloudTrail logs.
        //
        // The regex used to validate this parameter is a string of characters consisting
        // of upper- and lower-case alphanumeric characters with no spaces. You can
@@ -1232,6 +1327,16 @@ func (s *AssumeRoleInput) Validate() error {
        if s.TokenCode != nil && len(*s.TokenCode) < 6 {
                invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
        }
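+       // Validate each non-nil managed policy descriptor; any nested error is
+       // recorded with the index of the offending entry (for example, PolicyArns[1]).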
+       if s.PolicyArns != nil {
+               for i, v := range s.PolicyArns {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
 
        if invalidParams.Len() > 0 {
                return invalidParams
@@ -1257,6 +1362,12 @@ func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
        return s
 }
 
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput {
+       s.PolicyArns = v
+       return s
+}
+
 // SetRoleArn sets the RoleArn field's value.
 func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
        s.RoleArn = &v
@@ -1296,10 +1407,8 @@ type AssumeRoleOutput struct {
        // The temporary security credentials, which include an access key ID, a secret
        // access key, and a security (or session) token.
        //
-       // Note: The size of the security token that STS APIs return is not fixed. We
-       // strongly recommend that you make no assumptions about the maximum size. As
-       // of this writing, the typical size is less than 4096 bytes, but that can vary.
-       // Also, future updates to AWS might require larger sizes.
+       // The size of the security token that STS API operations return is not fixed.
+       // We strongly recommend that you make no assumptions about the maximum size.
        Credentials *Credentials `type:"structure"`
 
        // A percentage value that indicates the size of the policy in packed form.
@@ -1349,7 +1458,7 @@ type AssumeRoleWithSAMLInput struct {
        // specify a session duration of 12 hours, but your administrator set the maximum
        // session duration to 6 hours, your operation fails. To learn how to view the
        // maximum value for your role, see View the Maximum Session Duration Setting
-       // for a Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+       // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
        // in the IAM User Guide.
        //
        // By default, the value is set to 3600 seconds.
@@ -1359,36 +1468,60 @@ type AssumeRoleWithSAMLInput struct {
        // to the federation endpoint for a console sign-in token takes a SessionDuration
        // parameter that specifies the maximum length of the console session. For more
        // information, see Creating a URL that Enables Federated Users to Access the
-       // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+       // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
        // in the IAM User Guide.
        DurationSeconds *int64 `min:"900" type:"integer"`
 
-       // An IAM policy in JSON format.
-       //
-       // The policy parameter is optional. If you pass a policy, the temporary security
-       // credentials that are returned by the operation have the permissions that
-       // are allowed by both the access policy of the role that is being assumed,
-       // and the policy that you pass. This gives you a way to further restrict the
-       // permissions for the resulting temporary security credentials. You cannot
-       // use the passed policy to grant permissions that are in excess of those allowed
-       // by the access policy of the role that is being assumed. For more information,
-       // Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
-       // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+       // An IAM policy in JSON format that you want to use as an inline session policy.
+       //
+       // This parameter is optional. Passing policies to this operation returns new
+       // temporary credentials. The resulting session's permissions are the intersection
+       // of the role's identity-based policy and the session policies. You can use
+       // the role's temporary credentials in subsequent AWS API calls to access resources
+       // in the account that owns the role. You cannot use session policies to grant
+       // more permissions than those allowed by the identity-based policy of the role
+       // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
        // in the IAM User Guide.
        //
-       // The format for this parameter, as described by its regex pattern, is a string
-       // of characters up to 2048 characters in length. The characters can be any
-       // ASCII character from the space character to the end of the valid character
-       // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+       // The plain text that you use for both inline and managed session policies
+       // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+       // character from the space character to the end of the valid character list
+       // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
        // and carriage return (\u000D) characters.
        //
-       // The policy plain text must be 2048 bytes or shorter. However, an internal
-       // conversion compresses it into a packed binary format with a separate limit.
-       // The PackedPolicySize response element indicates by percentage how close to
-       // the upper size limit the policy is, with 100% equaling the maximum allowed
-       // size.
+       // The characters in this parameter count towards the 2048 character session
+       // policy guideline. However, an AWS conversion compresses the session policies
+       // into a packed binary format that has a separate limit. This is the enforced
+       // limit. The PackedPolicySize response element indicates by percentage how
+       // close the policy is to the upper size limit.
        Policy *string `min:"1" type:"string"`
 
+       // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+       // to use as managed session policies. The policies must exist in the same account
+       // as the role.
+       //
+       // This parameter is optional. You can provide up to 10 managed policy ARNs.
+       // However, the plain text that you use for both inline and managed session
+       // policies shouldn't exceed 2048 characters. For more information about ARNs,
+       // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+       // in the AWS General Reference.
+       //
+       // The characters in this parameter count towards the 2048 character session
+       // policy guideline. However, an AWS conversion compresses the session policies
+       // into a packed binary format that has a separate limit. This is the enforced
+       // limit. The PackedPolicySize response element indicates by percentage how
+       // close the policy is to the upper size limit.
+       //
+       // Passing policies to this operation returns new temporary credentials. The
+       // resulting session's permissions are the intersection of the role's identity-based
+       // policy and the session policies. You can use the role's temporary credentials
+       // in subsequent AWS API calls to access resources in the account that owns
+       // the role. You cannot use session policies to grant more permissions than
+       // those allowed by the identity-based policy of the role that is being assumed.
+       // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+       // in the IAM User Guide.
+       PolicyArns []*PolicyDescriptorType `type:"list"`
+
        // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
        // the IdP.
        //
@@ -1402,8 +1535,8 @@ type AssumeRoleWithSAMLInput struct {
 
        // The base-64 encoded SAML authentication response provided by the IdP.
        //
-       // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
-       // in the Using IAM guide.
+       // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+       // in the IAM User Guide.
        //
        // SAMLAssertion is a required field
        SAMLAssertion *string `min:"4" type:"string" required:"true"`
@@ -1446,6 +1579,16 @@ func (s *AssumeRoleWithSAMLInput) Validate() error {
        if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
                invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
        }
+       if s.PolicyArns != nil {
+               for i, v := range s.PolicyArns {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
 
        if invalidParams.Len() > 0 {
                return invalidParams
@@ -1465,6 +1608,12 @@ func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
        return s
 }
 
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput {
+       s.PolicyArns = v
+       return s
+}
+
 // SetPrincipalArn sets the PrincipalArn field's value.
 func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
        s.PrincipalArn = &v
@@ -1499,10 +1648,8 @@ type AssumeRoleWithSAMLOutput struct {
        // The temporary security credentials, which include an access key ID, a secret
        // access key, and a security (or session) token.
        //
-       // Note: The size of the security token that STS APIs return is not fixed. We
-       // strongly recommend that you make no assumptions about the maximum size. As
-       // of this writing, the typical size is less than 4096 bytes, but that can vary.
-       // Also, future updates to AWS might require larger sizes.
+       // The size of the security token that STS API operations return is not fixed.
+       // We strongly recommend that you make no assumptions about the maximum size.
        Credentials *Credentials `type:"structure"`
 
        // The value of the Issuer element of the SAML assertion.
@@ -1606,7 +1753,7 @@ type AssumeRoleWithWebIdentityInput struct {
        // a session duration of 12 hours, but your administrator set the maximum session
        // duration to 6 hours, your operation fails. To learn how to view the maximum
        // value for your role, see View the Maximum Session Duration Setting for a
-       // Role (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
+       // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
        // in the IAM User Guide.
        //
        // By default, the value is set to 3600 seconds.
@@ -1616,35 +1763,60 @@ type AssumeRoleWithWebIdentityInput struct {
        // to the federation endpoint for a console sign-in token takes a SessionDuration
        // parameter that specifies the maximum length of the console session. For more
        // information, see Creating a URL that Enables Federated Users to Access the
-       // AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+       // AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
        // in the IAM User Guide.
        DurationSeconds *int64 `min:"900" type:"integer"`
 
-       // An IAM policy in JSON format.
+       // An IAM policy in JSON format that you want to use as an inline session policy.
        //
-       // The policy parameter is optional. If you pass a policy, the temporary security
-       // credentials that are returned by the operation have the permissions that
-       // are allowed by both the access policy of the role that is being assumed,
-       // and the policy that you pass. This gives you a way to further restrict the
-       // permissions for the resulting temporary security credentials. You cannot
-       // use the passed policy to grant permissions that are in excess of those allowed
-       // by the access policy of the role that is being assumed. For more information,
-       // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+       // This parameter is optional. Passing policies to this operation returns new
+       // temporary credentials. The resulting session's permissions are the intersection
+       // of the role's identity-based policy and the session policies. You can use
+       // the role's temporary credentials in subsequent AWS API calls to access resources
+       // in the account that owns the role. You cannot use session policies to grant
+       // more permissions than those allowed by the identity-based policy of the role
+       // that is being assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
        // in the IAM User Guide.
        //
-       // The format for this parameter, as described by its regex pattern, is a string
-       // of characters up to 2048 characters in length. The characters can be any
-       // ASCII character from the space character to the end of the valid character
-       // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+       // The plain text that you use for both inline and managed session policies
+       // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+       // character from the space character to the end of the valid character list
+       // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
        // and carriage return (\u000D) characters.
        //
-       // The policy plain text must be 2048 bytes or shorter. However, an internal
-       // conversion compresses it into a packed binary format with a separate limit.
-       // The PackedPolicySize response element indicates by percentage how close to
-       // the upper size limit the policy is, with 100% equaling the maximum allowed
-       // size.
+       // The characters in this parameter count towards the 2048 character session
+       // policy guideline. However, an AWS conversion compresses the session policies
+       // into a packed binary format that has a separate limit. This is the enforced
+       // limit. The PackedPolicySize response element indicates by percentage how
+       // close the policy is to the upper size limit.
        Policy *string `min:"1" type:"string"`
 
+       // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+       // to use as managed session policies. The policies must exist in the same account
+       // as the role.
+       //
+       // This parameter is optional. You can provide up to 10 managed policy ARNs.
+       // However, the plain text that you use for both inline and managed session
+       // policies shouldn't exceed 2048 characters. For more information about ARNs,
+       // see Amazon Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+       // in the AWS General Reference.
+       //
+       // The characters in this parameter count towards the 2048 character session
+       // policy guideline. However, an AWS conversion compresses the session policies
+       // into a packed binary format that has a separate limit. This is the enforced
+       // limit. The PackedPolicySize response element indicates by percentage how
+       // close the policy is to the upper size limit.
+       //
+       // Passing policies to this operation returns new temporary credentials. The
+       // resulting session's permissions are the intersection of the role's identity-based
+       // policy and the session policies. You can use the role's temporary credentials
+       // in subsequent AWS API calls to access resources in the account that owns
+       // the role. You cannot use session policies to grant more permissions than
+       // those allowed by the identity-based policy of the role that is being assumed.
+       // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+       // in the IAM User Guide.
+       PolicyArns []*PolicyDescriptorType `type:"list"`
+
        // The fully qualified host component of the domain name of the identity provider.
        //
        // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
@@ -1721,6 +1893,16 @@ func (s *AssumeRoleWithWebIdentityInput) Validate() error {
        if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
                invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
        }
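+       // Validate each PolicyArns descriptor, skipping nil entries and nesting any errors.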
+       if s.PolicyArns != nil {
+               for i, v := range s.PolicyArns {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
 
        if invalidParams.Len() > 0 {
                return invalidParams
@@ -1740,6 +1922,12 @@ func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebI
        return s
 }
 
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput {
+       s.PolicyArns = v
+       return s
+}
+
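+// Illustrative example (not part of the generated API): attaching an inline
+// policy and managed policy ARNs when assuming a role with a web identity
+// token. The role ARN, session name, and token below are hypothetical:
+//
+//    input := (&sts.AssumeRoleWithWebIdentityInput{}).
+//        SetRoleArn("arn:aws:iam::123456789012:role/demo").
+//        SetRoleSessionName("demo-session").
+//        SetWebIdentityToken(token).
+//        SetPolicyArns([]*sts.PolicyDescriptorType{
+//            (&sts.PolicyDescriptorType{}).SetArn("arn:aws:iam::aws:policy/ReadOnlyAccess"),
+//        })
+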
 // SetProviderId sets the ProviderId field's value.
 func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
        s.ProviderId = &v
@@ -1784,10 +1972,8 @@ type AssumeRoleWithWebIdentityOutput struct {
        // The temporary security credentials, which include an access key ID, a secret
        // access key, and a security token.
        //
-       // Note: The size of the security token that STS APIs return is not fixed. We
-       // strongly recommend that you make no assumptions about the maximum size. As
-       // of this writing, the typical size is less than 4096 bytes, but that can vary.
-       // Also, future updates to AWS might require larger sizes.
+       // The size of the security token that STS API operations return is not fixed.
+       // We strongly recommend that you make no assumptions about the maximum size.
        Credentials *Credentials `type:"structure"`
 
        // A percentage value that indicates the size of the policy in packed form.
@@ -1796,7 +1982,7 @@ type AssumeRoleWithWebIdentityOutput struct {
        PackedPolicySize *int64 `type:"integer"`
 
        // The issuing authority of the web identity token presented. For OpenID Connect
-       // ID Tokens this contains the value of the iss field. For OAuth 2.0 access
+       // ID tokens, this contains the value of the iss field. For OAuth 2.0 access
        // tokens, this contains the value of the ProviderId parameter that was passed
        // in the AssumeRoleWithWebIdentity request.
        Provider *string `type:"string"`
@@ -1863,7 +2049,7 @@ type AssumedRoleUser struct {
 
        // The ARN of the temporary security credentials that are returned from the
        // AssumeRole action. For more information about ARNs and how to use them in
-       // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+       // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
        // in Using IAM.
        //
        // Arn is a required field
@@ -2031,7 +2217,7 @@ type FederatedUser struct {
 
        // The ARN that specifies the federated user that is associated with the credentials.
        // For more information about ARNs and how to use them in policies, see IAM
-       // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+       // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
        // in Using IAM.
        //
        // Arn is a required field
@@ -2066,6 +2252,73 @@ func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
        return s
 }
 
+type GetAccessKeyInfoInput struct {
+       _ struct{} `type:"structure"`
+
+       // The identifier of an access key.
+       //
+       // This parameter allows (through its regex pattern) a string of characters
+       // that can consist of any upper- or lowercase letter or digit.
+       //
+       // AccessKeyId is a required field
+       AccessKeyId *string `min:"16" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetAccessKeyInfoInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccessKeyInfoInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetAccessKeyInfoInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"}
+       if s.AccessKeyId == nil {
+               invalidParams.Add(request.NewErrParamRequired("AccessKeyId"))
+       }
+       if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 {
+               invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput {
+       s.AccessKeyId = &v
+       return s
+}
+
+type GetAccessKeyInfoOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The number used to identify the AWS account.
+       Account *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetAccessKeyInfoOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetAccessKeyInfoOutput) GoString() string {
+       return s.String()
+}
+
+// SetAccount sets the Account field's value.
+func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput {
+       s.Account = &v
+       return s
+}
+
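+// Illustrative example (not part of the generated API): looking up the
+// account that owns an access key. The client value svc and the key ID are
+// hypothetical:
+//
+//    out, err := svc.GetAccessKeyInfo(&sts.GetAccessKeyInfoInput{
+//        AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Account))
+//    }
+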
 type GetCallerIdentityInput struct {
        _ struct{} `type:"structure"`
 }
@@ -2093,8 +2346,8 @@ type GetCallerIdentityOutput struct {
        Arn *string `min:"20" type:"string"`
 
        // The unique identifier of the calling entity. The exact value depends on the
-       // type of entity making the call. The values returned are those listed in the
-       // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+       // type of entity that is making the call. The values returned are those listed
+       // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
        // found on the Policy Variables reference page in the IAM User Guide.
        UserId *string `type:"string"`
 }
@@ -2131,12 +2384,11 @@ type GetFederationTokenInput struct {
        _ struct{} `type:"structure"`
 
        // The duration, in seconds, that the session should last. Acceptable durations
-       // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
-       // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
-       // using AWS account (root) credentials are restricted to a maximum of 3600
+       // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
+       // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
+       // using AWS account root user credentials are restricted to a maximum of 3,600
        // seconds (one hour). If the specified duration is longer than one hour, the
-       // session obtained by using AWS account (root) credentials defaults to one
-       // hour.
+       // session obtained by using root user credentials defaults to one hour.
        DurationSeconds *int64 `min:"900" type:"integer"`
 
        // The name of the federated user. The name is used as an identifier for the
@@ -2151,36 +2403,73 @@ type GetFederationTokenInput struct {
        // Name is a required field
        Name *string `min:"2" type:"string" required:"true"`
 
-       // An IAM policy in JSON format that is passed with the GetFederationToken call
-       // and evaluated along with the policy or policies that are attached to the
-       // IAM user whose credentials are used to call GetFederationToken. The passed
-       // policy is used to scope down the permissions that are available to the IAM
-       // user, by allowing only a subset of the permissions that are granted to the
-       // IAM user. The passed policy cannot grant more permissions than those granted
-       // to the IAM user. The final permissions for the federated user are the most
-       // restrictive set based on the intersection of the passed policy and the IAM
-       // user policy.
-       //
-       // If you do not pass a policy, the resulting temporary security credentials
-       // have no effective permissions. The only exception is when the temporary security
-       // credentials are used to access a resource that has a resource-based policy
-       // that specifically allows the federated user to access the resource.
-       //
-       // The format for this parameter, as described by its regex pattern, is a string
-       // of characters up to 2048 characters in length. The characters can be any
-       // ASCII character from the space character to the end of the valid character
-       // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
-       // and carriage return (\u000D) characters.
+       // An IAM policy in JSON format that you want to use as an inline session policy.
+       //
+       // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+       // to this operation. You can pass a single JSON policy document to use as an
+       // inline session policy. You can also specify up to 10 managed policies to
+       // use as managed session policies.
        //
-       // The policy plain text must be 2048 bytes or shorter. However, an internal
-       // conversion compresses it into a packed binary format with a separate limit.
-       // The PackedPolicySize response element indicates by percentage how close to
-       // the upper size limit the policy is, with 100% equaling the maximum allowed
-       // size.
+       // This parameter is optional. However, if you do not pass any session policies,
+       // then the resulting federated user session has no permissions. The only exception
+       // is when the credentials are used to access a resource that has a resource-based
+       // policy that specifically references the federated user session in the Principal
+       // element of the policy.
        //
-       // For more information about how permissions work, see Permissions for GetFederationToken
-       // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+       // When you pass session policies, the session permissions are the intersection
+       // of the IAM user policies and the session policies that you pass. This gives
+       // you a way to further restrict the permissions for a federated user. You cannot
+       // use session policies to grant more permissions than those that are defined
+       // in the permissions policy of the IAM user. For more information, see Session
+       // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+       // in the IAM User Guide.
+       //
+       // The plain text that you use for both inline and managed session policies
+       // shouldn't exceed 2048 characters. The JSON policy characters can be any ASCII
+       // character from the space character to the end of the valid character list
+       // (\u0020 through \u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+       // and carriage return (\u000D) characters.
+       //
+       // The characters in this parameter count towards the 2048 character session
+       // policy guideline. However, an AWS conversion compresses the session policies
+       // into a packed binary format that has a separate limit. This is the enforced
+       // limit. The PackedPolicySize response element indicates by percentage how
+       // close the policy is to the upper size limit.
        Policy *string `min:"1" type:"string"`
+
+       // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
+       // to use as managed session policies. The policies must exist in the same account
+       // as the IAM user that is requesting federated access.
+       //
+       // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+       // to this operation. You can pass a single JSON policy document to use as an
+       // inline session policy. You can also specify up to 10 managed policies to
+       // use as managed session policies. The plain text that you use for both inline
+       // and managed session policies shouldn't exceed 2048 characters. You can provide
+       // up to 10 managed policy ARNs. For more information about ARNs, see Amazon
+       // Resource Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+       // in the AWS General Reference.
+       //
+       // This parameter is optional. However, if you do not pass any session policies,
+       // then the resulting federated user session has no permissions. The only exception
+       // is when the credentials are used to access a resource that has a resource-based
+       // policy that specifically references the federated user session in the Principal
+       // element of the policy.
+       //
+       // When you pass session policies, the session permissions are the intersection
+       // of the IAM user policies and the session policies that you pass. This gives
+       // you a way to further restrict the permissions for a federated user. You cannot
+       // use session policies to grant more permissions than those that are defined
+       // in the permissions policy of the IAM user. For more information, see Session
+       // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
+       // in the IAM User Guide.
+       //
+       // The characters in this parameter count towards the 2048 character session
+       // policy guideline. However, an AWS conversion compresses the session policies
+       // into a packed binary format that has a separate limit. This is the enforced
+       // limit. The PackedPolicySize response element indicates by percentage how
+       // close the policy is to the upper size limit.
+       PolicyArns []*PolicyDescriptorType `type:"list"`
 }
 
 // String returns the string representation
@@ -2208,6 +2497,16 @@ func (s *GetFederationTokenInput) Validate() error {
        if s.Policy != nil && len(*s.Policy) < 1 {
                invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
        }
+       if s.PolicyArns != nil {
+               for i, v := range s.PolicyArns {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
 
        if invalidParams.Len() > 0 {
                return invalidParams
@@ -2233,6 +2532,12 @@ func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
        return s
 }
 
+// SetPolicyArns sets the PolicyArns field's value.
+func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput {
+       s.PolicyArns = v
+       return s
+}
+
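+// Illustrative example (not part of the generated API): requesting a
+// federation token scoped by an inline session policy and a managed policy
+// ARN. The client value svc and policyJSON are hypothetical:
+//
+//    out, err := svc.GetFederationToken((&sts.GetFederationTokenInput{}).
+//        SetName("demo-user").
+//        SetPolicy(policyJSON).
+//        SetPolicyArns([]*sts.PolicyDescriptorType{
+//            (&sts.PolicyDescriptorType{}).SetArn("arn:aws:iam::aws:policy/ReadOnlyAccess"),
+//        }))
+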
 // Contains the response to a successful GetFederationToken request, including
 // temporary AWS credentials that can be used to make AWS requests.
 type GetFederationTokenOutput struct {
@@ -2241,10 +2546,8 @@ type GetFederationTokenOutput struct {
        // The temporary security credentials, which include an access key ID, a secret
        // access key, and a security (or session) token.
        //
-       // Note: The size of the security token that STS APIs return is not fixed. We
-       // strongly recommend that you make no assumptions about the maximum size. As
-       // of this writing, the typical size is less than 4096 bytes, but that can vary.
-       // Also, future updates to AWS might require larger sizes.
+       // The size of the security token that STS API operations return is not fixed.
+       // We strongly recommend that you make no assumptions about the maximum size.
        Credentials *Credentials `type:"structure"`
 
        // Identifiers for the federated user associated with the credentials (such
@@ -2291,11 +2594,11 @@ type GetSessionTokenInput struct {
        _ struct{} `type:"structure"`
 
        // The duration, in seconds, that the credentials should remain valid. Acceptable
-       // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
-       // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
-       // for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
-       // If the duration is longer than one hour, the session for AWS account owners
-       // defaults to one hour.
+       // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
+       // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions
+       // for AWS account owners are restricted to a maximum of 3,600 seconds (one
+       // hour). If the duration is longer than one hour, the session for AWS account
+       // owners defaults to one hour.
        DurationSeconds *int64 `min:"900" type:"integer"`
 
        // The identification number of the MFA device that is associated with the IAM
@@ -2306,16 +2609,16 @@ type GetSessionTokenInput struct {
        // You can find the device for an IAM user by going to the AWS Management Console
        // and viewing the user's security credentials.
        //
-       // The regex used to validated this parameter is a string of characters consisting
+       // The regex used to validate this parameter is a string of characters consisting
        // of upper- and lower-case alphanumeric characters with no spaces. You can
        // also include underscores or any of the following characters: =,.@:/-
        SerialNumber *string `min:"9" type:"string"`
 
        // The value provided by the MFA device, if MFA is required. If any policy requires
        // the IAM user to submit an MFA code, specify this value. If MFA authentication
-       // is required, and the user does not provide a code when requesting a set of
-       // temporary security credentials, the user will receive an "access denied"
-       // response when requesting resources that require MFA authentication.
+       // is required, the user must provide a code when requesting a set of temporary
+       // security credentials. A user who fails to provide the code receives an "access
+       // denied" response when requesting resources that require MFA authentication.
        //
        // The format for this parameter, as described by its regex pattern, is a sequence
        // of six numeric digits.
@@ -2377,10 +2680,8 @@ type GetSessionTokenOutput struct {
        // The temporary security credentials, which include an access key ID, a secret
        // access key, and a security (or session) token.
        //
-       // Note: The size of the security token that STS APIs return is not fixed. We
-       // strongly recommend that you make no assumptions about the maximum size. As
-       // of this writing, the typical size is less than 4096 bytes, but that can vary.
-       // Also, future updates to AWS might require larger sizes.
+       // The size of the security token that STS API operations return is not fixed.
+       // We strongly recommend that you make no assumptions about the maximum size.
        Credentials *Credentials `type:"structure"`
 }
 
@@ -2399,3 +2700,44 @@ func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenO
        s.Credentials = v
        return s
 }
+
+// A reference to the IAM managed policy that is passed as a session policy
+// for a role session or a federated user session.
+type PolicyDescriptorType struct {
+       _ struct{} `type:"structure"`
+
+       // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
+       // policy for the role. For more information about ARNs, see Amazon Resource
+       // Names (ARNs) and AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+       // in the AWS General Reference.
+       Arn *string `locationName:"arn" min:"20" type:"string"`
+}
+
+// String returns the string representation
+func (s PolicyDescriptorType) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PolicyDescriptorType) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PolicyDescriptorType) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"}
+       if s.Arn != nil && len(*s.Arn) < 20 {
+               invalidParams.Add(request.NewErrParamMinLen("Arn", 20))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetArn sets the Arn field's value.
+func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
+       s.Arn = &v
+       return s
+}
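+
+// Illustrative example (not part of the generated API): a descriptor is
+// typically built with its setter and validated before use:
+//
+//    pd := (&sts.PolicyDescriptorType{}).SetArn("arn:aws:iam::aws:policy/ReadOnlyAccess")
+//    if err := pd.Validate(); err != nil {
+//        // the ARN is shorter than the 20-character minimum
+//    }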
index ef681ab0c63c657237f7bee3eabfe7c3fce2fa56..fcb720dcac6dd7a4ec2328d87678fb850357d84e 100644 (file)
@@ -7,22 +7,14 @@
 // request temporary, limited-privilege credentials for AWS Identity and Access
 // Management (IAM) users or for users that you authenticate (federated users).
 // This guide provides descriptions of the STS API. For more detailed information
-// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
-//
-// As an alternative to using the API, you can use one of the AWS SDKs, which
-// consist of libraries and sample code for various programming languages and
-// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
-// way to create programmatic access to STS. For example, the SDKs take care
-// of cryptographically signing requests, managing errors, and retrying requests
-// automatically. For information about the AWS SDKs, including how to download
-// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
 //
 // For information about setting up signatures and authorization through the
-// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
 // in the AWS General Reference. For general information about the Query API,
-// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
 // in Using IAM. For information about using security tokens with other AWS
-// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
 // in the IAM User Guide.
 //
// If you're new to AWS and need additional technical information about a specific
// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/.
 //
 // Endpoints
 //
-// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
-// that maps to the US East (N. Virginia) region. Additional regions are available
-// and are activated by default. For more information, see Activating and Deactivating
-// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// By default, AWS Security Token Service (STS) is available as a global service,
+// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com.
+// Global requests map to the US East (N. Virginia) region. AWS recommends using
+// Regional AWS STS endpoints instead of the global endpoint to reduce latency,
+// build in redundancy, and increase session token validity. For more information,
+// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// Most AWS Regions are enabled for operations in all AWS services by default.
+// Those Regions are automatically activated for use with AWS STS. Some Regions,
+// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more
+// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
+// in the AWS General Reference. When you enable these AWS Regions, they are
+// automatically activated for use with AWS STS. You cannot activate the STS
+// endpoint for a Region that is disabled. Tokens that are valid in all AWS
+// Regions are longer than tokens that are valid in Regions that are enabled
+// by default. Changing this setting might affect existing systems where you
+// temporarily store tokens. For more information, see Managing Global Endpoint
+// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens)
 // in the IAM User Guide.
 //
-// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
-// in the AWS General Reference.
+// After you activate a Region for use with AWS STS, you can direct AWS STS
+// API calls to that Region. AWS STS recommends that you provide both the Region
+// and endpoint when you make calls to a Regional endpoint. You can provide
+// the Region alone for manually enabled Regions, such as Asia Pacific (Hong
+// Kong). In this case, the calls are directed to the STS Regional endpoint.
+// However, if you provide the Region alone for Regions enabled by default,
+// the calls are directed to the global endpoint of https://sts.amazonaws.com.
+//
+// To view the list of AWS STS endpoints and whether they are active by default,
+// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code)
+// in the IAM User Guide.
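+//
+// For example (illustrative only, not part of this documentation), directing
+// calls to a Regional endpoint with the Go SDK might look like:
+//
+//    sess := session.Must(session.NewSession(aws.NewConfig().
+//        WithRegion("us-west-2").
+//        WithEndpoint("https://sts.us-west-2.amazonaws.com")))
+//    svc := sts.New(sess)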
 //
 // Recording API requests
 //
// STS supports AWS CloudTrail, which is a service that records AWS calls for
// your AWS account and delivers log files to an Amazon S3 bucket. By using
 // information collected by CloudTrail, you can determine what requests were
 // successfully made to STS, who made the request, when it was made, and so
-// on. To learn more about CloudTrail, including how to turn it on and find
-// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+// on.
+//
+// If you activate AWS STS endpoints in Regions other than the default global
+// endpoint, then you must also turn on CloudTrail logging in those Regions.
+// This is necessary to record any AWS STS API calls that are made in those
+// Regions. For more information, see Turning On CloudTrail in Additional Regions
+// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html)
+// in the AWS CloudTrail User Guide.
+//
+// AWS Security Token Service (STS) is a global service with a single endpoint
+// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls
+// to a global service. However, because this endpoint is physically located
+// in the US East (N. Virginia) Region, your logs list us-east-1 as the event
+// Region. CloudTrail does not write these logs to the US East (Ohio) Region
+// unless you choose to include global service logs in that Region. CloudTrail
+// writes calls to all Regional endpoints to their respective Regions. For example,
+// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio)
+// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU
+// (Frankfurt) Region.
+//
+// To learn more about CloudTrail, including how to turn it on and find your
+// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
 //
 // See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
 //
index e24884ef371a9971a16525d6677c65eccb5ea3c7..41ea09c356cea7537f48739d9760eb158046a928 100644 (file)
@@ -67,7 +67,7 @@ const (
        // STS is not activated in the requested region for the account that is being
        // asked to generate credentials. The account administrator must use the IAM
        // console to activate STS in that region. For more information, see Activating
-       // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+       // and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
        // in the IAM User Guide.
        ErrCodeRegionDisabledException = "RegionDisabledException"
 )
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
new file mode 100644 (file)
index 0000000..e2e1d6e
--- /dev/null
@@ -0,0 +1,96 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
+// for testing your code.
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters.
+package stsiface
+
+import (
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// STSAPI provides an interface to enable mocking the
+// sts.STS service client's API operation,
+// paginators, and waiters. This make unit testing your code that calls out
+// to the SDK's service client's calls easier.
+//
+// The best way to use this interface is to stub out the SDK's service client's
+// calls for unit testing your code with the SDK, without needing to inject
+// custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // AWS Security Token Service.
+//    func myFunc(svc stsiface.STSAPI) bool {
+//        // Make svc.AssumeRole request
+//    }
+//
+//    func main() {
+//        sess := session.New()
+//        svc := sts.New(sess)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockSTSClient struct {
+//        stsiface.STSAPI
+//    }
+//    func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockSTSClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
+type STSAPI interface {
+       AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+       AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
+       AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
+
+       AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
+       AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
+       AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
+
+       AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
+       AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
+       AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
+
+       DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
+       DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
+       DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
+
+       GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
+       GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
+       GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
+
+       GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
+       GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
+       GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
+
+       GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
+       GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
+       GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
+
+       GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
+       GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
+       GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
+}
+
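+// Compile-time check that *sts.STS satisfies the STSAPI interface.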
+var _ STSAPI = (*sts.STS)(nil)
index 7e215f2202965996deba8605f541aafffcf34237..2133562b01c3ae4c73fd290935621b5899b175ce 100644 (file)
@@ -29,26 +29,17 @@ package cmp
 import (
        "fmt"
        "reflect"
+       "strings"
 
        "github.com/google/go-cmp/cmp/internal/diff"
+       "github.com/google/go-cmp/cmp/internal/flags"
        "github.com/google/go-cmp/cmp/internal/function"
        "github.com/google/go-cmp/cmp/internal/value"
 )
 
-// BUG(dsnet): Maps with keys containing NaN values cannot be properly compared due to
-// the reflection package's inability to retrieve such entries. Equal will panic
-// anytime it comes across a NaN key, but this behavior may change.
-//
-// See https://golang.org/issue/11104 for more details.
-
-var nothing = reflect.Value{}
-
 // Equal reports whether x and y are equal by recursively applying the
 // following rules in the given order to x and y and all of their sub-values:
 //
-// • If two values are not of the same type, then they are never equal
-// and the overall result is false.
-//
 // • Let S be the set of all Ignore, Transformer, and Comparer options that
 // remain after applying all path filters, value filters, and type filters.
 // If at least one Ignore exists in S, then the comparison is ignored.
@@ -61,43 +52,79 @@ var nothing = reflect.Value{}
 //
 // • If the values have an Equal method of the form "(T) Equal(T) bool" or
 // "(T) Equal(I) bool" where T is assignable to I, then use the result of
-// x.Equal(y) even if x or y is nil.
-// Otherwise, no such method exists and evaluation proceeds to the next rule.
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
 //
 // • Lastly, try to compare x and y based on their basic kinds.
 // Simple kinds like booleans, integers, floats, complex numbers, strings, and
 // channels are compared using the equivalent of the == operator in Go.
 // Functions are only equal if they are both nil, otherwise they are unequal.
-// Pointers are equal if the underlying values they point to are also equal.
-// Interfaces are equal if their underlying concrete values are also equal.
 //
-// Structs are equal if all of their fields are equal. If a struct contains
-// unexported fields, Equal panics unless the AllowUnexported option is used or
-// an Ignore option (e.g., cmpopts.IgnoreUnexported) ignores that field.
+// Structs are equal if recursively calling Equal on all fields reports equal.
+// If a struct contains unexported fields, Equal panics unless an Ignore option
+// (e.g., cmpopts.IgnoreUnexported) ignores that field or the AllowUnexported
+// option explicitly permits comparing the unexported field.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements reports equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using cmpopts.EquateEmpty.
 //
-// Arrays, slices, and maps are equal if they are both nil or both non-nil
-// with the same length and the elements at each index or key are equal.
-// Note that a non-nil empty slice and a nil slice are not equal.
-// To equate empty slices and maps, consider using cmpopts.EquateEmpty.
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries reports equal.
 // Map keys are equal according to the == operator.
 // To use custom comparisons for map keys, consider using cmpopts.SortMaps.
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using cmpopts.EquateEmpty.
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
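+//
+// For example (illustrative), empty and nil slices differ unless an option
+// equates them:
+//
+//    cmp.Equal([]int{}, []int(nil))                        // false
+//    cmp.Equal([]int{}, []int(nil), cmpopts.EquateEmpty()) // true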
 func Equal(x, y interface{}, opts ...Option) bool {
+       vx := reflect.ValueOf(x)
+       vy := reflect.ValueOf(y)
+
+       // If the inputs are different types, auto-wrap them in an empty interface
+       // so that they have the same parent type.
+       var t reflect.Type
+       if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+               t = reflect.TypeOf((*interface{})(nil)).Elem()
+               if vx.IsValid() {
+                       vvx := reflect.New(t).Elem()
+                       vvx.Set(vx)
+                       vx = vvx
+               }
+               if vy.IsValid() {
+                       vvy := reflect.New(t).Elem()
+                       vvy.Set(vy)
+                       vy = vvy
+               }
+       } else {
+               t = vx.Type()
+       }
+
        s := newState(opts)
-       s.compareAny(reflect.ValueOf(x), reflect.ValueOf(y))
+       s.compareAny(&pathStep{t, vx, vy})
        return s.result.Equal()
 }
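+
+// Example usage (illustrative, not part of this file):
+//
+//    type T struct{ X int }
+//    equal := cmp.Equal(T{1}, T{1}) // true
+//    report := cmp.Diff(T{1}, T{2}) // non-empty diff report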
 
 // Diff returns a human-readable report of the differences between two values.
 // It returns an empty string if and only if Equal returns true for the same
-// input values and options. The output string will use the "-" symbol to
-// indicate elements removed from x, and the "+" symbol to indicate elements
-// added to y.
+// input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix to indicates an element added to y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses fmt.Stringer.String or error.Error methods to produce more human-readable
+// outputs. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
 //
-// Do not depend on this output being stable.
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
 func Diff(x, y interface{}, opts ...Option) string {
        r := new(defaultReporter)
-       opts = Options{Options(opts), r}
-       eq := Equal(x, y, opts...)
+       eq := Equal(x, y, Options(opts), Reporter(r))
        d := r.String()
        if (d == "") != eq {
                panic("inconsistent difference and equality results")
@@ -108,9 +135,13 @@ func Diff(x, y interface{}, opts ...Option) string {
 type state struct {
        // These fields represent the "comparison state".
        // Calling statelessCompare must not result in observable changes to these.
-       result   diff.Result // The current result of comparison
-       curPath  Path        // The current path in the value tree
-       reporter reporter    // Optional reporter used for difference formatting
+       result    diff.Result // The current result of comparison
+       curPath   Path        // The current path in the value tree
+       reporters []reporter  // Optional reporters
+
+       // recChecker checks for infinite cycles applying the same set of
+       // transformers upon the output of itself.
+       recChecker recChecker
 
        // dynChecker triggers pseudo-random checks for option correctness.
        // It is safe for statelessCompare to mutate this value.
@@ -122,10 +153,9 @@ type state struct {
 }
 
 func newState(opts []Option) *state {
-       s := new(state)
-       for _, opt := range opts {
-               s.processOption(opt)
-       }
+       // Always ensure a validator option exists to validate the inputs.
+       s := &state{opts: Options{validator{}}}
+       s.processOption(Options(opts))
        return s
 }
 
@@ -152,10 +182,7 @@ func (s *state) processOption(opt Option) {
                        s.exporters[t] = true
                }
        case reporter:
-               if s.reporter != nil {
-                       panic("difference reporter already registered")
-               }
-               s.reporter = opt
+               s.reporters = append(s.reporters, opt)
        default:
                panic(fmt.Sprintf("unknown option %T", opt))
        }
@@ -164,153 +191,88 @@ func (s *state) processOption(opt Option) {
 // statelessCompare compares two values and returns the result.
 // This function is stateless in that it does not alter the current result,
 // or output to any registered reporters.
-func (s *state) statelessCompare(vx, vy reflect.Value) diff.Result {
+func (s *state) statelessCompare(step PathStep) diff.Result {
        // We do not save and restore the curPath because all of the compareX
        // methods should properly push and pop from the path.
        // It is an implementation bug if the contents of curPath differs from
        // when calling this function to when returning from it.
 
-       oldResult, oldReporter := s.result, s.reporter
+       oldResult, oldReporters := s.result, s.reporters
        s.result = diff.Result{} // Reset result
-       s.reporter = nil         // Remove reporter to avoid spurious printouts
-       s.compareAny(vx, vy)
+       s.reporters = nil        // Remove reporters to avoid spurious printouts
+       s.compareAny(step)
        res := s.result
-       s.result, s.reporter = oldResult, oldReporter
+       s.result, s.reporters = oldResult, oldReporters
        return res
 }
 
-func (s *state) compareAny(vx, vy reflect.Value) {
-       // TODO: Support cyclic data structures.
-
-       // Rule 0: Differing types are never equal.
-       if !vx.IsValid() || !vy.IsValid() {
-               s.report(vx.IsValid() == vy.IsValid(), vx, vy)
-               return
-       }
-       if vx.Type() != vy.Type() {
-               s.report(false, vx, vy) // Possible for path to be empty
-               return
-       }
-       t := vx.Type()
-       if len(s.curPath) == 0 {
-               s.curPath.push(&pathStep{typ: t})
-               defer s.curPath.pop()
+func (s *state) compareAny(step PathStep) {
+       // Update the path stack.
+       s.curPath.push(step)
+       defer s.curPath.pop()
+       for _, r := range s.reporters {
+               r.PushStep(step)
+               defer r.PopStep()
        }
-       vx, vy = s.tryExporting(vx, vy)
+       s.recChecker.Check(s.curPath)
+
+       // Obtain the current type and values.
+       t := step.Type()
+       vx, vy := step.Values()
 
        // Rule 1: Check whether an option applies on this node in the value tree.
-       if s.tryOptions(vx, vy, t) {
+       if s.tryOptions(t, vx, vy) {
                return
        }
 
        // Rule 2: Check whether the type has a valid Equal method.
-       if s.tryMethod(vx, vy, t) {
+       if s.tryMethod(t, vx, vy) {
                return
        }
 
-       // Rule 3: Recursively descend into each value's underlying kind.
+       // Rule 3: Compare based on the underlying kind.
        switch t.Kind() {
        case reflect.Bool:
-               s.report(vx.Bool() == vy.Bool(), vx, vy)
-               return
+               s.report(vx.Bool() == vy.Bool(), 0)
        case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-               s.report(vx.Int() == vy.Int(), vx, vy)
-               return
+               s.report(vx.Int() == vy.Int(), 0)
        case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-               s.report(vx.Uint() == vy.Uint(), vx, vy)
-               return
+               s.report(vx.Uint() == vy.Uint(), 0)
        case reflect.Float32, reflect.Float64:
-               s.report(vx.Float() == vy.Float(), vx, vy)
-               return
+               s.report(vx.Float() == vy.Float(), 0)
        case reflect.Complex64, reflect.Complex128:
-               s.report(vx.Complex() == vy.Complex(), vx, vy)
-               return
+               s.report(vx.Complex() == vy.Complex(), 0)
        case reflect.String:
-               s.report(vx.String() == vy.String(), vx, vy)
-               return
+               s.report(vx.String() == vy.String(), 0)
        case reflect.Chan, reflect.UnsafePointer:
-               s.report(vx.Pointer() == vy.Pointer(), vx, vy)
-               return
+               s.report(vx.Pointer() == vy.Pointer(), 0)
        case reflect.Func:
-               s.report(vx.IsNil() && vy.IsNil(), vx, vy)
-               return
+               s.report(vx.IsNil() && vy.IsNil(), 0)
+       case reflect.Struct:
+               s.compareStruct(t, vx, vy)
+       case reflect.Slice, reflect.Array:
+               s.compareSlice(t, vx, vy)
+       case reflect.Map:
+               s.compareMap(t, vx, vy)
        case reflect.Ptr:
-               if vx.IsNil() || vy.IsNil() {
-                       s.report(vx.IsNil() && vy.IsNil(), vx, vy)
-                       return
-               }
-               s.curPath.push(&indirect{pathStep{t.Elem()}})
-               defer s.curPath.pop()
-               s.compareAny(vx.Elem(), vy.Elem())
-               return
+               s.comparePtr(t, vx, vy)
        case reflect.Interface:
-               if vx.IsNil() || vy.IsNil() {
-                       s.report(vx.IsNil() && vy.IsNil(), vx, vy)
-                       return
-               }
-               if vx.Elem().Type() != vy.Elem().Type() {
-                       s.report(false, vx.Elem(), vy.Elem())
-                       return
-               }
-               s.curPath.push(&typeAssertion{pathStep{vx.Elem().Type()}})
-               defer s.curPath.pop()
-               s.compareAny(vx.Elem(), vy.Elem())
-               return
-       case reflect.Slice:
-               if vx.IsNil() || vy.IsNil() {
-                       s.report(vx.IsNil() && vy.IsNil(), vx, vy)
-                       return
-               }
-               fallthrough
-       case reflect.Array:
-               s.compareArray(vx, vy, t)
-               return
-       case reflect.Map:
-               s.compareMap(vx, vy, t)
-               return
-       case reflect.Struct:
-               s.compareStruct(vx, vy, t)
-               return
+               s.compareInterface(t, vx, vy)
        default:
                panic(fmt.Sprintf("%v kind not handled", t.Kind()))
        }
 }
 
-func (s *state) tryExporting(vx, vy reflect.Value) (reflect.Value, reflect.Value) {
-       if sf, ok := s.curPath[len(s.curPath)-1].(*structField); ok && sf.unexported {
-               if sf.force {
-                       // Use unsafe pointer arithmetic to get read-write access to an
-                       // unexported field in the struct.
-                       vx = unsafeRetrieveField(sf.pvx, sf.field)
-                       vy = unsafeRetrieveField(sf.pvy, sf.field)
-               } else {
-                       // We are not allowed to export the value, so invalidate them
-                       // so that tryOptions can panic later if not explicitly ignored.
-                       vx = nothing
-                       vy = nothing
-               }
-       }
-       return vx, vy
-}
-
-func (s *state) tryOptions(vx, vy reflect.Value, t reflect.Type) bool {
-       // If there were no FilterValues, we will not detect invalid inputs,
-       // so manually check for them and append invalid if necessary.
-       // We still evaluate the options since an ignore can override invalid.
-       opts := s.opts
-       if !vx.IsValid() || !vy.IsValid() {
-               opts = Options{opts, invalid{}}
-       }
-
+func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
        // Evaluate all filters and apply the remaining options.
-       if opt := opts.filter(s, vx, vy, t); opt != nil {
+       if opt := s.opts.filter(s, t, vx, vy); opt != nil {
                opt.apply(s, vx, vy)
                return true
        }
        return false
 }
 
-func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
+func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
        // Check if this type even has an Equal method.
        m, ok := t.MethodByName("Equal")
        if !ok || !function.IsType(m.Type, function.EqualAssignable) {
@@ -318,11 +280,11 @@ func (s *state) tryMethod(vx, vy reflect.Value, t reflect.Type) bool {
        }
 
        eq := s.callTTBFunc(m.Func, vx, vy)
-       s.report(eq, vx, vy)
+       s.report(eq, reportByMethod)
        return true
 }
 
-func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
+func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
        v = sanitizeValue(v, f.Type().In(0))
        if !s.dynChecker.Next() {
                return f.Call([]reflect.Value{v})[0]
@@ -333,15 +295,15 @@ func (s *state) callTRFunc(f, v reflect.Value) reflect.Value {
        // unsafe mutations to the input.
        c := make(chan reflect.Value)
        go detectRaces(c, f, v)
+       got := <-c
        want := f.Call([]reflect.Value{v})[0]
-       if got := <-c; !s.statelessCompare(got, want).Equal() {
+       if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
                // To avoid false-positives with non-reflexive equality operations,
                // we sanity check whether a value is equal to itself.
-               if !s.statelessCompare(want, want).Equal() {
+               if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
                        return want
                }
-               fn := getFuncName(f.Pointer())
-               panic(fmt.Sprintf("non-deterministic function detected: %s", fn))
+               panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
        }
        return want
 }
@@ -359,10 +321,10 @@ func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
        // unsafe mutations to the input.
        c := make(chan reflect.Value)
        go detectRaces(c, f, y, x)
+       got := <-c
        want := f.Call([]reflect.Value{x, y})[0].Bool()
-       if got := <-c; !got.IsValid() || got.Bool() != want {
-               fn := getFuncName(f.Pointer())
-               panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", fn))
+       if !got.IsValid() || got.Bool() != want {
+               panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
        }
        return want
 }
@@ -380,140 +342,241 @@ func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
 // assuming that T is assignable to R.
 // Otherwise, it returns the input value as is.
 func sanitizeValue(v reflect.Value, t reflect.Type) reflect.Value {
-       // TODO(dsnet): Remove this hacky workaround.
-       // See https://golang.org/issue/22143
-       if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
-               return reflect.New(t).Elem()
+       // TODO(dsnet): Workaround for reflect bug (https://golang.org/issue/22143).
+       if !flags.AtLeastGo110 {
+               if v.Kind() == reflect.Interface && v.IsNil() && v.Type() != t {
+                       return reflect.New(t).Elem()
+               }
        }
        return v
 }
 
-func (s *state) compareArray(vx, vy reflect.Value, t reflect.Type) {
-       step := &sliceIndex{pathStep{t.Elem()}, 0, 0}
-       s.curPath.push(step)
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+       var vax, vay reflect.Value // Addressable versions of vx and vy
 
-       // Compute an edit-script for slices vx and vy.
-       es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
-               step.xkey, step.ykey = ix, iy
-               return s.statelessCompare(vx.Index(ix), vy.Index(iy))
-       })
+       step := StructField{&structField{}}
+       for i := 0; i < t.NumField(); i++ {
+               step.typ = t.Field(i).Type
+               step.vx = vx.Field(i)
+               step.vy = vy.Field(i)
+               step.name = t.Field(i).Name
+               step.idx = i
+               step.unexported = !isExported(step.name)
+               if step.unexported {
+                       if step.name == "_" {
+                               continue
+                       }
+                       // Defer checking of unexported fields until later to give an
+                       // Ignore a chance to ignore the field.
+                       if !vax.IsValid() || !vay.IsValid() {
+                               // For retrieveUnexportedField to work, the parent struct must
+                               // be addressable. Create a new copy of the values if
+                               // necessary to make them addressable.
+                               vax = makeAddressable(vx)
+                               vay = makeAddressable(vy)
+                       }
+                       step.mayForce = s.exporters[t]
+                       step.pvx = vax
+                       step.pvy = vay
+                       step.field = t.Field(i)
+               }
+               s.compareAny(step)
+       }
+}
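
compareStruct defers unexported fields so that an Ignore option gets a chance to claim them before retrieveUnexportedField is needed. From the public API, the two usual escape hatches look like this (the account type is hypothetical):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

type account struct {
	Name    string
	balance int // unexported: cmp panics on this field without an option
}

func main() {
	x := account{Name: "a", balance: 1}
	y := account{Name: "a", balance: 2}

	// Either grant access to unexported fields of account...
	fmt.Println(cmp.Equal(x, y, cmp.AllowUnexported(account{}))) // false
	// ...or ignore them entirely.
	fmt.Println(cmp.Equal(x, y, cmpopts.IgnoreUnexported(account{}))) // true
}
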
 
-       // Report the entire slice as is if the arrays are of primitive kind,
-       // and the arrays are different enough.
-       isPrimitive := false
-       switch t.Elem().Kind() {
-       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
-               reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
-               reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
-               isPrimitive = true
-       }
-       if isPrimitive && es.Dist() > (vx.Len()+vy.Len())/4 {
-               s.curPath.pop() // Pop first since we are reporting the whole slice
-               s.report(false, vx, vy)
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
+       isSlice := t.Kind() == reflect.Slice
+       if isSlice && (vx.IsNil() || vy.IsNil()) {
+               s.report(vx.IsNil() && vy.IsNil(), 0)
                return
        }
 
-       // Replay the edit-script.
+       // TODO: Support cyclic data structures.
+
+       step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}}}
+       withIndexes := func(ix, iy int) SliceIndex {
+               if ix >= 0 {
+                       step.vx, step.xkey = vx.Index(ix), ix
+               } else {
+                       step.vx, step.xkey = reflect.Value{}, -1
+               }
+               if iy >= 0 {
+                       step.vy, step.ykey = vy.Index(iy), iy
+               } else {
+                       step.vy, step.ykey = reflect.Value{}, -1
+               }
+               return step
+       }
+
+       // Ignore options are able to ignore missing elements in a slice.
+       // However, detecting these reliably requires an optimal differencing
+       // algorithm, which diff.Difference is not.
+       //
+       // Instead, we first iterate through both slices to detect which elements
+       // would be ignored if standing alone. The indexes of non-discarded
+       // elements are stored in separate slices, on which diffing is performed.
+       var indexesX, indexesY []int
+       var ignoredX, ignoredY []bool
+       for ix := 0; ix < vx.Len(); ix++ {
+               ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+               if !ignored {
+                       indexesX = append(indexesX, ix)
+               }
+               ignoredX = append(ignoredX, ignored)
+       }
+       for iy := 0; iy < vy.Len(); iy++ {
+               ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+               if !ignored {
+                       indexesY = append(indexesY, iy)
+               }
+               ignoredY = append(ignoredY, ignored)
+       }
+
+       // Compute an edit-script for slices vx and vy (excluding ignored elements).
+       edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+               return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+       })
+
+       // Replay the ignore-scripts and the edit-script.
        var ix, iy int
-       for _, e := range es {
+       for ix < vx.Len() || iy < vy.Len() {
+               var e diff.EditType
+               switch {
+               case ix < len(ignoredX) && ignoredX[ix]:
+                       e = diff.UniqueX
+               case iy < len(ignoredY) && ignoredY[iy]:
+                       e = diff.UniqueY
+               default:
+                       e, edits = edits[0], edits[1:]
+               }
                switch e {
                case diff.UniqueX:
-                       step.xkey, step.ykey = ix, -1
-                       s.report(false, vx.Index(ix), nothing)
+                       s.compareAny(withIndexes(ix, -1))
                        ix++
                case diff.UniqueY:
-                       step.xkey, step.ykey = -1, iy
-                       s.report(false, nothing, vy.Index(iy))
+                       s.compareAny(withIndexes(-1, iy))
                        iy++
                default:
-                       step.xkey, step.ykey = ix, iy
-                       if e == diff.Identity {
-                               s.report(true, vx.Index(ix), vy.Index(iy))
-                       } else {
-                               s.compareAny(vx.Index(ix), vy.Index(iy))
-                       }
+                       s.compareAny(withIndexes(ix, iy))
                        ix++
                        iy++
                }
        }
-       s.curPath.pop()
-       return
 }
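
The two-pass scheme above (mark ignorable elements first, then diff only the survivors) is what allows options to discard individual slice elements. Assuming the cmpopts helper introduced alongside this mechanism, a short example:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// Elements equal to -1 are discarded on both sides before diffing,
	// so they never show up as inserted or removed elements.
	opt := cmpopts.IgnoreSliceElements(func(v int) bool { return v == -1 })

	x := []int{1, -1, 2, 3}
	y := []int{1, 2, -1, 3, -1}
	fmt.Println(cmp.Equal(x, y, opt)) // true: both reduce to [1 2 3]
}
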
 
-func (s *state) compareMap(vx, vy reflect.Value, t reflect.Type) {
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
        if vx.IsNil() || vy.IsNil() {
-               s.report(vx.IsNil() && vy.IsNil(), vx, vy)
+               s.report(vx.IsNil() && vy.IsNil(), 0)
                return
        }
 
+       // TODO: Support cyclic data structures.
+
        // We combine and sort the two map keys so that we can perform the
        // comparisons in a deterministic order.
-       step := &mapIndex{pathStep: pathStep{t.Elem()}}
-       s.curPath.push(step)
-       defer s.curPath.pop()
+       step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
        for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+               step.vx = vx.MapIndex(k)
+               step.vy = vy.MapIndex(k)
                step.key = k
-               vvx := vx.MapIndex(k)
-               vvy := vy.MapIndex(k)
-               switch {
-               case vvx.IsValid() && vvy.IsValid():
-                       s.compareAny(vvx, vvy)
-               case vvx.IsValid() && !vvy.IsValid():
-                       s.report(false, vvx, nothing)
-               case !vvx.IsValid() && vvy.IsValid():
-                       s.report(false, nothing, vvy)
-               default:
-                       // It is possible for both vvx and vvy to be invalid if the
-                       // key contained a NaN value in it. There is no way in
-                       // reflection to be able to retrieve these values.
-                       // See https://golang.org/issue/11104
-                       panic(fmt.Sprintf("%#v has map key with NaNs", s.curPath))
+               if !step.vx.IsValid() && !step.vy.IsValid() {
+                       // It is possible for both vx and vy to be invalid if the
+                       // key contained a NaN value in it.
+                       //
+                       // Even with the ability to retrieve NaN keys in Go 1.12,
+                       // there still isn't a sensible way to compare the values since
+                       // a NaN key may map to multiple unordered values.
+                       // The most reasonable way to compare NaNs would be to compare the
+                       // set of values. However, this is impossible to do efficiently
+                       // since set equality is provably an O(n^2) operation given only
+                       // an Equal function. If we had a Less function or Hash function,
+                       // this could be done in O(n*log(n)) or O(n), respectively.
+                       //
+                       // Rather than adding complex logic to deal with NaNs, make it
+                       // the user's responsibility to compare such obscure maps.
+                       const help = "consider providing a Comparer to compare the map"
+                       panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
                }
+               s.compareAny(step)
        }
 }
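
Per the panic text above, maps with NaN keys must be handled by a user-supplied Comparer on the whole map. A hedged sketch (illustrative only; a real Comparer must be symmetric and deterministic, and this one simply treats all NaN keys as interchangeable):

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	x := map[float64]bool{math.NaN(): true}
	y := map[float64]bool{math.NaN(): true}

	// Without the option below, cmp.Equal panics: "... has map key with NaNs".
	opt := cmp.Comparer(func(a, b map[float64]bool) bool {
		if len(a) != len(b) {
			return false
		}
		for k, v := range a {
			if math.IsNaN(k) {
				continue // NaN keys cannot be looked up via reflection
			}
			if bv, ok := b[k]; !ok || bv != v {
				return false
			}
		}
		return true
	})
	fmt.Println(cmp.Equal(x, y, opt)) // true
}
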
 
-func (s *state) compareStruct(vx, vy reflect.Value, t reflect.Type) {
-       var vax, vay reflect.Value // Addressable versions of vx and vy
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
+       if vx.IsNil() || vy.IsNil() {
+               s.report(vx.IsNil() && vy.IsNil(), 0)
+               return
+       }
 
-       step := &structField{}
-       s.curPath.push(step)
-       defer s.curPath.pop()
-       for i := 0; i < t.NumField(); i++ {
-               vvx := vx.Field(i)
-               vvy := vy.Field(i)
-               step.typ = t.Field(i).Type
-               step.name = t.Field(i).Name
-               step.idx = i
-               step.unexported = !isExported(step.name)
-               if step.unexported {
-                       // Defer checking of unexported fields until later to give an
-                       // Ignore a chance to ignore the field.
-                       if !vax.IsValid() || !vay.IsValid() {
-                               // For unsafeRetrieveField to work, the parent struct must
-                               // be addressable. Create a new copy of the values if
-                               // necessary to make them addressable.
-                               vax = makeAddressable(vx)
-                               vay = makeAddressable(vy)
-                       }
-                       step.force = s.exporters[t]
-                       step.pvx = vax
-                       step.pvy = vay
-                       step.field = t.Field(i)
+       // TODO: Support cyclic data structures.
+
+       vx, vy = vx.Elem(), vy.Elem()
+       s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
+       if vx.IsNil() || vy.IsNil() {
+               s.report(vx.IsNil() && vy.IsNil(), 0)
+               return
+       }
+       vx, vy = vx.Elem(), vy.Elem()
+       if vx.Type() != vy.Type() {
+               s.report(false, 0)
+               return
+       }
+       s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) {
+       if rf&reportByIgnore == 0 {
+               if eq {
+                       s.result.NumSame++
+                       rf |= reportEqual
+               } else {
+                       s.result.NumDiff++
+                       rf |= reportUnequal
                }
-               s.compareAny(vvx, vvy)
+       }
+       for _, r := range s.reporters {
+               r.Report(Result{flags: rf})
        }
 }
 
-// report records the result of a single comparison.
-// It also calls Report if any reporter is registered.
-func (s *state) report(eq bool, vx, vy reflect.Value) {
-       if eq {
-               s.result.NSame++
-       } else {
-               s.result.NDiff++
+// recChecker tracks the state needed to periodically perform checks that
+// user-provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for any recursive transformers and panics when any
+// recursive transformers are detected. Note that the presence of a
+// recursive Transformer does not necessarily imply an infinite cycle.
+// As such, this check only activates after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+       const minLen = 1 << 16
+       if rc.next == 0 {
+               rc.next = minLen
+       }
+       if len(p) < rc.next {
+               return
+       }
+       rc.next <<= 1
+
+       // Check whether the same transformer has appeared at least twice.
+       var ss []string
+       m := map[Option]int{}
+       for _, ps := range p {
+               if t, ok := ps.(Transform); ok {
+                       t := t.Option()
+                       if m[t] == 1 { // Transformer was used exactly once before
+                               tf := t.(*transformer).fnc.Type()
+                               ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+                       }
+                       m[t]++
+               }
        }
-       if s.reporter != nil {
-               s.reporter.Report(vx, vy, eq, s.curPath)
+       if len(ss) > 0 {
+               const warning = "recursive set of Transformers detected"
+               const help = "consider using cmpopts.AcyclicTransformer"
+               set := strings.Join(ss, "\n\t")
+               panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
        }
 }
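
recChecker doubles its threshold each time, so the scan cost stays amortized; when it fires, the panic points at cmpopts.AcyclicTransformer. An example of the kind of transformer that needs it (string -> []string re-applies to every element it produces):

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	// A plain cmp.Transformer("Split", ...) would recurse forever here:
	// each produced element is itself a string the transformer applies to.
	// AcyclicTransformer adds a path filter that stops the re-application.
	opt := cmpopts.AcyclicTransformer("Split", func(s string) []string {
		return strings.Split(s, ",")
	})
	fmt.Println(cmp.Equal("a,b,c", "a,b,c", opt)) // true
}
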
 
similarity index 60%
rename from vendor/github.com/google/go-cmp/cmp/unsafe_panic.go
rename to vendor/github.com/google/go-cmp/cmp/export_panic.go
index d1518eb3a8c7857976a5eac119b7c2f32b3b4068..abc3a1c3e7655f2c8af24bc4df5f329ade8049f2 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// +build purego appengine js
+// +build purego
 
 package cmp
 
@@ -10,6 +10,6 @@ import "reflect"
 
 const supportAllowUnexported = false
 
-func unsafeRetrieveField(reflect.Value, reflect.StructField) reflect.Value {
-       panic("unsafeRetrieveField is not implemented")
+func retrieveUnexportedField(reflect.Value, reflect.StructField) reflect.Value {
+       panic("retrieveUnexportedField is not implemented")
 }
similarity index 64%
rename from vendor/github.com/google/go-cmp/cmp/unsafe_reflect.go
rename to vendor/github.com/google/go-cmp/cmp/export_unsafe.go
index 579b65507f6b836b02b4c6670cba0c568d0dbe75..59d4ee91b47f4269e58e98f0c209da97c892b475 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// +build !purego,!appengine,!js
+// +build !purego
 
 package cmp
 
@@ -13,11 +13,11 @@ import (
 
 const supportAllowUnexported = true
 
-// unsafeRetrieveField uses unsafe to forcibly retrieve any field from a struct
-// such that the value has read-write permissions.
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
 //
 // The parent struct, v, must be addressable, while f must be a StructField
 // describing the field to retrieve.
-func unsafeRetrieveField(v reflect.Value, f reflect.StructField) reflect.Value {
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField) reflect.Value {
        return reflect.NewAt(f.Type, unsafe.Pointer(v.UnsafeAddr()+f.Offset)).Elem()
 }
index 42afa4960efa0261f614d8e49df51d095a2856ab..fe98dcc677462dad633078bbb50b4674974415d0 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// +build !debug
+// +build !cmp_debug
 
 package diff
 
index fd9f7f1773992e5bb08df81fbbf537bd5e652371..597b6ae56b1b2c96e73f3063c5da8e38c65c4749 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// +build debug
+// +build cmp_debug
 
 package diff
 
@@ -14,7 +14,7 @@ import (
 )
 
 // The algorithm can be seen running in real-time by enabling debugging:
-//     go test -tags=debug -v
+//     go test -tags=cmp_debug -v
 //
 // Example output:
 //     === RUN   TestDifference/#34
index 260befea2fd7e6fc87532105ee66609473d26db7..3d2e42662ca3e4b4e34d2a886e4473060232c45d 100644 (file)
@@ -85,22 +85,31 @@ func (es EditScript) LenY() int { return len(es) - es.stats().NX }
 type EqualFunc func(ix int, iy int) Result
 
 // Result is the result of comparison.
-// NSame is the number of sub-elements that are equal.
-// NDiff is the number of sub-elements that are not equal.
-type Result struct{ NSame, NDiff int }
+// NumSame is the number of sub-elements that are equal.
+// NumDiff is the number of sub-elements that are not equal.
+type Result struct{ NumSame, NumDiff int }
+
+// BoolResult returns a Result that is either Equal or not Equal.
+func BoolResult(b bool) Result {
+       if b {
+               return Result{NumSame: 1} // Equal, Similar
+       } else {
+               return Result{NumDiff: 2} // Not Equal, not Similar
+       }
+}
 
 // Equal indicates whether the symbols are equal. Two symbols are equal
-// if and only if NDiff == 0. If Equal, then they are also Similar.
-func (r Result) Equal() bool { return r.NDiff == 0 }
+// if and only if NumDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NumDiff == 0 }
 
 // Similar indicates whether two symbols are similar and may be represented
 // by using the Modified type. As a special case, we consider binary comparisons
 // (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
 //
-// The exact ratio of NSame to NDiff to determine similarity may change.
+// The exact ratio of NumSame to NumDiff to determine similarity may change.
 func (r Result) Similar() bool {
-       // Use NSame+1 to offset NSame so that binary comparisons are similar.
-       return r.NSame+1 >= r.NDiff
+       // Use NumSame+1 to offset NumSame so that binary comparisons are similar.
+       return r.NumSame+1 >= r.NumDiff
 }
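
Since the diff package is internal, this standalone snippet merely restates the renamed similarity rule and its edge cases:

package main

import "fmt"

// similar mirrors Result.Similar: NumSame+1 >= NumDiff, so the binary
// results Result{1, 0} and Result{0, 1} both count as similar.
func similar(numSame, numDiff int) bool { return numSame+1 >= numDiff }

func main() {
	fmt.Println(similar(1, 0)) // true: equal
	fmt.Println(similar(0, 1)) // true: the binary-comparison special case
	fmt.Println(similar(0, 2)) // false: BoolResult(false) is not similar
	fmt.Println(similar(3, 5)) // false: too many differing sub-elements
}
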
 
 // Difference reports whether two lists of lengths nx and ny are equal
@@ -191,9 +200,9 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
        // that two lists commonly differ because elements were added to the front
        // or end of the other list.
        //
-       // Running the tests with the "debug" build tag prints a visualization of
-       // the algorithm running in real-time. This is educational for understanding
-       // how the algorithm works. See debug_enable.go.
+       // Running the tests with the "cmp_debug" build tag prints a visualization
+       // of the algorithm running in real-time. This is educational for
+       // understanding how the algorithm works. See debug_enable.go.
        f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
        for {
                // Forward search from the beginning.
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
new file mode 100644 (file)
index 0000000..a9e7fc0
--- /dev/null
@@ -0,0 +1,9 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package flags
+
+// Deterministic controls whether the output of Diff should be deterministic.
+// This is only used for testing.
+var Deterministic bool
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go
new file mode 100644 (file)
index 0000000..01aed0a
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !go1.10
+
+package flags
+
+// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
+const AtLeastGo110 = false
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go
new file mode 100644 (file)
index 0000000..c0b667f
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build go1.10
+
+package flags
+
+// AtLeastGo110 reports whether the Go toolchain is at least Go 1.10.
+const AtLeastGo110 = true
index 4c35ff11ee13d6f61a0aa207ea9639d59db6dd51..ace1dbe86e577fc525854f3e124c116d0b1ceaa2 100644 (file)
@@ -2,25 +2,34 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE.md file.
 
-// Package function identifies function types.
+// Package function provides functionality for identifying function types.
 package function
 
-import "reflect"
+import (
+       "reflect"
+       "regexp"
+       "runtime"
+       "strings"
+)
 
 type funcType int
 
 const (
        _ funcType = iota
 
+       tbFunc  // func(T) bool
        ttbFunc // func(T, T) bool
+       trbFunc // func(T, R) bool
        tibFunc // func(T, I) bool
        trFunc  // func(T) R
 
-       Equal           = ttbFunc // func(T, T) bool
-       EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
-       Transformer     = trFunc  // func(T) R
-       ValueFilter     = ttbFunc // func(T, T) bool
-       Less            = ttbFunc // func(T, T) bool
+       Equal             = ttbFunc // func(T, T) bool
+       EqualAssignable   = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+       Transformer       = trFunc  // func(T) R
+       ValueFilter       = ttbFunc // func(T, T) bool
+       Less              = ttbFunc // func(T, T) bool
+       ValuePredicate    = tbFunc  // func(T) bool
+       KeyValuePredicate = trbFunc // func(T, R) bool
 )
 
 var boolType = reflect.TypeOf(true)
@@ -32,10 +41,18 @@ func IsType(t reflect.Type, ft funcType) bool {
        }
        ni, no := t.NumIn(), t.NumOut()
        switch ft {
+       case tbFunc: // func(T) bool
+               if ni == 1 && no == 1 && t.Out(0) == boolType {
+                       return true
+               }
        case ttbFunc: // func(T, T) bool
                if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
                        return true
                }
+       case trbFunc: // func(T, R) bool
+               if ni == 2 && no == 1 && t.Out(0) == boolType {
+                       return true
+               }
        case tibFunc: // func(T, I) bool
                if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
                        return true
@@ -47,3 +64,36 @@ func IsType(t reflect.Type, ft funcType) bool {
        }
        return false
 }
+
+var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
+
+// NameOf returns the name of the function value.
+func NameOf(v reflect.Value) string {
+       fnc := runtime.FuncForPC(v.Pointer())
+       if fnc == nil {
+               return "<unknown>"
+       }
+       fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
+
+       // Method closures have a "-fm" suffix.
+       fullName = strings.TrimSuffix(fullName, "-fm")
+
+       var name string
+       for len(fullName) > 0 {
+               inParen := strings.HasSuffix(fullName, ")")
+               fullName = strings.TrimSuffix(fullName, ")")
+
+               s := lastIdentRx.FindString(fullName)
+               if s == "" {
+                       break
+               }
+               name = s + "." + name
+               fullName = strings.TrimSuffix(fullName, s)
+
+               if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
+                       fullName = fullName[:i]
+               }
+               fullName = strings.TrimSuffix(fullName, ".")
+       }
+       return strings.TrimSuffix(name, ".")
+}
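
NameOf parses the symbolized name reported by the runtime. The package is internal, but the raw input it works on is easy to inspect directly:

package main

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
)

func main() {
	// The runtime reports a fully qualified name; NameOf trims it down to
	// the trailing identifiers, e.g. "strings.ToUpper" stays as-is, while
	// method closures lose their import paths and "-fm" suffix.
	f := reflect.ValueOf(strings.ToUpper)
	fmt.Println(runtime.FuncForPC(f.Pointer()).Name()) // "strings.ToUpper"
}
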
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/format.go b/vendor/github.com/google/go-cmp/cmp/internal/value/format.go
deleted file mode 100644 (file)
index 657e508..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
-
-// Package value provides functionality for reflect.Value types.
-package value
-
-import (
-       "fmt"
-       "reflect"
-       "strconv"
-       "strings"
-       "unicode"
-)
-
-var stringerIface = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-
-// Format formats the value v as a string.
-//
-// This is similar to fmt.Sprintf("%+v", v) except this:
-//     * Prints the type unless it can be elided
-//     * Avoids printing struct fields that are zero
-//     * Prints a nil-slice as being nil, not empty
-//     * Prints map entries in deterministic order
-func Format(v reflect.Value, conf FormatConfig) string {
-       conf.printType = true
-       conf.followPointers = true
-       conf.realPointers = true
-       return formatAny(v, conf, nil)
-}
-
-type FormatConfig struct {
-       UseStringer        bool // Should the String method be used if available?
-       printType          bool // Should we print the type before the value?
-       PrintPrimitiveType bool // Should we print the type of primitives?
-       followPointers     bool // Should we recursively follow pointers?
-       realPointers       bool // Should we print the real address of pointers?
-}
-
-func formatAny(v reflect.Value, conf FormatConfig, visited map[uintptr]bool) string {
-       // TODO: Should this be a multi-line printout in certain situations?
-
-       if !v.IsValid() {
-               return "<non-existent>"
-       }
-       if conf.UseStringer && v.Type().Implements(stringerIface) && v.CanInterface() {
-               if (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && v.IsNil() {
-                       return "<nil>"
-               }
-
-               const stringerPrefix = "s" // Indicates that the String method was used
-               s := v.Interface().(fmt.Stringer).String()
-               return stringerPrefix + formatString(s)
-       }
-
-       switch v.Kind() {
-       case reflect.Bool:
-               return formatPrimitive(v.Type(), v.Bool(), conf)
-       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-               return formatPrimitive(v.Type(), v.Int(), conf)
-       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-               if v.Type().PkgPath() == "" || v.Kind() == reflect.Uintptr {
-                       // Unnamed uints are usually bytes or words, so use hexadecimal.
-                       return formatPrimitive(v.Type(), formatHex(v.Uint()), conf)
-               }
-               return formatPrimitive(v.Type(), v.Uint(), conf)
-       case reflect.Float32, reflect.Float64:
-               return formatPrimitive(v.Type(), v.Float(), conf)
-       case reflect.Complex64, reflect.Complex128:
-               return formatPrimitive(v.Type(), v.Complex(), conf)
-       case reflect.String:
-               return formatPrimitive(v.Type(), formatString(v.String()), conf)
-       case reflect.UnsafePointer, reflect.Chan, reflect.Func:
-               return formatPointer(v, conf)
-       case reflect.Ptr:
-               if v.IsNil() {
-                       if conf.printType {
-                               return fmt.Sprintf("(%v)(nil)", v.Type())
-                       }
-                       return "<nil>"
-               }
-               if visited[v.Pointer()] || !conf.followPointers {
-                       return formatPointer(v, conf)
-               }
-               visited = insertPointer(visited, v.Pointer())
-               return "&" + formatAny(v.Elem(), conf, visited)
-       case reflect.Interface:
-               if v.IsNil() {
-                       if conf.printType {
-                               return fmt.Sprintf("%v(nil)", v.Type())
-                       }
-                       return "<nil>"
-               }
-               return formatAny(v.Elem(), conf, visited)
-       case reflect.Slice:
-               if v.IsNil() {
-                       if conf.printType {
-                               return fmt.Sprintf("%v(nil)", v.Type())
-                       }
-                       return "<nil>"
-               }
-               if visited[v.Pointer()] {
-                       return formatPointer(v, conf)
-               }
-               visited = insertPointer(visited, v.Pointer())
-               fallthrough
-       case reflect.Array:
-               var ss []string
-               subConf := conf
-               subConf.printType = v.Type().Elem().Kind() == reflect.Interface
-               for i := 0; i < v.Len(); i++ {
-                       s := formatAny(v.Index(i), subConf, visited)
-                       ss = append(ss, s)
-               }
-               s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
-               if conf.printType {
-                       return v.Type().String() + s
-               }
-               return s
-       case reflect.Map:
-               if v.IsNil() {
-                       if conf.printType {
-                               return fmt.Sprintf("%v(nil)", v.Type())
-                       }
-                       return "<nil>"
-               }
-               if visited[v.Pointer()] {
-                       return formatPointer(v, conf)
-               }
-               visited = insertPointer(visited, v.Pointer())
-
-               var ss []string
-               keyConf, valConf := conf, conf
-               keyConf.printType = v.Type().Key().Kind() == reflect.Interface
-               keyConf.followPointers = false
-               valConf.printType = v.Type().Elem().Kind() == reflect.Interface
-               for _, k := range SortKeys(v.MapKeys()) {
-                       sk := formatAny(k, keyConf, visited)
-                       sv := formatAny(v.MapIndex(k), valConf, visited)
-                       ss = append(ss, fmt.Sprintf("%s: %s", sk, sv))
-               }
-               s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
-               if conf.printType {
-                       return v.Type().String() + s
-               }
-               return s
-       case reflect.Struct:
-               var ss []string
-               subConf := conf
-               subConf.printType = true
-               for i := 0; i < v.NumField(); i++ {
-                       vv := v.Field(i)
-                       if isZero(vv) {
-                               continue // Elide zero value fields
-                       }
-                       name := v.Type().Field(i).Name
-                       subConf.UseStringer = conf.UseStringer
-                       s := formatAny(vv, subConf, visited)
-                       ss = append(ss, fmt.Sprintf("%s: %s", name, s))
-               }
-               s := fmt.Sprintf("{%s}", strings.Join(ss, ", "))
-               if conf.printType {
-                       return v.Type().String() + s
-               }
-               return s
-       default:
-               panic(fmt.Sprintf("%v kind not handled", v.Kind()))
-       }
-}
-
-func formatString(s string) string {
-       // Use quoted string if it the same length as a raw string literal.
-       // Otherwise, attempt to use the raw string form.
-       qs := strconv.Quote(s)
-       if len(qs) == 1+len(s)+1 {
-               return qs
-       }
-
-       // Disallow newlines to ensure output is a single line.
-       // Only allow printable runes for readability purposes.
-       rawInvalid := func(r rune) bool {
-               return r == '`' || r == '\n' || !unicode.IsPrint(r)
-       }
-       if strings.IndexFunc(s, rawInvalid) < 0 {
-               return "`" + s + "`"
-       }
-       return qs
-}
-
-func formatPrimitive(t reflect.Type, v interface{}, conf FormatConfig) string {
-       if conf.printType && (conf.PrintPrimitiveType || t.PkgPath() != "") {
-               return fmt.Sprintf("%v(%v)", t, v)
-       }
-       return fmt.Sprintf("%v", v)
-}
-
-func formatPointer(v reflect.Value, conf FormatConfig) string {
-       p := v.Pointer()
-       if !conf.realPointers {
-               p = 0 // For deterministic printing purposes
-       }
-       s := formatHex(uint64(p))
-       if conf.printType {
-               return fmt.Sprintf("(%v)(%s)", v.Type(), s)
-       }
-       return s
-}
-
-func formatHex(u uint64) string {
-       var f string
-       switch {
-       case u <= 0xff:
-               f = "0x%02x"
-       case u <= 0xffff:
-               f = "0x%04x"
-       case u <= 0xffffff:
-               f = "0x%06x"
-       case u <= 0xffffffff:
-               f = "0x%08x"
-       case u <= 0xffffffffff:
-               f = "0x%010x"
-       case u <= 0xffffffffffff:
-               f = "0x%012x"
-       case u <= 0xffffffffffffff:
-               f = "0x%014x"
-       case u <= 0xffffffffffffffff:
-               f = "0x%016x"
-       }
-       return fmt.Sprintf(f, u)
-}
-
-// insertPointer insert p into m, allocating m if necessary.
-func insertPointer(m map[uintptr]bool, p uintptr) map[uintptr]bool {
-       if m == nil {
-               m = make(map[uintptr]bool)
-       }
-       m[p] = true
-       return m
-}
-
-// isZero reports whether v is the zero value.
-// This does not rely on Interface and so can be used on unexported fields.
-func isZero(v reflect.Value) bool {
-       switch v.Kind() {
-       case reflect.Bool:
-               return v.Bool() == false
-       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
-               return v.Int() == 0
-       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
-               return v.Uint() == 0
-       case reflect.Float32, reflect.Float64:
-               return v.Float() == 0
-       case reflect.Complex64, reflect.Complex128:
-               return v.Complex() == 0
-       case reflect.String:
-               return v.String() == ""
-       case reflect.UnsafePointer:
-               return v.Pointer() == 0
-       case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
-               return v.IsNil()
-       case reflect.Array:
-               for i := 0; i < v.Len(); i++ {
-                       if !isZero(v.Index(i)) {
-                               return false
-                       }
-               }
-               return true
-       case reflect.Struct:
-               for i := 0; i < v.NumField(); i++ {
-                       if !isZero(v.Field(i)) {
-                               return false
-                       }
-               }
-               return true
-       }
-       return false
-}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go
new file mode 100644 (file)
index 0000000..0a01c47
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build purego
+
+package value
+
+import "reflect"
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+       p uintptr
+       t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+       // NOTE: Storing a pointer as an uintptr is technically incorrect as it
+       // assumes that the GC implementation does not use a moving collector.
+       return Pointer{v.Pointer(), v.Type()}
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go
new file mode 100644 (file)
index 0000000..da134ae
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+// +build !purego
+
+package value
+
+import (
+       "reflect"
+       "unsafe"
+)
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+       p unsafe.Pointer
+       t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+       // The proper representation of a pointer is unsafe.Pointer,
+       // which is necessary if the GC ever uses a moving collector.
+       return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
+}
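
Both variants exist to give a comparable, typed key for pointer identity (e.g. for visited sets in cycle detection). A self-contained sketch of that usage pattern, mirroring the unsafe variant:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// pointer mirrors the internal value.Pointer idea: a typed, comparable
// pointer that is safe to use as a map key.
type pointer struct {
	p unsafe.Pointer
	t reflect.Type
}

func pointerOf(v reflect.Value) pointer {
	return pointer{unsafe.Pointer(v.Pointer()), v.Type()}
}

func main() {
	s := []int{1, 2, 3}
	visited := map[pointer]bool{}
	visited[pointerOf(reflect.ValueOf(s))] = true
	// The same slice hashes to the same key: same backing array, same type.
	fmt.Println(visited[pointerOf(reflect.ValueOf(s))]) // true
}
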
index fe8aa27a077b5b6dee8a48ecbff53070cee9ed22..938f646f000a480308c72226ccdf2d644aeed37d 100644 (file)
@@ -19,7 +19,7 @@ func SortKeys(vs []reflect.Value) []reflect.Value {
        }
 
        // Sort the map keys.
-       sort.Sort(valueSorter(vs))
+       sort.Slice(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
 
        // Deduplicate keys (fails for NaNs).
        vs2 := vs[:1]
@@ -31,13 +31,6 @@ func SortKeys(vs []reflect.Value) []reflect.Value {
        return vs2
 }
 
-// TODO: Use sort.Slice once Google AppEngine is on Go1.8 or above.
-type valueSorter []reflect.Value
-
-func (vs valueSorter) Len() int           { return len(vs) }
-func (vs valueSorter) Less(i, j int) bool { return isLess(vs[i], vs[j]) }
-func (vs valueSorter) Swap(i, j int)      { vs[i], vs[j] = vs[j], vs[i] }
-
 // isLess is a generic function for sorting arbitrary map keys.
 // The inputs must be of the same type and must be comparable.
 func isLess(x, y reflect.Value) bool {
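
With the AppEngine/pre-Go1.8 constraint gone, sort.Slice replaces the Len/Less/Swap boilerplate of the deleted valueSorter. The general pattern:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// One closure instead of a named type implementing sort.Interface.
	xs := []int{3, 1, 2}
	sort.Slice(xs, func(i, j int) bool { return xs[i] < xs[j] })
	fmt.Println(xs) // [1 2 3]
}
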
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
new file mode 100644 (file)
index 0000000..d13a12c
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package value
+
+import "reflect"
+
+// IsZero reports whether v is the zero value.
+// This does not rely on Interface and so can be used on unexported fields.
+func IsZero(v reflect.Value) bool {
+       switch v.Kind() {
+       case reflect.Bool:
+               return v.Bool() == false
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               return v.Int() == 0
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+               return v.Uint() == 0
+       case reflect.Float32, reflect.Float64:
+               return v.Float() == 0
+       case reflect.Complex64, reflect.Complex128:
+               return v.Complex() == 0
+       case reflect.String:
+               return v.String() == ""
+       case reflect.UnsafePointer:
+               return v.Pointer() == 0
+       case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+               return v.IsNil()
+       case reflect.Array:
+               for i := 0; i < v.Len(); i++ {
+                       if !IsZero(v.Index(i)) {
+                               return false
+                       }
+               }
+               return true
+       case reflect.Struct:
+               for i := 0; i < v.NumField(); i++ {
+                       if !IsZero(v.Field(i)) {
+                               return false
+                       }
+               }
+               return true
+       }
+       return false
+}
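
IsZero deliberately avoids reflect.Value.Interface, which panics on unexported fields; kind-based accessors remain readable. A small demonstration of that distinction (the secret type is hypothetical):

package main

import (
	"fmt"
	"reflect"
)

type secret struct{ hidden int }

func main() {
	f := reflect.ValueOf(secret{}).Field(0)
	fmt.Println(f.CanInterface()) // false: f.Interface() would panic
	fmt.Println(f.Int() == 0)     // true: kind-based reads still work
}
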
index 91d4b066e0520f68ea48ecaf5c84ab34b2c2574b..793448160ee7121cf45eddfbc09d512e6bf2c5a6 100644 (file)
@@ -7,7 +7,7 @@ package cmp
 import (
        "fmt"
        "reflect"
-       "runtime"
+       "regexp"
        "strings"
 
        "github.com/google/go-cmp/cmp/internal/function"
@@ -29,11 +29,11 @@ type Option interface {
        // An Options is returned only if multiple comparers or transformers
        // can apply simultaneously and will only contain values of those types
        // or sub-Options containing values of those types.
-       filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption
+       filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
 }
 
 // applicableOption represents the following types:
-//     Fundamental: ignore | invalid | *comparer | *transformer
+//     Fundamental: ignore | validator | *comparer | *transformer
 //     Grouping:    Options
 type applicableOption interface {
        Option
@@ -43,7 +43,7 @@ type applicableOption interface {
 }
 
 // coreOption represents the following types:
-//     Fundamental: ignore | invalid | *comparer | *transformer
+//     Fundamental: ignore | validator | *comparer | *transformer
 //     Filters:     *pathFilter | *valuesFilter
 type coreOption interface {
        Option
@@ -63,19 +63,19 @@ func (core) isCore() {}
 // on all individual options held within.
 type Options []Option
 
-func (opts Options) filter(s *state, vx, vy reflect.Value, t reflect.Type) (out applicableOption) {
+func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
        for _, opt := range opts {
-               switch opt := opt.filter(s, vx, vy, t); opt.(type) {
+               switch opt := opt.filter(s, t, vx, vy); opt.(type) {
                case ignore:
                        return ignore{} // Only ignore can short-circuit evaluation
-               case invalid:
-                       out = invalid{} // Takes precedence over comparer or transformer
+               case validator:
+                       out = validator{} // Takes precedence over comparer or transformer
                case *comparer, *transformer, Options:
                        switch out.(type) {
                        case nil:
                                out = opt
-                       case invalid:
-                               // Keep invalid
+                       case validator:
+                               // Keep validator
                        case *comparer, *transformer, Options:
                                out = Options{out, opt} // Conflicting comparers or transformers
                        }
@@ -106,6 +106,11 @@ func (opts Options) String() string {
 // FilterPath returns a new Option where opt is only evaluated if filter f
 // returns true for the current Path in the value tree.
 //
+// This filter is called even if a slice element or map entry is missing and
+// provides an opportunity to ignore such cases. The filter function must be
+// symmetric such that the filter result is identical regardless of whether the
+// missing value is from x or y.
+//
 // The option passed in may be an Ignore, Transformer, Comparer, Options, or
 // a previously filtered Option.
 func FilterPath(f func(Path) bool, opt Option) Option {
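
A typical FilterPath use, relying on the new behavior that the filter also sees missing elements (the user type is hypothetical):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type user struct {
	Name  string
	Token string
}

func main() {
	// Ignore the Token field wherever it appears in the value tree.
	opt := cmp.FilterPath(func(p cmp.Path) bool {
		return p.Last().String() == ".Token"
	}, cmp.Ignore())

	x := user{Name: "a", Token: "t1"}
	y := user{Name: "a", Token: "t2"}
	fmt.Println(cmp.Equal(x, y, opt)) // true: only Token differs
}
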
@@ -124,22 +129,22 @@ type pathFilter struct {
        opt Option
 }
 
-func (f pathFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
+func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
        if f.fnc(s.curPath) {
-               return f.opt.filter(s, vx, vy, t)
+               return f.opt.filter(s, t, vx, vy)
        }
        return nil
 }
 
 func (f pathFilter) String() string {
-       fn := getFuncName(reflect.ValueOf(f.fnc).Pointer())
-       return fmt.Sprintf("FilterPath(%s, %v)", fn, f.opt)
+       return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
 }
 
 // FilterValues returns a new Option where opt is only evaluated if filter f,
 // which is a function of the form "func(T, T) bool", returns true for the
-// current pair of values being compared. If the type of the values is not
-// assignable to T, then this filter implicitly returns false.
+// current pair of values being compared. If either value is invalid or
+// the type of the values is not assignable to T, then this filter implicitly
+// returns false.
 //
 // The filter function must be
 // symmetric (i.e., agnostic to the order of the inputs) and
@@ -171,19 +176,18 @@ type valuesFilter struct {
        opt Option
 }
 
-func (f valuesFilter) filter(s *state, vx, vy reflect.Value, t reflect.Type) applicableOption {
-       if !vx.IsValid() || !vy.IsValid() {
-               return invalid{}
+func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+       if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
+               return nil
        }
        if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
-               return f.opt.filter(s, vx, vy, t)
+               return f.opt.filter(s, t, vx, vy)
        }
        return nil
 }
 
 func (f valuesFilter) String() string {
-       fn := getFuncName(f.fnc.Pointer())
-       return fmt.Sprintf("FilterValues(%s, %v)", fn, f.opt)
+       return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
 }
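
FilterValues pairs a symmetric value filter with an option; with the change above, invalid or non-interfaceable values now simply skip the filter instead of producing the validator sentinel. A common use is gating an approximate comparer:

package main

import (
	"fmt"
	"math"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Apply the approximate comparer only to non-zero float pairs.
	// Both the filter and the comparer must be symmetric and deterministic.
	opt := cmp.FilterValues(func(x, y float64) bool {
		return x != 0 && y != 0
	}, cmp.Comparer(func(x, y float64) bool {
		return math.Abs(x-y) < 1e-9
	}))

	fmt.Println(cmp.Equal(1.0, 1.0+1e-12, opt)) // true: within tolerance
	fmt.Println(cmp.Equal(0.0, 1e-12, opt))     // false: filter skips zeros
}
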
 
 // Ignore is an Option that causes all comparisons to be ignored.
@@ -194,20 +198,45 @@ func Ignore() Option { return ignore{} }
 type ignore struct{ core }
 
 func (ignore) isFiltered() bool                                                     { return false }
-func (ignore) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return ignore{} }
-func (ignore) apply(_ *state, _, _ reflect.Value)                                   { return }
+func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
+func (ignore) apply(s *state, _, _ reflect.Value)                                   { s.report(true, reportByIgnore) }
 func (ignore) String() string                                                       { return "Ignore()" }
 
-// invalid is a sentinel Option type to indicate that some options could not
-// be evaluated due to unexported fields.
-type invalid struct{ core }
+// validator is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields, missing slice elements, or
+// missing map entries. Both values fail validation only in the
+// unexported-field case.
+type validator struct{ core }
+
+func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
+       if !vx.IsValid() || !vy.IsValid() {
+               return validator{}
+       }
+       if !vx.CanInterface() || !vy.CanInterface() {
+               return validator{}
+       }
+       return nil
+}
+func (validator) apply(s *state, vx, vy reflect.Value) {
+       // Implies missing slice element or map entry.
+       if !vx.IsValid() || !vy.IsValid() {
+               s.report(vx.IsValid() == vy.IsValid(), 0)
+               return
+       }
+
+       // Unable to Interface implies unexported field without visibility access.
+       if !vx.CanInterface() || !vy.CanInterface() {
+               const help = "consider using a custom Comparer; if you control the implementation of the type, you can also consider AllowUnexported or cmpopts.IgnoreUnexported"
+               panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
+       }
 
-func (invalid) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption { return invalid{} }
-func (invalid) apply(s *state, _, _ reflect.Value) {
-       const help = "consider using AllowUnexported or cmpopts.IgnoreUnexported"
-       panic(fmt.Sprintf("cannot handle unexported field: %#v\n%s", s.curPath, help))
+       panic("not reachable")
 }
 
+// identRx represents a valid identifier according to the Go specification.
+const identRx = `[_\p{L}][_\p{L}\p{N}]*`
+
+var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
+
 // Transformer returns an Option that applies a transformation function that
 // converts values of a certain type into that of another.
 //
@@ -220,18 +249,25 @@ func (invalid) apply(s *state, _, _ reflect.Value) {
 // input and output types are the same), an implicit filter is added such that
 // a transformer is applicable only if that exact transformer is not already
 // in the tail of the Path since the last non-Transform step.
+// For situations where the implicit filter is still insufficient,
+// consider using cmpopts.AcyclicTransformer, which adds a filter
+// to prevent the transformer from being recursively applied upon itself.
 //
 // The name is a user provided label that is used as the Transform.Name in the
-// transformation PathStep. If empty, an arbitrary name is used.
+// transformation PathStep (and eventually shown in the Diff output).
+// The name must be a valid identifier or qualified identifier in Go syntax.
+// If empty, an arbitrary name is used.
 func Transformer(name string, f interface{}) Option {
        v := reflect.ValueOf(f)
        if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
                panic(fmt.Sprintf("invalid transformer function: %T", f))
        }
        if name == "" {
-               name = "λ" // Lambda-symbol as place-holder for anonymous transformer
-       }
-       if !isValid(name) {
+               name = function.NameOf(v)
+               if !identsRx.MatchString(name) {
+                       name = "λ" // Lambda-symbol as placeholder name
+               }
+       } else if !identsRx.MatchString(name) {
                panic(fmt.Sprintf("invalid name: %q", name))
        }
        tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
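
The identsRx check above is what validates the name argument. A conventional use, mirroring the package documentation's sorting transformer:

package main

import (
	"fmt"
	"sort"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// "Sort" must be a valid (possibly qualified) Go identifier, or
	// cmp.Transformer panics with an "invalid name" message.
	opt := cmp.Transformer("Sort", func(in []int) []int {
		out := append([]int(nil), in...) // copy: transformers must not mutate
		sort.Ints(out)
		return out
	})
	// Same in/out type, so the implicit filter stops re-application.
	fmt.Println(cmp.Equal([]int{3, 1, 2}, []int{1, 2, 3}, opt)) // true
}
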
@@ -250,9 +286,9 @@ type transformer struct {
 
 func (tr *transformer) isFiltered() bool { return tr.typ != nil }
 
-func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
        for i := len(s.curPath) - 1; i >= 0; i-- {
-               if t, ok := s.curPath[i].(*transform); !ok {
+               if t, ok := s.curPath[i].(Transform); !ok {
                        break // Hit most recent non-Transform step
                } else if tr == t.trans {
                        return nil // Cannot directly use same Transform
@@ -265,18 +301,15 @@ func (tr *transformer) filter(s *state, _, _ reflect.Value, t reflect.Type) appl
 }
 
 func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
-       // Update path before calling the Transformer so that dynamic checks
-       // will use the updated path.
-       s.curPath.push(&transform{pathStep{tr.fnc.Type().Out(0)}, tr})
-       defer s.curPath.pop()
-
-       vx = s.callTRFunc(tr.fnc, vx)
-       vy = s.callTRFunc(tr.fnc, vy)
-       s.compareAny(vx, vy)
+       step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
+       vvx := s.callTRFunc(tr.fnc, vx, step)
+       vvy := s.callTRFunc(tr.fnc, vy, step)
+       step.vx, step.vy = vvx, vvy
+       s.compareAny(step)
 }
 
 func (tr transformer) String() string {
-       return fmt.Sprintf("Transformer(%s, %s)", tr.name, getFuncName(tr.fnc.Pointer()))
+       return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
 }
 
 // Comparer returns an Option that determines whether two values are equal
@@ -311,7 +344,7 @@ type comparer struct {
 
 func (cm *comparer) isFiltered() bool { return cm.typ != nil }
 
-func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applicableOption {
+func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
        if cm.typ == nil || t.AssignableTo(cm.typ) {
                return cm
        }
@@ -320,11 +353,11 @@ func (cm *comparer) filter(_ *state, _, _ reflect.Value, t reflect.Type) applica
 
 func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
        eq := s.callTTBFunc(cm.fnc, vx, vy)
-       s.report(eq, vx, vy)
+       s.report(eq, reportByFunc)
 }
 
 func (cm comparer) String() string {
-       return fmt.Sprintf("Comparer(%s)", getFuncName(cm.fnc.Pointer()))
+       return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
 }
 
 // AllowUnexported returns an Option that forcibly allows operations on
@@ -338,7 +371,7 @@ func (cm comparer) String() string {
 // defined in an internal package where the semantic meaning of an unexported
 // field is in the control of the user.
 //
-// For some cases, a custom Comparer should be used instead that defines
+// In many cases, a custom Comparer should be used instead that defines
 // equality as a function of the public API of a type rather than the underlying
 // unexported implementation.
 //
@@ -370,27 +403,92 @@ func AllowUnexported(types ...interface{}) Option {
 
 type visibleStructs map[reflect.Type]bool
 
-func (visibleStructs) filter(_ *state, _, _ reflect.Value, _ reflect.Type) applicableOption {
+func (visibleStructs) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
        panic("not implemented")
 }
 
-// reporter is an Option that configures how differences are reported.
-type reporter interface {
-       // TODO: Not exported yet.
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see Reporter).
+type Result struct {
+       _     [0]func() // Make Result incomparable
+       flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+       return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if Equal reports false.
+func (r Result) ByIgnore() bool {
+       return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+       return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a Comparer function determined equality.
+func (r Result) ByFunc() bool {
+       return r.flags&reportByFunc != 0
+}
+
+type resultFlags uint
+
+const (
+       _ resultFlags = (1 << iota) / 2
+
+       reportEqual
+       reportUnequal
+       reportByIgnore
+       reportByMethod
+       reportByFunc
+)
+
+// Reporter is an Option that can be passed to Equal. When Equal traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascend out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+       // PushStep is called when a tree-traversal operation is performed.
+       // The PathStep itself is only valid until the step is popped.
+       // The PathStep.Values are valid for the duration of the entire traversal
+       // and must not be mutated.
+       //
+       // Equal always calls PushStep at the start to provide an operation-less
+       // PathStep used to report the root values.
        //
-       // Perhaps add PushStep and PopStep and change Report to only accept
-       // a PathStep instead of the full-path? Adding a PushStep and PopStep makes
-       // it clear that we are traversing the value tree in a depth-first-search
-       // manner, which has an effect on how values are printed.
+       // Within a slice, the exact set of inserted, removed, or modified elements
+       // is unspecified and may change in future implementations.
+       // The entries of a map are iterated through in an unspecified order.
+       PushStep(PathStep)
+
+       // Report is called exactly once on leaf nodes to report whether the
+       // comparison identified the node as equal, unequal, or ignored.
+       // A leaf node is one that is immediately preceded by and followed by
+       // a pair of PushStep and PopStep calls.
+       Report(Result)
+
+       // PopStep ascends back up the value tree.
+       // There is always a matching pop call for every push call.
+       PopStep()
+}) Option {
+       return reporter{r}
+}
 
-       Option
+type reporter struct{ reporterIface }
+type reporterIface interface {
+       PushStep(PathStep)
+       Report(Result)
+       PopStep()
+}
 
-       // Report is called for every comparison made and will be provided with
-       // the two values being compared, the equality result, and the
-       // current path in the value tree. It is possible for x or y to be an
-       // invalid reflect.Value if one of the values is non-existent;
-       // which is possible with maps and slices.
-       Report(x, y reflect.Value, eq bool, p Path)
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+       panic("not implemented")
 }
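
A minimal Reporter implementation against the interface above; it follows the push/report/pop protocol and collects unequal leaves (diffReporter is a hypothetical name):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffReporter records the path and values of every unequal leaf node.
type diffReporter struct {
	path  cmp.Path
	diffs []string
}

func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }

func (r *diffReporter) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v", r.path, vx, vy))
	}
}

func (r *diffReporter) PopStep() { r.path = r.path[:len(r.path)-1] }

func main() {
	var r diffReporter
	cmp.Equal([]int{1, 2, 3}, []int{1, 9, 3}, cmp.Reporter(&r))
	fmt.Println(len(r.diffs)) // 1: only index 1 differs
}
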
 
 // normalizeOption normalizes the input options such that all Options groups
@@ -424,30 +522,3 @@ func flattenOptions(dst, src Options) Options {
        }
        return dst
 }
-
-// getFuncName returns a short function name from the pointer.
-// The string parsing logic works up until Go1.9.
-func getFuncName(p uintptr) string {
-       fnc := runtime.FuncForPC(p)
-       if fnc == nil {
-               return "<unknown>"
-       }
-       name := fnc.Name() // E.g., "long/path/name/mypkg.(mytype).(long/path/name/mypkg.myfunc)-fm"
-       if strings.HasSuffix(name, ")-fm") || strings.HasSuffix(name, ")·fm") {
-               // Strip the package name from method name.
-               name = strings.TrimSuffix(name, ")-fm")
-               name = strings.TrimSuffix(name, ")·fm")
-               if i := strings.LastIndexByte(name, '('); i >= 0 {
-                       methodName := name[i+1:] // E.g., "long/path/name/mypkg.myfunc"
-                       if j := strings.LastIndexByte(methodName, '.'); j >= 0 {
-                               methodName = methodName[j+1:] // E.g., "myfunc"
-                       }
-                       name = name[:i] + methodName // E.g., "long/path/name/mypkg.(mytype)." + "myfunc"
-               }
-       }
-       if i := strings.LastIndexByte(name, '/'); i >= 0 {
-               // Strip the package name.
-               name = name[i+1:] // E.g., "mypkg.(mytype).myfunc"
-       }
-       return name
-}
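
The Reporter option above replaces the old Report callback with a push/pop traversal protocol. Here is a minimal sketch of a conforming implementation (the diffRecorder type and its names are illustrative, not part of the package): it tracks the current Path itself and records every unequal leaf.

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// diffRecorder satisfies the interface expected by cmp.Reporter.
type diffRecorder struct {
	path  cmp.Path // current position in the value tree
	diffs []string // description of each unequal leaf
}

func (r *diffRecorder) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
func (r *diffRecorder) PopStep()                 { r.path = r.path[:len(r.path)-1] }

// Report is called once per leaf; record the path when the leaf is unequal.
func (r *diffRecorder) Report(rs cmp.Result) {
	if !rs.Equal() {
		vx, vy := r.path.Last().Values()
		r.diffs = append(r.diffs, fmt.Sprintf("%v: %v != %v", r.path, vx, vy))
	}
}

func main() {
	var r diffRecorder
	cmp.Equal([]int{1, 2, 3}, []int{1, 0, 3}, cmp.Reporter(&r))
	for _, d := range r.diffs {
		fmt.Println(d) // exact path rendering may vary
	}
}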
index c08a3cf80d9b7fdd84a0fcd43dff0c2f75b62621..96fffd291f7352ca7f23918db1debed7f26f92e7 100644 (file)
@@ -12,80 +12,52 @@ import (
        "unicode/utf8"
 )
 
-type (
-       // Path is a list of PathSteps describing the sequence of operations to get
-       // from some root type to the current position in the value tree.
-       // The first Path element is always an operation-less PathStep that exists
-       // simply to identify the initial type.
-       //
-       // When traversing structs with embedded structs, the embedded struct will
-       // always be accessed as a field before traversing the fields of the
-       // embedded struct themselves. That is, an exported field from the
-       // embedded struct will never be accessed directly from the parent struct.
-       Path []PathStep
-
-       // PathStep is a union-type for specific operations to traverse
-       // a value's tree structure. Users of this package never need to implement
-       // these types as values of this type will be returned by this package.
-       PathStep interface {
-               String() string
-               Type() reflect.Type // Resulting type after performing the path step
-               isPathStep()
-       }
+// Path is a list of PathSteps describing the sequence of operations to get
+// from some root type to the current position in the value tree.
+// The first Path element is always an operation-less PathStep that exists
+// simply to identify the initial type.
+//
+// When traversing structs with embedded structs, the embedded struct will
+// always be accessed as a field before traversing the fields of the
+// embedded struct themselves. That is, an exported field from the
+// embedded struct will never be accessed directly from the parent struct.
+type Path []PathStep
 
-       // SliceIndex is an index operation on a slice or array at some index Key.
-       SliceIndex interface {
-               PathStep
-               Key() int // May return -1 if in a split state
-
-               // SplitKeys returns the indexes for indexing into slices in the
-               // x and y values, respectively. These indexes may differ due to the
-               // insertion or removal of an element in one of the slices, causing
-               // all of the indexes to be shifted. If an index is -1, then that
-               // indicates that the element does not exist in the associated slice.
-               //
-               // Key is guaranteed to return -1 if and only if the indexes returned
-               // by SplitKeys are not the same. SplitKeys will never return -1 for
-               // both indexes.
-               SplitKeys() (x int, y int)
-
-               isSliceIndex()
-       }
-       // MapIndex is an index operation on a map at some index Key.
-       MapIndex interface {
-               PathStep
-               Key() reflect.Value
-               isMapIndex()
-       }
-       // TypeAssertion represents a type assertion on an interface.
-       TypeAssertion interface {
-               PathStep
-               isTypeAssertion()
-       }
-       // StructField represents a struct field access on a field called Name.
-       StructField interface {
-               PathStep
-               Name() string
-               Index() int
-               isStructField()
-       }
-       // Indirect represents pointer indirection on the parent type.
-       Indirect interface {
-               PathStep
-               isIndirect()
-       }
-       // Transform is a transformation from the parent type to the current type.
-       Transform interface {
-               PathStep
-               Name() string
-               Func() reflect.Value
+// PathStep is a union-type for specific operations to traverse
+// a value's tree structure. Users of this package never need to implement
+// these types as values of this type will be returned by this package.
+//
+// Implementations of this interface are
+// StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, and Transform.
+type PathStep interface {
+       String() string
 
-               // Option returns the originally constructed Transformer option.
-               // The == operator can be used to detect the exact option used.
-               Option() Option
+       // Type is the resulting type after performing the path step.
+       Type() reflect.Type
 
-               isTransform()
-       }
+       // Values is the resulting values after performing the path step.
+       // The type of each valid value is guaranteed to be identical to Type.
+       //
+       // In some cases, one or both may be invalid or have restrictions:
+       //      • For StructField, both are not interface-able if the current field
+       //      is unexported and the struct type is not explicitly permitted by
+       //      AllowUnexported to traverse unexported fields.
+       //      • For SliceIndex, one may be invalid if an element is missing from
+       //      either the x or y slice.
+       //      • For MapIndex, one may be invalid if an entry is missing from
+       //      either the x or y map.
+       //
+       // The provided values must not be mutated.
+       Values() (vx, vy reflect.Value)
+}
+
+var (
+       _ PathStep = StructField{}
+       _ PathStep = SliceIndex{}
+       _ PathStep = MapIndex{}
+       _ PathStep = Indirect{}
+       _ PathStep = TypeAssertion{}
+       _ PathStep = Transform{}
 )
 
 func (pa *Path) push(s PathStep) {
@@ -124,7 +96,7 @@ func (pa Path) Index(i int) PathStep {
 func (pa Path) String() string {
        var ss []string
        for _, s := range pa {
-               if _, ok := s.(*structField); ok {
+               if _, ok := s.(StructField); ok {
                        ss = append(ss, s.String())
                }
        }
@@ -144,13 +116,13 @@ func (pa Path) GoString() string {
                        nextStep = pa[i+1]
                }
                switch s := s.(type) {
-               case *indirect:
+               case Indirect:
                        numIndirect++
                        pPre, pPost := "(", ")"
                        switch nextStep.(type) {
-                       case *indirect:
+                       case Indirect:
                                continue // Next step is indirection, so let them batch up
-                       case *structField:
+                       case StructField:
                                numIndirect-- // Automatic indirection on struct fields
                        case nil:
                                pPre, pPost = "", "" // Last step; no need for parenthesis
@@ -161,19 +133,10 @@ func (pa Path) GoString() string {
                        }
                        numIndirect = 0
                        continue
-               case *transform:
+               case Transform:
                        ssPre = append(ssPre, s.trans.name+"(")
                        ssPost = append(ssPost, ")")
                        continue
-               case *typeAssertion:
-                       // As a special-case, elide type assertions on anonymous types
-                       // since they are typically generated dynamically and can be very
-                       // verbose. For example, some transforms return interface{} because
-                       // of Go's lack of generics, but typically take in and return the
-                       // exact same concrete type.
-                       if s.Type().PkgPath() == "" {
-                               continue
-                       }
                }
                ssPost = append(ssPost, s.String())
        }
@@ -183,44 +146,13 @@ func (pa Path) GoString() string {
        return strings.Join(ssPre, "") + strings.Join(ssPost, "")
 }
 
-type (
-       pathStep struct {
-               typ reflect.Type
-       }
-
-       sliceIndex struct {
-               pathStep
-               xkey, ykey int
-       }
-       mapIndex struct {
-               pathStep
-               key reflect.Value
-       }
-       typeAssertion struct {
-               pathStep
-       }
-       structField struct {
-               pathStep
-               name string
-               idx  int
-
-               // These fields are used for forcibly accessing an unexported field.
-               // pvx, pvy, and field are only valid if unexported is true.
-               unexported bool
-               force      bool                // Forcibly allow visibility
-               pvx, pvy   reflect.Value       // Parent values
-               field      reflect.StructField // Field information
-       }
-       indirect struct {
-               pathStep
-       }
-       transform struct {
-               pathStep
-               trans *transformer
-       }
-)
+type pathStep struct {
+       typ    reflect.Type
+       vx, vy reflect.Value
+}
 
-func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) Type() reflect.Type             { return ps.typ }
+func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
 func (ps pathStep) String() string {
        if ps.typ == nil {
                return "<nil>"
@@ -232,7 +164,54 @@ func (ps pathStep) String() string {
        return fmt.Sprintf("{%s}", s)
 }
 
-func (si sliceIndex) String() string {
+// StructField represents a struct field access on a field called Name.
+type StructField struct{ *structField }
+type structField struct {
+       pathStep
+       name string
+       idx  int
+
+       // These fields are used for forcibly accessing an unexported field.
+       // pvx, pvy, and field are only valid if unexported is true.
+       unexported bool
+       mayForce   bool                // Forcibly allow visibility
+       pvx, pvy   reflect.Value       // Parent values
+       field      reflect.StructField // Field information
+}
+
+func (sf StructField) Type() reflect.Type { return sf.typ }
+func (sf StructField) Values() (vx, vy reflect.Value) {
+       if !sf.unexported {
+               return sf.vx, sf.vy // CanInterface reports true
+       }
+
+       // Forcibly obtain read-write access to an unexported struct field.
+       if sf.mayForce {
+               vx = retrieveUnexportedField(sf.pvx, sf.field)
+               vy = retrieveUnexportedField(sf.pvy, sf.field)
+               return vx, vy // CanInterface reports true
+       }
+       return sf.vx, sf.vy // CanInterface reports false
+}
+func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
+
+// Name is the field name.
+func (sf StructField) Name() string { return sf.name }
+
+// Index is the index of the field in the parent struct type.
+// See reflect.Type.Field.
+func (sf StructField) Index() int { return sf.idx }
+
+// SliceIndex is an index operation on a slice or array at some index Key.
+type SliceIndex struct{ *sliceIndex }
+type sliceIndex struct {
+       pathStep
+       xkey, ykey int
+}
+
+func (si SliceIndex) Type() reflect.Type             { return si.typ }
+func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
+func (si SliceIndex) String() string {
        switch {
        case si.xkey == si.ykey:
                return fmt.Sprintf("[%d]", si.xkey)
@@ -247,63 +226,83 @@ func (si sliceIndex) String() string {
                return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
        }
 }
-func (mi mapIndex) String() string      { return fmt.Sprintf("[%#v]", mi.key) }
-func (ta typeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
-func (sf structField) String() string   { return fmt.Sprintf(".%s", sf.name) }
-func (in indirect) String() string      { return "*" }
-func (tf transform) String() string     { return fmt.Sprintf("%s()", tf.trans.name) }
 
-func (si sliceIndex) Key() int {
+// Key is the index key; it may return -1 if in a split state.
+func (si SliceIndex) Key() int {
        if si.xkey != si.ykey {
                return -1
        }
        return si.xkey
 }
-func (si sliceIndex) SplitKeys() (x, y int) { return si.xkey, si.ykey }
-func (mi mapIndex) Key() reflect.Value      { return mi.key }
-func (sf structField) Name() string         { return sf.name }
-func (sf structField) Index() int           { return sf.idx }
-func (tf transform) Name() string           { return tf.trans.name }
-func (tf transform) Func() reflect.Value    { return tf.trans.fnc }
-func (tf transform) Option() Option         { return tf.trans }
-
-func (pathStep) isPathStep()           {}
-func (sliceIndex) isSliceIndex()       {}
-func (mapIndex) isMapIndex()           {}
-func (typeAssertion) isTypeAssertion() {}
-func (structField) isStructField()     {}
-func (indirect) isIndirect()           {}
-func (transform) isTransform()         {}
 
-var (
-       _ SliceIndex    = sliceIndex{}
-       _ MapIndex      = mapIndex{}
-       _ TypeAssertion = typeAssertion{}
-       _ StructField   = structField{}
-       _ Indirect      = indirect{}
-       _ Transform     = transform{}
-
-       _ PathStep = sliceIndex{}
-       _ PathStep = mapIndex{}
-       _ PathStep = typeAssertion{}
-       _ PathStep = structField{}
-       _ PathStep = indirect{}
-       _ PathStep = transform{}
-)
+// SplitKeys are the indexes for indexing into slices in the
+// x and y values, respectively. These indexes may differ due to the
+// insertion or removal of an element in one of the slices, causing
+// all of the indexes to be shifted. If an index is -1, then that
+// indicates that the element does not exist in the associated slice.
+//
+// Key is guaranteed to return -1 if and only if the indexes returned
+// by SplitKeys are not the same. SplitKeys will never return -1 for
+// both indexes.
+func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
+
+// MapIndex is an index operation on a map at some index Key.
+type MapIndex struct{ *mapIndex }
+type mapIndex struct {
+       pathStep
+       key reflect.Value
+}
+
+func (mi MapIndex) Type() reflect.Type             { return mi.typ }
+func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
+func (mi MapIndex) String() string                 { return fmt.Sprintf("[%#v]", mi.key) }
+
+// Key is the value of the map key.
+func (mi MapIndex) Key() reflect.Value { return mi.key }
+
+// Indirect represents pointer indirection on the parent type.
+type Indirect struct{ *indirect }
+type indirect struct {
+       pathStep
+}
+
+func (in Indirect) Type() reflect.Type             { return in.typ }
+func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
+func (in Indirect) String() string                 { return "*" }
+
+// TypeAssertion represents a type assertion on an interface.
+type TypeAssertion struct{ *typeAssertion }
+type typeAssertion struct {
+       pathStep
+}
+
+func (ta TypeAssertion) Type() reflect.Type             { return ta.typ }
+func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
+func (ta TypeAssertion) String() string                 { return fmt.Sprintf(".(%v)", ta.typ) }
+
+// Transform is a transformation from the parent type to the current type.
+type Transform struct{ *transform }
+type transform struct {
+       pathStep
+       trans *transformer
+}
+
+func (tf Transform) Type() reflect.Type             { return tf.typ }
+func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
+func (tf Transform) String() string                 { return fmt.Sprintf("%s()", tf.trans.name) }
+
+// Name is the name of the Transformer.
+func (tf Transform) Name() string { return tf.trans.name }
+
+// Func is the function pointer to the transformer function.
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
+
+// Option returns the originally constructed Transformer option.
+// The == operator can be used to detect the exact option used.
+func (tf Transform) Option() Option { return tf.trans }
 
 // isExported reports whether the identifier is exported.
 func isExported(id string) bool {
        r, _ := utf8.DecodeRuneInString(id)
        return unicode.IsUpper(r)
 }
-
-// isValid reports whether the identifier is valid.
-// Empty and underscore-only strings are not valid.
-func isValid(id string) bool {
-       ok := id != "" && id != "_"
-       for j, c := range id {
-               ok = ok && (j > 0 || !unicode.IsDigit(c))
-               ok = ok && (c == '_' || unicode.IsLetter(c) || unicode.IsDigit(c))
-       }
-       return ok
-}
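
With PathStep now exposed as concrete types (StructField, SliceIndex, MapIndex, Indirect, TypeAssertion, Transform) rather than sub-interfaces, callers can type-assert on Path elements directly. A hedged sketch of filtering by struct field (the account type and the field name Secret are illustrative only):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type account struct {
	Name   string
	Secret string
}

func main() {
	// Ignore any value reached through a struct field named "Secret".
	ignoreSecrets := cmp.FilterPath(func(p cmp.Path) bool {
		sf, ok := p.Last().(cmp.StructField)
		return ok && sf.Name() == "Secret"
	}, cmp.Ignore())

	x := account{Name: "a", Secret: "s1"}
	y := account{Name: "a", Secret: "s2"}
	fmt.Println(cmp.Equal(x, y, ignoreSecrets)) // true
}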
diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go
new file mode 100644 (file)
index 0000000..6ddf299
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+// defaultReporter implements the reporter interface.
+//
+// As Equal serially calls the PushStep, Report, and PopStep methods, the
+// defaultReporter constructs a tree-based representation of the compared value
+// and the result of each comparison (see valueNode).
+//
+// When the String method is called, the FormatDiff method transforms the
+// valueNode tree into a textNode tree, which is a tree-based representation
+// of the textual output (see textNode).
+//
+// Lastly, the textNode.String method produces the final report as a string.
+type defaultReporter struct {
+       root *valueNode
+       curr *valueNode
+}
+
+func (r *defaultReporter) PushStep(ps PathStep) {
+       r.curr = r.curr.PushStep(ps)
+       if r.root == nil {
+               r.root = r.curr
+       }
+}
+func (r *defaultReporter) Report(rs Result) {
+       r.curr.Report(rs)
+}
+func (r *defaultReporter) PopStep() {
+       r.curr = r.curr.PopStep()
+}
+
+// String provides a full report of the differences detected as a structured
+// literal in pseudo-Go syntax. String may only be called after the entire tree
+// has been traversed.
+func (r *defaultReporter) String() string {
+       assert(r.root != nil && r.curr == nil)
+       if r.root.NumDiff == 0 {
+               return ""
+       }
+       return formatOptions{}.FormatDiff(r.root).String()
+}
+
+func assert(ok bool) {
+       if !ok {
+               panic("assertion failure")
+       }
+}
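
defaultReporter is the machinery behind cmp.Diff: Equal drives PushStep/Report/PopStep to build the valueNode tree, and String renders it, returning the empty string when the values are equal. A typical usage sketch (the user type is ours):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

type user struct {
	Name string
	Age  int
}

func main() {
	// cmp.Diff wires up the default reporter and returns its String output.
	if diff := cmp.Diff(user{"al", 30}, user{"al", 31}); diff != "" {
		fmt.Printf("user mismatch (-x +y):\n%s", diff)
	}
}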
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
new file mode 100644 (file)
index 0000000..05efb99
--- /dev/null
@@ -0,0 +1,296 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// TODO: Enforce limits?
+//     * Enforce maximum number of records to print per node?
+//     * Enforce maximum size in bytes allowed?
+//     * As a heuristic, use less verbosity for equal nodes than unequal nodes.
+// TODO: Enforce unique outputs?
+//     * Avoid Stringer methods if it results in same output?
+//     * Print pointer address if outputs still equal?
+
+// numContextRecords is the number of surrounding equal records to print.
+const numContextRecords = 2
+
+type diffMode byte
+
+const (
+       diffUnknown   diffMode = 0
+       diffIdentical diffMode = ' '
+       diffRemoved   diffMode = '-'
+       diffInserted  diffMode = '+'
+)
+
+type typeMode int
+
+const (
+       // emitType always prints the type.
+       emitType typeMode = iota
+       // elideType never prints the type.
+       elideType
+       // autoType prints the type only for composite kinds
+       // (i.e., structs, slices, arrays, and maps).
+       autoType
+)
+
+type formatOptions struct {
+       // DiffMode controls the output mode of FormatDiff.
+       //
+       // If diffUnknown,   then produce a diff of the x and y values.
+       // If diffIdentical, then emit values as if they were equal.
+       // If diffRemoved,   then only emit x values (ignoring y values).
+       // If diffInserted,  then only emit y values (ignoring x values).
+       DiffMode diffMode
+
+       // TypeMode controls whether to print the type for the current node.
+       //
+       // As a general rule of thumb, we always print the type of the next node
+       // after an interface, and always elide the type of the next node after
+       // a slice or map node.
+       TypeMode typeMode
+
+       // formatValueOptions are options specific to printing reflect.Values.
+       formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+       opts.DiffMode = d
+       return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+       opts.TypeMode = t
+       return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode) textNode {
+       // Check whether we have specialized formatting for this node.
+       // This is not necessary, but helpful for producing more readable outputs.
+       if opts.CanFormatDiffSlice(v) {
+               return opts.FormatDiffSlice(v)
+       }
+
+       // For leaf nodes, format the value based on the reflect.Values alone.
+       if v.MaxDepth == 0 {
+               switch opts.DiffMode {
+               case diffUnknown, diffIdentical:
+                       // Format Equal.
+                       if v.NumDiff == 0 {
+                               outx := opts.FormatValue(v.ValueX, visitedPointers{})
+                               outy := opts.FormatValue(v.ValueY, visitedPointers{})
+                               if v.NumIgnored > 0 && v.NumSame == 0 {
+                                       return textEllipsis
+                               } else if outx.Len() < outy.Len() {
+                                       return outx
+                               } else {
+                                       return outy
+                               }
+                       }
+
+                       // Format unequal.
+                       assert(opts.DiffMode == diffUnknown)
+                       var list textList
+                       outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, visitedPointers{})
+                       outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, visitedPointers{})
+                       if outx != nil {
+                               list = append(list, textRecord{Diff: '-', Value: outx})
+                       }
+                       if outy != nil {
+                               list = append(list, textRecord{Diff: '+', Value: outy})
+                       }
+                       return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+               case diffRemoved:
+                       return opts.FormatValue(v.ValueX, visitedPointers{})
+               case diffInserted:
+                       return opts.FormatValue(v.ValueY, visitedPointers{})
+               default:
+                       panic("invalid diff mode")
+               }
+       }
+
+       // Descend into the child value node.
+       if v.TransformerName != "" {
+               out := opts.WithTypeMode(emitType).FormatDiff(v.Value)
+               out = textWrap{"Inverse(" + v.TransformerName + ", ", out, ")"}
+               return opts.FormatType(v.Type, out)
+       } else {
+               switch k := v.Type.Kind(); k {
+               case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map:
+                       return opts.FormatType(v.Type, opts.formatDiffList(v.Records, k))
+               case reflect.Ptr:
+                       return textWrap{"&", opts.FormatDiff(v.Value), ""}
+               case reflect.Interface:
+                       return opts.WithTypeMode(emitType).FormatDiff(v.Value)
+               default:
+                       panic(fmt.Sprintf("%v cannot have children", k))
+               }
+       }
+}
+
+func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind) textNode {
+       // Derive record name based on the data structure kind.
+       var name string
+       var formatKey func(reflect.Value) string
+       switch k {
+       case reflect.Struct:
+               name = "field"
+               opts = opts.WithTypeMode(autoType)
+               formatKey = func(v reflect.Value) string { return v.String() }
+       case reflect.Slice, reflect.Array:
+               name = "element"
+               opts = opts.WithTypeMode(elideType)
+               formatKey = func(reflect.Value) string { return "" }
+       case reflect.Map:
+               name = "entry"
+               opts = opts.WithTypeMode(elideType)
+               formatKey = formatMapKey
+       }
+
+       // Handle unification.
+       switch opts.DiffMode {
+       case diffIdentical, diffRemoved, diffInserted:
+               var list textList
+               var deferredEllipsis bool // Add final "..." to indicate records were dropped
+               for _, r := range recs {
+                       // Elide struct fields that are zero value.
+                       if k == reflect.Struct {
+                               var isZero bool
+                               switch opts.DiffMode {
+                               case diffIdentical:
+                                       isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
+                               case diffRemoved:
+                                       isZero = value.IsZero(r.Value.ValueX)
+                               case diffInserted:
+                                       isZero = value.IsZero(r.Value.ValueY)
+                               }
+                               if isZero {
+                                       continue
+                               }
+                       }
+                       // Elide ignored nodes.
+                       if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+                               deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+                               if !deferredEllipsis {
+                                       list.AppendEllipsis(diffStats{})
+                               }
+                               continue
+                       }
+                       if out := opts.FormatDiff(r.Value); out != nil {
+                               list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+                       }
+               }
+               if deferredEllipsis {
+                       list.AppendEllipsis(diffStats{})
+               }
+               return textWrap{"{", list, "}"}
+       case diffUnknown:
+       default:
+               panic("invalid diff mode")
+       }
+
+       // Handle differencing.
+       var list textList
+       groups := coalesceAdjacentRecords(name, recs)
+       for i, ds := range groups {
+               // Handle equal records.
+               if ds.NumDiff() == 0 {
+                       // Compute the number of leading and trailing records to print.
+                       var numLo, numHi int
+                       numEqual := ds.NumIgnored + ds.NumIdentical
+                       for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
+                               if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+                                       break
+                               }
+                               numLo++
+                       }
+                       for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+                               if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+                                       break
+                               }
+                               numHi++
+                       }
+                       if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
+                               numHi++ // Avoid pointless coalescing of a single equal record
+                       }
+
+                       // Format the equal values.
+                       for _, r := range recs[:numLo] {
+                               out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
+                               list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+                       }
+                       if numEqual > numLo+numHi {
+                               ds.NumIdentical -= numLo + numHi
+                               list.AppendEllipsis(ds)
+                       }
+                       for _, r := range recs[numEqual-numHi : numEqual] {
+                               out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value)
+                               list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+                       }
+                       recs = recs[numEqual:]
+                       continue
+               }
+
+               // Handle unequal records.
+               for _, r := range recs[:ds.NumDiff()] {
+                       switch {
+                       case opts.CanFormatDiffSlice(r.Value):
+                               out := opts.FormatDiffSlice(r.Value)
+                               list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+                       case r.Value.NumChildren == r.Value.MaxDepth:
+                               outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value)
+                               outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value)
+                               if outx != nil {
+                                       list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
+                               }
+                               if outy != nil {
+                                       list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
+                               }
+                       default:
+                               out := opts.FormatDiff(r.Value)
+                               list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+                       }
+               }
+               recs = recs[ds.NumDiff():]
+       }
+       assert(len(recs) == 0)
+       return textWrap{"{", list, "}"}
+}
+
+// coalesceAdjacentRecords coalesces the list of records into groups of
+// adjacent equal or unequal counts.
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
+       var prevCase int // Arbitrary index into which case last occurred
+       lastStats := func(i int) *diffStats {
+               if prevCase != i {
+                       groups = append(groups, diffStats{Name: name})
+                       prevCase = i
+               }
+               return &groups[len(groups)-1]
+       }
+       for _, r := range recs {
+               switch rv := r.Value; {
+               case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
+                       lastStats(1).NumIgnored++
+               case rv.NumDiff == 0:
+                       lastStats(1).NumIdentical++
+               case rv.NumDiff > 0 && !rv.ValueY.IsValid():
+                       lastStats(2).NumRemoved++
+               case rv.NumDiff > 0 && !rv.ValueX.IsValid():
+                       lastStats(2).NumInserted++
+               default:
+                       lastStats(2).NumModified++
+               }
+       }
+       return groups
+}
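
coalesceAdjacentRecords above is a run-length grouping: adjacent records with the same disposition (ignored/identical versus removed/inserted/modified) collapse into one counted diffStats group. A standalone sketch of the same idea, reduced to booleans (all names here are ours, not part of the package):

package main

import "fmt"

type group struct {
	equal bool
	n     int
}

// coalesce collapses adjacent entries with the same disposition into runs.
func coalesce(equals []bool) []group {
	var groups []group
	for _, eq := range equals {
		if n := len(groups); n > 0 && groups[n-1].equal == eq {
			groups[n-1].n++
			continue
		}
		groups = append(groups, group{equal: eq, n: 1})
	}
	return groups
}

func main() {
	fmt.Println(coalesce([]bool{true, true, false, true, false, false}))
	// Output: [{true 2} {false 1} {true 1} {false 2}]
}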
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644 (file)
index 0000000..5521c60
--- /dev/null
@@ -0,0 +1,279 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+       "fmt"
+       "reflect"
+       "strconv"
+       "strings"
+       "unicode"
+
+       "github.com/google/go-cmp/cmp/internal/flags"
+       "github.com/google/go-cmp/cmp/internal/value"
+)
+
+type formatValueOptions struct {
+       // AvoidStringer controls whether to avoid calling custom stringer
+       // methods like error.Error or fmt.Stringer.String.
+       AvoidStringer bool
+
+       // ShallowPointers controls whether to avoid descending into pointers.
+       // Useful when printing map keys, where pointer comparison is performed
+       // on the pointer address rather than the pointed-at value.
+       ShallowPointers bool
+
+       // PrintAddresses controls whether to print the address of all pointers,
+       // slice elements, and maps.
+       PrintAddresses bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode mode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+       // Check whether to emit the type or not.
+       switch opts.TypeMode {
+       case autoType:
+               switch t.Kind() {
+               case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+                       if s.Equal(textNil) {
+                               return s
+                       }
+               default:
+                       return s
+               }
+       case elideType:
+               return s
+       }
+
+       // Determine the type label, applying special handling for unnamed types.
+       typeName := t.String()
+       if t.Name() == "" {
+               // According to Go grammar, certain type literals contain symbols that
+               // do not strongly bind to the next lexicographical token (e.g., *T).
+               switch t.Kind() {
+               case reflect.Chan, reflect.Func, reflect.Ptr:
+                       typeName = "(" + typeName + ")"
+               }
+               typeName = strings.Replace(typeName, "struct {", "struct{", -1)
+               typeName = strings.Replace(typeName, "interface {", "interface{", -1)
+       }
+
+       // Avoid wrapping the value in parentheses if unnecessary.
+       if s, ok := s.(textWrap); ok {
+               hasParens := strings.HasPrefix(s.Prefix, "(") && strings.HasSuffix(s.Suffix, ")")
+               hasBraces := strings.HasPrefix(s.Prefix, "{") && strings.HasSuffix(s.Suffix, "}")
+               if hasParens || hasBraces {
+                       return textWrap{typeName, s, ""}
+               }
+       }
+       return textWrap{typeName + "(", s, ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in m. As pointers are visited, m is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, m visitedPointers) (out textNode) {
+       if !v.IsValid() {
+               return nil
+       }
+       t := v.Type()
+
+       // Check whether there is an Error or String method to call.
+       if !opts.AvoidStringer && v.CanInterface() {
+               // Avoid calling Error or String methods on nil receivers since many
+               // implementations crash when doing so.
+               if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+                       switch v := v.Interface().(type) {
+                       case error:
+                               return textLine("e" + formatString(v.Error()))
+                       case fmt.Stringer:
+                               return textLine("s" + formatString(v.String()))
+                       }
+               }
+       }
+
+       // Check whether to explicitly wrap the result with the type.
+       var skipType bool
+       defer func() {
+               if !skipType {
+                       out = opts.FormatType(t, out)
+               }
+       }()
+
+       var ptr string
+       switch t.Kind() {
+       case reflect.Bool:
+               return textLine(fmt.Sprint(v.Bool()))
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               return textLine(fmt.Sprint(v.Int()))
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+               // Unnamed uints are usually bytes or words, so use hexadecimal.
+               if t.PkgPath() == "" || t.Kind() == reflect.Uintptr {
+                       return textLine(formatHex(v.Uint()))
+               }
+               return textLine(fmt.Sprint(v.Uint()))
+       case reflect.Float32, reflect.Float64:
+               return textLine(fmt.Sprint(v.Float()))
+       case reflect.Complex64, reflect.Complex128:
+               return textLine(fmt.Sprint(v.Complex()))
+       case reflect.String:
+               return textLine(formatString(v.String()))
+       case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+               return textLine(formatPointer(v))
+       case reflect.Struct:
+               var list textList
+               for i := 0; i < v.NumField(); i++ {
+                       vv := v.Field(i)
+                       if value.IsZero(vv) {
+                               continue // Elide fields with zero values
+                       }
+                       s := opts.WithTypeMode(autoType).FormatValue(vv, m)
+                       list = append(list, textRecord{Key: t.Field(i).Name, Value: s})
+               }
+               return textWrap{"{", list, "}"}
+       case reflect.Slice:
+               if v.IsNil() {
+                       return textNil
+               }
+               if opts.PrintAddresses {
+                       ptr = formatPointer(v)
+               }
+               fallthrough
+       case reflect.Array:
+               var list textList
+               for i := 0; i < v.Len(); i++ {
+                       vi := v.Index(i)
+                       if vi.CanAddr() { // Check for cyclic elements
+                               p := vi.Addr()
+                               if m.Visit(p) {
+                                       var out textNode
+                                       out = textLine(formatPointer(p))
+                                       out = opts.WithTypeMode(emitType).FormatType(p.Type(), out)
+                                       out = textWrap{"*", out, ""}
+                                       list = append(list, textRecord{Value: out})
+                                       continue
+                               }
+                       }
+                       s := opts.WithTypeMode(elideType).FormatValue(vi, m)
+                       list = append(list, textRecord{Value: s})
+               }
+               return textWrap{ptr + "{", list, "}"}
+       case reflect.Map:
+               if v.IsNil() {
+                       return textNil
+               }
+               if m.Visit(v) {
+                       return textLine(formatPointer(v))
+               }
+
+               var list textList
+               for _, k := range value.SortKeys(v.MapKeys()) {
+                       sk := formatMapKey(k)
+                       sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), m)
+                       list = append(list, textRecord{Key: sk, Value: sv})
+               }
+               if opts.PrintAddresses {
+                       ptr = formatPointer(v)
+               }
+               return textWrap{ptr + "{", list, "}"}
+       case reflect.Ptr:
+               if v.IsNil() {
+                       return textNil
+               }
+               if m.Visit(v) || opts.ShallowPointers {
+                       return textLine(formatPointer(v))
+               }
+               if opts.PrintAddresses {
+                       ptr = formatPointer(v)
+               }
+               skipType = true // Let the underlying value print the type instead
+               return textWrap{"&" + ptr, opts.FormatValue(v.Elem(), m), ""}
+       case reflect.Interface:
+               if v.IsNil() {
+                       return textNil
+               }
+               // Interfaces accept different concrete types,
+               // so configure the underlying value to explicitly print the type.
+               skipType = true // Print the concrete type instead
+               return opts.WithTypeMode(emitType).FormatValue(v.Elem(), m)
+       default:
+               panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+       }
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value) string {
+       var opts formatOptions
+       opts.TypeMode = elideType
+       opts.AvoidStringer = true
+       opts.ShallowPointers = true
+       s := opts.FormatValue(v, visitedPointers{}).String()
+       return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
+func formatString(s string) string {
+       // Use the quoted string if it is the same length as a raw string literal.
+       // Otherwise, attempt to use the raw string form.
+       qs := strconv.Quote(s)
+       if len(qs) == 1+len(s)+1 {
+               return qs
+       }
+
+       // Disallow newlines to ensure output is a single line.
+       // Only allow printable runes for readability purposes.
+       rawInvalid := func(r rune) bool {
+               return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+       }
+       if strings.IndexFunc(s, rawInvalid) < 0 {
+               return "`" + s + "`"
+       }
+       return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
+func formatHex(u uint64) string {
+       var f string
+       switch {
+       case u <= 0xff:
+               f = "0x%02x"
+       case u <= 0xffff:
+               f = "0x%04x"
+       case u <= 0xffffff:
+               f = "0x%06x"
+       case u <= 0xffffffff:
+               f = "0x%08x"
+       case u <= 0xffffffffff:
+               f = "0x%010x"
+       case u <= 0xffffffffffff:
+               f = "0x%012x"
+       case u <= 0xffffffffffffff:
+               f = "0x%014x"
+       case u <= 0xffffffffffffffff:
+               f = "0x%016x"
+       }
+       return fmt.Sprintf(f, u)
+}
+
+// formatPointer prints the address of the pointer.
+func formatPointer(v reflect.Value) string {
+       p := v.Pointer()
+       if flags.Deterministic {
+               p = 0xdeadf00f // Only used for stable testing purposes
+       }
+       return fmt.Sprintf("⟪0x%x⟫", p)
+}
+
+type visitedPointers map[value.Pointer]struct{}
+
+// Visit inserts pointer v into the visited map and reports whether it had
+// already been visited before.
+func (m visitedPointers) Visit(v reflect.Value) bool {
+       p := value.PointerOf(v)
+       _, visited := m[p]
+       m[p] = struct{}{}
+       return visited
+}
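
formatHex pads to the smallest even number of hex digits that fits the value; the switch above enumerates the widths explicitly, which amounts to growing the width by two digits per significant byte. An equivalent standalone sketch of that rule:

package main

import "fmt"

// formatHex prints u in Go hex notation, two digits per significant byte.
func formatHex(u uint64) string {
	digits := 2
	for x := u; x > 0xff; x >>= 8 {
		digits += 2
	}
	return fmt.Sprintf("0x%0*x", digits, u)
}

func main() {
	fmt.Println(formatHex(0x1))     // 0x01
	fmt.Println(formatHex(0x1234))  // 0x1234
	fmt.Println(formatHex(0x12345)) // 0x012345
}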
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644 (file)
index 0000000..8cb3265
--- /dev/null
@@ -0,0 +1,333 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+       "bytes"
+       "fmt"
+       "reflect"
+       "strings"
+       "unicode"
+       "unicode/utf8"
+
+       "github.com/google/go-cmp/cmp/internal/diff"
+)
+
+// CanFormatDiffSlice reports whether we support custom formatting for nodes
+// that are slices of primitive kinds or strings.
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
+       switch {
+       case opts.DiffMode != diffUnknown:
+               return false // Must be formatting in diff mode
+       case v.NumDiff == 0:
+               return false // No differences detected
+       case v.NumIgnored+v.NumCompared+v.NumTransformed > 0:
+               // TODO: Handle the case where someone uses bytes.Equal on a large slice.
+               return false // Some custom option was used to determine equality
+       case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+               return false // Both values must be valid
+       }
+
+       switch t := v.Type; t.Kind() {
+       case reflect.String:
+       case reflect.Array, reflect.Slice:
+               // Only slices of primitive types have specialized handling.
+               switch t.Elem().Kind() {
+               case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+                       reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+                       reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+               default:
+                       return false
+               }
+
+               // If a sufficient number of elements already differ,
+       // use specialized formatting even if the length requirement is not met.
+               if v.NumDiff > v.NumSame {
+                       return true
+               }
+       default:
+               return false
+       }
+
+       // Use specialized string diffing for longer slices or strings.
+       const minLength = 64
+       return v.ValueX.Len() >= minLength && v.ValueY.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+       assert(opts.DiffMode == diffUnknown)
+       t, vx, vy := v.Type, v.ValueX, v.ValueY
+
+       // Auto-detect the type of the data.
+       var isLinedText, isText, isBinary bool
+       var sx, sy string
+       switch {
+       case t.Kind() == reflect.String:
+               sx, sy = vx.String(), vy.String()
+               isText = true // Initial estimate, verify later
+       case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
+               sx, sy = string(vx.Bytes()), string(vy.Bytes())
+               isBinary = true // Initial estimate, verify later
+       case t.Kind() == reflect.Array:
+               // Arrays need to be addressable for slice operations to work.
+               vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+               vx2.Set(vx)
+               vy2.Set(vy)
+               vx, vy = vx2, vy2
+       }
+       if isText || isBinary {
+               var numLines, lastLineIdx, maxLineLen int
+               isBinary = false
+               for i, r := range sx + sy {
+                       if !(unicode.IsPrint(r) || unicode.IsSpace(r)) || r == utf8.RuneError {
+                               isBinary = true
+                               break
+                       }
+                       if r == '\n' {
+                               if maxLineLen < i-lastLineIdx {
+                                       maxLineLen = i - lastLineIdx
+                               }
+                               lastLineIdx = i + 1
+                               numLines++
+                       }
+               }
+               isText = !isBinary
+               isLinedText = isText && numLines >= 4 && maxLineLen <= 256
+       }
+
+       // Format the string into printable records.
+       var list textList
+       var delim string
+       switch {
+       // If the text appears to be multi-lined text,
+       // then perform differencing across individual lines.
+       case isLinedText:
+               ssx := strings.Split(sx, "\n")
+               ssy := strings.Split(sy, "\n")
+               list = opts.formatDiffSlice(
+                       reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+                       func(v reflect.Value, d diffMode) textRecord {
+                               s := formatString(v.Index(0).String())
+                               return textRecord{Diff: d, Value: textLine(s)}
+                       },
+               )
+               delim = "\n"
+       // If the text appears to be single-lined text,
+       // then perform differencing in approximately fixed-sized chunks.
+       // The output is printed as quoted strings.
+       case isText:
+               list = opts.formatDiffSlice(
+                       reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+                       func(v reflect.Value, d diffMode) textRecord {
+                               s := formatString(v.String())
+                               return textRecord{Diff: d, Value: textLine(s)}
+                       },
+               )
+               delim = ""
+       // If the text appears to be binary data,
+       // then perform differencing in approximately fixed-sized chunks.
+       // The output is inspired by hexdump.
+       case isBinary:
+               list = opts.formatDiffSlice(
+                       reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
+                       func(v reflect.Value, d diffMode) textRecord {
+                               var ss []string
+                               for i := 0; i < v.Len(); i++ {
+                                       ss = append(ss, formatHex(v.Index(i).Uint()))
+                               }
+                               s := strings.Join(ss, ", ")
+                               comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
+                               return textRecord{Diff: d, Value: textLine(s), Comment: comment}
+                       },
+               )
+       // For all other slices of primitive types,
+       // then perform differencing in approximately fixed-sized chunks.
+       // The size of each chunk depends on the width of the element kind.
+       default:
+               var chunkSize int
+               if t.Elem().Kind() == reflect.Bool {
+                       chunkSize = 16
+               } else {
+                       switch t.Elem().Bits() {
+                       case 8:
+                               chunkSize = 16
+                       case 16:
+                               chunkSize = 12
+                       case 32:
+                               chunkSize = 8
+                       default:
+                               chunkSize = 8
+                       }
+               }
+               list = opts.formatDiffSlice(
+                       vx, vy, chunkSize, t.Elem().Kind().String(),
+                       func(v reflect.Value, d diffMode) textRecord {
+                               var ss []string
+                               for i := 0; i < v.Len(); i++ {
+                                       switch t.Elem().Kind() {
+                                       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+                                               ss = append(ss, fmt.Sprint(v.Index(i).Int()))
+                                       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+                                               ss = append(ss, formatHex(v.Index(i).Uint()))
+                                       case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+                                               ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
+                                       }
+                               }
+                               s := strings.Join(ss, ", ")
+                               return textRecord{Diff: d, Value: textLine(s)}
+                       },
+               )
+       }
+
+       // Wrap the output with appropriate type information.
+       var out textNode = textWrap{"{", list, "}"}
+       if !isText {
+               // The "{...}" byte-sequence literal is not valid Go syntax for strings.
+               // Emit the type for extra clarity (e.g. "string{...}").
+               if t.Kind() == reflect.String {
+                       opts = opts.WithTypeMode(emitType)
+               }
+               return opts.FormatType(t, out)
+       }
+       switch t.Kind() {
+       case reflect.String:
+               out = textWrap{"strings.Join(", out, fmt.Sprintf(", %q)", delim)}
+               if t != reflect.TypeOf(string("")) {
+                       out = opts.FormatType(t, out)
+               }
+       case reflect.Slice:
+               out = textWrap{"bytes.Join(", out, fmt.Sprintf(", %q)", delim)}
+               if t != reflect.TypeOf([]byte(nil)) {
+                       out = opts.FormatType(t, out)
+               }
+       }
+       return out
+}
+
+// formatASCII formats s as an ASCII string.
+// This is useful for printing binary strings in a semi-legible way.
+func formatASCII(s string) string {
+       b := bytes.Repeat([]byte{'.'}, len(s))
+       for i := 0; i < len(s); i++ {
+               if ' ' <= s[i] && s[i] <= '~' {
+                       b[i] = s[i]
+               }
+       }
+       return string(b)
+}
+
+func (opts formatOptions) formatDiffSlice(
+       vx, vy reflect.Value, chunkSize int, name string,
+       makeRec func(reflect.Value, diffMode) textRecord,
+) (list textList) {
+       es := diff.Difference(vx.Len(), vy.Len(), func(ix int, iy int) diff.Result {
+               return diff.BoolResult(vx.Index(ix).Interface() == vy.Index(iy).Interface())
+       })
+
+       appendChunks := func(v reflect.Value, d diffMode) int {
+               n0 := v.Len()
+               for v.Len() > 0 {
+                       n := chunkSize
+                       if n > v.Len() {
+                               n = v.Len()
+                       }
+                       list = append(list, makeRec(v.Slice(0, n), d))
+                       v = v.Slice(n, v.Len())
+               }
+               return n0 - v.Len()
+       }
+
+       groups := coalesceAdjacentEdits(name, es)
+       groups = coalesceInterveningIdentical(groups, chunkSize/4)
+       for i, ds := range groups {
+               // Print equal.
+               if ds.NumDiff() == 0 {
+                       // Compute the number of leading and trailing equal bytes to print.
+                       var numLo, numHi int
+                       numEqual := ds.NumIgnored + ds.NumIdentical
+                       for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
+                               numLo++
+                       }
+                       for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+                               numHi++
+                       }
+                       if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
+                               numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
+                       }
+
+                       // Print the equal bytes.
+                       appendChunks(vx.Slice(0, numLo), diffIdentical)
+                       if numEqual > numLo+numHi {
+                               ds.NumIdentical -= numLo + numHi
+                               list.AppendEllipsis(ds)
+                       }
+                       appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
+                       vx = vx.Slice(numEqual, vx.Len())
+                       vy = vy.Slice(numEqual, vy.Len())
+                       continue
+               }
+
+               // Print unequal.
+               nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
+               vx = vx.Slice(nx, vx.Len())
+               ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
+               vy = vy.Slice(ny, vy.Len())
+       }
+       assert(vx.Len() == 0 && vy.Len() == 0)
+       return list
+}
+
+// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
+// equal or unequal counts.
+func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
+       var prevCase int // Arbitrary index into which case last occurred
+       lastStats := func(i int) *diffStats {
+               if prevCase != i {
+                       groups = append(groups, diffStats{Name: name})
+                       prevCase = i
+               }
+               return &groups[len(groups)-1]
+       }
+       for _, e := range es {
+               switch e {
+               case diff.Identity:
+                       lastStats(1).NumIdentical++
+               case diff.UniqueX:
+                       lastStats(2).NumRemoved++
+               case diff.UniqueY:
+                       lastStats(2).NumInserted++
+               case diff.Modified:
+                       lastStats(2).NumModified++
+               }
+       }
+       return groups
+}
+
+// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
+// equal groups into adjacent unequal groups that currently result in a
+// dual inserted/removed printout. This acts as a high-pass filter to smooth
+// out high-frequency changes within the windowSize.
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+       groups, groupsOrig := groups[:0], groups
+       for i, ds := range groupsOrig {
+               if len(groups) >= 2 && ds.NumDiff() > 0 {
+                       prev := &groups[len(groups)-2] // Unequal group
+                       curr := &groups[len(groups)-1] // Equal group
+                       next := &groupsOrig[i]         // Unequal group
+                       hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+                       hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+                       if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+                               *prev = (*prev).Append(*curr).Append(*next)
+                               groups = groups[:len(groups)-1] // Truncate off equal group
+                               continue
+                       }
+               }
+               groups = append(groups, ds)
+       }
+       return groups
+}
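
To make the grouping concrete, here is a minimal, self-contained sketch of the coalescing idea in coalesceAdjacentEdits, using simplified stand-ins for the unexported diff.EditScript and diffStats types:

    package main

    import "fmt"

    type stats struct{ NumIdentical, NumRemoved, NumInserted int }

    func coalesce(es []byte) (groups []stats) {
            prev := 0 // 1 = equal run, 2 = unequal run
            last := func(i int) *stats {
                    if prev != i {
                            groups = append(groups, stats{})
                            prev = i
                    }
                    return &groups[len(groups)-1]
            }
            for _, e := range es {
                    switch e {
                    case '=': // diff.Identity
                            last(1).NumIdentical++
                    case 'X': // diff.UniqueX
                            last(2).NumRemoved++
                    case 'Y': // diff.UniqueY
                            last(2).NumInserted++
                    }
            }
            return groups
    }

    func main() {
            // Adjacent equal/unequal runs collapse into alternating groups:
            // "==XY=X" -> [{2 0 0} {0 1 1} {1 0 0} {0 1 0}]
            fmt.Println(coalesce([]byte("==XY=X")))
    }
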
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
new file mode 100644 (file)
index 0000000..80605d0
--- /dev/null
@@ -0,0 +1,382 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import (
+       "bytes"
+       "fmt"
+       "math/rand"
+       "strings"
+       "time"
+
+       "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+type indentMode int
+
+func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
+       if flags.Deterministic || randBool {
+               // Use regular spaces (U+0020).
+               switch d {
+               case diffUnknown, diffIdentical:
+                       b = append(b, "  "...)
+               case diffRemoved:
+                       b = append(b, "- "...)
+               case diffInserted:
+                       b = append(b, "+ "...)
+               }
+       } else {
+               // Use non-breaking spaces (U+00a0).
+               switch d {
+               case diffUnknown, diffIdentical:
+                       b = append(b, "  "...)
+               case diffRemoved:
+                       b = append(b, "- "...)
+               case diffInserted:
+                       b = append(b, "+ "...)
+               }
+       }
+       return repeatCount(n).appendChar(b, '\t')
+}
+
+type repeatCount int
+
+func (n repeatCount) appendChar(b []byte, c byte) []byte {
+       for ; n > 0; n-- {
+               b = append(b, c)
+       }
+       return b
+}
+
+// textNode is a simplified tree-based representation of structured text.
+// Possible node types are textWrap, textList, or textLine.
+type textNode interface {
+       // Len reports the length in bytes of a single-line version of the tree.
+       // Nested textRecord.Diff and textRecord.Comment fields are ignored.
+       Len() int
+       // Equal reports whether the two trees are structurally identical.
+       // Nested textRecord.Diff and textRecord.Comment fields are compared.
+       Equal(textNode) bool
+       // String returns the string representation of the text tree.
+       // It is not guaranteed that len(x.String()) == x.Len(),
+       // nor that x.String() == y.String() implies that x.Equal(y).
+       String() string
+
+       // formatCompactTo formats the contents of the tree as a single-line string
+       // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
+       // fields are ignored.
+       //
+       // However, not all nodes in the tree should be collapsed into a single line.
+       // If a node can be collapsed into a single line, it is replaced by a textLine
+       // node. Since the top-level node cannot replace itself, this also returns
+       // the current node itself.
+       //
+       // This does not mutate the receiver.
+       formatCompactTo([]byte, diffMode) ([]byte, textNode)
+       // formatExpandedTo formats the contents of the tree as a multi-line string
+       // to the provided buffer. In order for column alignment to operate well,
+       // formatCompactTo must be called before calling formatExpandedTo.
+       formatExpandedTo([]byte, diffMode, indentMode) []byte
+}
+
+// textWrap is a wrapper that concatenates a prefix and/or a suffix
+// to the underlying node.
+type textWrap struct {
+       Prefix string   // e.g., "bytes.Buffer{"
+       Value  textNode // textWrap | textList | textLine
+       Suffix string   // e.g., "}"
+}
+
+func (s textWrap) Len() int {
+       return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
+}
+func (s1 textWrap) Equal(s2 textNode) bool {
+       if s2, ok := s2.(textWrap); ok {
+               return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
+       }
+       return false
+}
+func (s textWrap) String() string {
+       var d diffMode
+       var n indentMode
+       _, s2 := s.formatCompactTo(nil, d)
+       b := n.appendIndent(nil, d)      // Leading indent
+       b = s2.formatExpandedTo(b, d, n) // Main body
+       b = append(b, '\n')              // Trailing newline
+       return string(b)
+}
+func (s textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+       n0 := len(b) // Original buffer length
+       b = append(b, s.Prefix...)
+       b, s.Value = s.Value.formatCompactTo(b, d)
+       b = append(b, s.Suffix...)
+       if _, ok := s.Value.(textLine); ok {
+               return b, textLine(b[n0:])
+       }
+       return b, s
+}
+func (s textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+       b = append(b, s.Prefix...)
+       b = s.Value.formatExpandedTo(b, d, n)
+       b = append(b, s.Suffix...)
+       return b
+}
+
+// textList is a comma-separated list of textWrap or textLine nodes.
+// The list may be formatted as multiple lines or a single line, at the discretion
+// of the textList.formatCompactTo method.
+type textList []textRecord
+type textRecord struct {
+       Diff    diffMode     // e.g., 0 or '-' or '+'
+       Key     string       // e.g., "MyField"
+       Value   textNode     // textWrap | textLine
+       Comment fmt.Stringer // e.g., "6 identical fields"
+}
+
+// AppendEllipsis appends a new ellipsis node to the list if none already
+// exists at the end. If ds is non-zero, it coalesces the statistics with the
+// previous diffStats.
+func (s *textList) AppendEllipsis(ds diffStats) {
+       hasStats := ds != diffStats{}
+       if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
+               if hasStats {
+                       *s = append(*s, textRecord{Value: textEllipsis, Comment: ds})
+               } else {
+                       *s = append(*s, textRecord{Value: textEllipsis})
+               }
+               return
+       }
+       if hasStats {
+               (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
+       }
+}
+
+func (s textList) Len() (n int) {
+       for i, r := range s {
+               n += len(r.Key)
+               if r.Key != "" {
+                       n += len(": ")
+               }
+               n += r.Value.Len()
+               if i < len(s)-1 {
+                       n += len(", ")
+               }
+       }
+       return n
+}
+
+func (s1 textList) Equal(s2 textNode) bool {
+       if s2, ok := s2.(textList); ok {
+               if len(s1) != len(s2) {
+                       return false
+               }
+               for i := range s1 {
+                       r1, r2 := s1[i], s2[i]
+                       if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
+                               return false
+                       }
+               }
+               return true
+       }
+       return false
+}
+
+func (s textList) String() string {
+       return textWrap{"{", s, "}"}.String()
+}
+
+func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+       s = append(textList(nil), s...) // Avoid mutating original
+
+       // Determine whether we can collapse this list as a single line.
+       n0 := len(b) // Original buffer length
+       var multiLine bool
+       for i, r := range s {
+               if r.Diff == diffInserted || r.Diff == diffRemoved {
+                       multiLine = true
+               }
+               b = append(b, r.Key...)
+               if r.Key != "" {
+                       b = append(b, ": "...)
+               }
+               b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
+               if _, ok := s[i].Value.(textLine); !ok {
+                       multiLine = true
+               }
+               if r.Comment != nil {
+                       multiLine = true
+               }
+               if i < len(s)-1 {
+                       b = append(b, ", "...)
+               }
+       }
+       // Force multi-lined output when printing a removed/inserted node that
+       // is sufficiently long.
+       if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > 80 {
+               multiLine = true
+       }
+       if !multiLine {
+               return b, textLine(b[n0:])
+       }
+       return b, s
+}
+
+func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+       alignKeyLens := s.alignLens(
+               func(r textRecord) bool {
+                       _, isLine := r.Value.(textLine)
+                       return r.Key == "" || !isLine
+               },
+               func(r textRecord) int { return len(r.Key) },
+       )
+       alignValueLens := s.alignLens(
+               func(r textRecord) bool {
+                       _, isLine := r.Value.(textLine)
+                       return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
+               },
+               func(r textRecord) int { return len(r.Value.(textLine)) },
+       )
+
+       // Format the list as a multi-lined output.
+       n++
+       for i, r := range s {
+               b = n.appendIndent(append(b, '\n'), d|r.Diff)
+               if r.Key != "" {
+                       b = append(b, r.Key+": "...)
+               }
+               b = alignKeyLens[i].appendChar(b, ' ')
+
+               b = r.Value.formatExpandedTo(b, d|r.Diff, n)
+               if !r.Value.Equal(textEllipsis) {
+                       b = append(b, ',')
+               }
+               b = alignValueLens[i].appendChar(b, ' ')
+
+               if r.Comment != nil {
+                       b = append(b, " // "+r.Comment.String()...)
+               }
+       }
+       n--
+
+       return n.appendIndent(append(b, '\n'), d)
+}
+
+func (s textList) alignLens(
+       skipFunc func(textRecord) bool,
+       lenFunc func(textRecord) int,
+) []repeatCount {
+       var startIdx, endIdx, maxLen int
+       lens := make([]repeatCount, len(s))
+       for i, r := range s {
+               if skipFunc(r) {
+                       for j := startIdx; j < endIdx && j < len(s); j++ {
+                               lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+                       }
+                       startIdx, endIdx, maxLen = i+1, i+1, 0
+               } else {
+                       if maxLen < lenFunc(r) {
+                               maxLen = lenFunc(r)
+                       }
+                       endIdx = i + 1
+               }
+       }
+       for j := startIdx; j < endIdx && j < len(s); j++ {
+               lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+       }
+       return lens
+}
+
+// textLine is a single-line segment of text and is always a leaf node
+// in the textNode tree.
+type textLine []byte
+
+var (
+       textNil      = textLine("nil")
+       textEllipsis = textLine("...")
+)
+
+func (s textLine) Len() int {
+       return len(s)
+}
+func (s1 textLine) Equal(s2 textNode) bool {
+       if s2, ok := s2.(textLine); ok {
+               return bytes.Equal([]byte(s1), []byte(s2))
+       }
+       return false
+}
+func (s textLine) String() string {
+       return string(s)
+}
+func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+       return append(b, s...), s
+}
+func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
+       return append(b, s...)
+}
+
+type diffStats struct {
+       Name         string
+       NumIgnored   int
+       NumIdentical int
+       NumRemoved   int
+       NumInserted  int
+       NumModified  int
+}
+
+func (s diffStats) NumDiff() int {
+       return s.NumRemoved + s.NumInserted + s.NumModified
+}
+
+func (s diffStats) Append(ds diffStats) diffStats {
+       assert(s.Name == ds.Name)
+       s.NumIgnored += ds.NumIgnored
+       s.NumIdentical += ds.NumIdentical
+       s.NumRemoved += ds.NumRemoved
+       s.NumInserted += ds.NumInserted
+       s.NumModified += ds.NumModified
+       return s
+}
+
+// String prints a human-readable summary of coalesced records.
+//
+// Example:
+//     diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
+func (s diffStats) String() string {
+       var ss []string
+       var sum int
+       labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
+       counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
+       for i, n := range counts {
+               if n > 0 {
+                       ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
+               }
+               sum += n
+       }
+
+       // Pluralize the name (adjusting for some obscure English grammar rules).
+       name := s.Name
+       if sum > 1 {
+               name = name + "s"
+               if strings.HasSuffix(name, "ys") {
+                       name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
+               }
+       }
+
+       // Format the list according to English grammar (with Oxford comma).
+       switch n := len(ss); n {
+       case 0:
+               return ""
+       case 1, 2:
+               return strings.Join(ss, " and ") + " " + name
+       default:
+               return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+       }
+}
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
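
A small, self-contained sketch of the pluralization and Oxford-comma rules that diffStats.String applies (the names here are hypothetical stand-ins, not the cmp API):

    package main

    import (
            "fmt"
            "strings"
    )

    // summarize mirrors diffStats.String: pluralize the noun,
    // then join the counts with an Oxford comma.
    func summarize(name string, counts map[string]int) string {
            var ss []string
            sum := 0
            for _, label := range []string{"ignored", "removed", "inserted"} {
                    if n := counts[label]; n > 0 {
                            ss = append(ss, fmt.Sprintf("%d %s", n, label))
                            sum += n
                    }
            }
            if sum > 1 {
                    name += "s"
                    if strings.HasSuffix(name, "ys") {
                            name = name[:len(name)-2] + "ies" // "entrys" => "entries"
                    }
            }
            switch n := len(ss); n {
            case 0:
                    return ""
            case 1, 2:
                    return strings.Join(ss, " and ") + " " + name
            default:
                    return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
            }
    }

    func main() {
            fmt.Println(summarize("entry", map[string]int{"ignored": 2, "removed": 1, "inserted": 1}))
            // 2 ignored, 1 removed, and 1 inserted entries
    }
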
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644 (file)
index 0000000..83031a7
--- /dev/null
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.md file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+       parent *valueNode
+
+       Type   reflect.Type
+       ValueX reflect.Value
+       ValueY reflect.Value
+
+       // NumSame is the number of leaf nodes that are equal.
+       // All descendants are equal only if NumDiff is 0.
+       NumSame int
+       // NumDiff is the number of leaf nodes that are not equal.
+       NumDiff int
+       // NumIgnored is the number of leaf nodes that are ignored.
+       NumIgnored int
+       // NumCompared is the number of leaf nodes that were compared
+       // using an Equal method or Comparer function.
+       NumCompared int
+       // NumTransformed is the number of non-leaf nodes that were transformed.
+       NumTransformed int
+       // NumChildren is the number of transitive descendants of this node.
+       // This counts from zero; thus, leaf nodes have no descendants.
+       NumChildren int
+       // MaxDepth is the maximum depth of the tree. This counts from zero;
+       // thus, leaf nodes have a depth of zero.
+       MaxDepth int
+
+       // Records is a list of struct fields, slice elements, or map entries.
+       Records []reportRecord // If populated, implies Value is not populated
+
+       // Value is the result of a transformation, pointer indirection, or
+       // type assertion.
+       Value *valueNode // If populated, implies Records is not populated
+
+       // TransformerName is the name of the transformer.
+       TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+       Key   reflect.Value // Invalid for slice element
+       Value *valueNode
+}
+
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+       vx, vy := ps.Values()
+       child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+       switch s := ps.(type) {
+       case StructField:
+               assert(parent.Value == nil)
+               parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+       case SliceIndex:
+               assert(parent.Value == nil)
+               parent.Records = append(parent.Records, reportRecord{Value: child})
+       case MapIndex:
+               assert(parent.Value == nil)
+               parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+       case Indirect:
+               assert(parent.Value == nil && parent.Records == nil)
+               parent.Value = child
+       case TypeAssertion:
+               assert(parent.Value == nil && parent.Records == nil)
+               parent.Value = child
+       case Transform:
+               assert(parent.Value == nil && parent.Records == nil)
+               parent.Value = child
+               parent.TransformerName = s.Name()
+               parent.NumTransformed++
+       default:
+               assert(parent == nil) // Must be the root step
+       }
+       return child
+}
+
+func (r *valueNode) Report(rs Result) {
+       assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+       if rs.ByIgnore() {
+               r.NumIgnored++
+       } else {
+               if rs.Equal() {
+                       r.NumSame++
+               } else {
+                       r.NumDiff++
+               }
+       }
+       assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+       if rs.ByMethod() {
+               r.NumCompared++
+       }
+       if rs.ByFunc() {
+               r.NumCompared++
+       }
+       assert(r.NumCompared <= 1)
+}
+
+func (child *valueNode) PopStep() (parent *valueNode) {
+       if child.parent == nil {
+               return nil
+       }
+       parent = child.parent
+       parent.NumSame += child.NumSame
+       parent.NumDiff += child.NumDiff
+       parent.NumIgnored += child.NumIgnored
+       parent.NumCompared += child.NumCompared
+       parent.NumTransformed += child.NumTransformed
+       parent.NumChildren += child.NumChildren + 1
+       if parent.MaxDepth < child.MaxDepth+1 {
+               parent.MaxDepth = child.MaxDepth + 1
+       }
+       return parent
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/reporter.go b/vendor/github.com/google/go-cmp/cmp/reporter.go
deleted file mode 100644 (file)
index 20e9f18..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017, The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE.md file.
-
-package cmp
-
-import (
-       "fmt"
-       "reflect"
-       "strings"
-
-       "github.com/google/go-cmp/cmp/internal/value"
-)
-
-type defaultReporter struct {
-       Option
-       diffs  []string // List of differences, possibly truncated
-       ndiffs int      // Total number of differences
-       nbytes int      // Number of bytes in diffs
-       nlines int      // Number of lines in diffs
-}
-
-var _ reporter = (*defaultReporter)(nil)
-
-func (r *defaultReporter) Report(x, y reflect.Value, eq bool, p Path) {
-       if eq {
-               return // Ignore equal results
-       }
-       const maxBytes = 4096
-       const maxLines = 256
-       r.ndiffs++
-       if r.nbytes < maxBytes && r.nlines < maxLines {
-               sx := value.Format(x, value.FormatConfig{UseStringer: true})
-               sy := value.Format(y, value.FormatConfig{UseStringer: true})
-               if sx == sy {
-                       // Unhelpful output, so use more exact formatting.
-                       sx = value.Format(x, value.FormatConfig{PrintPrimitiveType: true})
-                       sy = value.Format(y, value.FormatConfig{PrintPrimitiveType: true})
-               }
-               s := fmt.Sprintf("%#v:\n\t-: %s\n\t+: %s\n", p, sx, sy)
-               r.diffs = append(r.diffs, s)
-               r.nbytes += len(s)
-               r.nlines += strings.Count(s, "\n")
-       }
-}
-
-func (r *defaultReporter) String() string {
-       s := strings.Join(r.diffs, "")
-       if r.ndiffs == len(r.diffs) {
-               return s
-       }
-       return fmt.Sprintf("%s... %d more differences ...", s, r.ndiffs-len(r.diffs))
-}
diff --git a/vendor/github.com/google/go-querystring/LICENSE b/vendor/github.com/google/go-querystring/LICENSE
new file mode 100644 (file)
index 0000000..ae121a1
--- /dev/null
@@ -0,0 +1,27 @@
+Copyright (c) 2013 Google. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go
new file mode 100644 (file)
index 0000000..37080b1
--- /dev/null
@@ -0,0 +1,320 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package query implements encoding of structs into URL query parameters.
+//
+// As a simple example:
+//
+//     type Options struct {
+//             Query   string `url:"q"`
+//             ShowAll bool   `url:"all"`
+//             Page    int    `url:"page"`
+//     }
+//
+//     opt := Options{ "foo", true, 2 }
+//     v, _ := query.Values(opt)
+//     fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2"
+//
+// The exact mapping between Go values and url.Values is described in the
+// documentation for the Values() function.
+package query
+
+import (
+       "bytes"
+       "fmt"
+       "net/url"
+       "reflect"
+       "strconv"
+       "strings"
+       "time"
+)
+
+var timeType = reflect.TypeOf(time.Time{})
+
+var encoderType = reflect.TypeOf(new(Encoder)).Elem()
+
+// Encoder is an interface implemented by any type that wishes to encode
+// itself into URL values in a non-standard way.
+type Encoder interface {
+       EncodeValues(key string, v *url.Values) error
+}
+
+// Values returns the url.Values encoding of v.
+//
+// Values expects to be passed a struct, and traverses it recursively using the
+// following encoding rules.
+//
+// Each exported struct field is encoded as a URL parameter unless
+//
+//     - the field's tag is "-", or
+//     - the field is empty and its tag specifies the "omitempty" option
+//
+// The empty values are false, 0, any nil pointer or interface value, any
+// array, slice, map, or string of length zero, and any time.Time that returns true
+// for IsZero().
+//
+// The URL parameter name defaults to the struct field name but can be
+// specified in the struct field's tag value.  The "url" key in the struct
+// field's tag value is the key name, followed by an optional comma and
+// options.  For example:
+//
+//     // Field is ignored by this package.
+//     Field int `url:"-"`
+//
+//     // Field appears as URL parameter "myName".
+//     Field int `url:"myName"`
+//
+//     // Field appears as URL parameter "myName" and the field is omitted if
+//     // its value is empty
+//     Field int `url:"myName,omitempty"`
+//
+//     // Field appears as URL parameter "Field" (the default), but the field
+//     // is skipped if empty.  Note the leading comma.
+//     Field int `url:",omitempty"`
+//
+// For encoding individual field values, the following type-dependent rules
+// apply:
+//
+// Boolean values default to encoding as the strings "true" or "false".
+// Including the "int" option signals that the field should be encoded as the
+// strings "1" or "0".
+//
+// time.Time values default to encoding as RFC3339 timestamps.  Including the
+// "unix" option signals that the field should be encoded as a Unix time (see
+// time.Unix())
+//
+// Slice and Array values default to encoding as multiple URL values of the
+// same name.  Including the "comma" option signals that the field should be
+// encoded as a single comma-delimited value.  Including the "space" option
+// similarly encodes the value as a single space-delimited string. Including
+// the "semicolon" option will encode the value as a semicolon-delimited string.
+// Including the "brackets" option signals that the multiple URL values should
+// have "[]" appended to the value name. "numbered" will append a number to
+// the end of each incidence of the value name, example:
+// name0=value0&name1=value1, etc.
+//
+// Anonymous struct fields are usually encoded as if their inner exported
+// fields were fields in the outer struct, subject to the standard Go
+// visibility rules.  An anonymous struct field with a name given in its URL
+// tag is treated as having that name, rather than being anonymous.
+//
+// Non-nil pointer values are encoded as the value pointed to.
+//
+// Nested structs are encoded including parent fields in value names for
+// scoping, e.g.:
+//
+//     "user[name]=acme&user[addr][postcode]=1234&user[addr][city]=SFO"
+//
+// All other values are encoded using their default string representation.
+//
+// Multiple fields that encode to the same URL parameter name will be included
+// as multiple URL values of the same name.
+func Values(v interface{}) (url.Values, error) {
+       values := make(url.Values)
+       val := reflect.ValueOf(v)
+       for val.Kind() == reflect.Ptr {
+               if val.IsNil() {
+                       return values, nil
+               }
+               val = val.Elem()
+       }
+
+       if v == nil {
+               return values, nil
+       }
+
+       if val.Kind() != reflect.Struct {
+               return nil, fmt.Errorf("query: Values() expects struct input. Got %v", val.Kind())
+       }
+
+       err := reflectValue(values, val, "")
+       return values, err
+}
+
+// reflectValue populates the values parameter from the struct fields in val.
+// Embedded structs are followed recursively (using the rules defined in the
+// Values function documentation) breadth-first.
+func reflectValue(values url.Values, val reflect.Value, scope string) error {
+       var embedded []reflect.Value
+
+       typ := val.Type()
+       for i := 0; i < typ.NumField(); i++ {
+               sf := typ.Field(i)
+               if sf.PkgPath != "" && !sf.Anonymous { // unexported
+                       continue
+               }
+
+               sv := val.Field(i)
+               tag := sf.Tag.Get("url")
+               if tag == "-" {
+                       continue
+               }
+               name, opts := parseTag(tag)
+               if name == "" {
+                       if sf.Anonymous && sv.Kind() == reflect.Struct {
+                               // save embedded struct for later processing
+                               embedded = append(embedded, sv)
+                               continue
+                       }
+
+                       name = sf.Name
+               }
+
+               if scope != "" {
+                       name = scope + "[" + name + "]"
+               }
+
+               if opts.Contains("omitempty") && isEmptyValue(sv) {
+                       continue
+               }
+
+               if sv.Type().Implements(encoderType) {
+                       if !reflect.Indirect(sv).IsValid() {
+                               sv = reflect.New(sv.Type().Elem())
+                       }
+
+                       m := sv.Interface().(Encoder)
+                       if err := m.EncodeValues(name, &values); err != nil {
+                               return err
+                       }
+                       continue
+               }
+
+               if sv.Kind() == reflect.Slice || sv.Kind() == reflect.Array {
+                       var del byte
+                       if opts.Contains("comma") {
+                               del = ','
+                       } else if opts.Contains("space") {
+                               del = ' '
+                       } else if opts.Contains("semicolon") {
+                               del = ';'
+                       } else if opts.Contains("brackets") {
+                               name = name + "[]"
+                       }
+
+                       if del != 0 {
+                               s := new(bytes.Buffer)
+                               first := true
+                               for i := 0; i < sv.Len(); i++ {
+                                       if first {
+                                               first = false
+                                       } else {
+                                               s.WriteByte(del)
+                                       }
+                                       s.WriteString(valueString(sv.Index(i), opts))
+                               }
+                               values.Add(name, s.String())
+                       } else {
+                               for i := 0; i < sv.Len(); i++ {
+                                       k := name
+                                       if opts.Contains("numbered") {
+                                               k = fmt.Sprintf("%s%d", name, i)
+                                       }
+                                       values.Add(k, valueString(sv.Index(i), opts))
+                               }
+                       }
+                       continue
+               }
+
+               for sv.Kind() == reflect.Ptr {
+                       if sv.IsNil() {
+                               break
+                       }
+                       sv = sv.Elem()
+               }
+
+               if sv.Type() == timeType {
+                       values.Add(name, valueString(sv, opts))
+                       continue
+               }
+
+               if sv.Kind() == reflect.Struct {
+                       reflectValue(values, sv, name)
+                       continue
+               }
+
+               values.Add(name, valueString(sv, opts))
+       }
+
+       for _, f := range embedded {
+               if err := reflectValue(values, f, scope); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+// valueString returns the string representation of a value.
+func valueString(v reflect.Value, opts tagOptions) string {
+       for v.Kind() == reflect.Ptr {
+               if v.IsNil() {
+                       return ""
+               }
+               v = v.Elem()
+       }
+
+       if v.Kind() == reflect.Bool && opts.Contains("int") {
+               if v.Bool() {
+                       return "1"
+               }
+               return "0"
+       }
+
+       if v.Type() == timeType {
+               t := v.Interface().(time.Time)
+               if opts.Contains("unix") {
+                       return strconv.FormatInt(t.Unix(), 10)
+               }
+               return t.Format(time.RFC3339)
+       }
+
+       return fmt.Sprint(v.Interface())
+}
+
+// isEmptyValue checks if a value should be considered empty for the purposes
+// of omitting fields with the "omitempty" option.
+func isEmptyValue(v reflect.Value) bool {
+       switch v.Kind() {
+       case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+               return v.Len() == 0
+       case reflect.Bool:
+               return !v.Bool()
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               return v.Int() == 0
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+               return v.Uint() == 0
+       case reflect.Float32, reflect.Float64:
+               return v.Float() == 0
+       case reflect.Interface, reflect.Ptr:
+               return v.IsNil()
+       }
+
+       if v.Type() == timeType {
+               return v.Interface().(time.Time).IsZero()
+       }
+
+       return false
+}
+
+// tagOptions is the string following a comma in a struct field's "url" tag, or
+// the empty string. It does not include the leading comma.
+type tagOptions []string
+
+// parseTag splits a struct field's url tag into its name and comma-separated
+// options.
+func parseTag(tag string) (string, tagOptions) {
+       s := strings.Split(tag, ",")
+       return s[0], s[1:]
+}
+
+// Contains checks whether the tagOptions contains the specified option.
+func (o tagOptions) Contains(option string) bool {
+       for _, s := range o {
+               if s == option {
+                       return true
+               }
+       }
+       return false
+}
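
Since the doc comment above enumerates the tag options, a short usage sketch may help; the struct and values are illustrative, but query.Values and the tag options are the package's real API:

    package main

    import (
            "fmt"
            "time"

            "github.com/google/go-querystring/query"
    )

    type SearchOptions struct {
            Query    string    `url:"q"`
            ShowAll  bool      `url:"all,int"`       // encoded as 1/0
            Tags     []string  `url:"tag,comma"`     // single comma-joined value
            Pages    []int     `url:"page,numbered"` // page0=..&page1=..
            Since    time.Time `url:"since,unix"`    // Unix timestamp
            Optional string    `url:"opt,omitempty"` // dropped when empty
    }

    func main() {
            opts := SearchOptions{
                    Query:   "foo",
                    ShowAll: true,
                    Tags:    []string{"a", "b"},
                    Pages:   []int{1, 2},
                    Since:   time.Unix(1257894000, 0),
            }
            v, err := query.Values(opts)
            if err != nil {
                    panic(err)
            }
            // Encode sorts keys alphabetically:
            // all=1&page0=1&page1=2&q=foo&since=1257894000&tag=a%2Cb
            fmt.Println(v.Encode())
    }
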
index bea7ed13c6303dc77bba8778866ddc3d0d7d203d..eeccfea9d3bf67c55fa45d8f1f8b2de2e36c675d 100644 (file)
@@ -19,8 +19,8 @@ import (
        urlhelper "github.com/hashicorp/go-getter/helper/url"
 )
 
-// fileChecksum helps verifying the checksum for a file.
-type fileChecksum struct {
+// FileChecksum helps verify the checksum for a file.
+type FileChecksum struct {
        Type     string
        Hash     hash.Hash
        Value    []byte
@@ -50,7 +50,7 @@ func (cerr *ChecksumError) Error() string {
 
 // checksum is a simple method to compute the checksum of a source file
 // and compare it to the given expected value.
-func (c *fileChecksum) checksum(source string) error {
+func (c *FileChecksum) checksum(source string) error {
        f, err := os.Open(source)
        if err != nil {
                return fmt.Errorf("Failed to open file for checksum: %s", err)
@@ -74,7 +74,7 @@ func (c *fileChecksum) checksum(source string) error {
        return nil
 }
 
-// extractChecksum will return a fileChecksum based on the 'checksum'
+// extractChecksum will return a FileChecksum based on the 'checksum'
 // parameter of u.
 // ex:
 //  http://hashicorp.com/terraform?checksum=<checksumValue>
@@ -93,7 +93,7 @@ func (c *fileChecksum) checksum(source string) error {
 //  <checksum> *file2
 //
 // see parseChecksumLine for more detail on checksum file parsing
-func (c *Client) extractChecksum(u *url.URL) (*fileChecksum, error) {
+func (c *Client) extractChecksum(u *url.URL) (*FileChecksum, error) {
        q := u.Query()
        v := q.Get("checksum")
 
@@ -115,14 +115,14 @@ func (c *Client) extractChecksum(u *url.URL) (*fileChecksum, error) {
 
        switch checksumType {
        case "file":
-               return c.checksumFromFile(checksumValue, u)
+               return c.ChecksumFromFile(checksumValue, u)
        default:
                return newChecksumFromType(checksumType, checksumValue, filepath.Base(u.EscapedPath()))
        }
 }
 
-func newChecksum(checksumValue, filename string) (*fileChecksum, error) {
-       c := &fileChecksum{
+func newChecksum(checksumValue, filename string) (*FileChecksum, error) {
+       c := &FileChecksum{
                Filename: filename,
        }
        var err error
@@ -133,7 +133,7 @@ func newChecksum(checksumValue, filename string) (*fileChecksum, error) {
        return c, nil
 }
 
-func newChecksumFromType(checksumType, checksumValue, filename string) (*fileChecksum, error) {
+func newChecksumFromType(checksumType, checksumValue, filename string) (*FileChecksum, error) {
        c, err := newChecksum(checksumValue, filename)
        if err != nil {
                return nil, err
@@ -157,7 +157,7 @@ func newChecksumFromType(checksumType, checksumValue, filename string) (*fileChe
        return c, nil
 }
 
-func newChecksumFromValue(checksumValue, filename string) (*fileChecksum, error) {
+func newChecksumFromValue(checksumValue, filename string) (*FileChecksum, error) {
        c, err := newChecksum(checksumValue, filename)
        if err != nil {
                return nil, err
@@ -183,14 +183,14 @@ func newChecksumFromValue(checksumValue, filename string) (*fileChecksum, error)
        return c, nil
 }
 
-// checksumsFromFile will return all the fileChecksums found in file
+// ChecksumFromFile will return the FileChecksum found in file
 //
-// checksumsFromFile will try to guess the hashing algorithm based on content
+// ChecksumFromFile will try to guess the hashing algorithm based on content
 // of checksum file
 //
-// checksumsFromFile will only return checksums for files that match file
+// ChecksumFromFile will only return checksums for files that match file
 // behind src
-func (c *Client) checksumFromFile(checksumFile string, src *url.URL) (*fileChecksum, error) {
+func (c *Client) ChecksumFromFile(checksumFile string, src *url.URL) (*FileChecksum, error) {
        checksumFileURL, err := urlhelper.Parse(checksumFile)
        if err != nil {
                return nil, err
@@ -286,7 +286,7 @@ func (c *Client) checksumFromFile(checksumFile string, src *url.URL) (*fileCheck
 // of a line.
 // for BSD type sums parseChecksumLine guesses the hashing algorithm
 // by checking the length of the checksum.
-func parseChecksumLine(line string) (*fileChecksum, error) {
+func parseChecksumLine(line string) (*FileChecksum, error) {
        parts := strings.Fields(line)
 
        switch len(parts) {
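
The checksum machinery above is driven entirely by the checksum query parameter on the source URL. A hedged sketch of how a caller exercises it (the URL and digest are placeholders, not real artifacts):

    package main

    import "github.com/hashicorp/go-getter"

    func main() {
            // Verify against an inline digest...
            src := "https://example.com/terraform.zip?checksum=sha256:<64-hex-digest>"
            // ...or against a checksum file, which ChecksumFromFile parses and
            // matches against the name of the file behind src:
            // src := "https://example.com/terraform.zip?checksum=file:https://example.com/SHA256SUMS"
            if err := getter.GetFile("./terraform.zip", src); err != nil {
                    panic(err)
            }
    }
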
index a183a17dfe77dcabca3d707e0163b02d40c55df5..19047eb19792c5e3d7d083e2d6eaadd9cbe23958 100644 (file)
@@ -35,7 +35,7 @@ func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
        var info struct {
                SCM string `json:"scm"`
        }
-       infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path
+       infoUrl := "https://api.bitbucket.org/2.0/repositories" + u.Path
        resp, err := http.Get(infoUrl)
        if err != nil {
                return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
index 679e10ad7591468985f3866ef6f9c93188848edd..bc56559c6328d255a7407e5d0e67330cd3e4bc3c 100644 (file)
@@ -87,6 +87,10 @@ type Client struct {
        // goroutines.
        clientWaitGroup sync.WaitGroup
 
+       // stderrWaitGroup is used to prevent the command's Wait() function from
+       // being called before we've finished reading from the stderr pipe.
+       stderrWaitGroup sync.WaitGroup
+
        // processKilled is used for testing only, to flag when the process was
        // forcefully killed.
        processKilled bool
@@ -590,6 +594,12 @@ func (c *Client) Start() (addr net.Addr, err error) {
        // Create a context for when we kill
        c.doneCtx, c.ctxCancel = context.WithCancel(context.Background())
 
+       // Start goroutine that logs the stderr
+       c.clientWaitGroup.Add(1)
+       c.stderrWaitGroup.Add(1)
+       // logStderr calls Done()
+       go c.logStderr(cmdStderr)
+
        c.clientWaitGroup.Add(1)
        go func() {
                // ensure the context is cancelled when we're done
@@ -602,6 +612,10 @@ func (c *Client) Start() (addr net.Addr, err error) {
                pid := c.process.Pid
                path := cmd.Path
 
+               // wait to finish reading from stderr since the stderr pipe reader
+               // will be closed by the subsequent call to cmd.Wait().
+               c.stderrWaitGroup.Wait()
+
                // Wait for the command to end.
                err := cmd.Wait()
 
@@ -624,11 +638,6 @@ func (c *Client) Start() (addr net.Addr, err error) {
                c.exited = true
        }()
 
-       // Start goroutine that logs the stderr
-       c.clientWaitGroup.Add(1)
-       // logStderr calls Done()
-       go c.logStderr(cmdStderr)
-
        // Start a goroutine that is going to be reading the lines
        // out of stdout
        linesCh := make(chan string)
@@ -936,6 +945,7 @@ var stdErrBufferSize = 64 * 1024
 
 func (c *Client) logStderr(r io.Reader) {
        defer c.clientWaitGroup.Done()
+       defer c.stderrWaitGroup.Done()
        l := c.logger.Named(filepath.Base(c.config.Cmd.Path))
 
        reader := bufio.NewReaderSize(r, stdErrBufferSize)
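
The stderrWaitGroup ordering above works around a standard os/exec pitfall: Wait closes the command's pipes, so a goroutine reading stderr must be fully drained before Wait is called. A minimal standalone illustration using only the standard library:

    package main

    import (
            "bufio"
            "fmt"
            "os/exec"
            "sync"
    )

    func main() {
            cmd := exec.Command("sh", "-c", "echo oops >&2") // assumes a POSIX shell on PATH
            stderr, err := cmd.StderrPipe()
            if err != nil {
                    panic(err)
            }
            if err := cmd.Start(); err != nil {
                    panic(err)
            }

            var wg sync.WaitGroup
            wg.Add(1)
            go func() {
                    defer wg.Done()
                    sc := bufio.NewScanner(stderr)
                    for sc.Scan() {
                            fmt.Println("stderr:", sc.Text())
                    }
            }()

            wg.Wait()      // finish reading first; Wait() closes the pipe
            _ = cmd.Wait() // then reap the process
    }
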
index fc9f05a9fbcb75ea7784dce55470fa193028495b..4c230e3ab4cf1374d51dd1da00db3f9b4285a035 100644 (file)
@@ -363,14 +363,34 @@ func serverListener() (net.Listener, error) {
 }
 
 func serverListener_tcp() (net.Listener, error) {
-       minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32)
-       if err != nil {
-               return nil, err
+       envMinPort := os.Getenv("PLUGIN_MIN_PORT")
+       envMaxPort := os.Getenv("PLUGIN_MAX_PORT")
+
+       var minPort, maxPort int64
+       var err error
+
+       switch {
+       case len(envMinPort) == 0:
+               minPort = 0
+       default:
+               minPort, err = strconv.ParseInt(envMinPort, 10, 32)
+               if err != nil {
+                       return nil, fmt.Errorf("Couldn't get value from PLUGIN_MIN_PORT: %v", err)
+               }
        }
 
-       maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32)
-       if err != nil {
-               return nil, err
+       switch {
+       case len(envMaxPort) == 0:
+               maxPort = 0
+       default:
+               maxPort, err = strconv.ParseInt(envMaxPort, 10, 32)
+               if err != nil {
+                       return nil, fmt.Errorf("Couldn't get value from PLUGIN_MAX_PORT: %v", err)
+               }
+       }
+
+       if minPort > maxPort {
+               return nil, fmt.Errorf("ENV_MIN_PORT value of %d is greater than PLUGIN_MAX_PORT value of %d", minPort, maxPort)
        }
 
        for port := minPort; port <= maxPort; port++ {
index 2b24fdbe87531993ffb750848a80500ce648bcba..f59ce92e94ed1b7da15991bda9b6211e4abadb90 100644 (file)
@@ -95,7 +95,7 @@ schema model provides a description of only one level of nested blocks at
 a time, and thus a new schema must be provided for each additional level of
 nesting.
 
-To make this arduous process as convenient as possbile, this package provides
+To make this arduous process as convenient as possible, this package provides
 a helper function `WalkForEachVariables`, which returns a `WalkVariablesNode`
 instance that can be used to find variables directly in a given body and also
 determine which nested blocks require recursive calls. Using this mechanism
index 26819a2dafcc3113e7cff5ec7176f8edff9eda1a..d3f7a74d399adac5f00f73aca01581535ccd3b67 100644 (file)
@@ -473,8 +473,35 @@ func (e *ConditionalExpr) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostic
        falseResult, falseDiags := e.FalseResult.Value(ctx)
        var diags hcl.Diagnostics
 
-       // Try to find a type that both results can be converted to.
-       resultType, convs := convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()})
+       resultType := cty.DynamicPseudoType
+       convs := make([]convert.Conversion, 2)
+
+       switch {
+       // If either case is a dynamic null value (which would result from a
+       // literal null in the config), we know that it can convert to the expected
+       // type of the opposite case, and we don't need to speculatively reduce the
+       // final result type to DynamicPseudoType.
+
+       // If we know that either Type is a DynamicPseudoType, we can be certain
+       // that the other value can convert since it's a pass-through, and we don't
+       // need to unify the types. If the final evaluation results in the dynamic
+       // value being returned, there's no conversion we can do, so we return the
+       // value directly.
+       case trueResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)):
+               resultType = falseResult.Type()
+               convs[0] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType)
+       case falseResult.RawEquals(cty.NullVal(cty.DynamicPseudoType)):
+               resultType = trueResult.Type()
+               convs[1] = convert.GetConversionUnsafe(cty.DynamicPseudoType, resultType)
+       case trueResult.Type() == cty.DynamicPseudoType, falseResult.Type() == cty.DynamicPseudoType:
+               // the final result type is still unknown;
+               // we don't need to get the conversions, because both are no-ops.
+
+       default:
+               // Try to find a type that both results can be converted to.
+               resultType, convs = convert.UnifyUnsafe([]cty.Type{trueResult.Type(), falseResult.Type()})
+       }
+
        if resultType == cty.NilType {
                return cty.DynamicVal, hcl.Diagnostics{
                        {
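
To see why the null and dynamic special cases matter, consider a conditional whose false arm is a literal null: it now converts to the true arm's type instead of forcing speculative unification. A hedged sketch (import paths assume the hcl2 vintage vendored here; later releases live under github.com/hashicorp/hcl/v2):

    package main

    import (
            "fmt"

            "github.com/hashicorp/hcl2/hcl"
            "github.com/hashicorp/hcl2/hcl/hclsyntax"
            "github.com/zclconf/go-cty/cty"
    )

    func main() {
            expr, diags := hclsyntax.ParseExpression(
                    []byte(`flag ? "on" : null`), "example.hcl", hcl.Pos{Line: 1, Column: 1})
            if diags.HasErrors() {
                    panic(diags.Error())
            }
            ctx := &hcl.EvalContext{Variables: map[string]cty.Value{"flag": cty.False}}
            v, diags := expr.Value(ctx)
            if diags.HasErrors() {
                    panic(diags.Error())
            }
            // The null arm is converted to the true arm's type:
            fmt.Println(v.GoString()) // cty.NullVal(cty.String)
    }
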
index fa79e3d08f7d83741a7a481a6b1b64a7be1bc2f6..ca3dae189f4c60719dd3540615a6939891a7deff 100644 (file)
@@ -89,6 +89,26 @@ func (e *TemplateExpr) StartRange() hcl.Range {
        return e.Parts[0].StartRange()
 }
 
+// IsStringLiteral returns true if and only if the template consists only of
+// a single string literal, as would be created for a simple quoted string like
+// "foo".
+//
+// If this function returns true, then calling Value on the same expression
+// with a nil EvalContext will return the literal value.
+//
+// Note that "${"foo"}", "${1}", etc aren't considered literal values for the
+// purposes of this method, because the intent of this method is to identify
+// situations where the user seems to be explicitly intending literal string
+// interpretation, not situations that result in literals as a technicality
+// of the template expression unwrapping behavior.
+func (e *TemplateExpr) IsStringLiteral() bool {
+       if len(e.Parts) != 1 {
+               return false
+       }
+       _, ok := e.Parts[0].(*LiteralValueExpr)
+       return ok
+}
+
 // TemplateJoinExpr is used to convert tuples of strings produced by template
 // constructs (i.e. for loops) into flat strings, by converting the values
 // to strings and joining them. This AST node is not used directly; it's
index 253ad5031a2550406c89b1be7105a47578577b16..772ebae2bc60e9190466dda2eaaef59c983e39d5 100644 (file)
@@ -853,6 +853,14 @@ Traversal:
                                                SrcRange: rng,
                                        }
                                        ret = makeRelativeTraversal(ret, step, rng)
+                               } else if tmpl, isTmpl := keyExpr.(*TemplateExpr); isTmpl && tmpl.IsStringLiteral() {
+                                       litKey, _ := tmpl.Value(nil)
+                                       rng := hcl.RangeBetween(open.Range, close.Range)
+                                       step := hcl.TraverseIndex{
+                                               Key:      litKey,
+                                               SrcRange: rng,
+                                       }
+                                       ret = makeRelativeTraversal(ret, step, rng)
                                } else {
                                        rng := hcl.RangeBetween(open.Range, close.Range)
                                        ret = &IndexExpr{
index 091c1c23c6f4a7b1e4d16c1ba13308dfc92069bf..d7faeedcef426d0b06c24586912b2316b02b187e 100644 (file)
@@ -187,7 +187,7 @@ for later evaluation by the calling application.
 ### Blocks
 
 A _block_ creates a child body that is annotated with a block _type_ and
-zero or more block _labels_. Blocks create a structural hierachy which can be
+zero or more block _labels_. Blocks create a structural hierarchy which can be
 interpreted by the calling application.
 
 Block labels can either be quoted literal strings or naked identifiers.
index bdc0e983e5eeaef19ec09e03e3b525d9ced3864d..74847c79a5570694bbaf3cf688d16c3e4912659d 100644 (file)
@@ -416,12 +416,14 @@ func (e *expression) Value(ctx *hcl.EvalContext) (cty.Value, hcl.Diagnostics) {
        case *booleanVal:
                return cty.BoolVal(v.Value), nil
        case *arrayVal:
+               var diags hcl.Diagnostics
                vals := []cty.Value{}
                for _, jsonVal := range v.Values {
-                       val, _ := (&expression{src: jsonVal}).Value(ctx)
+                       val, valDiags := (&expression{src: jsonVal}).Value(ctx)
                        vals = append(vals, val)
+                       diags = append(diags, valDiags...)
                }
-               return cty.TupleVal(vals), nil
+               return cty.TupleVal(vals), diags
        case *objectVal:
                var diags hcl.Diagnostics
                attrs := map[string]cty.Value{}
index 8bbaff817eda4df4017db6d502955764525967f5..97ef613182f79470d960a0f353582e90774fd8ff 100644 (file)
@@ -66,7 +66,7 @@ _block header schemata_:
 Within a schema, it is an error to request the same attribute name twice or
 to request a block type whose name is also an attribute name. While this can
 in principle be supported in some syntaxes, in other syntaxes the attribute
-and block namespaces are combined and so an an attribute cannot coexist with
+and block namespaces are combined and so an attribute cannot coexist with
 a block whose type name is identical to the attribute name.
 
 The result of applying a body schema to a body is _body content_, which
@@ -497,7 +497,7 @@ producing an unknown value of the target type.
 
 Conversion of any value _to_ the dynamic pseudo-type is a no-op. The result
 is the input value, verbatim. This is the only situation where the conversion
-result value is not of the the given target type.
+result value is not of the given target type.
 
 ### Primitive Type Conversions
 
index b336f300dd247460e2a0b2c38e88bffaab585644..aab09457d73618726c78b4998cb0dd23b27177a6 100644 (file)
@@ -33,9 +33,9 @@ type Blocks []*Block
 type Attributes map[string]*Attribute
 
 // Body is a container for attributes and blocks. It serves as the primary
-// unit of heirarchical structure within configuration.
+// unit of hierarchical structure within configuration.
 //
-// The content of a body cannot be meaningfully intepreted without a schema,
+// The content of a body cannot be meaningfully interpreted without a schema,
 // so Body represents the raw body content and has methods that allow the
 // content to be extracted in terms of a given schema.
 type Body interface {
index d4a565a5f580c89de007825808009b9cfd4693c3..f69d5fe9b287aa8571a71a36862cdec149e90976 100644 (file)
@@ -36,7 +36,7 @@ func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
                &Diagnostic{
                        Severity: DiagError,
                        Summary:  "Invalid expression",
-                       Detail:   "A static variable reference is required.",
+                       Detail:   "A single static variable reference is required: only attribute access and indexing with constant keys. No calculations, function calls, template expressions, etc are allowed here.",
                        Subject:  expr.Range().Ptr(),
                },
        }
index f20ae23a8a8e8c71eecf9b7da52497e5627babbe..ded7fb4269aeeed8fb38ab764cd0ba7803e174a6 100644 (file)
@@ -54,22 +54,12 @@ func formatIndent(lines []formatLine) {
        // which should be more than enough for reasonable HCL uses.
        indents := make([]int, 0, 10)
 
-       inHeredoc := false
        for i := range lines {
                line := &lines[i]
                if len(line.lead) == 0 {
                        continue
                }
 
-               if inHeredoc {
-                       for _, token := range line.lead {
-                               if token.Type == hclsyntax.TokenCHeredoc {
-                                       inHeredoc = false
-                               }
-                       }
-                       continue // don't touch indentation inside heredocs
-               }
-
                if line.lead[0].Type == hclsyntax.TokenNewline {
                        // Never place spaces before a newline
                        line.lead[0].SpacesBefore = 0
@@ -80,9 +70,10 @@ func formatIndent(lines []formatLine) {
                for _, token := range line.lead {
                        netBrackets += tokenBracketChange(token)
                        if token.Type == hclsyntax.TokenOHeredoc {
-                               inHeredoc = true
+                               break
                        }
                }
+
                for _, token := range line.assign {
                        netBrackets += tokenBracketChange(token)
                }
@@ -391,9 +382,9 @@ func linesForFormat(tokens Tokens) []formatLine {
 
        // Now we'll pick off any trailing comments and attribute assignments
        // to shuffle off into the "comment" and "assign" cells.
-       inHeredoc := false
        for i := range lines {
                line := &lines[i]
+
                if len(line.lead) == 0 {
                        // if the line is empty then there's nothing for us to do
                        // (this should happen only for the final line, because all other
@@ -401,26 +392,6 @@ func linesForFormat(tokens Tokens) []formatLine {
                        continue
                }
 
-               if inHeredoc {
-                       for _, tok := range line.lead {
-                               if tok.Type == hclsyntax.TokenCHeredoc {
-                                       inHeredoc = false
-                                       break
-                               }
-                       }
-                       // Inside a heredoc everything is "lead", even if there's a
-                       // template interpolation embedded in there that might otherwise
-                       // confuse our logic below.
-                       continue
-               }
-
-               for _, tok := range line.lead {
-                       if tok.Type == hclsyntax.TokenOHeredoc {
-                               inHeredoc = true
-                               break
-                       }
-               }
-
                if len(line.lead) > 1 && line.lead[len(line.lead)-1].Type == hclsyntax.TokenComment {
                        line.comment = line.lead[len(line.lead)-1:]
                        line.lead = line.lead[:len(line.lead)-1]
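Note on the two hunks above: the formatter no longer tracks heredocs line-by-line. formatIndent simply stops scanning a line's lead tokens once it sees TokenOHeredoc, and linesForFormat drops its inHeredoc state entirely. A minimal sketch of exercising this path, assuming the exported hclwrite.Format entry point (which runs these helpers) and the hashicorp/hcl2 import path used by this vendor tree:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hclwrite"
)

func main() {
	src := []byte(`value = <<EOT
  heredoc body, expected to survive byte-for-byte
EOT
`)
	// Format retokenizes the input and runs formatIndent/linesForFormat;
	// only indentation outside the heredoc body may change.
	fmt.Printf("%s", hclwrite.Format(src))
}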
diff --git a/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go b/vendor/github.com/hashicorp/terraform/addrs/for_each_attr.go
new file mode 100644 (file)
index 0000000..7a63850
--- /dev/null
@@ -0,0 +1,12 @@
+package addrs
+
+// ForEachAttr is the address of an attribute referencing the current "for_each" object in
+// the interpolation scope, addressed using the "each" keyword, e.g. "each.key" and "each.value".
+type ForEachAttr struct {
+       referenceable
+       Name string
+}
+
+func (f ForEachAttr) String() string {
+       return "each." + f.Name
+}
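The new address type is used like the existing ones (CountAttr and friends); the String round-trip follows directly from the code above:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/addrs"
)

func main() {
	// "key" and "value" are the two attribute names used in practice.
	fmt.Println(addrs.ForEachAttr{Name: "key"}.String())   // each.key
	fmt.Println(addrs.ForEachAttr{Name: "value"}.String()) // each.value
}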
index 84fe8a0d05b60b96166a61d932a328cfbba4f12a..a230d0cd1d8e52045cfaf5b8acee727ef696ad27 100644 (file)
@@ -85,6 +85,14 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
                        Remaining:   remain,
                }, diags
 
+       case "each":
+               name, rng, remain, diags := parseSingleAttrRef(traversal)
+               return &Reference{
+                       Subject:     ForEachAttr{Name: name},
+                       SourceRange: tfdiags.SourceRangeFromHCL(rng),
+                       Remaining:   remain,
+               }, diags
+
        case "data":
                if len(traversal) < 3 {
                        diags = diags.Append(&hcl.Diagnostic{
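With the "each" case in place, a traversal rooted at `each` resolves to the new address type. A sketch, assuming the package's exported ParseRef wrapper over parseRef and the usual hclsyntax traversal parser:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hcl/hclsyntax"
	"github.com/hashicorp/terraform/addrs"
)

func main() {
	traversal, diags := hclsyntax.ParseTraversalAbs(
		[]byte("each.value"), "example.tf", hcl.Pos{Line: 1, Column: 1})
	if diags.HasErrors() {
		panic(diags.Error())
	}

	ref, refDiags := addrs.ParseRef(traversal)
	if refDiags.HasErrors() {
		panic(refDiags.Err())
	}
	fmt.Printf("%#v\n", ref.Subject) // addrs.ForEachAttr{Name:"value"}
}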
index 098653fcf69bb19e82100679bdd20e1c134bfc85..ef129a9289db062fdfa50c53df05e67bf513810e 100644 (file)
@@ -83,6 +83,10 @@ func NewPlan(changes *plans.Changes) *Plan {
                        continue
                }
 
+               if rc.Action == plans.NoOp {
+                       continue
+               }
+
                // For now we'll shim this to work with our old types.
                // TODO: Update for the new plan types, ideally also switching over to
                // a structural diff renderer instead of a flat renderer.
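The new guard drops no-op resource changes before they reach the legacy renderer; a self-contained mirror of that filter:

package main

import "fmt"

type action int

const (
	noOp action = iota
	update
)

type resourceChange struct {
	addr   string
	action action
}

func main() {
	changes := []resourceChange{
		{"aws_instance.a", noOp},
		{"aws_instance.b", update},
	}

	// Mirror of the guard above: no-op changes are skipped before the
	// plan is shimmed into the legacy renderer's types.
	for _, rc := range changes {
		if rc.action == noOp {
			continue
		}
		fmt.Println(rc.addr) // only aws_instance.b is rendered
	}
}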
index f411ef9c61233e281b4d0e2379293eba4f6e98f5..be1ea24de96ecef2417514784909d3043ec63d16 100644 (file)
@@ -75,11 +75,14 @@ func State(opts *StateOpts) string {
                        v := m.OutputValues[k]
                        p.buf.WriteString(fmt.Sprintf("%s = ", k))
                        p.writeValue(v.Value, plans.NoOp, 0)
-                       p.buf.WriteString("\n\n")
+                       p.buf.WriteString("\n")
                }
        }
 
-       return opts.Color.Color(strings.TrimSpace(p.buf.String()))
+       trimmedOutput := strings.TrimSpace(p.buf.String())
+       trimmedOutput += "[reset]"
+
+       return opts.Color.Color(trimmedOutput)
 
 }
 
@@ -95,81 +98,114 @@ func formatStateModule(p blockBodyDiffPrinter, m *states.Module, schemas *terraf
        // Go through each resource and begin building up the output.
        for _, key := range names {
                for k, v := range m.Resources[key].Instances {
+                       // keep these ordered so that the current object is always first,
+                       // and the output for the deposed objects is deterministic
+                       type obj struct {
+                               header   string
+                               instance *states.ResourceInstanceObjectSrc
+                       }
+                       instances := []obj{}
+
                        addr := m.Resources[key].Addr
 
                        taintStr := ""
-                       if v.Current.Status == 'T' {
-                               taintStr = "(tainted)"
+                       if v.Current != nil && v.Current.Status == 'T' {
+                               taintStr = " (tainted)"
                        }
-                       p.buf.WriteString(fmt.Sprintf("# %s: %s\n", addr.Absolute(m.Addr).Instance(k), taintStr))
-
-                       var schema *configschema.Block
-                       provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact()
-                       if _, exists := schemas.Providers[provider]; !exists {
-                               // This should never happen in normal use because we should've
-                               // loaded all of the schemas and checked things prior to this
-                               // point. We can't return errors here, but since this is UI code
-                               // we will try to do _something_ reasonable.
-                               p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider))
-                               continue
+
+                       instances = append(instances,
+                               obj{fmt.Sprintf("# %s:%s\n", addr.Absolute(m.Addr).Instance(k), taintStr), v.Current})
+
+                       for dk, v := range v.Deposed {
+                               instances = append(instances,
+                                       obj{fmt.Sprintf("# %s: (deposed object %s)\n", addr.Absolute(m.Addr).Instance(k), dk), v})
                        }
 
-                       switch addr.Mode {
-                       case addrs.ManagedResourceMode:
-                               schema, _ = schemas.ResourceTypeConfig(
-                                       provider,
-                                       addr.Mode,
-                                       addr.Type,
-                               )
-                               if schema == nil {
-                                       p.buf.WriteString(fmt.Sprintf(
-                                               "# missing schema for provider %q resource type %s\n\n", provider, addr.Type))
+                       // Sort the instances for consistent output.
+                       // The sort starts from the second index, so the current instance
+                       // is always first.
+                       sort.Slice(instances[1:], func(i, j int) bool {
+                               return instances[i+1].header < instances[j+1].header
+                       })
+
+                       for _, obj := range instances {
+                               header := obj.header
+                               instance := obj.instance
+                               p.buf.WriteString(header)
+                               if instance == nil {
+                                       // this shouldn't happen, but there's nothing to do here so
+                                       // don't panic below.
                                        continue
                                }
 
-                               p.buf.WriteString(fmt.Sprintf(
-                                       "resource %q %q {",
-                                       addr.Type,
-                                       addr.Name,
-                               ))
-                       case addrs.DataResourceMode:
-                               schema, _ = schemas.ResourceTypeConfig(
-                                       provider,
-                                       addr.Mode,
-                                       addr.Type,
-                               )
-                               if schema == nil {
-                                       p.buf.WriteString(fmt.Sprintf(
-                                               "# missing schema for provider %q data source %s\n\n", provider, addr.Type))
+                               var schema *configschema.Block
+                               provider := m.Resources[key].ProviderConfig.ProviderConfig.StringCompact()
+                               if _, exists := schemas.Providers[provider]; !exists {
+                                       // This should never happen in normal use because we should've
+                                       // loaded all of the schemas and checked things prior to this
+                                       // point. We can't return errors here, but since this is UI code
+                                       // we will try to do _something_ reasonable.
+                                       p.buf.WriteString(fmt.Sprintf("# missing schema for provider %q\n\n", provider))
                                        continue
                                }
 
-                               p.buf.WriteString(fmt.Sprintf(
-                                       "data %q %q {",
-                                       addr.Type,
-                                       addr.Name,
-                               ))
-                       default:
-                               // should never happen, since the above is exhaustive
-                               p.buf.WriteString(addr.String())
-                       }
+                               switch addr.Mode {
+                               case addrs.ManagedResourceMode:
+                                       schema, _ = schemas.ResourceTypeConfig(
+                                               provider,
+                                               addr.Mode,
+                                               addr.Type,
+                                       )
+                                       if schema == nil {
+                                               p.buf.WriteString(fmt.Sprintf(
+                                                       "# missing schema for provider %q resource type %s\n\n", provider, addr.Type))
+                                               continue
+                                       }
 
-                       val, err := v.Current.Decode(schema.ImpliedType())
-                       if err != nil {
-                               fmt.Println(err.Error())
-                               break
-                       }
+                                       p.buf.WriteString(fmt.Sprintf(
+                                               "resource %q %q {",
+                                               addr.Type,
+                                               addr.Name,
+                                       ))
+                               case addrs.DataResourceMode:
+                                       schema, _ = schemas.ResourceTypeConfig(
+                                               provider,
+                                               addr.Mode,
+                                               addr.Type,
+                                       )
+                                       if schema == nil {
+                                               p.buf.WriteString(fmt.Sprintf(
+                                                       "# missing schema for provider %q data source %s\n\n", provider, addr.Type))
+                                               continue
+                                       }
 
-                       path := make(cty.Path, 0, 3)
-                       bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path)
-                       if bodyWritten {
-                               p.buf.WriteString("\n")
-                       }
+                                       p.buf.WriteString(fmt.Sprintf(
+                                               "data %q %q {",
+                                               addr.Type,
+                                               addr.Name,
+                                       ))
+                               default:
+                                       // should never happen, since the above is exhaustive
+                                       p.buf.WriteString(addr.String())
+                               }
 
-                       p.buf.WriteString("}\n\n")
+                               val, err := instance.Decode(schema.ImpliedType())
+                               if err != nil {
+                                       fmt.Println(err.Error())
+                                       break
+                               }
+
+                               path := make(cty.Path, 0, 3)
+                               bodyWritten := p.writeBlockBodyDiff(schema, val.Value, val.Value, 2, path)
+                               if bodyWritten {
+                                       p.buf.WriteString("\n")
+                               }
+
+                               p.buf.WriteString("}\n\n")
+                       }
                }
        }
-       p.buf.WriteString("[reset]\n")
+       p.buf.WriteString("\n")
 }
 
 func formatNestedList(indent string, outputList []interface{}) string {
@@ -231,7 +267,7 @@ func formatListOutput(indent, outputName string, outputList []interface{}) strin
 
 func formatNestedMap(indent string, outputMap map[string]interface{}) string {
        ks := make([]string, 0, len(outputMap))
-       for k, _ := range outputMap {
+       for k := range outputMap {
                ks = append(ks, k)
        }
        sort.Strings(ks)
@@ -256,7 +292,7 @@ func formatNestedMap(indent string, outputMap map[string]interface{}) string {
 
 func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string {
        ks := make([]string, 0, len(outputMap))
-       for k, _ := range outputMap {
+       for k := range outputMap {
                ks = append(ks, k)
        }
        sort.Strings(ks)
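The reworked loop above renders the current object first and then any deposed objects in a stable order; sorting only the tail of the slice keeps the current instance pinned at index zero. A self-contained sketch of that ordering trick:

package main

import (
	"fmt"
	"sort"
)

func main() {
	type obj struct{ header string }

	// Current object first; deposed objects arrive in nondeterministic
	// map order.
	instances := []obj{
		{"# aws_instance.web:\n"},
		{"# aws_instance.web: (deposed object 00000002)\n"},
		{"# aws_instance.web: (deposed object 00000001)\n"},
	}

	// Sorting only the tail keeps the current instance at index 0; the
	// closure indexes the full slice, offset by one.
	sort.Slice(instances[1:], func(i, j int) bool {
		return instances[i+1].header < instances[j+1].header
	})

	for _, o := range instances {
		fmt.Print(o.header)
	}
}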
index 1772fd7e3639e3813d10c7ed70cfc9b9a6bd8b31..f13a046c6a035d7113e79be5213fd263c187a727 100644 (file)
@@ -252,35 +252,6 @@ func (r *Resource) Id() string {
        }
 }
 
-// ProviderFullName returns the full name of the provider for this resource,
-// which may either be specified explicitly using the "provider" meta-argument
-// or implied by the prefix on the resource type name.
-func (r *Resource) ProviderFullName() string {
-       return ResourceProviderFullName(r.Type, r.Provider)
-}
-
-// ResourceProviderFullName returns the full (dependable) name of the
-// provider for a hypothetical resource with the given resource type and
-// explicit provider string. If the explicit provider string is empty then
-// the provider name is inferred from the resource type name.
-func ResourceProviderFullName(resourceType, explicitProvider string) string {
-       if explicitProvider != "" {
-               // check for an explicit provider name, or return the original
-               parts := strings.SplitAfter(explicitProvider, "provider.")
-               return parts[len(parts)-1]
-       }
-
-       idx := strings.IndexRune(resourceType, '_')
-       if idx == -1 {
-               // If no underscores, the resource name is assumed to be
-               // also the provider name, e.g. if the provider exposes
-               // only a single resource of each type.
-               return resourceType
-       }
-
-       return resourceType[:idx]
-}
-
 // Validate does some basic semantic checking of the configuration.
 func (c *Config) Validate() tfdiags.Diagnostics {
        if c == nil {
index 66a677d5d934d1f1fcc277786be59de1d2d78f61..ce33ab1a4c774f76e6bedc5f876260137fc05902 100644 (file)
@@ -7,6 +7,7 @@ import (
 
        "github.com/hashicorp/hil"
        "github.com/hashicorp/hil/ast"
+       "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/mitchellh/reflectwalk"
 )
 
@@ -160,7 +161,7 @@ func (w *interpolationWalker) Primitive(v reflect.Value) error {
                if w.loc == reflectwalk.SliceElem {
                        switch typedReplaceVal := replaceVal.(type) {
                        case string:
-                               if typedReplaceVal == UnknownVariableValue {
+                               if typedReplaceVal == hcl2shim.UnknownVariableValue {
                                        remove = true
                                }
                        case []interface{}:
@@ -168,7 +169,7 @@ func (w *interpolationWalker) Primitive(v reflect.Value) error {
                                        remove = true
                                }
                        }
-               } else if replaceVal == UnknownVariableValue {
+               } else if replaceVal == hcl2shim.UnknownVariableValue {
                        remove = true
                }
 
@@ -224,7 +225,7 @@ func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
 func hasUnknownValue(variable []interface{}) bool {
        for _, value := range variable {
                if strVal, ok := value.(string); ok {
-                       if strVal == UnknownVariableValue {
+                       if strVal == hcl2shim.UnknownVariableValue {
                                return true
                        }
                }
index 6e3478167f427639d08abf6ec42a9271ddd848c7..612e25b9e6b0350cb94d77486037689f0119bec5 100644 (file)
@@ -135,21 +135,6 @@ func LoadDir(root string) (*Config, error) {
        return result, nil
 }
 
-// IsEmptyDir returns true if the directory given has no Terraform
-// configuration files.
-func IsEmptyDir(root string) (bool, error) {
-       if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
-               return true, nil
-       }
-
-       fs, os, err := dirFiles(root)
-       if err != nil {
-               return false, err
-       }
-
-       return len(fs) == 0 && len(os) == 0, nil
-}
-
 // Ext returns the Terraform configuration extension of the given
 // path, or a blank string if it is not a valid configuration extension.
 func ext(path string) string {
index 8348d4b1953537412c1ac1ac381f1f97c4a1738e..29701b931a9c91b870c01298fb9e826cf97ac30a 100644 (file)
@@ -3,7 +3,9 @@ package module
 import (
        "errors"
        "fmt"
+       "regexp"
        "sort"
+       "strings"
 
        version "github.com/hashicorp/go-version"
        "github.com/hashicorp/terraform/registry/response"
@@ -11,6 +13,8 @@ import (
 
 const anyVersion = ">=0.0.0"
 
+var explicitEqualityConstraint = regexp.MustCompile("^=[0-9]")
+
 // return the newest version that satisfies the provided constraint
 func newest(versions []string, constraint string) (string, error) {
        if constraint == "" {
@@ -21,6 +25,30 @@ func newest(versions []string, constraint string) (string, error) {
                return "", err
        }
 
+       // Find any build metadata in the constraints, and record whether the
+       // constraint is an explicit equality that carries a build metadata
+       // requirement, so that we can return the specific build metadata
+       // version when one is requested
+       var constraintMetas []string
+       var equalsConstraint bool
+       for i := range cs {
+               constraintMeta := strings.SplitAfterN(cs[i].String(), "+", 2)
+               if len(constraintMeta) > 1 {
+                       constraintMetas = append(constraintMetas, constraintMeta[1])
+               }
+       }
+
+       if len(cs) == 1 {
+               equalsConstraint = explicitEqualityConstraint.MatchString(cs.String())
+       }
+
+       // A version string that includes build metadata is valid in go-version;
+       // however, the expected behavior here would be ambiguous, so give an
+       // error and let the user state something more precise
+       if (len(cs) > 1 || !equalsConstraint) && len(constraintMetas) > 0 {
+               return "", fmt.Errorf("Constraints including build metadata must use explicit equality; otherwise the requirement is too ambiguous: %s", cs.String())
+       }
+       }
+
        switch len(versions) {
        case 0:
                return "", errors.New("no versions found")
@@ -58,6 +86,12 @@ func newest(versions []string, constraint string) (string, error) {
                        continue
                }
                if cs.Check(v) {
+                       // Constraint has metadata and is explicit equality
+                       if equalsConstraint && len(constraintMetas) > 0 {
+                               if constraintMetas[0] != v.Metadata() {
+                                       continue
+                               }
+                       }
                        return versions[i], nil
                }
        }
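The metadata handling above splits each constraint on the first "+" and only honours the metadata when the whole constraint is a single explicit equality. A minimal sketch of the two checks, assuming go-version preserves the original constraint text in String():

package main

import (
	"fmt"
	"regexp"
	"strings"

	version "github.com/hashicorp/go-version"
)

var explicitEqualityConstraint = regexp.MustCompile("^=[0-9]")

func main() {
	cs, err := version.NewConstraint("=1.2.0+build.5")
	if err != nil {
		panic(err)
	}

	// Anything after the first "+" is build metadata.
	parts := strings.SplitAfterN(cs.String(), "+", 2)
	fmt.Println(parts[1]) // build.5

	// Only a single "=N..." constraint may carry metadata.
	fmt.Println(len(cs) == 1 && explicitEqualityConstraint.MatchString(cs.String())) // true

	// Version comparison ignores metadata, so the metadata is matched
	// separately against v.Metadata() in the selection loop above.
	v := version.Must(version.NewVersion("1.2.0+build.5"))
	fmt.Println(cs.Check(v), v.Metadata() == parts[1]) // true true
}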
index 7a50782f3a547b1eef6191b7e8794c0d35b9db08..eeddabc32846a0278ef002e2b29342540c945e4f 100644 (file)
@@ -13,48 +13,6 @@ type ProviderVersionConstraint struct {
 // ProviderVersionConstraint, as produced by Config.RequiredProviders.
 type ProviderVersionConstraints map[string]ProviderVersionConstraint
 
-// RequiredProviders returns the ProviderVersionConstraints for this
-// module.
-//
-// This includes both providers that are explicitly requested by provider
-// blocks and those that are used implicitly by instantiating one of their
-// resource types. In the latter case, the returned semver Range will
-// accept any version of the provider.
-func (c *Config) RequiredProviders() ProviderVersionConstraints {
-       ret := make(ProviderVersionConstraints, len(c.ProviderConfigs))
-
-       configs := c.ProviderConfigsByFullName()
-
-       // In order to find the *implied* dependencies (those without explicit
-       // "provider" blocks) we need to walk over all of the resources and
-       // cross-reference with the provider configs.
-       for _, rc := range c.Resources {
-               providerName := rc.ProviderFullName()
-               var providerType string
-
-               // Default to (effectively) no constraint whatsoever, but we might
-               // override if there's an explicit constraint in config.
-               constraint := ">=0.0.0"
-
-               config, ok := configs[providerName]
-               if ok {
-                       if config.Version != "" {
-                               constraint = config.Version
-                       }
-                       providerType = config.Name
-               } else {
-                       providerType = providerName
-               }
-
-               ret[providerName] = ProviderVersionConstraint{
-                       ProviderType: providerType,
-                       Constraint:   constraint,
-               }
-       }
-
-       return ret
-}
-
 // RequiredRanges returns a semver.Range for each distinct provider type in
 // the constraint map. If the same provider type appears more than once
 // (e.g. because aliases are in use) then their respective constraints are
index 1854a8b2068dc86de811b4c4554819dc891eefd0..c5ac86d77fdf65b3a9afe9992672f462e7d05cb6 100644 (file)
@@ -17,12 +17,6 @@ import (
        "github.com/mitchellh/reflectwalk"
 )
 
-// UnknownVariableValue is a sentinel value that can be used
-// to denote that the value of a variable is unknown at this time.
-// RawConfig uses this information to build up data about
-// unknown keys.
-const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
-
 // RawConfig is a structure that holds a piece of configuration
 // where the overall structure is unknown since it will be used
 // to configure a plugin or some other similar external component.
index 948b2c8ffdaf65eda327730920fcd0e1c678af42..1ca1d77e5e4c0d127d7129c2b7b51395442b738d 100644 (file)
@@ -76,6 +76,7 @@ func buildChildModules(parent *Config, walker ModuleWalker) (map[string]*Config,
                }
 
                child.Children, modDiags = buildChildModules(child, walker)
+               diags = append(diags, modDiags...)
 
                ret[call.Name] = child
        }
index 4a3daceee46d487e2c18a8abe20a3986b2113e6e..75c7ef1f4174553b39042e9b6739efb4fb7ccfad 100644 (file)
@@ -20,6 +20,7 @@ import (
 var goGetterDetectors = []getter.Detector{
        new(getter.GitHubDetector),
        new(getter.BitBucketDetector),
+       new(getter.GCSDetector),
        new(getter.S3Detector),
        new(getter.FileDetector),
 }
@@ -44,6 +45,7 @@ var goGetterDecompressors = map[string]getter.Decompressor{
 
 var goGetterGetters = map[string]getter.Getter{
        "file":  new(getter.FileGetter),
+       "gcs":   new(getter.GCSGetter),
        "git":   new(getter.GitGetter),
        "hg":    new(getter.HgGetter),
        "s3":    new(getter.S3Getter),
index 93a94204fceaeec23359be3be852e6d1c7698316..0e6cba93d683e094af0fda682c8c788881aaf585 100644 (file)
@@ -64,7 +64,15 @@ func (l *Loader) moduleWalkerLoad(req *configs.ModuleRequest) (*configs.Module,
                        Subject:  &req.SourceAddrRange,
                })
        }
-       if !req.VersionConstraint.Required.Check(record.Version) {
+       if len(req.VersionConstraint.Required) > 0 && record.Version == nil {
+               diags = append(diags, &hcl.Diagnostic{
+                       Severity: hcl.DiagError,
+                       Summary:  "Module version requirements have changed",
+                       Detail:   "The version requirements have changed since this module was installed and the installed version is no longer acceptable. Run \"terraform init\" to install all modules required by this configuration.",
+                       Subject:  &req.SourceAddrRange,
+               })
+       }
+       if record.Version != nil && !req.VersionConstraint.Required.Check(record.Version) {
                diags = append(diags, &hcl.Diagnostic{
                        Severity: hcl.DiagError,
                        Summary:  "Module version requirements have changed",
index e59f58d8e5fc59622801e706d913f16f54fd4d7f..7996c383abf8f5b2e94091251561408869c1808f 100644 (file)
@@ -113,7 +113,10 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
                                        return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a list")
                                }
                                l := coll.LengthInt()
-                               if l < blockS.MinItems {
+
+                               // Assume that if there are unknowns this could have come from
+                               // a dynamic block, and we can't validate MinItems yet.
+                               if l < blockS.MinItems && coll.IsWhollyKnown() {
                                        return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
                                }
                                if l > blockS.MaxItems && blockS.MaxItems > 0 {
@@ -161,7 +164,10 @@ func (b *Block) coerceValue(in cty.Value, path cty.Path) (cty.Value, error) {
                                        return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("must be a set")
                                }
                                l := coll.LengthInt()
-                               if l < blockS.MinItems {
+
+                               // Assume that if there are unknowns this could have come from
+                               // a dynamic block, and we can't validate MinItems yet.
+                               if l < blockS.MinItems && coll.IsWhollyKnown() {
                                        return cty.UnknownVal(b.ImpliedType()), path.NewErrorf("insufficient items for attribute %q; must have at least %d", typeName, blockS.MinItems)
                                }
                                if l > blockS.MaxItems && blockS.MaxItems > 0 {
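IsWhollyKnown is the cty predicate that makes this deferral safe: a collection's length can be known while one of its elements is not, which is exactly the shape a dynamic block produces during plan. A small sketch:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	known := cty.ListVal([]cty.Value{cty.StringVal("a")})
	partial := cty.ListVal([]cty.Value{cty.UnknownVal(cty.String)})

	fmt.Println(known.IsWhollyKnown(), known.LengthInt())     // true 1
	fmt.Println(partial.IsWhollyKnown(), partial.LengthInt()) // false 1

	// A dynamic block with an unknown iterator shims to a collection like
	// "partial": one unknown element standing in for any eventual count,
	// so failing MinItems here would be premature.
}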
index d8f41eabc7363ae8a65b63bcf8860371f1185d45..e748dd20deb6b2befae3c926b7a4716532ab985c 100644 (file)
@@ -33,6 +33,14 @@ func (b *Block) DecoderSpec() hcldec.Spec {
 
                childSpec := blockS.Block.DecoderSpec()
 
+               // We can only validate 0 or 1 for MinItems, because a dynamic block
+               // may satisfy any number of min items while only having a single
+               // block in the config.
+               minItems := 0
+               if blockS.MinItems > 1 {
+                       minItems = 1
+               }
+
                switch blockS.Nesting {
                case NestingSingle, NestingGroup:
                        ret[name] = &hcldec.BlockSpec{
@@ -57,14 +65,14 @@ func (b *Block) DecoderSpec() hcldec.Spec {
                                ret[name] = &hcldec.BlockTupleSpec{
                                        TypeName: name,
                                        Nested:   childSpec,
-                                       MinItems: blockS.MinItems,
+                                       MinItems: minItems,
                                        MaxItems: blockS.MaxItems,
                                }
                        } else {
                                ret[name] = &hcldec.BlockListSpec{
                                        TypeName: name,
                                        Nested:   childSpec,
-                                       MinItems: blockS.MinItems,
+                                       MinItems: minItems,
                                        MaxItems: blockS.MaxItems,
                                }
                        }
@@ -77,7 +85,7 @@ func (b *Block) DecoderSpec() hcldec.Spec {
                        ret[name] = &hcldec.BlockSetSpec{
                                TypeName: name,
                                Nested:   childSpec,
-                               MinItems: blockS.MinItems,
+                               MinItems: minItems,
                                MaxItems: blockS.MaxItems,
                        }
                case NestingMap:
index 3014cb4b41ec37588166e71563704b5b6166eefb..752d6d9cabf530ea98744587f48877d3264b0e03 100644 (file)
@@ -2,6 +2,7 @@ package configs
 
 import (
        "fmt"
+       "os"
        "path/filepath"
        "strings"
 
@@ -140,3 +141,23 @@ func IsIgnoredFile(name string) bool {
                strings.HasSuffix(name, "~") || // vim
                strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
 }
+
+// IsEmptyDir returns true if the given filesystem path contains no Terraform
+// configuration files.
+//
+// Unlike the methods of the Parser type, this function always consults the
+// real filesystem, and thus it isn't appropriate to use when working with
+// configuration loaded from a plan file.
+func IsEmptyDir(path string) (bool, error) {
+       if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
+               return true, nil
+       }
+
+       p := NewParser(nil)
+       fs, os, err := p.dirFiles(path)
+       if err != nil {
+               return false, err
+       }
+
+       return len(fs) == 0 && len(os) == 0, nil
+}
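This replaces the IsEmptyDir that is removed from the legacy config package earlier in this commit; a hypothetical call site:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/configs"
)

func main() {
	// Hypothetical path; IsEmptyDir always stats the real filesystem.
	empty, err := configs.IsEmptyDir("./modules/network")
	if err != nil {
		panic(err)
	}
	fmt.Println(empty) // true when no .tf or .tf.json files are present
}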
index de1a3434a47e5d9b2cf7d8af1e7f9fe4cef82cca..edf822c1bb630c479d1592e45ea14e59286285cb 100644 (file)
@@ -111,13 +111,15 @@ func decodeResourceBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
 
        if attr, exists := content.Attributes["for_each"]; exists {
                r.ForEach = attr.Expr
-               // We currently parse this, but don't yet do anything with it.
-               diags = append(diags, &hcl.Diagnostic{
-                       Severity: hcl.DiagError,
-                       Summary:  "Reserved argument name in resource block",
-                       Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
-                       Subject:  &attr.NameRange,
-               })
+               // Cannot have count and for_each on the same resource block
+               if r.Count != nil {
+                       diags = append(diags, &hcl.Diagnostic{
+                               Severity: hcl.DiagError,
+                               Summary:  `Invalid combination of "count" and "for_each"`,
+                               Detail:   `The "count" and "for_each" meta-arguments are mutually exclusive; only one may be used to specify how many resources to create.`,
+                               Subject:  &attr.NameRange,
+                       })
+               }
        }
 
        if attr, exists := content.Attributes["provider"]; exists {
@@ -300,13 +302,15 @@ func decodeDataBlock(block *hcl.Block) (*Resource, hcl.Diagnostics) {
 
        if attr, exists := content.Attributes["for_each"]; exists {
                r.ForEach = attr.Expr
-               // We currently parse this, but don't yet do anything with it.
-               diags = append(diags, &hcl.Diagnostic{
-                       Severity: hcl.DiagError,
-                       Summary:  "Reserved argument name in module block",
-                       Detail:   fmt.Sprintf("The name %q is reserved for use in a future version of Terraform.", attr.Name),
-                       Subject:  &attr.NameRange,
-               })
+               // Cannot have count and for_each on the same data block
+               if r.Count != nil {
+                       diags = append(diags, &hcl.Diagnostic{
+                               Severity: hcl.DiagError,
+                               Summary:  `Invalid combination of "count" and "for_each"`,
+                               Detail:   `The "count" and "for_each" meta-arguments are mutually exclusive; only one may be used to specify how many resources to create.`,
+                               Subject:  &attr.NameRange,
+                       })
+               }
        }
 
        if attr, exists := content.Attributes["provider"]; exists {
index 7aa19efc67673a20688e0cff1c7d4b7ebf72a033..e40ce16396955bcb26a7f74705be72a21728dbbc 100644 (file)
@@ -45,6 +45,13 @@ func decodeVersionConstraint(attr *hcl.Attribute) (VersionConstraint, hcl.Diagno
                return ret, diags
        }
 
+       if !val.IsWhollyKnown() {
+               // If there is a syntax error, HCL sets the value of the given attribute
+               // to cty.DynamicVal. A diagnostic for the syntax error will already
+               // bubble up, so we will move forward gracefully here.
+               return ret, diags
+       }
+
        constraintStr := val.AsString()
        constraints, err := version.NewConstraint(constraintStr)
        if err != nil {
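The new guard relies on HCL substituting cty.DynamicVal for an attribute whose expression failed to parse; that placeholder is never wholly known, so decoding bails out quietly while the syntax diagnostic bubbles up. A small sketch:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// The placeholder HCL uses for an unparseable expression is unknown.
	fmt.Println(cty.DynamicVal.IsWhollyKnown()) // false: return early

	// A concrete string passes the guard and reaches version.NewConstraint.
	fmt.Println(cty.StringVal(">= 0.12.0").IsWhollyKnown()) // true
}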
index 510f47f3514b360d739b611a5fce5609cbed65ca..104c8f5f470c9d42b4e13db5ddc76d18f39d8daf 100644 (file)
@@ -2,7 +2,6 @@ package plugin
 
 import (
        "encoding/json"
-       "errors"
        "fmt"
        "log"
        "strconv"
@@ -16,6 +15,7 @@ import (
        "github.com/hashicorp/terraform/configs/configschema"
        "github.com/hashicorp/terraform/helper/schema"
        proto "github.com/hashicorp/terraform/internal/tfplugin5"
+       "github.com/hashicorp/terraform/plans/objchange"
        "github.com/hashicorp/terraform/plugin/convert"
        "github.com/hashicorp/terraform/terraform"
 )
@@ -284,6 +284,17 @@ func (s *GRPCProviderServer) UpgradeResourceState(_ context.Context, req *proto.
                return resp, nil
        }
 
+       // Now we need to make sure blocks are represented correctly, which means
+       // that missing blocks are empty collections, rather than null.
+       // First we need to CoerceValue to ensure that all object types match.
+       val, err = schemaBlock.CoerceValue(val)
+       if err != nil {
+               resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+               return resp, nil
+       }
+       // Normalize the value and fill in any missing blocks.
+       val = objchange.NormalizeObjectFromLegacySDK(val, schemaBlock)
+
        // encode the final state to the expected msgpack format
        newStateMP, err := msgpack.Marshal(val, schemaBlock.ImpliedType())
        if err != nil {
@@ -316,11 +327,15 @@ func (s *GRPCProviderServer) upgradeFlatmapState(version int, m map[string]strin
                requiresMigrate = version < res.StateUpgraders[0].Version
        }
 
-       if requiresMigrate {
-               if res.MigrateState == nil {
-                       return nil, 0, errors.New("cannot upgrade state, missing MigrateState function")
+       if requiresMigrate && res.MigrateState == nil {
+               // Providers were previously allowed to bump the version
+               // without declaring MigrateState.
+               // If there are further upgraders, then we've only updated that far.
+               if len(res.StateUpgraders) > 0 {
+                       schemaType = res.StateUpgraders[0].Type
+                       upgradedVersion = res.StateUpgraders[0].Version
                }
-
+       } else if requiresMigrate {
                is := &terraform.InstanceState{
                        ID:         m["id"],
                        Attributes: m,
@@ -476,7 +491,12 @@ func (s *GRPCProviderServer) Configure(_ context.Context, req *proto.Configure_R
 }
 
 func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadResource_Request) (*proto.ReadResource_Response, error) {
-       resp := &proto.ReadResource_Response{}
+       resp := &proto.ReadResource_Response{
+               // helper/schema did not previously handle private data during
+               // refresh, but core now expects it to be maintained in order to
+               // persist it in the state.
+               Private: req.Private,
+       }
 
        res := s.provider.ResourcesMap[req.TypeName]
        schemaBlock := s.getResourceSchemaBlock(req.TypeName)
@@ -493,6 +513,15 @@ func (s *GRPCProviderServer) ReadResource(_ context.Context, req *proto.ReadReso
                return resp, nil
        }
 
+       private := make(map[string]interface{})
+       if len(req.Private) > 0 {
+               if err := json.Unmarshal(req.Private, &private); err != nil {
+                       resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+                       return resp, nil
+               }
+       }
+       instanceState.Meta = private
+
        newInstanceState, err := res.RefreshWithoutUpgrade(instanceState, s.provider.Meta())
        if err != nil {
                resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
@@ -569,6 +598,7 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl
        // We don't usually plan destroys, but this can return early in any case.
        if proposedNewStateVal.IsNull() {
                resp.PlannedState = req.ProposedNewState
+               resp.PlannedPrivate = req.PriorPrivate
                return resp, nil
        }
 
@@ -623,6 +653,7 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl
                // description that _shows_ there are no changes. This is always the
                // prior state, because we force a diff above if this is a new instance.
                resp.PlannedState = req.PriorState
+               resp.PlannedPrivate = req.PriorPrivate
                return resp, nil
        }
 
@@ -683,6 +714,18 @@ func (s *GRPCProviderServer) PlanResourceChange(_ context.Context, req *proto.Pl
                Msgpack: plannedMP,
        }
 
+       // encode any timeouts into the diff Meta
+       t := &schema.ResourceTimeout{}
+       if err := t.ConfigDecode(res, cfg); err != nil {
+               resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+               return resp, nil
+       }
+
+       if err := t.DiffEncode(diff); err != nil {
+               resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
+               return resp, nil
+       }
+
        // Now we need to store any NewExtra values, which are where any actual
        // StateFunc modified config fields are hidden.
        privateMap := diff.Meta
@@ -929,6 +972,9 @@ func (s *GRPCProviderServer) ImportResourceState(_ context.Context, req *proto.I
                        return resp, nil
                }
 
+               // Normalize the value and fill in any missing blocks.
+               newStateVal = objchange.NormalizeObjectFromLegacySDK(newStateVal, schemaBlock)
+
                newStateMP, err := msgpack.Marshal(newStateVal, schemaBlock.ImpliedType())
                if err != nil {
                        resp.Diagnostics = convert.AppendProtoDiag(resp.Diagnostics, err)
@@ -1160,6 +1206,8 @@ func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value {
                }
        }
 
+       // check the invariants that we need below, to ensure we are working with
+       // non-null and known values.
        if src.IsNull() || !src.IsKnown() || !dst.IsKnown() {
                return dst
        }
@@ -1278,8 +1326,12 @@ func normalizeNullValues(dst, src cty.Value, apply bool) cty.Value {
                        return cty.ListVal(dsts)
                }
 
-       case ty.IsPrimitiveType():
-               if dst.IsNull() && src.IsWhollyKnown() && apply {
+       case ty == cty.String:
+               // The legacy SDK should not be able to remove a value during plan or
+               // apply; however, we only overwrite the destination if the source was
+               // an empty string, since that is what is most often equated with unset
+               // and lost in the diff process.
+               if dst.IsNull() && src.AsString() == "" {
                        return src
                }
        }
@@ -1305,11 +1357,19 @@ func validateConfigNulls(v cty.Value, path cty.Path) []*proto.Diagnostic {
                for it.Next() {
                        kv, ev := it.Element()
                        if ev.IsNull() {
+                               // if this is a set, the kv is also going to be null, which
+                               // isn't a valid path element, so we can't append it to the
+                               // diagnostic.
+                               p := path
+                               if !kv.IsNull() {
+                                       p = append(p, cty.IndexStep{Key: kv})
+                               }
+
                                diags = append(diags, &proto.Diagnostic{
                                        Severity:  proto.Diagnostic_ERROR,
                                        Summary:   "Null value found in list",
                                        Detail:    "Null values are not allowed for this attribute value.",
-                                       Attribute: convert.PathToAttributePath(append(path, cty.IndexStep{Key: kv})),
+                                       Attribute: convert.PathToAttributePath(p),
                                })
                                continue
                        }
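Several of the hunks above thread the instance's private data (encoded timeouts, schema-version bookkeeping) through calls that previously dropped it; it travels as a JSON-encoded map. A standalone sketch of the round-trip, with a made-up payload:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical private payload as core would hand it to ReadResource.
	prior := []byte(`{"schema_version":"2"}`)

	var meta map[string]interface{}
	if len(prior) > 0 {
		if err := json.Unmarshal(prior, &meta); err != nil {
			panic(err)
		}
	}

	// The request bytes are also echoed back on the response, so the data
	// survives in state even when the provider never touches it.
	respPrivate := prior
	fmt.Println(meta["schema_version"], string(respPrivate)) // 2 {"schema_version":"2"}
}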
index b2aff99d10605fd7a8d8486e2796dfd51c60b5d5..f4882075dd5a164c7fcc0f1d003bdc68c31b3d23 100644 (file)
@@ -1,6 +1,7 @@
 package resource
 
 import (
+       "encoding/json"
        "fmt"
 
        "github.com/hashicorp/terraform/addrs"
@@ -52,43 +53,57 @@ func shimNewState(newState *states.State, providers map[string]terraform.Resourc
                        resource := getResource(providers, providerType, res.Addr)
 
                        for key, i := range res.Instances {
-                               flatmap, err := shimmedAttributes(i.Current, resource)
-                               if err != nil {
-                                       return nil, fmt.Errorf("error decoding state for %q: %s", resType, err)
+                               resState := &terraform.ResourceState{
+                                       Type:     resType,
+                                       Provider: res.ProviderConfig.String(),
                                }
 
-                               resState := &terraform.ResourceState{
-                                       Type: resType,
-                                       Primary: &terraform.InstanceState{
+                               // We should always have a Current instance here, but be safe about checking.
+                               if i.Current != nil {
+                                       flatmap, err := shimmedAttributes(i.Current, resource)
+                                       if err != nil {
+                                               return nil, fmt.Errorf("error decoding state for %q: %s", resType, err)
+                                       }
+
+                                       var meta map[string]interface{}
+                                       if i.Current.Private != nil {
+                                               err := json.Unmarshal(i.Current.Private, &meta)
+                                               if err != nil {
+                                                       return nil, err
+                                               }
+                                       }
+
+                                       resState.Primary = &terraform.InstanceState{
                                                ID:         flatmap["id"],
                                                Attributes: flatmap,
                                                Tainted:    i.Current.Status == states.ObjectTainted,
-                                       },
-                                       Provider: res.ProviderConfig.String(),
-                               }
-                               if i.Current.SchemaVersion != 0 {
-                                       resState.Primary.Meta = map[string]interface{}{
-                                               "schema_version": i.Current.SchemaVersion,
+                                               Meta:       meta,
                                        }
-                               }
 
-                               for _, dep := range i.Current.Dependencies {
-                                       resState.Dependencies = append(resState.Dependencies, dep.String())
-                               }
+                                       if i.Current.SchemaVersion != 0 {
+                                               resState.Primary.Meta = map[string]interface{}{
+                                                       "schema_version": i.Current.SchemaVersion,
+                                               }
+                                       }
 
-                               // convert the indexes to the old style flapmap indexes
-                               idx := ""
-                               switch key.(type) {
-                               case addrs.IntKey:
-                                       // don't add numeric index values to resources with a count of 0
-                                       if len(res.Instances) > 1 {
-                                               idx = fmt.Sprintf(".%d", key)
+                                       for _, dep := range i.Current.Dependencies {
+                                               resState.Dependencies = append(resState.Dependencies, dep.String())
                                        }
-                               case addrs.StringKey:
-                                       idx = "." + key.String()
-                               }
 
-                               mod.Resources[res.Addr.String()+idx] = resState
+                                       // convert the indexes to the old style flatmap indexes
+                                       idx := ""
+                                       switch key.(type) {
+                                       case addrs.IntKey:
+                                               // don't add numeric index values to resources with a count of 0
+                                               if len(res.Instances) > 1 {
+                                                       idx = fmt.Sprintf(".%d", key)
+                                               }
+                                       case addrs.StringKey:
+                                               idx = "." + key.String()
+                                       }
+
+                                       mod.Resources[res.Addr.String()+idx] = resState
+                               }
 
                                // add any deposed instances
                                for _, dep := range i.Deposed {
@@ -97,10 +112,19 @@ func shimNewState(newState *states.State, providers map[string]terraform.Resourc
                                                return nil, fmt.Errorf("error decoding deposed state for %q: %s", resType, err)
                                        }
 
+                                       var meta map[string]interface{}
+                                       if dep.Private != nil {
+                                               err := json.Unmarshal(dep.Private, &meta)
+                                               if err != nil {
+                                                       return nil, err
+                                               }
+                                       }
+
                                        deposed := &terraform.InstanceState{
                                                ID:         flatmap["id"],
                                                Attributes: flatmap,
                                                Tainted:    dep.Status == states.ObjectTainted,
+                                               Meta:       meta,
                                        }
                                        if dep.SchemaVersion != 0 {
                                                deposed.Meta = map[string]interface{}{
index 311fdb6ef5b64ab06b6e3af47c1a45e9da8b8b28..f34e17a2e1cad53650ef52d2914eda7f3ebeeba4 100644 (file)
@@ -10,7 +10,6 @@ import (
        "strings"
 
        "github.com/hashicorp/terraform/addrs"
-       "github.com/hashicorp/terraform/config"
        "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/hashicorp/terraform/states"
 
@@ -341,7 +340,7 @@ func legacyDiffComparisonString(changes *plans.Changes) string {
                                v := newAttrs[attrK]
                                u := oldAttrs[attrK]
 
-                               if v == config.UnknownVariableValue {
+                               if v == hcl2shim.UnknownVariableValue {
                                        v = "<computed>"
                                }
                                // NOTE: we don't support <sensitive> here because we would
index 808375ceb7eb4c08285024edc54eee2e7f67443b..6ad3f13cb96d1d75b1210634e671abed9bee5a61 100644 (file)
@@ -219,6 +219,9 @@ func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult,
                        v, _ := r.Config.Get(key)
                        result[ik] = v
                }
+       case nil:
+               // the map may have been empty in the configuration, so we leave the
+               // result empty as well
        default:
                panic(fmt.Sprintf("unknown type: %#v", mraw))
        }
index ae35b4a87619c5e2faebdb5b2992bc580fd4876b..3e70acf0b0a9d838d6ae2e24e2f533442d2ed538 100644 (file)
@@ -95,7 +95,9 @@ func (r *DiffFieldReader) readMap(
                return FieldReadResult{}, err
        }
        if source.Exists {
-               result = source.Value.(map[string]interface{})
+               // readMap may return a nil value, or an unknown value placeholder in
+               // some cases, which would make a single-value type assertion panic
+               result, _ = source.Value.(map[string]interface{})
                resultSet = true
        }
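The comma-ok form is what prevents the panic: a single-value assertion on a value whose dynamic type isn't map[string]interface{} (for example the unknown-value placeholder string) would panic, while the two-value form simply leaves the result nil:

package main

import "fmt"

func main() {
	// The sentinel from config/hcl2shim stands in for unknown values in
	// shimmed state, so readMap can legitimately surface a string here.
	var src interface{} = "74D93920-ED26-11E3-AC10-0800200C9A66"

	// result := src.(map[string]interface{}) // single-value form: panics
	result, ok := src.(map[string]interface{})
	fmt.Println(result == nil, ok) // true false: result safely stays nil
}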
 
index b5e30657455565959838a3891d17f1bbdf9b4b89..b59e4e82e7a1a3576b9c836c9e74b6e63aca3eb7 100644 (file)
@@ -95,9 +95,10 @@ type Resource struct {
        //
        // Exists is a function that is called to check if a resource still
        // exists. If this returns false, then this will affect the diff
-       // accordingly. If this function isn't set, it will not be called. It
-       // is highly recommended to set it. The *ResourceData passed to Exists
-       // should _not_ be modified.
+       // accordingly. If this function isn't set, it will not be called. You
+       // can also signal non-existence from the Read method by calling d.SetId("")
+       // if the Resource is no longer present and should be removed from state.
+       // The *ResourceData passed to Exists should _not_ be modified.
        Create CreateFunc
        Read   ReadFunc
        Update UpdateFunc
@@ -329,21 +330,13 @@ func (r *Resource) simpleDiff(
        c *terraform.ResourceConfig,
        meta interface{}) (*terraform.InstanceDiff, error) {
 
-       t := &ResourceTimeout{}
-       err := t.ConfigDecode(r, c)
-
-       if err != nil {
-               return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
-       }
-
        instanceDiff, err := schemaMap(r.Schema).Diff(s, c, r.CustomizeDiff, meta, false)
        if err != nil {
                return instanceDiff, err
        }
 
        if instanceDiff == nil {
-               log.Printf("[DEBUG] Instance Diff is nil in SimpleDiff()")
-               return nil, err
+               instanceDiff = terraform.NewInstanceDiff()
        }
 
        // Make sure the old value is set in each of the instance diffs.
@@ -357,10 +350,7 @@ func (r *Resource) simpleDiff(
                }
        }
 
-       if err := t.DiffEncode(instanceDiff); err != nil {
-               log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
-       }
-       return instanceDiff, err
+       return instanceDiff, nil
 }
 
 // Validate validates the resource configuration against the schema.
index 9e422c1a6f075aea1e717d9030bdb6bb0bd78493..222b2cc910fbb1965d1b43baa6a4898f2ffc2320 100644 (file)
@@ -5,7 +5,7 @@ import (
        "log"
        "time"
 
-       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/hashicorp/terraform/terraform"
        "github.com/mitchellh/copystructure"
 )
@@ -70,7 +70,7 @@ func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig)
                case []map[string]interface{}:
                        rawTimeouts = raw
                case string:
-                       if raw == config.UnknownVariableValue {
+                       if raw == hcl2shim.UnknownVariableValue {
                                // Timeout is not defined in the config
                                // Defaults will be used instead
                                return nil
index 6a3c15a6465bb7d19176a15ce6b52b084c6a5da7..bcc8e4ba3457e6c71eb4adb04f87fdde1941e1fd 100644 (file)
@@ -22,7 +22,7 @@ import (
        "strings"
        "sync"
 
-       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/hashicorp/terraform/terraform"
        "github.com/mitchellh/copystructure"
        "github.com/mitchellh/mapstructure"
@@ -1365,10 +1365,15 @@ func (m schemaMap) validate(
                        "%q: this field cannot be set", k)}
        }
 
-       if raw == config.UnknownVariableValue {
-               // If the value is unknown then we can't validate it yet.
-               // In particular, this avoids spurious type errors where downstream
-               // validation code sees UnknownVariableValue as being just a string.
+       // If the value is unknown then we can't validate it yet.
+       // In particular, this avoids spurious type errors where downstream
+       // validation code sees UnknownVariableValue as being just a string.
+       // The SDK has to allow the unknown value through initially, so that
+       // Required fields set via an interpolated value are accepted.
+       if !isWhollyKnown(raw) {
+               if schema.Deprecated != "" {
+                       return []string{fmt.Sprintf("%q: [DEPRECATED] %s", k, schema.Deprecated)}, nil
+               }
                return nil, nil
        }
 
@@ -1380,6 +1385,28 @@ func (m schemaMap) validate(
        return m.validateType(k, raw, schema, c)
 }
 
+// isWhollyKnown returns false if the argument contains an UnknownVariableValue
+func isWhollyKnown(raw interface{}) bool {
+       switch raw := raw.(type) {
+       case string:
+               if raw == hcl2shim.UnknownVariableValue {
+                       return false
+               }
+       case []interface{}:
+               for _, v := range raw {
+                       if !isWhollyKnown(v) {
+                               return false
+                       }
+               }
+       case map[string]interface{}:
+               for _, v := range raw {
+                       if !isWhollyKnown(v) {
+                               return false
+                       }
+               }
+       }
+       return true
+}
+
 func (m schemaMap) validateConflictingAttributes(
        k string,
        schema *Schema,
@@ -1391,7 +1418,7 @@ func (m schemaMap) validateConflictingAttributes(
 
        for _, conflictingKey := range schema.ConflictsWith {
                if raw, ok := c.Get(conflictingKey); ok {
-                       if raw == config.UnknownVariableValue {
+                       if raw == hcl2shim.UnknownVariableValue {
                                // An unknown value might become unset (null) once known, so
                                // we must defer validation until it's known.
                                continue
@@ -1411,11 +1438,16 @@ func (m schemaMap) validateList(
        c *terraform.ResourceConfig) ([]string, []error) {
        // first check if the list is wholly unknown
        if s, ok := raw.(string); ok {
-               if s == config.UnknownVariableValue {
+               if s == hcl2shim.UnknownVariableValue {
                        return nil, nil
                }
        }
 
+       // schemaMap can't validate nil
+       if raw == nil {
+               return nil, nil
+       }
+
        // We use reflection to verify the slice because you can't
        // cast to []interface{} unless the slice is exactly that type.
        rawV := reflect.ValueOf(raw)
@@ -1432,6 +1464,15 @@ func (m schemaMap) validateList(
                        "%s: should be a list", k)}
        }
 
+       // We can't validate list length if this came from a dynamic block.
+       // Since there's no way to determine if something was from a dynamic block
+       // at this point, we're going to skip validation in the new protocol if
+       // there are any unknowns. Validate will eventually be called again once
+       // all values are known.
+       if isProto5() && !isWhollyKnown(raw) {
+               return nil, nil
+       }
+
        // Validate length
        if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems {
                return nil, []error{fmt.Errorf(
@@ -1489,11 +1530,15 @@ func (m schemaMap) validateMap(
        c *terraform.ResourceConfig) ([]string, []error) {
        // first check if the map is wholly unknown
        if s, ok := raw.(string); ok {
-               if s == config.UnknownVariableValue {
+               if s == hcl2shim.UnknownVariableValue {
                        return nil, nil
                }
        }
 
+       // schemaMap can't validate nil
+       if raw == nil {
+               return nil, nil
+       }
        // We use reflection to verify the slice because you can't
        // cast to []interface{} unless the slice is exactly that type.
        rawV := reflect.ValueOf(raw)
@@ -1620,6 +1665,12 @@ func (m schemaMap) validateObject(
        schema map[string]*Schema,
        c *terraform.ResourceConfig) ([]string, []error) {
        raw, _ := c.Get(k)
+
+       // schemaMap can't validate nil
+       if raw == nil {
+               return nil, nil
+       }
+
        if _, ok := raw.(map[string]interface{}); !ok && !c.IsComputed(k) {
                return nil, []error{fmt.Errorf(
                        "%s: expected object, got %s",
@@ -1664,6 +1715,14 @@ func (m schemaMap) validatePrimitive(
        raw interface{},
        schema *Schema,
        c *terraform.ResourceConfig) ([]string, []error) {
+
+       // a nil value shouldn't happen in the old protocol, and in the new
+       // protocol the types have already been validated. Either way, we can't
+       // reflect on nil, so don't panic.
+       if raw == nil {
+               return nil, nil
+       }
+
        // Catch if the user gave a complex type where a primitive was
        // expected, so we can return a friendly error message that
        // doesn't contain Go type system terminology.
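
Editor's note: taken together, the validate changes above do two things: they defer validation whenever any part of a value is still unknown (via isWhollyKnown), and they guard every reflection entry point against nil so validate never panics. A standalone sketch of the recursive walk follows, with the sentinel UUID inlined purely for illustration (the real code compares against hcl2shim.UnknownVariableValue):

    package main

    import "fmt"

    // Inlined here only for illustration; in Terraform this constant lives in
    // the config/hcl2shim package.
    const unknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"

    // isWhollyKnown walks strings, lists, and maps; any sentinel anywhere in
    // the structure makes the whole value "not wholly known".
    func isWhollyKnown(raw interface{}) bool {
    	switch raw := raw.(type) {
    	case string:
    		if raw == unknownVariableValue {
    			return false
    		}
    	case []interface{}:
    		for _, v := range raw {
    			if !isWhollyKnown(v) {
    				return false
    			}
    		}
    	case map[string]interface{}:
    		for _, v := range raw {
    			if !isWhollyKnown(v) {
    				return false
    			}
    		}
    	}
    	return true
    }

    func main() {
    	nested := map[string]interface{}{
    		"ports": []interface{}{"80", unknownVariableValue},
    	}
    	fmt.Println(isWhollyKnown(nested)) // false: one element is still unknown
    	fmt.Println(isWhollyKnown("80"))   // true
    }
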
index 203d01704f4f47503986df282d496271e1376c49..988573e274a88f74314363eb2b992f69d0487ca3 100644 (file)
@@ -6,7 +6,7 @@ import (
        "github.com/zclconf/go-cty/cty"
        ctyjson "github.com/zclconf/go-cty/cty/json"
 
-       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/hashicorp/terraform/configs/configschema"
        "github.com/hashicorp/terraform/terraform"
 )
@@ -50,7 +50,7 @@ func removeConfigUnknowns(cfg map[string]interface{}) {
        for k, v := range cfg {
                switch v := v.(type) {
                case string:
-                       if v == config.UnknownVariableValue {
+                       if v == hcl2shim.UnknownVariableValue {
                                delete(cfg, k)
                        }
                case []interface{}:
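
Editor's note: removeConfigUnknowns strips sentinel-valued keys out of a shimmed config map before it reaches code that cannot represent unknowns. A minimal imitation of the string branch shown above (the real function also recurses into nested lists and maps):

    package main

    import (
    	"fmt"

    	"github.com/hashicorp/terraform/config/hcl2shim"
    )

    // removeUnknowns mirrors the deletion branch in the hunk above: string
    // values equal to the sentinel are dropped from the config map in place.
    func removeUnknowns(cfg map[string]interface{}) {
    	for k, v := range cfg {
    		if s, ok := v.(string); ok && s == hcl2shim.UnknownVariableValue {
    			delete(cfg, k)
    		}
    	}
    }

    func main() {
    	cfg := map[string]interface{}{
    		"name": "web",
    		"ami":  hcl2shim.UnknownVariableValue,
    	}
    	removeUnknowns(cfg)
    	fmt.Println(cfg) // map[name:web]
    }
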
index 50e2572afdacf1606f51366430f6d478a4e75088..2f306be73bc8dd0c641f4b1e60527a4a1c71a387 100644 (file)
@@ -22,6 +22,7 @@ import (
 var goGetterDetectors = []getter.Detector{
        new(getter.GitHubDetector),
        new(getter.BitBucketDetector),
+       new(getter.GCSDetector),
        new(getter.S3Detector),
        new(getter.FileDetector),
 }
@@ -46,6 +47,7 @@ var goGetterDecompressors = map[string]getter.Decompressor{
 
 var goGetterGetters = map[string]getter.Getter{
        "file":  new(getter.FileGetter),
+       "gcs":   new(getter.GCSGetter),
        "git":   new(getter.GitGetter),
        "hg":    new(getter.HgGetter),
        "s3":    new(getter.S3Getter),
index 87a6bec75c4cb6cea143624590b860af7072093a..b2bdf8888edb96963bd9c77e724b53a203d8f1e6 100644 (file)
@@ -3,13 +3,12 @@
 
 package tfplugin5
 
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
 import (
+       fmt "fmt"
+       proto "github.com/golang/protobuf/proto"
        context "golang.org/x/net/context"
        grpc "google.golang.org/grpc"
+       math "math"
 )
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -36,6 +35,7 @@ var Diagnostic_Severity_name = map[int32]string{
        1: "ERROR",
        2: "WARNING",
 }
+
 var Diagnostic_Severity_value = map[string]int32{
        "INVALID": 0,
        "ERROR":   1,
@@ -45,8 +45,9 @@ var Diagnostic_Severity_value = map[string]int32{
 func (x Diagnostic_Severity) String() string {
        return proto.EnumName(Diagnostic_Severity_name, int32(x))
 }
+
 func (Diagnostic_Severity) EnumDescriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1, 0}
+       return fileDescriptor_17ae6090ff270234, []int{1, 0}
 }
 
 type Schema_NestedBlock_NestingMode int32
@@ -68,6 +69,7 @@ var Schema_NestedBlock_NestingMode_name = map[int32]string{
        4: "MAP",
        5: "GROUP",
 }
+
 var Schema_NestedBlock_NestingMode_value = map[string]int32{
        "INVALID": 0,
        "SINGLE":  1,
@@ -80,8 +82,9 @@ var Schema_NestedBlock_NestingMode_value = map[string]int32{
 func (x Schema_NestedBlock_NestingMode) String() string {
        return proto.EnumName(Schema_NestedBlock_NestingMode_name, int32(x))
 }
+
 func (Schema_NestedBlock_NestingMode) EnumDescriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2, 0}
+       return fileDescriptor_17ae6090ff270234, []int{5, 2, 0}
 }
 
 // DynamicValue is an opaque encoding of terraform data, with the field name
@@ -98,16 +101,17 @@ func (m *DynamicValue) Reset()         { *m = DynamicValue{} }
 func (m *DynamicValue) String() string { return proto.CompactTextString(m) }
 func (*DynamicValue) ProtoMessage()    {}
 func (*DynamicValue) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{0}
+       return fileDescriptor_17ae6090ff270234, []int{0}
 }
+
 func (m *DynamicValue) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_DynamicValue.Unmarshal(m, b)
 }
 func (m *DynamicValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_DynamicValue.Marshal(b, m, deterministic)
 }
-func (dst *DynamicValue) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_DynamicValue.Merge(dst, src)
+func (m *DynamicValue) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_DynamicValue.Merge(m, src)
 }
 func (m *DynamicValue) XXX_Size() int {
        return xxx_messageInfo_DynamicValue.Size(m)
@@ -146,16 +150,17 @@ func (m *Diagnostic) Reset()         { *m = Diagnostic{} }
 func (m *Diagnostic) String() string { return proto.CompactTextString(m) }
 func (*Diagnostic) ProtoMessage()    {}
 func (*Diagnostic) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{1}
+       return fileDescriptor_17ae6090ff270234, []int{1}
 }
+
 func (m *Diagnostic) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Diagnostic.Unmarshal(m, b)
 }
 func (m *Diagnostic) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Diagnostic.Marshal(b, m, deterministic)
 }
-func (dst *Diagnostic) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Diagnostic.Merge(dst, src)
+func (m *Diagnostic) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Diagnostic.Merge(m, src)
 }
 func (m *Diagnostic) XXX_Size() int {
        return xxx_messageInfo_Diagnostic.Size(m)
@@ -205,16 +210,17 @@ func (m *AttributePath) Reset()         { *m = AttributePath{} }
 func (m *AttributePath) String() string { return proto.CompactTextString(m) }
 func (*AttributePath) ProtoMessage()    {}
 func (*AttributePath) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2}
+       return fileDescriptor_17ae6090ff270234, []int{2}
 }
+
 func (m *AttributePath) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_AttributePath.Unmarshal(m, b)
 }
 func (m *AttributePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_AttributePath.Marshal(b, m, deterministic)
 }
-func (dst *AttributePath) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_AttributePath.Merge(dst, src)
+func (m *AttributePath) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_AttributePath.Merge(m, src)
 }
 func (m *AttributePath) XXX_Size() int {
        return xxx_messageInfo_AttributePath.Size(m)
@@ -247,16 +253,17 @@ func (m *AttributePath_Step) Reset()         { *m = AttributePath_Step{} }
 func (m *AttributePath_Step) String() string { return proto.CompactTextString(m) }
 func (*AttributePath_Step) ProtoMessage()    {}
 func (*AttributePath_Step) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{2, 0}
+       return fileDescriptor_17ae6090ff270234, []int{2, 0}
 }
+
 func (m *AttributePath_Step) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_AttributePath_Step.Unmarshal(m, b)
 }
 func (m *AttributePath_Step) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_AttributePath_Step.Marshal(b, m, deterministic)
 }
-func (dst *AttributePath_Step) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_AttributePath_Step.Merge(dst, src)
+func (m *AttributePath_Step) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_AttributePath_Step.Merge(m, src)
 }
 func (m *AttributePath_Step) XXX_Size() int {
        return xxx_messageInfo_AttributePath_Step.Size(m)
@@ -407,16 +414,17 @@ func (m *Stop) Reset()         { *m = Stop{} }
 func (m *Stop) String() string { return proto.CompactTextString(m) }
 func (*Stop) ProtoMessage()    {}
 func (*Stop) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3}
+       return fileDescriptor_17ae6090ff270234, []int{3}
 }
+
 func (m *Stop) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Stop.Unmarshal(m, b)
 }
 func (m *Stop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Stop.Marshal(b, m, deterministic)
 }
-func (dst *Stop) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Stop.Merge(dst, src)
+func (m *Stop) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Stop.Merge(m, src)
 }
 func (m *Stop) XXX_Size() int {
        return xxx_messageInfo_Stop.Size(m)
@@ -437,16 +445,17 @@ func (m *Stop_Request) Reset()         { *m = Stop_Request{} }
 func (m *Stop_Request) String() string { return proto.CompactTextString(m) }
 func (*Stop_Request) ProtoMessage()    {}
 func (*Stop_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 0}
+       return fileDescriptor_17ae6090ff270234, []int{3, 0}
 }
+
 func (m *Stop_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Stop_Request.Unmarshal(m, b)
 }
 func (m *Stop_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Stop_Request.Marshal(b, m, deterministic)
 }
-func (dst *Stop_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Stop_Request.Merge(dst, src)
+func (m *Stop_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Stop_Request.Merge(m, src)
 }
 func (m *Stop_Request) XXX_Size() int {
        return xxx_messageInfo_Stop_Request.Size(m)
@@ -468,16 +477,17 @@ func (m *Stop_Response) Reset()         { *m = Stop_Response{} }
 func (m *Stop_Response) String() string { return proto.CompactTextString(m) }
 func (*Stop_Response) ProtoMessage()    {}
 func (*Stop_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{3, 1}
+       return fileDescriptor_17ae6090ff270234, []int{3, 1}
 }
+
 func (m *Stop_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Stop_Response.Unmarshal(m, b)
 }
 func (m *Stop_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Stop_Response.Marshal(b, m, deterministic)
 }
-func (dst *Stop_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Stop_Response.Merge(dst, src)
+func (m *Stop_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Stop_Response.Merge(m, src)
 }
 func (m *Stop_Response) XXX_Size() int {
        return xxx_messageInfo_Stop_Response.Size(m)
@@ -510,16 +520,17 @@ func (m *RawState) Reset()         { *m = RawState{} }
 func (m *RawState) String() string { return proto.CompactTextString(m) }
 func (*RawState) ProtoMessage()    {}
 func (*RawState) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{4}
+       return fileDescriptor_17ae6090ff270234, []int{4}
 }
+
 func (m *RawState) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_RawState.Unmarshal(m, b)
 }
 func (m *RawState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_RawState.Marshal(b, m, deterministic)
 }
-func (dst *RawState) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_RawState.Merge(dst, src)
+func (m *RawState) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_RawState.Merge(m, src)
 }
 func (m *RawState) XXX_Size() int {
        return xxx_messageInfo_RawState.Size(m)
@@ -561,16 +572,17 @@ func (m *Schema) Reset()         { *m = Schema{} }
 func (m *Schema) String() string { return proto.CompactTextString(m) }
 func (*Schema) ProtoMessage()    {}
 func (*Schema) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5}
+       return fileDescriptor_17ae6090ff270234, []int{5}
 }
+
 func (m *Schema) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Schema.Unmarshal(m, b)
 }
 func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Schema.Marshal(b, m, deterministic)
 }
-func (dst *Schema) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Schema.Merge(dst, src)
+func (m *Schema) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Schema.Merge(m, src)
 }
 func (m *Schema) XXX_Size() int {
        return xxx_messageInfo_Schema.Size(m)
@@ -608,16 +620,17 @@ func (m *Schema_Block) Reset()         { *m = Schema_Block{} }
 func (m *Schema_Block) String() string { return proto.CompactTextString(m) }
 func (*Schema_Block) ProtoMessage()    {}
 func (*Schema_Block) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 0}
+       return fileDescriptor_17ae6090ff270234, []int{5, 0}
 }
+
 func (m *Schema_Block) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Schema_Block.Unmarshal(m, b)
 }
 func (m *Schema_Block) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Schema_Block.Marshal(b, m, deterministic)
 }
-func (dst *Schema_Block) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Schema_Block.Merge(dst, src)
+func (m *Schema_Block) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Schema_Block.Merge(m, src)
 }
 func (m *Schema_Block) XXX_Size() int {
        return xxx_messageInfo_Schema_Block.Size(m)
@@ -666,16 +679,17 @@ func (m *Schema_Attribute) Reset()         { *m = Schema_Attribute{} }
 func (m *Schema_Attribute) String() string { return proto.CompactTextString(m) }
 func (*Schema_Attribute) ProtoMessage()    {}
 func (*Schema_Attribute) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 1}
+       return fileDescriptor_17ae6090ff270234, []int{5, 1}
 }
+
 func (m *Schema_Attribute) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Schema_Attribute.Unmarshal(m, b)
 }
 func (m *Schema_Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Schema_Attribute.Marshal(b, m, deterministic)
 }
-func (dst *Schema_Attribute) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Schema_Attribute.Merge(dst, src)
+func (m *Schema_Attribute) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Schema_Attribute.Merge(m, src)
 }
 func (m *Schema_Attribute) XXX_Size() int {
        return xxx_messageInfo_Schema_Attribute.Size(m)
@@ -750,16 +764,17 @@ func (m *Schema_NestedBlock) Reset()         { *m = Schema_NestedBlock{} }
 func (m *Schema_NestedBlock) String() string { return proto.CompactTextString(m) }
 func (*Schema_NestedBlock) ProtoMessage()    {}
 func (*Schema_NestedBlock) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{5, 2}
+       return fileDescriptor_17ae6090ff270234, []int{5, 2}
 }
+
 func (m *Schema_NestedBlock) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Schema_NestedBlock.Unmarshal(m, b)
 }
 func (m *Schema_NestedBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Schema_NestedBlock.Marshal(b, m, deterministic)
 }
-func (dst *Schema_NestedBlock) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Schema_NestedBlock.Merge(dst, src)
+func (m *Schema_NestedBlock) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Schema_NestedBlock.Merge(m, src)
 }
 func (m *Schema_NestedBlock) XXX_Size() int {
        return xxx_messageInfo_Schema_NestedBlock.Size(m)
@@ -815,16 +830,17 @@ func (m *GetProviderSchema) Reset()         { *m = GetProviderSchema{} }
 func (m *GetProviderSchema) String() string { return proto.CompactTextString(m) }
 func (*GetProviderSchema) ProtoMessage()    {}
 func (*GetProviderSchema) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6}
+       return fileDescriptor_17ae6090ff270234, []int{6}
 }
+
 func (m *GetProviderSchema) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_GetProviderSchema.Unmarshal(m, b)
 }
 func (m *GetProviderSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_GetProviderSchema.Marshal(b, m, deterministic)
 }
-func (dst *GetProviderSchema) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_GetProviderSchema.Merge(dst, src)
+func (m *GetProviderSchema) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_GetProviderSchema.Merge(m, src)
 }
 func (m *GetProviderSchema) XXX_Size() int {
        return xxx_messageInfo_GetProviderSchema.Size(m)
@@ -845,16 +861,17 @@ func (m *GetProviderSchema_Request) Reset()         { *m = GetProviderSchema_Req
 func (m *GetProviderSchema_Request) String() string { return proto.CompactTextString(m) }
 func (*GetProviderSchema_Request) ProtoMessage()    {}
 func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 0}
+       return fileDescriptor_17ae6090ff270234, []int{6, 0}
 }
+
 func (m *GetProviderSchema_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_GetProviderSchema_Request.Unmarshal(m, b)
 }
 func (m *GetProviderSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_GetProviderSchema_Request.Marshal(b, m, deterministic)
 }
-func (dst *GetProviderSchema_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_GetProviderSchema_Request.Merge(dst, src)
+func (m *GetProviderSchema_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_GetProviderSchema_Request.Merge(m, src)
 }
 func (m *GetProviderSchema_Request) XXX_Size() int {
        return xxx_messageInfo_GetProviderSchema_Request.Size(m)
@@ -879,16 +896,17 @@ func (m *GetProviderSchema_Response) Reset()         { *m = GetProviderSchema_Re
 func (m *GetProviderSchema_Response) String() string { return proto.CompactTextString(m) }
 func (*GetProviderSchema_Response) ProtoMessage()    {}
 func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{6, 1}
+       return fileDescriptor_17ae6090ff270234, []int{6, 1}
 }
+
 func (m *GetProviderSchema_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_GetProviderSchema_Response.Unmarshal(m, b)
 }
 func (m *GetProviderSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_GetProviderSchema_Response.Marshal(b, m, deterministic)
 }
-func (dst *GetProviderSchema_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_GetProviderSchema_Response.Merge(dst, src)
+func (m *GetProviderSchema_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_GetProviderSchema_Response.Merge(m, src)
 }
 func (m *GetProviderSchema_Response) XXX_Size() int {
        return xxx_messageInfo_GetProviderSchema_Response.Size(m)
@@ -937,16 +955,17 @@ func (m *PrepareProviderConfig) Reset()         { *m = PrepareProviderConfig{} }
 func (m *PrepareProviderConfig) String() string { return proto.CompactTextString(m) }
 func (*PrepareProviderConfig) ProtoMessage()    {}
 func (*PrepareProviderConfig) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7}
+       return fileDescriptor_17ae6090ff270234, []int{7}
 }
+
 func (m *PrepareProviderConfig) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_PrepareProviderConfig.Unmarshal(m, b)
 }
 func (m *PrepareProviderConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_PrepareProviderConfig.Marshal(b, m, deterministic)
 }
-func (dst *PrepareProviderConfig) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_PrepareProviderConfig.Merge(dst, src)
+func (m *PrepareProviderConfig) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_PrepareProviderConfig.Merge(m, src)
 }
 func (m *PrepareProviderConfig) XXX_Size() int {
        return xxx_messageInfo_PrepareProviderConfig.Size(m)
@@ -968,16 +987,17 @@ func (m *PrepareProviderConfig_Request) Reset()         { *m = PrepareProviderCo
 func (m *PrepareProviderConfig_Request) String() string { return proto.CompactTextString(m) }
 func (*PrepareProviderConfig_Request) ProtoMessage()    {}
 func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 0}
+       return fileDescriptor_17ae6090ff270234, []int{7, 0}
 }
+
 func (m *PrepareProviderConfig_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_PrepareProviderConfig_Request.Unmarshal(m, b)
 }
 func (m *PrepareProviderConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_PrepareProviderConfig_Request.Marshal(b, m, deterministic)
 }
-func (dst *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_PrepareProviderConfig_Request.Merge(dst, src)
+func (m *PrepareProviderConfig_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_PrepareProviderConfig_Request.Merge(m, src)
 }
 func (m *PrepareProviderConfig_Request) XXX_Size() int {
        return xxx_messageInfo_PrepareProviderConfig_Request.Size(m)
@@ -1007,16 +1027,17 @@ func (m *PrepareProviderConfig_Response) Reset()         { *m = PrepareProviderC
 func (m *PrepareProviderConfig_Response) String() string { return proto.CompactTextString(m) }
 func (*PrepareProviderConfig_Response) ProtoMessage()    {}
 func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{7, 1}
+       return fileDescriptor_17ae6090ff270234, []int{7, 1}
 }
+
 func (m *PrepareProviderConfig_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_PrepareProviderConfig_Response.Unmarshal(m, b)
 }
 func (m *PrepareProviderConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_PrepareProviderConfig_Response.Marshal(b, m, deterministic)
 }
-func (dst *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_PrepareProviderConfig_Response.Merge(dst, src)
+func (m *PrepareProviderConfig_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_PrepareProviderConfig_Response.Merge(m, src)
 }
 func (m *PrepareProviderConfig_Response) XXX_Size() int {
        return xxx_messageInfo_PrepareProviderConfig_Response.Size(m)
@@ -1051,16 +1072,17 @@ func (m *UpgradeResourceState) Reset()         { *m = UpgradeResourceState{} }
 func (m *UpgradeResourceState) String() string { return proto.CompactTextString(m) }
 func (*UpgradeResourceState) ProtoMessage()    {}
 func (*UpgradeResourceState) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8}
+       return fileDescriptor_17ae6090ff270234, []int{8}
 }
+
 func (m *UpgradeResourceState) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_UpgradeResourceState.Unmarshal(m, b)
 }
 func (m *UpgradeResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_UpgradeResourceState.Marshal(b, m, deterministic)
 }
-func (dst *UpgradeResourceState) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_UpgradeResourceState.Merge(dst, src)
+func (m *UpgradeResourceState) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_UpgradeResourceState.Merge(m, src)
 }
 func (m *UpgradeResourceState) XXX_Size() int {
        return xxx_messageInfo_UpgradeResourceState.Size(m)
@@ -1090,16 +1112,17 @@ func (m *UpgradeResourceState_Request) Reset()         { *m = UpgradeResourceSta
 func (m *UpgradeResourceState_Request) String() string { return proto.CompactTextString(m) }
 func (*UpgradeResourceState_Request) ProtoMessage()    {}
 func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 0}
+       return fileDescriptor_17ae6090ff270234, []int{8, 0}
 }
+
 func (m *UpgradeResourceState_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_UpgradeResourceState_Request.Unmarshal(m, b)
 }
 func (m *UpgradeResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_UpgradeResourceState_Request.Marshal(b, m, deterministic)
 }
-func (dst *UpgradeResourceState_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_UpgradeResourceState_Request.Merge(dst, src)
+func (m *UpgradeResourceState_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_UpgradeResourceState_Request.Merge(m, src)
 }
 func (m *UpgradeResourceState_Request) XXX_Size() int {
        return xxx_messageInfo_UpgradeResourceState_Request.Size(m)
@@ -1149,16 +1172,17 @@ func (m *UpgradeResourceState_Response) Reset()         { *m = UpgradeResourceSt
 func (m *UpgradeResourceState_Response) String() string { return proto.CompactTextString(m) }
 func (*UpgradeResourceState_Response) ProtoMessage()    {}
 func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{8, 1}
+       return fileDescriptor_17ae6090ff270234, []int{8, 1}
 }
+
 func (m *UpgradeResourceState_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_UpgradeResourceState_Response.Unmarshal(m, b)
 }
 func (m *UpgradeResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_UpgradeResourceState_Response.Marshal(b, m, deterministic)
 }
-func (dst *UpgradeResourceState_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_UpgradeResourceState_Response.Merge(dst, src)
+func (m *UpgradeResourceState_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_UpgradeResourceState_Response.Merge(m, src)
 }
 func (m *UpgradeResourceState_Response) XXX_Size() int {
        return xxx_messageInfo_UpgradeResourceState_Response.Size(m)
@@ -1193,16 +1217,17 @@ func (m *ValidateResourceTypeConfig) Reset()         { *m = ValidateResourceType
 func (m *ValidateResourceTypeConfig) String() string { return proto.CompactTextString(m) }
 func (*ValidateResourceTypeConfig) ProtoMessage()    {}
 func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9}
+       return fileDescriptor_17ae6090ff270234, []int{9}
 }
+
 func (m *ValidateResourceTypeConfig) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ValidateResourceTypeConfig.Unmarshal(m, b)
 }
 func (m *ValidateResourceTypeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ValidateResourceTypeConfig.Marshal(b, m, deterministic)
 }
-func (dst *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ValidateResourceTypeConfig.Merge(dst, src)
+func (m *ValidateResourceTypeConfig) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ValidateResourceTypeConfig.Merge(m, src)
 }
 func (m *ValidateResourceTypeConfig) XXX_Size() int {
        return xxx_messageInfo_ValidateResourceTypeConfig.Size(m)
@@ -1225,16 +1250,17 @@ func (m *ValidateResourceTypeConfig_Request) Reset()         { *m = ValidateReso
 func (m *ValidateResourceTypeConfig_Request) String() string { return proto.CompactTextString(m) }
 func (*ValidateResourceTypeConfig_Request) ProtoMessage()    {}
 func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 0}
+       return fileDescriptor_17ae6090ff270234, []int{9, 0}
 }
+
 func (m *ValidateResourceTypeConfig_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ValidateResourceTypeConfig_Request.Unmarshal(m, b)
 }
 func (m *ValidateResourceTypeConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ValidateResourceTypeConfig_Request.Marshal(b, m, deterministic)
 }
-func (dst *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(dst, src)
+func (m *ValidateResourceTypeConfig_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ValidateResourceTypeConfig_Request.Merge(m, src)
 }
 func (m *ValidateResourceTypeConfig_Request) XXX_Size() int {
        return xxx_messageInfo_ValidateResourceTypeConfig_Request.Size(m)
@@ -1270,16 +1296,17 @@ func (m *ValidateResourceTypeConfig_Response) Reset()         { *m = ValidateRes
 func (m *ValidateResourceTypeConfig_Response) String() string { return proto.CompactTextString(m) }
 func (*ValidateResourceTypeConfig_Response) ProtoMessage()    {}
 func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{9, 1}
+       return fileDescriptor_17ae6090ff270234, []int{9, 1}
 }
+
 func (m *ValidateResourceTypeConfig_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ValidateResourceTypeConfig_Response.Unmarshal(m, b)
 }
 func (m *ValidateResourceTypeConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ValidateResourceTypeConfig_Response.Marshal(b, m, deterministic)
 }
-func (dst *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(dst, src)
+func (m *ValidateResourceTypeConfig_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ValidateResourceTypeConfig_Response.Merge(m, src)
 }
 func (m *ValidateResourceTypeConfig_Response) XXX_Size() int {
        return xxx_messageInfo_ValidateResourceTypeConfig_Response.Size(m)
@@ -1307,16 +1334,17 @@ func (m *ValidateDataSourceConfig) Reset()         { *m = ValidateDataSourceConf
 func (m *ValidateDataSourceConfig) String() string { return proto.CompactTextString(m) }
 func (*ValidateDataSourceConfig) ProtoMessage()    {}
 func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10}
+       return fileDescriptor_17ae6090ff270234, []int{10}
 }
+
 func (m *ValidateDataSourceConfig) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ValidateDataSourceConfig.Unmarshal(m, b)
 }
 func (m *ValidateDataSourceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ValidateDataSourceConfig.Marshal(b, m, deterministic)
 }
-func (dst *ValidateDataSourceConfig) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ValidateDataSourceConfig.Merge(dst, src)
+func (m *ValidateDataSourceConfig) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ValidateDataSourceConfig.Merge(m, src)
 }
 func (m *ValidateDataSourceConfig) XXX_Size() int {
        return xxx_messageInfo_ValidateDataSourceConfig.Size(m)
@@ -1339,16 +1367,17 @@ func (m *ValidateDataSourceConfig_Request) Reset()         { *m = ValidateDataSo
 func (m *ValidateDataSourceConfig_Request) String() string { return proto.CompactTextString(m) }
 func (*ValidateDataSourceConfig_Request) ProtoMessage()    {}
 func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 0}
+       return fileDescriptor_17ae6090ff270234, []int{10, 0}
 }
+
 func (m *ValidateDataSourceConfig_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ValidateDataSourceConfig_Request.Unmarshal(m, b)
 }
 func (m *ValidateDataSourceConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ValidateDataSourceConfig_Request.Marshal(b, m, deterministic)
 }
-func (dst *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(dst, src)
+func (m *ValidateDataSourceConfig_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ValidateDataSourceConfig_Request.Merge(m, src)
 }
 func (m *ValidateDataSourceConfig_Request) XXX_Size() int {
        return xxx_messageInfo_ValidateDataSourceConfig_Request.Size(m)
@@ -1384,16 +1413,17 @@ func (m *ValidateDataSourceConfig_Response) Reset()         { *m = ValidateDataS
 func (m *ValidateDataSourceConfig_Response) String() string { return proto.CompactTextString(m) }
 func (*ValidateDataSourceConfig_Response) ProtoMessage()    {}
 func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{10, 1}
+       return fileDescriptor_17ae6090ff270234, []int{10, 1}
 }
+
 func (m *ValidateDataSourceConfig_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ValidateDataSourceConfig_Response.Unmarshal(m, b)
 }
 func (m *ValidateDataSourceConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ValidateDataSourceConfig_Response.Marshal(b, m, deterministic)
 }
-func (dst *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(dst, src)
+func (m *ValidateDataSourceConfig_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ValidateDataSourceConfig_Response.Merge(m, src)
 }
 func (m *ValidateDataSourceConfig_Response) XXX_Size() int {
        return xxx_messageInfo_ValidateDataSourceConfig_Response.Size(m)
@@ -1421,16 +1451,17 @@ func (m *Configure) Reset()         { *m = Configure{} }
 func (m *Configure) String() string { return proto.CompactTextString(m) }
 func (*Configure) ProtoMessage()    {}
 func (*Configure) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11}
+       return fileDescriptor_17ae6090ff270234, []int{11}
 }
+
 func (m *Configure) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Configure.Unmarshal(m, b)
 }
 func (m *Configure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Configure.Marshal(b, m, deterministic)
 }
-func (dst *Configure) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Configure.Merge(dst, src)
+func (m *Configure) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Configure.Merge(m, src)
 }
 func (m *Configure) XXX_Size() int {
        return xxx_messageInfo_Configure.Size(m)
@@ -1453,16 +1484,17 @@ func (m *Configure_Request) Reset()         { *m = Configure_Request{} }
 func (m *Configure_Request) String() string { return proto.CompactTextString(m) }
 func (*Configure_Request) ProtoMessage()    {}
 func (*Configure_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 0}
+       return fileDescriptor_17ae6090ff270234, []int{11, 0}
 }
+
 func (m *Configure_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Configure_Request.Unmarshal(m, b)
 }
 func (m *Configure_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Configure_Request.Marshal(b, m, deterministic)
 }
-func (dst *Configure_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Configure_Request.Merge(dst, src)
+func (m *Configure_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Configure_Request.Merge(m, src)
 }
 func (m *Configure_Request) XXX_Size() int {
        return xxx_messageInfo_Configure_Request.Size(m)
@@ -1498,16 +1530,17 @@ func (m *Configure_Response) Reset()         { *m = Configure_Response{} }
 func (m *Configure_Response) String() string { return proto.CompactTextString(m) }
 func (*Configure_Response) ProtoMessage()    {}
 func (*Configure_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{11, 1}
+       return fileDescriptor_17ae6090ff270234, []int{11, 1}
 }
+
 func (m *Configure_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_Configure_Response.Unmarshal(m, b)
 }
 func (m *Configure_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_Configure_Response.Marshal(b, m, deterministic)
 }
-func (dst *Configure_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_Configure_Response.Merge(dst, src)
+func (m *Configure_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_Configure_Response.Merge(m, src)
 }
 func (m *Configure_Response) XXX_Size() int {
        return xxx_messageInfo_Configure_Response.Size(m)
@@ -1535,16 +1568,17 @@ func (m *ReadResource) Reset()         { *m = ReadResource{} }
 func (m *ReadResource) String() string { return proto.CompactTextString(m) }
 func (*ReadResource) ProtoMessage()    {}
 func (*ReadResource) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12}
+       return fileDescriptor_17ae6090ff270234, []int{12}
 }
+
 func (m *ReadResource) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ReadResource.Unmarshal(m, b)
 }
 func (m *ReadResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ReadResource.Marshal(b, m, deterministic)
 }
-func (dst *ReadResource) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ReadResource.Merge(dst, src)
+func (m *ReadResource) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ReadResource.Merge(m, src)
 }
 func (m *ReadResource) XXX_Size() int {
        return xxx_messageInfo_ReadResource.Size(m)
@@ -1558,6 +1592,7 @@ var xxx_messageInfo_ReadResource proto.InternalMessageInfo
 type ReadResource_Request struct {
        TypeName             string        `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
        CurrentState         *DynamicValue `protobuf:"bytes,2,opt,name=current_state,json=currentState,proto3" json:"current_state,omitempty"`
+       Private              []byte        `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
        XXX_NoUnkeyedLiteral struct{}      `json:"-"`
        XXX_unrecognized     []byte        `json:"-"`
        XXX_sizecache        int32         `json:"-"`
@@ -1567,16 +1602,17 @@ func (m *ReadResource_Request) Reset()         { *m = ReadResource_Request{} }
 func (m *ReadResource_Request) String() string { return proto.CompactTextString(m) }
 func (*ReadResource_Request) ProtoMessage()    {}
 func (*ReadResource_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 0}
+       return fileDescriptor_17ae6090ff270234, []int{12, 0}
 }
+
 func (m *ReadResource_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ReadResource_Request.Unmarshal(m, b)
 }
 func (m *ReadResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ReadResource_Request.Marshal(b, m, deterministic)
 }
-func (dst *ReadResource_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ReadResource_Request.Merge(dst, src)
+func (m *ReadResource_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ReadResource_Request.Merge(m, src)
 }
 func (m *ReadResource_Request) XXX_Size() int {
        return xxx_messageInfo_ReadResource_Request.Size(m)
@@ -1601,9 +1637,17 @@ func (m *ReadResource_Request) GetCurrentState() *DynamicValue {
        return nil
 }
 
+func (m *ReadResource_Request) GetPrivate() []byte {
+       if m != nil {
+               return m.Private
+       }
+       return nil
+}
+
 type ReadResource_Response struct {
        NewState             *DynamicValue `protobuf:"bytes,1,opt,name=new_state,json=newState,proto3" json:"new_state,omitempty"`
        Diagnostics          []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"`
+       Private              []byte        `protobuf:"bytes,3,opt,name=private,proto3" json:"private,omitempty"`
        XXX_NoUnkeyedLiteral struct{}      `json:"-"`
        XXX_unrecognized     []byte        `json:"-"`
        XXX_sizecache        int32         `json:"-"`
@@ -1613,16 +1657,17 @@ func (m *ReadResource_Response) Reset()         { *m = ReadResource_Response{} }
 func (m *ReadResource_Response) String() string { return proto.CompactTextString(m) }
 func (*ReadResource_Response) ProtoMessage()    {}
 func (*ReadResource_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{12, 1}
+       return fileDescriptor_17ae6090ff270234, []int{12, 1}
 }
+
 func (m *ReadResource_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ReadResource_Response.Unmarshal(m, b)
 }
 func (m *ReadResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ReadResource_Response.Marshal(b, m, deterministic)
 }
-func (dst *ReadResource_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ReadResource_Response.Merge(dst, src)
+func (m *ReadResource_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ReadResource_Response.Merge(m, src)
 }
 func (m *ReadResource_Response) XXX_Size() int {
        return xxx_messageInfo_ReadResource_Response.Size(m)
@@ -1647,6 +1692,13 @@ func (m *ReadResource_Response) GetDiagnostics() []*Diagnostic {
        return nil
 }
 
+func (m *ReadResource_Response) GetPrivate() []byte {
+       if m != nil {
+               return m.Private
+       }
+       return nil
+}
+
 type PlanResourceChange struct {
        XXX_NoUnkeyedLiteral struct{} `json:"-"`
        XXX_unrecognized     []byte   `json:"-"`
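
Editor's note: beyond the mechanical regeneration changes in this file (merge receivers renamed from dst to m, a new fileDescriptor name), the hunks above add a Private []byte field to ReadResource's request and response, letting providers round-trip opaque private state through a read. The generated accessors are nil-receiver safe, which is why callers can chain them without nil checks; a standalone imitation of that idiom:

    package main

    import "fmt"

    // readResponse imitates the shape of the generated ReadResource_Response;
    // it is not the real tfplugin5 type.
    type readResponse struct {
    	private []byte
    }

    // GetPrivate mirrors the generated accessor: safe to call on a nil receiver.
    func (m *readResponse) GetPrivate() []byte {
    	if m != nil {
    		return m.private
    	}
    	return nil
    }

    func main() {
    	var resp *readResponse                // nil, e.g. an RPC with no response body
    	fmt.Println(resp.GetPrivate() == nil) // true, and no panic
    }
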
@@ -1657,16 +1709,17 @@ func (m *PlanResourceChange) Reset()         { *m = PlanResourceChange{} }
 func (m *PlanResourceChange) String() string { return proto.CompactTextString(m) }
 func (*PlanResourceChange) ProtoMessage()    {}
 func (*PlanResourceChange) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13}
+       return fileDescriptor_17ae6090ff270234, []int{13}
 }
+
 func (m *PlanResourceChange) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_PlanResourceChange.Unmarshal(m, b)
 }
 func (m *PlanResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_PlanResourceChange.Marshal(b, m, deterministic)
 }
-func (dst *PlanResourceChange) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_PlanResourceChange.Merge(dst, src)
+func (m *PlanResourceChange) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_PlanResourceChange.Merge(m, src)
 }
 func (m *PlanResourceChange) XXX_Size() int {
        return xxx_messageInfo_PlanResourceChange.Size(m)
@@ -1692,16 +1745,17 @@ func (m *PlanResourceChange_Request) Reset()         { *m = PlanResourceChange_R
 func (m *PlanResourceChange_Request) String() string { return proto.CompactTextString(m) }
 func (*PlanResourceChange_Request) ProtoMessage()    {}
 func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 0}
+       return fileDescriptor_17ae6090ff270234, []int{13, 0}
 }
+
 func (m *PlanResourceChange_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_PlanResourceChange_Request.Unmarshal(m, b)
 }
 func (m *PlanResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_PlanResourceChange_Request.Marshal(b, m, deterministic)
 }
-func (dst *PlanResourceChange_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_PlanResourceChange_Request.Merge(dst, src)
+func (m *PlanResourceChange_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_PlanResourceChange_Request.Merge(m, src)
 }
 func (m *PlanResourceChange_Request) XXX_Size() int {
        return xxx_messageInfo_PlanResourceChange_Request.Size(m)
@@ -1773,16 +1827,17 @@ func (m *PlanResourceChange_Response) Reset()         { *m = PlanResourceChange_
 func (m *PlanResourceChange_Response) String() string { return proto.CompactTextString(m) }
 func (*PlanResourceChange_Response) ProtoMessage()    {}
 func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{13, 1}
+       return fileDescriptor_17ae6090ff270234, []int{13, 1}
 }
+
 func (m *PlanResourceChange_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_PlanResourceChange_Response.Unmarshal(m, b)
 }
 func (m *PlanResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_PlanResourceChange_Response.Marshal(b, m, deterministic)
 }
-func (dst *PlanResourceChange_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_PlanResourceChange_Response.Merge(dst, src)
+func (m *PlanResourceChange_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_PlanResourceChange_Response.Merge(m, src)
 }
 func (m *PlanResourceChange_Response) XXX_Size() int {
        return xxx_messageInfo_PlanResourceChange_Response.Size(m)
@@ -1838,16 +1893,17 @@ func (m *ApplyResourceChange) Reset()         { *m = ApplyResourceChange{} }
 func (m *ApplyResourceChange) String() string { return proto.CompactTextString(m) }
 func (*ApplyResourceChange) ProtoMessage()    {}
 func (*ApplyResourceChange) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14}
+       return fileDescriptor_17ae6090ff270234, []int{14}
 }
+
 func (m *ApplyResourceChange) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ApplyResourceChange.Unmarshal(m, b)
 }
 func (m *ApplyResourceChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ApplyResourceChange.Marshal(b, m, deterministic)
 }
-func (dst *ApplyResourceChange) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ApplyResourceChange.Merge(dst, src)
+func (m *ApplyResourceChange) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ApplyResourceChange.Merge(m, src)
 }
 func (m *ApplyResourceChange) XXX_Size() int {
        return xxx_messageInfo_ApplyResourceChange.Size(m)
@@ -1873,16 +1929,17 @@ func (m *ApplyResourceChange_Request) Reset()         { *m = ApplyResourceChange
 func (m *ApplyResourceChange_Request) String() string { return proto.CompactTextString(m) }
 func (*ApplyResourceChange_Request) ProtoMessage()    {}
 func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 0}
+       return fileDescriptor_17ae6090ff270234, []int{14, 0}
 }
+
 func (m *ApplyResourceChange_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ApplyResourceChange_Request.Unmarshal(m, b)
 }
 func (m *ApplyResourceChange_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ApplyResourceChange_Request.Marshal(b, m, deterministic)
 }
-func (dst *ApplyResourceChange_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ApplyResourceChange_Request.Merge(dst, src)
+func (m *ApplyResourceChange_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ApplyResourceChange_Request.Merge(m, src)
 }
 func (m *ApplyResourceChange_Request) XXX_Size() int {
        return xxx_messageInfo_ApplyResourceChange_Request.Size(m)
@@ -1953,16 +2010,17 @@ func (m *ApplyResourceChange_Response) Reset()         { *m = ApplyResourceChang
 func (m *ApplyResourceChange_Response) String() string { return proto.CompactTextString(m) }
 func (*ApplyResourceChange_Response) ProtoMessage()    {}
 func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{14, 1}
+       return fileDescriptor_17ae6090ff270234, []int{14, 1}
 }
+
 func (m *ApplyResourceChange_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ApplyResourceChange_Response.Unmarshal(m, b)
 }
 func (m *ApplyResourceChange_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ApplyResourceChange_Response.Marshal(b, m, deterministic)
 }
-func (dst *ApplyResourceChange_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ApplyResourceChange_Response.Merge(dst, src)
+func (m *ApplyResourceChange_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ApplyResourceChange_Response.Merge(m, src)
 }
 func (m *ApplyResourceChange_Response) XXX_Size() int {
        return xxx_messageInfo_ApplyResourceChange_Response.Size(m)
@@ -2011,16 +2069,17 @@ func (m *ImportResourceState) Reset()         { *m = ImportResourceState{} }
 func (m *ImportResourceState) String() string { return proto.CompactTextString(m) }
 func (*ImportResourceState) ProtoMessage()    {}
 func (*ImportResourceState) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15}
+       return fileDescriptor_17ae6090ff270234, []int{15}
 }
+
 func (m *ImportResourceState) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ImportResourceState.Unmarshal(m, b)
 }
 func (m *ImportResourceState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ImportResourceState.Marshal(b, m, deterministic)
 }
-func (dst *ImportResourceState) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ImportResourceState.Merge(dst, src)
+func (m *ImportResourceState) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ImportResourceState.Merge(m, src)
 }
 func (m *ImportResourceState) XXX_Size() int {
        return xxx_messageInfo_ImportResourceState.Size(m)
@@ -2043,16 +2102,17 @@ func (m *ImportResourceState_Request) Reset()         { *m = ImportResourceState
 func (m *ImportResourceState_Request) String() string { return proto.CompactTextString(m) }
 func (*ImportResourceState_Request) ProtoMessage()    {}
 func (*ImportResourceState_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 0}
+       return fileDescriptor_17ae6090ff270234, []int{15, 0}
 }
+
 func (m *ImportResourceState_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ImportResourceState_Request.Unmarshal(m, b)
 }
 func (m *ImportResourceState_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ImportResourceState_Request.Marshal(b, m, deterministic)
 }
-func (dst *ImportResourceState_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ImportResourceState_Request.Merge(dst, src)
+func (m *ImportResourceState_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ImportResourceState_Request.Merge(m, src)
 }
 func (m *ImportResourceState_Request) XXX_Size() int {
        return xxx_messageInfo_ImportResourceState_Request.Size(m)
@@ -2090,16 +2150,17 @@ func (m *ImportResourceState_ImportedResource) Reset()         { *m = ImportReso
 func (m *ImportResourceState_ImportedResource) String() string { return proto.CompactTextString(m) }
 func (*ImportResourceState_ImportedResource) ProtoMessage()    {}
 func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 1}
+       return fileDescriptor_17ae6090ff270234, []int{15, 1}
 }
+
 func (m *ImportResourceState_ImportedResource) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ImportResourceState_ImportedResource.Unmarshal(m, b)
 }
 func (m *ImportResourceState_ImportedResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ImportResourceState_ImportedResource.Marshal(b, m, deterministic)
 }
-func (dst *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ImportResourceState_ImportedResource.Merge(dst, src)
+func (m *ImportResourceState_ImportedResource) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ImportResourceState_ImportedResource.Merge(m, src)
 }
 func (m *ImportResourceState_ImportedResource) XXX_Size() int {
        return xxx_messageInfo_ImportResourceState_ImportedResource.Size(m)
@@ -2143,16 +2204,17 @@ func (m *ImportResourceState_Response) Reset()         { *m = ImportResourceStat
 func (m *ImportResourceState_Response) String() string { return proto.CompactTextString(m) }
 func (*ImportResourceState_Response) ProtoMessage()    {}
 func (*ImportResourceState_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{15, 2}
+       return fileDescriptor_17ae6090ff270234, []int{15, 2}
 }
+
 func (m *ImportResourceState_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ImportResourceState_Response.Unmarshal(m, b)
 }
 func (m *ImportResourceState_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ImportResourceState_Response.Marshal(b, m, deterministic)
 }
-func (dst *ImportResourceState_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ImportResourceState_Response.Merge(dst, src)
+func (m *ImportResourceState_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ImportResourceState_Response.Merge(m, src)
 }
 func (m *ImportResourceState_Response) XXX_Size() int {
        return xxx_messageInfo_ImportResourceState_Response.Size(m)
@@ -2187,16 +2249,17 @@ func (m *ReadDataSource) Reset()         { *m = ReadDataSource{} }
 func (m *ReadDataSource) String() string { return proto.CompactTextString(m) }
 func (*ReadDataSource) ProtoMessage()    {}
 func (*ReadDataSource) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16}
+       return fileDescriptor_17ae6090ff270234, []int{16}
 }
+
 func (m *ReadDataSource) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ReadDataSource.Unmarshal(m, b)
 }
 func (m *ReadDataSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ReadDataSource.Marshal(b, m, deterministic)
 }
-func (dst *ReadDataSource) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ReadDataSource.Merge(dst, src)
+func (m *ReadDataSource) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ReadDataSource.Merge(m, src)
 }
 func (m *ReadDataSource) XXX_Size() int {
        return xxx_messageInfo_ReadDataSource.Size(m)
@@ -2219,16 +2282,17 @@ func (m *ReadDataSource_Request) Reset()         { *m = ReadDataSource_Request{}
 func (m *ReadDataSource_Request) String() string { return proto.CompactTextString(m) }
 func (*ReadDataSource_Request) ProtoMessage()    {}
 func (*ReadDataSource_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 0}
+       return fileDescriptor_17ae6090ff270234, []int{16, 0}
 }
+
 func (m *ReadDataSource_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ReadDataSource_Request.Unmarshal(m, b)
 }
 func (m *ReadDataSource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ReadDataSource_Request.Marshal(b, m, deterministic)
 }
-func (dst *ReadDataSource_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ReadDataSource_Request.Merge(dst, src)
+func (m *ReadDataSource_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ReadDataSource_Request.Merge(m, src)
 }
 func (m *ReadDataSource_Request) XXX_Size() int {
        return xxx_messageInfo_ReadDataSource_Request.Size(m)
@@ -2265,16 +2329,17 @@ func (m *ReadDataSource_Response) Reset()         { *m = ReadDataSource_Response
 func (m *ReadDataSource_Response) String() string { return proto.CompactTextString(m) }
 func (*ReadDataSource_Response) ProtoMessage()    {}
 func (*ReadDataSource_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{16, 1}
+       return fileDescriptor_17ae6090ff270234, []int{16, 1}
 }
+
 func (m *ReadDataSource_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ReadDataSource_Response.Unmarshal(m, b)
 }
 func (m *ReadDataSource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ReadDataSource_Response.Marshal(b, m, deterministic)
 }
-func (dst *ReadDataSource_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ReadDataSource_Response.Merge(dst, src)
+func (m *ReadDataSource_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ReadDataSource_Response.Merge(m, src)
 }
 func (m *ReadDataSource_Response) XXX_Size() int {
        return xxx_messageInfo_ReadDataSource_Response.Size(m)
@@ -2309,16 +2374,17 @@ func (m *GetProvisionerSchema) Reset()         { *m = GetProvisionerSchema{} }
 func (m *GetProvisionerSchema) String() string { return proto.CompactTextString(m) }
 func (*GetProvisionerSchema) ProtoMessage()    {}
 func (*GetProvisionerSchema) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17}
+       return fileDescriptor_17ae6090ff270234, []int{17}
 }
+
 func (m *GetProvisionerSchema) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_GetProvisionerSchema.Unmarshal(m, b)
 }
 func (m *GetProvisionerSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_GetProvisionerSchema.Marshal(b, m, deterministic)
 }
-func (dst *GetProvisionerSchema) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_GetProvisionerSchema.Merge(dst, src)
+func (m *GetProvisionerSchema) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_GetProvisionerSchema.Merge(m, src)
 }
 func (m *GetProvisionerSchema) XXX_Size() int {
        return xxx_messageInfo_GetProvisionerSchema.Size(m)
@@ -2339,16 +2405,17 @@ func (m *GetProvisionerSchema_Request) Reset()         { *m = GetProvisionerSche
 func (m *GetProvisionerSchema_Request) String() string { return proto.CompactTextString(m) }
 func (*GetProvisionerSchema_Request) ProtoMessage()    {}
 func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 0}
+       return fileDescriptor_17ae6090ff270234, []int{17, 0}
 }
+
 func (m *GetProvisionerSchema_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_GetProvisionerSchema_Request.Unmarshal(m, b)
 }
 func (m *GetProvisionerSchema_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_GetProvisionerSchema_Request.Marshal(b, m, deterministic)
 }
-func (dst *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_GetProvisionerSchema_Request.Merge(dst, src)
+func (m *GetProvisionerSchema_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_GetProvisionerSchema_Request.Merge(m, src)
 }
 func (m *GetProvisionerSchema_Request) XXX_Size() int {
        return xxx_messageInfo_GetProvisionerSchema_Request.Size(m)
@@ -2371,16 +2438,17 @@ func (m *GetProvisionerSchema_Response) Reset()         { *m = GetProvisionerSch
 func (m *GetProvisionerSchema_Response) String() string { return proto.CompactTextString(m) }
 func (*GetProvisionerSchema_Response) ProtoMessage()    {}
 func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{17, 1}
+       return fileDescriptor_17ae6090ff270234, []int{17, 1}
 }
+
 func (m *GetProvisionerSchema_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_GetProvisionerSchema_Response.Unmarshal(m, b)
 }
 func (m *GetProvisionerSchema_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_GetProvisionerSchema_Response.Marshal(b, m, deterministic)
 }
-func (dst *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_GetProvisionerSchema_Response.Merge(dst, src)
+func (m *GetProvisionerSchema_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_GetProvisionerSchema_Response.Merge(m, src)
 }
 func (m *GetProvisionerSchema_Response) XXX_Size() int {
        return xxx_messageInfo_GetProvisionerSchema_Response.Size(m)
@@ -2415,16 +2483,17 @@ func (m *ValidateProvisionerConfig) Reset()         { *m = ValidateProvisionerCo
 func (m *ValidateProvisionerConfig) String() string { return proto.CompactTextString(m) }
 func (*ValidateProvisionerConfig) ProtoMessage()    {}
 func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18}
+       return fileDescriptor_17ae6090ff270234, []int{18}
 }
+
 func (m *ValidateProvisionerConfig) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ValidateProvisionerConfig.Unmarshal(m, b)
 }
 func (m *ValidateProvisionerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ValidateProvisionerConfig.Marshal(b, m, deterministic)
 }
-func (dst *ValidateProvisionerConfig) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ValidateProvisionerConfig.Merge(dst, src)
+func (m *ValidateProvisionerConfig) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ValidateProvisionerConfig.Merge(m, src)
 }
 func (m *ValidateProvisionerConfig) XXX_Size() int {
        return xxx_messageInfo_ValidateProvisionerConfig.Size(m)
@@ -2446,16 +2515,17 @@ func (m *ValidateProvisionerConfig_Request) Reset()         { *m = ValidateProvi
 func (m *ValidateProvisionerConfig_Request) String() string { return proto.CompactTextString(m) }
 func (*ValidateProvisionerConfig_Request) ProtoMessage()    {}
 func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 0}
+       return fileDescriptor_17ae6090ff270234, []int{18, 0}
 }
+
 func (m *ValidateProvisionerConfig_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ValidateProvisionerConfig_Request.Unmarshal(m, b)
 }
 func (m *ValidateProvisionerConfig_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ValidateProvisionerConfig_Request.Marshal(b, m, deterministic)
 }
-func (dst *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(dst, src)
+func (m *ValidateProvisionerConfig_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ValidateProvisionerConfig_Request.Merge(m, src)
 }
 func (m *ValidateProvisionerConfig_Request) XXX_Size() int {
        return xxx_messageInfo_ValidateProvisionerConfig_Request.Size(m)
@@ -2484,16 +2554,17 @@ func (m *ValidateProvisionerConfig_Response) Reset()         { *m = ValidateProv
 func (m *ValidateProvisionerConfig_Response) String() string { return proto.CompactTextString(m) }
 func (*ValidateProvisionerConfig_Response) ProtoMessage()    {}
 func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{18, 1}
+       return fileDescriptor_17ae6090ff270234, []int{18, 1}
 }
+
 func (m *ValidateProvisionerConfig_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ValidateProvisionerConfig_Response.Unmarshal(m, b)
 }
 func (m *ValidateProvisionerConfig_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ValidateProvisionerConfig_Response.Marshal(b, m, deterministic)
 }
-func (dst *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(dst, src)
+func (m *ValidateProvisionerConfig_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ValidateProvisionerConfig_Response.Merge(m, src)
 }
 func (m *ValidateProvisionerConfig_Response) XXX_Size() int {
        return xxx_messageInfo_ValidateProvisionerConfig_Response.Size(m)
@@ -2521,16 +2592,17 @@ func (m *ProvisionResource) Reset()         { *m = ProvisionResource{} }
 func (m *ProvisionResource) String() string { return proto.CompactTextString(m) }
 func (*ProvisionResource) ProtoMessage()    {}
 func (*ProvisionResource) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19}
+       return fileDescriptor_17ae6090ff270234, []int{19}
 }
+
 func (m *ProvisionResource) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ProvisionResource.Unmarshal(m, b)
 }
 func (m *ProvisionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ProvisionResource.Marshal(b, m, deterministic)
 }
-func (dst *ProvisionResource) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ProvisionResource.Merge(dst, src)
+func (m *ProvisionResource) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ProvisionResource.Merge(m, src)
 }
 func (m *ProvisionResource) XXX_Size() int {
        return xxx_messageInfo_ProvisionResource.Size(m)
@@ -2553,16 +2625,17 @@ func (m *ProvisionResource_Request) Reset()         { *m = ProvisionResource_Req
 func (m *ProvisionResource_Request) String() string { return proto.CompactTextString(m) }
 func (*ProvisionResource_Request) ProtoMessage()    {}
 func (*ProvisionResource_Request) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 0}
+       return fileDescriptor_17ae6090ff270234, []int{19, 0}
 }
+
 func (m *ProvisionResource_Request) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ProvisionResource_Request.Unmarshal(m, b)
 }
 func (m *ProvisionResource_Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ProvisionResource_Request.Marshal(b, m, deterministic)
 }
-func (dst *ProvisionResource_Request) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ProvisionResource_Request.Merge(dst, src)
+func (m *ProvisionResource_Request) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ProvisionResource_Request.Merge(m, src)
 }
 func (m *ProvisionResource_Request) XXX_Size() int {
        return xxx_messageInfo_ProvisionResource_Request.Size(m)
@@ -2599,16 +2672,17 @@ func (m *ProvisionResource_Response) Reset()         { *m = ProvisionResource_Re
 func (m *ProvisionResource_Response) String() string { return proto.CompactTextString(m) }
 func (*ProvisionResource_Response) ProtoMessage()    {}
 func (*ProvisionResource_Response) Descriptor() ([]byte, []int) {
-       return fileDescriptor_tfplugin5_56820f4fb67360c5, []int{19, 1}
+       return fileDescriptor_17ae6090ff270234, []int{19, 1}
 }
+
 func (m *ProvisionResource_Response) XXX_Unmarshal(b []byte) error {
        return xxx_messageInfo_ProvisionResource_Response.Unmarshal(m, b)
 }
 func (m *ProvisionResource_Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
        return xxx_messageInfo_ProvisionResource_Response.Marshal(b, m, deterministic)
 }
-func (dst *ProvisionResource_Response) XXX_Merge(src proto.Message) {
-       xxx_messageInfo_ProvisionResource_Response.Merge(dst, src)
+func (m *ProvisionResource_Response) XXX_Merge(src proto.Message) {
+       xxx_messageInfo_ProvisionResource_Response.Merge(m, src)
 }
 func (m *ProvisionResource_Response) XXX_Size() int {
        return xxx_messageInfo_ProvisionResource_Response.Size(m)
@@ -2634,6 +2708,8 @@ func (m *ProvisionResource_Response) GetDiagnostics() []*Diagnostic {
 }
 
 func init() {
+       proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value)
+       proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value)
        proto.RegisterType((*DynamicValue)(nil), "tfplugin5.DynamicValue")
        proto.RegisterType((*Diagnostic)(nil), "tfplugin5.Diagnostic")
        proto.RegisterType((*AttributePath)(nil), "tfplugin5.AttributePath")
@@ -2692,8 +2768,130 @@ func init() {
        proto.RegisterType((*ProvisionResource)(nil), "tfplugin5.ProvisionResource")
        proto.RegisterType((*ProvisionResource_Request)(nil), "tfplugin5.ProvisionResource.Request")
        proto.RegisterType((*ProvisionResource_Response)(nil), "tfplugin5.ProvisionResource.Response")
-       proto.RegisterEnum("tfplugin5.Diagnostic_Severity", Diagnostic_Severity_name, Diagnostic_Severity_value)
-       proto.RegisterEnum("tfplugin5.Schema_NestedBlock_NestingMode", Schema_NestedBlock_NestingMode_name, Schema_NestedBlock_NestingMode_value)
+}
+
+func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_17ae6090ff270234) }
+
+var fileDescriptor_17ae6090ff270234 = []byte{
+       // 1880 bytes of a gzipped FileDescriptorProto
+       0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcb, 0x6f, 0x23, 0x49,
+       0x19, 0x9f, 0xf6, 0x23, 0xb1, 0x3f, 0xe7, 0xe1, 0xd4, 0xcc, 0x0e, 0xa6, 0x77, 0x17, 0x82, 0x79,
+       0x24, 0xab, 0xdd, 0xf1, 0xac, 0x32, 0xb0, 0xbb, 0x84, 0xd1, 0x8a, 0x6c, 0x26, 0x64, 0x22, 0x66,
+       0xb2, 0xa1, 0x3c, 0x0f, 0x24, 0xa4, 0xb5, 0x6a, 0xdc, 0x15, 0x4f, 0x33, 0x76, 0x77, 0x6f, 0x75,
+       0x39, 0x89, 0x85, 0xc4, 0x05, 0xc1, 0x19, 0x09, 0xf1, 0x90, 0x78, 0x5c, 0x40, 0xe2, 0x1f, 0xe0,
+       0x00, 0xdc, 0x38, 0xf1, 0x0f, 0x70, 0x03, 0x4e, 0x08, 0x6e, 0x9c, 0xe1, 0x82, 0x84, 0xea, 0xd5,
+       0x5d, 0xb6, 0xdb, 0x4e, 0x4f, 0xb2, 0x23, 0xc4, 0xad, 0xab, 0xbe, 0x5f, 0x7d, 0xdf, 0x57, 0xdf,
+       0xab, 0xbe, 0xcf, 0x86, 0x55, 0x7e, 0x1c, 0xf5, 0x87, 0x3d, 0x3f, 0xf8, 0x42, 0x2b, 0x62, 0x21,
+       0x0f, 0x51, 0x35, 0xd9, 0x68, 0xde, 0x86, 0xa5, 0x3b, 0xa3, 0x80, 0x0c, 0xfc, 0xee, 0x23, 0xd2,
+       0x1f, 0x52, 0xd4, 0x80, 0xc5, 0x41, 0xdc, 0x8b, 0x48, 0xf7, 0x59, 0xc3, 0x59, 0x77, 0x36, 0x97,
+       0xb0, 0x59, 0x22, 0x04, 0xa5, 0x6f, 0xc6, 0x61, 0xd0, 0x28, 0xc8, 0x6d, 0xf9, 0xdd, 0xfc, 0x9b,
+       0x03, 0x70, 0xc7, 0x27, 0xbd, 0x20, 0x8c, 0xb9, 0xdf, 0x45, 0xdb, 0x50, 0x89, 0xe9, 0x09, 0x65,
+       0x3e, 0x1f, 0xc9, 0xd3, 0x2b, 0x5b, 0x9f, 0x68, 0xa5, 0xb2, 0x53, 0x60, 0xab, 0xad, 0x51, 0x38,
+       0xc1, 0x0b, 0xc1, 0xf1, 0x70, 0x30, 0x20, 0x6c, 0x24, 0x25, 0x54, 0xb1, 0x59, 0xa2, 0xeb, 0xb0,
+       0xe0, 0x51, 0x4e, 0xfc, 0x7e, 0xa3, 0x28, 0x09, 0x7a, 0x85, 0xde, 0x82, 0x2a, 0xe1, 0x9c, 0xf9,
+       0x4f, 0x86, 0x9c, 0x36, 0x4a, 0xeb, 0xce, 0x66, 0x6d, 0xab, 0x61, 0x89, 0xdb, 0x31, 0xb4, 0x23,
+       0xc2, 0x9f, 0xe2, 0x14, 0xda, 0xbc, 0x09, 0x15, 0x23, 0x1f, 0xd5, 0x60, 0xf1, 0xe0, 0xf0, 0xd1,
+       0xce, 0xbd, 0x83, 0x3b, 0xf5, 0x2b, 0xa8, 0x0a, 0xe5, 0x3d, 0x8c, 0xdf, 0xc7, 0x75, 0x47, 0xec,
+       0x3f, 0xde, 0xc1, 0x87, 0x07, 0x87, 0xfb, 0xf5, 0x42, 0xf3, 0x2f, 0x0e, 0x2c, 0x8f, 0x71, 0x43,
+       0xb7, 0xa0, 0x1c, 0x73, 0x1a, 0xc5, 0x0d, 0x67, 0xbd, 0xb8, 0x59, 0xdb, 0x7a, 0x75, 0x96, 0xd8,
+       0x56, 0x9b, 0xd3, 0x08, 0x2b, 0xac, 0xfb, 0x43, 0x07, 0x4a, 0x62, 0x8d, 0x36, 0x60, 0x25, 0xd1,
+       0xa6, 0x13, 0x90, 0x01, 0x95, 0xc6, 0xaa, 0xde, 0xbd, 0x82, 0x97, 0x93, 0xfd, 0x43, 0x32, 0xa0,
+       0xa8, 0x05, 0x88, 0xf6, 0xe9, 0x80, 0x06, 0xbc, 0xf3, 0x8c, 0x8e, 0x3a, 0x31, 0x67, 0x7e, 0xd0,
+       0x53, 0xe6, 0xb9, 0x7b, 0x05, 0xd7, 0x35, 0xed, 0xab, 0x74, 0xd4, 0x96, 0x14, 0xb4, 0x09, 0xab,
+       0x36, 0xde, 0x0f, 0xb8, 0x34, 0x59, 0x51, 0x70, 0x4e, 0xc1, 0x07, 0x01, 0x7f, 0x0f, 0x84, 0xa7,
+       0xfa, 0xb4, 0xcb, 0x43, 0xd6, 0xbc, 0x25, 0xd4, 0x0a, 0x23, 0xb7, 0x0a, 0x8b, 0x98, 0x7e, 0x38,
+       0xa4, 0x31, 0x77, 0xd7, 0xa1, 0x82, 0x69, 0x1c, 0x85, 0x41, 0x4c, 0xd1, 0x35, 0x28, 0xef, 0x31,
+       0x16, 0x32, 0xa5, 0x24, 0x56, 0x8b, 0xe6, 0x8f, 0x1c, 0xa8, 0x60, 0x72, 0xda, 0xe6, 0x84, 0xd3,
+       0x24, 0x34, 0x9c, 0x34, 0x34, 0xd0, 0x36, 0x2c, 0x1e, 0xf7, 0x09, 0x1f, 0x90, 0xa8, 0x51, 0x90,
+       0x46, 0x5a, 0xb7, 0x8c, 0x64, 0x4e, 0xb6, 0xbe, 0xa2, 0x20, 0x7b, 0x01, 0x67, 0x23, 0x6c, 0x0e,
+       0xb8, 0xdb, 0xb0, 0x64, 0x13, 0x50, 0x1d, 0x8a, 0xcf, 0xe8, 0x48, 0x2b, 0x20, 0x3e, 0x85, 0x52,
+       0x27, 0x22, 0x5e, 0x75, 0xac, 0xa8, 0xc5, 0x76, 0xe1, 0x1d, 0xa7, 0xf9, 0x8f, 0x32, 0x2c, 0xb4,
+       0xbb, 0x4f, 0xe9, 0x80, 0x88, 0x90, 0x3a, 0xa1, 0x2c, 0xf6, 0xb5, 0x66, 0x45, 0x6c, 0x96, 0xe8,
+       0x06, 0x94, 0x9f, 0xf4, 0xc3, 0xee, 0x33, 0x79, 0xbc, 0xb6, 0xf5, 0x31, 0x4b, 0x35, 0x75, 0xb6,
+       0xf5, 0x9e, 0x20, 0x63, 0x85, 0x72, 0x7f, 0xe1, 0x40, 0x59, 0x6e, 0xcc, 0x61, 0xf9, 0x25, 0x80,
+       0xc4, 0x79, 0xb1, 0xbe, 0xf2, 0xcb, 0xd3, 0x7c, 0x93, 0xf0, 0xc0, 0x16, 0x1c, 0xbd, 0x0b, 0x35,
+       0x29, 0xa9, 0xc3, 0x47, 0x11, 0x8d, 0x1b, 0xc5, 0xa9, 0xa8, 0xd2, 0xa7, 0x0f, 0x69, 0xcc, 0xa9,
+       0xa7, 0x74, 0x03, 0x79, 0xe2, 0x81, 0x38, 0xe0, 0xfe, 0xd1, 0x81, 0x6a, 0xc2, 0x59, 0xb8, 0x23,
+       0x8d, 0x2a, 0x2c, 0xbf, 0xc5, 0x9e, 0xe0, 0x6d, 0xb2, 0x57, 0x7c, 0xa3, 0x75, 0xa8, 0x79, 0x34,
+       0xee, 0x32, 0x3f, 0xe2, 0xe2, 0x42, 0x2a, 0xbb, 0xec, 0x2d, 0xe4, 0x42, 0x85, 0xd1, 0x0f, 0x87,
+       0x3e, 0xa3, 0x9e, 0xcc, 0xb0, 0x0a, 0x4e, 0xd6, 0x82, 0x16, 0x4a, 0x14, 0xe9, 0x37, 0xca, 0x8a,
+       0x66, 0xd6, 0x82, 0xd6, 0x0d, 0x07, 0xd1, 0x90, 0x53, 0xaf, 0xb1, 0xa0, 0x68, 0x66, 0x8d, 0x5e,
+       0x81, 0x6a, 0x4c, 0x83, 0xd8, 0xe7, 0xfe, 0x09, 0x6d, 0x2c, 0x4a, 0x62, 0xba, 0xe1, 0xfe, 0xba,
+       0x00, 0x35, 0xeb, 0x96, 0xe8, 0x65, 0xa8, 0x0a, 0x5d, 0xad, 0x34, 0xc1, 0x15, 0xb1, 0x21, 0xf3,
+       0xe3, 0xf9, 0xdc, 0x88, 0x76, 0x61, 0x31, 0xa0, 0x31, 0x17, 0x39, 0x54, 0x94, 0xd5, 0xe9, 0xb5,
+       0xb9, 0x16, 0x96, 0xdf, 0x7e, 0xd0, 0xbb, 0x1f, 0x7a, 0x14, 0x9b, 0x93, 0x42, 0xa1, 0x81, 0x1f,
+       0x74, 0x7c, 0x4e, 0x07, 0xb1, 0xb4, 0x49, 0x11, 0x57, 0x06, 0x7e, 0x70, 0x20, 0xd6, 0x92, 0x48,
+       0xce, 0x34, 0xb1, 0xac, 0x89, 0xe4, 0x4c, 0x12, 0x9b, 0xf7, 0xd5, 0xcd, 0x34, 0xc7, 0xf1, 0xd2,
+       0x03, 0xb0, 0xd0, 0x3e, 0x38, 0xdc, 0xbf, 0xb7, 0x57, 0x77, 0x50, 0x05, 0x4a, 0xf7, 0x0e, 0xda,
+       0x0f, 0xea, 0x05, 0xb4, 0x08, 0xc5, 0xf6, 0xde, 0x83, 0x7a, 0x51, 0x7c, 0xdc, 0xdf, 0x39, 0xaa,
+       0x97, 0x44, 0x89, 0xda, 0xc7, 0xef, 0x3f, 0x3c, 0xaa, 0x97, 0x9b, 0x3f, 0x29, 0xc1, 0xda, 0x3e,
+       0xe5, 0x47, 0x2c, 0x3c, 0xf1, 0x3d, 0xca, 0x94, 0xfe, 0x76, 0x12, 0xff, 0xab, 0x68, 0x65, 0xf1,
+       0x0d, 0xa8, 0x44, 0x1a, 0x29, 0xcd, 0x58, 0xdb, 0x5a, 0x9b, 0xba, 0x3c, 0x4e, 0x20, 0x88, 0x42,
+       0x9d, 0xd1, 0x38, 0x1c, 0xb2, 0x2e, 0xed, 0xc4, 0x92, 0x68, 0x62, 0x7a, 0xdb, 0x3a, 0x36, 0x25,
+       0xbe, 0x65, 0xe4, 0x89, 0x0f, 0x79, 0x5a, 0xed, 0xc7, 0x2a, 0xc1, 0x57, 0xd9, 0xf8, 0x2e, 0xea,
+       0xc3, 0x55, 0x8f, 0x70, 0xd2, 0x99, 0x90, 0xa4, 0xe2, 0xff, 0x76, 0x3e, 0x49, 0x77, 0x08, 0x27,
+       0xed, 0x69, 0x59, 0x6b, 0xde, 0xe4, 0x3e, 0x7a, 0x1b, 0x6a, 0x5e, 0xf2, 0x06, 0x09, 0xe7, 0x09,
+       0x29, 0x2f, 0x65, 0xbe, 0x50, 0xd8, 0x46, 0xba, 0x0f, 0xe1, 0x5a, 0xd6, 0x7d, 0x32, 0xea, 0xd2,
+       0x86, 0x5d, 0x97, 0x32, 0x6d, 0x9c, 0x96, 0x2a, 0xf7, 0x31, 0x5c, 0xcf, 0x56, 0xfe, 0x92, 0x8c,
+       0x9b, 0x7f, 0x76, 0xe0, 0xa5, 0x23, 0x46, 0x23, 0xc2, 0xa8, 0xb1, 0xda, 0x6e, 0x18, 0x1c, 0xfb,
+       0x3d, 0x77, 0x3b, 0x09, 0x0f, 0x74, 0x13, 0x16, 0xba, 0x72, 0x53, 0xc7, 0x83, 0x9d, 0x3d, 0x76,
+       0x4b, 0x80, 0x35, 0xcc, 0xfd, 0xae, 0x63, 0xc5, 0xd3, 0x97, 0x61, 0x35, 0x52, 0x12, 0xbc, 0x4e,
+       0x3e, 0x36, 0x2b, 0x06, 0xaf, 0x54, 0x99, 0xf4, 0x46, 0x21, 0xaf, 0x37, 0x9a, 0xdf, 0x2f, 0xc0,
+       0xb5, 0x87, 0x51, 0x8f, 0x11, 0x8f, 0x26, 0x5e, 0x11, 0x8f, 0x89, 0xcb, 0xd2, 0xcb, 0xcd, 0x2d,
+       0x1b, 0x56, 0x11, 0x2f, 0x8c, 0x17, 0xf1, 0x37, 0xa1, 0xca, 0xc8, 0x69, 0x27, 0x16, 0xec, 0x64,
+       0x8d, 0xa8, 0x6d, 0x5d, 0xcd, 0x78, 0xb6, 0x70, 0x85, 0xe9, 0x2f, 0xf7, 0x3b, 0xb6, 0x51, 0xde,
+       0x85, 0x95, 0xa1, 0x52, 0xcc, 0xd3, 0x3c, 0xce, 0xb1, 0xc9, 0xb2, 0x81, 0xab, 0x77, 0xf4, 0xc2,
+       0x26, 0xf9, 0xbd, 0x03, 0xee, 0x23, 0xd2, 0xf7, 0x3d, 0xa1, 0x9c, 0xb6, 0x89, 0x78, 0x19, 0xb4,
+       0xd7, 0x1f, 0xe7, 0x34, 0x4c, 0x1a, 0x12, 0x85, 0x7c, 0x21, 0xb1, 0x6b, 0x5d, 0x7e, 0x42, 0x79,
+       0x27, 0xb7, 0xf2, 0xbf, 0x75, 0xa0, 0x61, 0x94, 0x4f, 0xf3, 0xe1, 0xff, 0x42, 0xf5, 0xdf, 0x39,
+       0x50, 0x55, 0x8a, 0x0e, 0x19, 0x75, 0x7b, 0xa9, 0xae, 0xaf, 0xc3, 0x1a, 0xa7, 0x8c, 0x91, 0xe3,
+       0x90, 0x0d, 0x3a, 0x76, 0xc7, 0x50, 0xc5, 0xf5, 0x84, 0xf0, 0x48, 0x47, 0xdd, 0xff, 0x46, 0xf7,
+       0x5f, 0x15, 0x60, 0x09, 0x53, 0xe2, 0x99, 0x78, 0x71, 0xbf, 0x9d, 0xd3, 0xd4, 0xb7, 0x61, 0xb9,
+       0x3b, 0x64, 0x4c, 0x74, 0x99, 0x2a, 0xc8, 0xcf, 0xd1, 0x7a, 0x49, 0xa3, 0x55, 0x8c, 0x37, 0x60,
+       0x31, 0x62, 0xfe, 0x89, 0x49, 0xb0, 0x25, 0x6c, 0x96, 0xee, 0x0f, 0xec, 0x54, 0xfa, 0x3c, 0x54,
+       0x03, 0x7a, 0x9a, 0x2f, 0x8b, 0x2a, 0x01, 0x3d, 0xbd, 0x5c, 0x02, 0xcd, 0xd6, 0xaa, 0xf9, 0x9b,
+       0x12, 0xa0, 0xa3, 0x3e, 0x09, 0x8c, 0x99, 0x76, 0x9f, 0x92, 0xa0, 0x47, 0xdd, 0xff, 0x38, 0x39,
+       0xad, 0xf5, 0x0e, 0xd4, 0x22, 0xe6, 0x87, 0x2c, 0x9f, 0xad, 0x40, 0x62, 0xd5, 0x65, 0xf6, 0x00,
+       0x45, 0x2c, 0x8c, 0xc2, 0x98, 0x7a, 0x9d, 0xd4, 0x16, 0xc5, 0xf9, 0x0c, 0xea, 0xe6, 0xc8, 0xa1,
+       0xb1, 0x49, 0x1a, 0x5d, 0xa5, 0x5c, 0xd1, 0x85, 0x3e, 0x0d, 0xcb, 0x4a, 0x63, 0x63, 0x91, 0xb2,
+       0xb4, 0xc8, 0x92, 0xdc, 0x3c, 0xd2, 0xce, 0xfa, 0x79, 0xc1, 0x72, 0xd6, 0x6d, 0x58, 0x8e, 0xfa,
+       0x24, 0x08, 0xf2, 0x96, 0xbd, 0x25, 0x8d, 0x56, 0x0a, 0xee, 0x8a, 0x5e, 0x43, 0x36, 0x95, 0x71,
+       0x87, 0xd1, 0xa8, 0x4f, 0xba, 0x54, 0x7b, 0x6e, 0xf6, 0x38, 0xb7, 0x6a, 0x4e, 0x60, 0x75, 0x00,
+       0x6d, 0xc0, 0xaa, 0x51, 0x61, 0xdc, 0x91, 0x2b, 0x7a, 0x5b, 0x2b, 0x7e, 0xe1, 0x26, 0x00, 0xbd,
+       0x01, 0xa8, 0x4f, 0x7b, 0xa4, 0x3b, 0x92, 0x4d, 0x7a, 0x27, 0x1e, 0xc5, 0x9c, 0x0e, 0x74, 0xe7,
+       0x5b, 0x57, 0x14, 0x51, 0x72, 0xdb, 0x72, 0xbf, 0xf9, 0xa7, 0x22, 0x5c, 0xdd, 0x89, 0xa2, 0xfe,
+       0x68, 0x22, 0x6e, 0xfe, 0xfd, 0xe2, 0xe3, 0x66, 0xca, 0x1b, 0xc5, 0xe7, 0xf1, 0xc6, 0x73, 0x87,
+       0x4b, 0x86, 0xe5, 0xcb, 0x59, 0x96, 0x77, 0xff, 0x70, 0xf9, 0xfc, 0xb6, 0xd2, 0xb4, 0x30, 0x96,
+       0xa6, 0x93, 0x6e, 0x2d, 0x5e, 0xd2, 0xad, 0xa5, 0x19, 0x6e, 0xfd, 0x67, 0x01, 0xae, 0x1e, 0x0c,
+       0xa2, 0x90, 0xf1, 0xf1, 0xd6, 0xe3, 0xad, 0x9c, 0x5e, 0x5d, 0x81, 0x82, 0xef, 0xe9, 0xa1, 0xb5,
+       0xe0, 0x7b, 0xee, 0x19, 0xd4, 0x15, 0x3b, 0x9a, 0xd4, 0xe1, 0x73, 0x47, 0x9e, 0x5c, 0x01, 0xa1,
+       0x50, 0x73, 0xaa, 0xed, 0x2f, 0x6d, 0x6f, 0x7c, 0x00, 0xc8, 0xd7, 0x6a, 0x74, 0x4c, 0x8f, 0x6e,
+       0xde, 0x92, 0x9b, 0x96, 0x88, 0x8c, 0xab, 0xb7, 0x26, 0xf5, 0xc7, 0x6b, 0xfe, 0xc4, 0x4e, 0x7c,
+       0xf1, 0xc6, 0xe6, 0xaf, 0x0e, 0xac, 0x88, 0x47, 0x2a, 0xed, 0x0b, 0x5e, 0x5c, 0x47, 0xc0, 0xc6,
+       0xc6, 0xa5, 0x72, 0xae, 0xd0, 0xd4, 0x66, 0xbe, 0xf0, 0xfd, 0x7e, 0xea, 0xc0, 0x35, 0x33, 0xdb,
+       0x88, 0x5e, 0x20, 0x6b, 0x8e, 0x3b, 0xb3, 0xf4, 0xba, 0x25, 0xaa, 0x42, 0x82, 0x9d, 0x3d, 0xc9,
+       0xd9, 0xa8, 0x8b, 0x6b, 0xf7, 0x33, 0x07, 0x3e, 0x6e, 0x3a, 0x33, 0x4b, 0xc5, 0x8f, 0x60, 0x96,
+       0xf8, 0x48, 0x3a, 0x98, 0xbf, 0x3b, 0xb0, 0x96, 0xa8, 0x95, 0xb4, 0x31, 0xf1, 0xc5, 0xd5, 0x42,
+       0x6f, 0x03, 0x74, 0xc3, 0x20, 0xa0, 0x5d, 0x6e, 0x86, 0x83, 0x79, 0x35, 0x37, 0x85, 0xba, 0xdf,
+       0xb0, 0xee, 0x73, 0x1d, 0x16, 0xc2, 0x21, 0x8f, 0x86, 0x5c, 0x87, 0xa4, 0x5e, 0x5d, 0xd8, 0x0d,
+       0x5b, 0x3f, 0xae, 0x42, 0xc5, 0xcc, 0x71, 0xe8, 0xeb, 0x50, 0xdd, 0xa7, 0x5c, 0xff, 0xc2, 0xf5,
+       0x99, 0x73, 0x46, 0x64, 0x15, 0x40, 0x9f, 0xcd, 0x35, 0x48, 0xa3, 0xfe, 0x8c, 0xa1, 0x11, 0x6d,
+       0x5a, 0xe7, 0x33, 0x11, 0x89, 0xa4, 0xd7, 0x72, 0x20, 0xb5, 0xb4, 0x6f, 0xcd, 0x9b, 0x58, 0xd0,
+       0x0d, 0x8b, 0xd1, 0x6c, 0x58, 0x22, 0xb7, 0x95, 0x17, 0xae, 0x85, 0x0f, 0x67, 0x4f, 0x1c, 0xe8,
+       0xf5, 0x0c, 0x5e, 0x93, 0xa0, 0x44, 0xf0, 0x1b, 0xf9, 0xc0, 0x5a, 0xac, 0x9f, 0x3d, 0xb8, 0xa2,
+       0x0d, 0x8b, 0x4b, 0x16, 0x20, 0x11, 0xb7, 0x79, 0x3e, 0x50, 0x8b, 0xba, 0x6b, 0x0d, 0x26, 0xe8,
+       0x15, 0xeb, 0x58, 0xb2, 0x9b, 0x30, 0x7d, 0x75, 0x06, 0x55, 0x73, 0xfa, 0xda, 0xf8, 0x98, 0x80,
+       0x3e, 0x69, 0x0f, 0xc4, 0x16, 0x21, 0xe1, 0xb7, 0x3e, 0x1b, 0xa0, 0x59, 0x76, 0xb3, 0x5a, 0x6a,
+       0x64, 0x87, 0xe9, 0x34, 0x39, 0x61, 0xff, 0xb9, 0xf3, 0x60, 0x5a, 0xc8, 0x71, 0x66, 0x03, 0x86,
+       0xec, 0xe3, 0x19, 0xf4, 0x44, 0xcc, 0xc6, 0xb9, 0xb8, 0x54, 0x4e, 0xc6, 0xb3, 0x38, 0x26, 0x27,
+       0xeb, 0xd9, 0xcc, 0x92, 0x93, 0x8d, 0xd3, 0x72, 0x1e, 0x4f, 0xbe, 0x84, 0xe8, 0x53, 0x13, 0x86,
+       0x4e, 0x49, 0x09, 0xf7, 0xe6, 0x3c, 0x88, 0x66, 0xfc, 0x45, 0xf5, 0xfb, 0x3f, 0x1a, 0xfb, 0xf9,
+       0x94, 0x87, 0x51, 0xc2, 0xa4, 0x31, 0x4d, 0x50, 0x47, 0xb7, 0xbe, 0x57, 0x84, 0x9a, 0xf5, 0x30,
+       0xa0, 0x0f, 0xec, 0xe2, 0xb4, 0x91, 0x51, 0x76, 0xec, 0x37, 0x2e, 0x33, 0xaa, 0x67, 0x00, 0xb5,
+       0xaa, 0x67, 0x73, 0xde, 0x23, 0x94, 0x95, 0x8b, 0x53, 0xa8, 0x44, 0xe8, 0x8d, 0x9c, 0x68, 0x2d,
+       0xf9, 0x49, 0xc6, 0x53, 0x33, 0x56, 0x7e, 0xa7, 0xa8, 0x99, 0xe5, 0x37, 0x0b, 0xa5, 0x24, 0xbc,
+       0xe9, 0x5c, 0xc2, 0x11, 0x4f, 0x16, 0xe4, 0x1f, 0x7b, 0xb7, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff,
+       0x8a, 0x61, 0xfa, 0xcc, 0xeb, 0x1b, 0x00, 0x00,
 }
 
 // Reference imports to suppress errors if they are not otherwise used.
@@ -3329,127 +3527,3 @@ var _Provisioner_serviceDesc = grpc.ServiceDesc{
        },
        Metadata: "tfplugin5.proto",
 }
-
-func init() { proto.RegisterFile("tfplugin5.proto", fileDescriptor_tfplugin5_56820f4fb67360c5) }
-
-var fileDescriptor_tfplugin5_56820f4fb67360c5 = []byte{
-       // 1876 bytes of a gzipped FileDescriptorProto
-       0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0xcd, 0x6f, 0x23, 0x49,
-       0x15, 0x9f, 0x76, 0xdb, 0x89, 0xfd, 0x9c, 0x0f, 0xa7, 0x66, 0x76, 0x30, 0xbd, 0xbb, 0x10, 0xcc,
-       0x47, 0xb2, 0xda, 0x1d, 0xcf, 0x2a, 0x03, 0xbb, 0x4b, 0x18, 0xad, 0xc8, 0x66, 0x42, 0x26, 0x62,
-       0x26, 0x1b, 0xca, 0xf3, 0x81, 0x84, 0xb4, 0x56, 0x8d, 0xbb, 0xe2, 0x69, 0xc6, 0xee, 0xee, 0xad,
-       0x2e, 0x67, 0x62, 0x71, 0x44, 0x70, 0xe6, 0xc2, 0x87, 0xc4, 0xc7, 0x85, 0x03, 0xff, 0x00, 0x07,
-       0xe0, 0xc6, 0x89, 0x7f, 0x80, 0x1b, 0x70, 0x42, 0x70, 0x43, 0x1c, 0xe1, 0x82, 0x84, 0xea, 0xab,
-       0xbb, 0x6c, 0xb7, 0x93, 0x9e, 0x64, 0x57, 0x88, 0x5b, 0x57, 0xbd, 0x5f, 0xbd, 0xf7, 0xab, 0xf7,
-       0x5e, 0xbd, 0x57, 0x65, 0xc3, 0x2a, 0x3f, 0x8e, 0x07, 0xa3, 0x7e, 0x10, 0x7e, 0xa9, 0x1d, 0xb3,
-       0x88, 0x47, 0xa8, 0x96, 0x4e, 0xb4, 0x6e, 0xc3, 0xd2, 0x9d, 0x71, 0x48, 0x86, 0x41, 0xef, 0x11,
-       0x19, 0x8c, 0x28, 0x6a, 0xc2, 0xe2, 0x30, 0xe9, 0xc7, 0xa4, 0xf7, 0xac, 0xe9, 0xac, 0x3b, 0x9b,
-       0x4b, 0xd8, 0x0c, 0x11, 0x82, 0xf2, 0xb7, 0x93, 0x28, 0x6c, 0x96, 0xe4, 0xb4, 0xfc, 0x6e, 0xfd,
-       0xd5, 0x01, 0xb8, 0x13, 0x90, 0x7e, 0x18, 0x25, 0x3c, 0xe8, 0xa1, 0x6d, 0xa8, 0x26, 0xf4, 0x84,
-       0xb2, 0x80, 0x8f, 0xe5, 0xea, 0x95, 0xad, 0x4f, 0xb5, 0x33, 0xdb, 0x19, 0xb0, 0xdd, 0xd1, 0x28,
-       0x9c, 0xe2, 0x85, 0xe1, 0x64, 0x34, 0x1c, 0x12, 0x36, 0x96, 0x16, 0x6a, 0xd8, 0x0c, 0xd1, 0x75,
-       0x58, 0xf0, 0x29, 0x27, 0xc1, 0xa0, 0xe9, 0x4a, 0x81, 0x1e, 0xa1, 0xb7, 0xa0, 0x46, 0x38, 0x67,
-       0xc1, 0x93, 0x11, 0xa7, 0xcd, 0xf2, 0xba, 0xb3, 0x59, 0xdf, 0x6a, 0x5a, 0xe6, 0x76, 0x8c, 0xec,
-       0x88, 0xf0, 0xa7, 0x38, 0x83, 0xb6, 0x6e, 0x42, 0xd5, 0xd8, 0x47, 0x75, 0x58, 0x3c, 0x38, 0x7c,
-       0xb4, 0x73, 0xef, 0xe0, 0x4e, 0xe3, 0x0a, 0xaa, 0x41, 0x65, 0x0f, 0xe3, 0xf7, 0x71, 0xc3, 0x11,
-       0xf3, 0x8f, 0x77, 0xf0, 0xe1, 0xc1, 0xe1, 0x7e, 0xa3, 0xd4, 0xfa, 0xb3, 0x03, 0xcb, 0x13, 0xda,
-       0xd0, 0x2d, 0xa8, 0x24, 0x9c, 0xc6, 0x49, 0xd3, 0x59, 0x77, 0x37, 0xeb, 0x5b, 0xaf, 0xce, 0x33,
-       0xdb, 0xee, 0x70, 0x1a, 0x63, 0x85, 0xf5, 0x7e, 0xe8, 0x40, 0x59, 0x8c, 0xd1, 0x06, 0xac, 0xa4,
-       0x6c, 0xba, 0x21, 0x19, 0x52, 0xe9, 0xac, 0xda, 0xdd, 0x2b, 0x78, 0x39, 0x9d, 0x3f, 0x24, 0x43,
-       0x8a, 0xda, 0x80, 0xe8, 0x80, 0x0e, 0x69, 0xc8, 0xbb, 0xcf, 0xe8, 0xb8, 0x9b, 0x70, 0x16, 0x84,
-       0x7d, 0xe5, 0x9e, 0xbb, 0x57, 0x70, 0x43, 0xcb, 0xbe, 0x4e, 0xc7, 0x1d, 0x29, 0x41, 0x9b, 0xb0,
-       0x6a, 0xe3, 0x83, 0x90, 0x4b, 0x97, 0xb9, 0x42, 0x73, 0x06, 0x3e, 0x08, 0xf9, 0x7b, 0x20, 0x22,
-       0x35, 0xa0, 0x3d, 0x1e, 0xb1, 0xd6, 0x2d, 0x41, 0x2b, 0x8a, 0xbd, 0x1a, 0x2c, 0x62, 0xfa, 0xe1,
-       0x88, 0x26, 0xdc, 0x5b, 0x87, 0x2a, 0xa6, 0x49, 0x1c, 0x85, 0x09, 0x45, 0xd7, 0xa0, 0xb2, 0xc7,
-       0x58, 0xc4, 0x14, 0x49, 0xac, 0x06, 0xad, 0x1f, 0x39, 0x50, 0xc5, 0xe4, 0x79, 0x87, 0x13, 0x4e,
-       0xd3, 0xd4, 0x70, 0xb2, 0xd4, 0x40, 0xdb, 0xb0, 0x78, 0x3c, 0x20, 0x7c, 0x48, 0xe2, 0x66, 0x49,
-       0x3a, 0x69, 0xdd, 0x72, 0x92, 0x59, 0xd9, 0xfe, 0x9a, 0x82, 0xec, 0x85, 0x9c, 0x8d, 0xb1, 0x59,
-       0xe0, 0x6d, 0xc3, 0x92, 0x2d, 0x40, 0x0d, 0x70, 0x9f, 0xd1, 0xb1, 0x26, 0x20, 0x3e, 0x05, 0xa9,
-       0x13, 0x91, 0xaf, 0x3a, 0x57, 0xd4, 0x60, 0xbb, 0xf4, 0x8e, 0xd3, 0xfa, 0x7b, 0x05, 0x16, 0x3a,
-       0xbd, 0xa7, 0x74, 0x48, 0x44, 0x4a, 0x9d, 0x50, 0x96, 0x04, 0x9a, 0x99, 0x8b, 0xcd, 0x10, 0xdd,
-       0x80, 0xca, 0x93, 0x41, 0xd4, 0x7b, 0x26, 0x97, 0xd7, 0xb7, 0x3e, 0x61, 0x51, 0x53, 0x6b, 0xdb,
-       0xef, 0x09, 0x31, 0x56, 0x28, 0xef, 0x17, 0x0e, 0x54, 0xe4, 0xc4, 0x19, 0x2a, 0xbf, 0x02, 0x90,
-       0x06, 0x2f, 0xd1, 0x5b, 0x7e, 0x79, 0x56, 0x6f, 0x9a, 0x1e, 0xd8, 0x82, 0xa3, 0x77, 0xa1, 0x2e,
-       0x2d, 0x75, 0xf9, 0x38, 0xa6, 0x49, 0xd3, 0x9d, 0xc9, 0x2a, 0xbd, 0xfa, 0x90, 0x26, 0x9c, 0xfa,
-       0x8a, 0x1b, 0xc8, 0x15, 0x0f, 0xc4, 0x02, 0xef, 0x0f, 0x0e, 0xd4, 0x52, 0xcd, 0x22, 0x1c, 0x59,
-       0x56, 0x61, 0xf9, 0x2d, 0xe6, 0x84, 0x6e, 0x73, 0x7a, 0xc5, 0x37, 0x5a, 0x87, 0xba, 0x4f, 0x93,
-       0x1e, 0x0b, 0x62, 0x2e, 0x36, 0xa4, 0x4e, 0x97, 0x3d, 0x85, 0x3c, 0xa8, 0x32, 0xfa, 0xe1, 0x28,
-       0x60, 0xd4, 0x97, 0x27, 0xac, 0x8a, 0xd3, 0xb1, 0x90, 0x45, 0x12, 0x45, 0x06, 0xcd, 0x8a, 0x92,
-       0x99, 0xb1, 0x90, 0xf5, 0xa2, 0x61, 0x3c, 0xe2, 0xd4, 0x6f, 0x2e, 0x28, 0x99, 0x19, 0xa3, 0x57,
-       0xa0, 0x96, 0xd0, 0x30, 0x09, 0x78, 0x70, 0x42, 0x9b, 0x8b, 0x52, 0x98, 0x4d, 0x78, 0xbf, 0x2a,
-       0x41, 0xdd, 0xda, 0x25, 0x7a, 0x19, 0x6a, 0x82, 0xab, 0x75, 0x4c, 0x70, 0x55, 0x4c, 0xc8, 0xf3,
-       0xf1, 0x62, 0x61, 0x44, 0xbb, 0xb0, 0x18, 0xd2, 0x84, 0x8b, 0x33, 0xe4, 0xca, 0xea, 0xf4, 0xda,
-       0x99, 0x1e, 0x96, 0xdf, 0x41, 0xd8, 0xbf, 0x1f, 0xf9, 0x14, 0x9b, 0x95, 0x82, 0xd0, 0x30, 0x08,
-       0xbb, 0x01, 0xa7, 0xc3, 0x44, 0xfa, 0xc4, 0xc5, 0xd5, 0x61, 0x10, 0x1e, 0x88, 0xb1, 0x14, 0x92,
-       0x53, 0x2d, 0xac, 0x68, 0x21, 0x39, 0x95, 0xc2, 0xd6, 0x7d, 0xb5, 0x33, 0xad, 0x71, 0xb2, 0xf4,
-       0x00, 0x2c, 0x74, 0x0e, 0x0e, 0xf7, 0xef, 0xed, 0x35, 0x1c, 0x54, 0x85, 0xf2, 0xbd, 0x83, 0xce,
-       0x83, 0x46, 0x09, 0x2d, 0x82, 0xdb, 0xd9, 0x7b, 0xd0, 0x70, 0xc5, 0xc7, 0xfd, 0x9d, 0xa3, 0x46,
-       0x59, 0x94, 0xa8, 0x7d, 0xfc, 0xfe, 0xc3, 0xa3, 0x46, 0xa5, 0xf5, 0x93, 0x32, 0xac, 0xed, 0x53,
-       0x7e, 0xc4, 0xa2, 0x93, 0xc0, 0xa7, 0x4c, 0xf1, 0xb7, 0x0f, 0xf1, 0xbf, 0x5c, 0xeb, 0x14, 0xdf,
-       0x80, 0x6a, 0xac, 0x91, 0xd2, 0x8d, 0xf5, 0xad, 0xb5, 0x99, 0xcd, 0xe3, 0x14, 0x82, 0x28, 0x34,
-       0x18, 0x4d, 0xa2, 0x11, 0xeb, 0xd1, 0x6e, 0x22, 0x85, 0x26, 0xa7, 0xb7, 0xad, 0x65, 0x33, 0xe6,
-       0xdb, 0xc6, 0x9e, 0xf8, 0x90, 0xab, 0xd5, 0x7c, 0xa2, 0x0e, 0xf8, 0x2a, 0x9b, 0x9c, 0x45, 0x03,
-       0xb8, 0xea, 0x13, 0x4e, 0xba, 0x53, 0x96, 0x54, 0xfe, 0xdf, 0x2e, 0x66, 0xe9, 0x0e, 0xe1, 0xa4,
-       0x33, 0x6b, 0x6b, 0xcd, 0x9f, 0x9e, 0x47, 0x6f, 0x43, 0xdd, 0x4f, 0x7b, 0x90, 0x08, 0x9e, 0xb0,
-       0xf2, 0x52, 0x6e, 0x87, 0xc2, 0x36, 0xd2, 0x7b, 0x08, 0xd7, 0xf2, 0xf6, 0x93, 0x53, 0x97, 0x36,
-       0xec, 0xba, 0x94, 0xeb, 0xe3, 0xac, 0x54, 0x79, 0x8f, 0xe1, 0x7a, 0x3e, 0xf9, 0x4b, 0x2a, 0x6e,
-       0xfd, 0xc9, 0x81, 0x97, 0x8e, 0x18, 0x8d, 0x09, 0xa3, 0xc6, 0x6b, 0xbb, 0x51, 0x78, 0x1c, 0xf4,
-       0xbd, 0xed, 0x34, 0x3d, 0xd0, 0x4d, 0x58, 0xe8, 0xc9, 0x49, 0x9d, 0x0f, 0xf6, 0xe9, 0xb1, 0xaf,
-       0x04, 0x58, 0xc3, 0xbc, 0xef, 0x39, 0x56, 0x3e, 0x7d, 0x15, 0x56, 0x63, 0x65, 0xc1, 0xef, 0x16,
-       0x53, 0xb3, 0x62, 0xf0, 0x8a, 0xca, 0x74, 0x34, 0x4a, 0x45, 0xa3, 0xd1, 0xfa, 0x41, 0x09, 0xae,
-       0x3d, 0x8c, 0xfb, 0x8c, 0xf8, 0x34, 0x8d, 0x8a, 0x68, 0x26, 0x1e, 0xcb, 0x36, 0x77, 0x66, 0xd9,
-       0xb0, 0x8a, 0x78, 0x69, 0xb2, 0x88, 0xbf, 0x09, 0x35, 0x46, 0x9e, 0x77, 0x13, 0xa1, 0x4e, 0xd6,
-       0x88, 0xfa, 0xd6, 0xd5, 0x9c, 0xb6, 0x85, 0xab, 0x4c, 0x7f, 0x79, 0xdf, 0xb5, 0x9d, 0xf2, 0x2e,
-       0xac, 0x8c, 0x14, 0x31, 0x5f, 0xeb, 0x38, 0xc7, 0x27, 0xcb, 0x06, 0xae, 0xfa, 0xe8, 0x85, 0x5d,
-       0xf2, 0x3b, 0x07, 0xbc, 0x47, 0x64, 0x10, 0xf8, 0x82, 0x9c, 0xf6, 0x89, 0xe8, 0x0c, 0x3a, 0xea,
-       0x8f, 0x0b, 0x3a, 0x26, 0x4b, 0x89, 0x52, 0xb1, 0x94, 0xd8, 0xb5, 0x36, 0x3f, 0x45, 0xde, 0x29,
-       0x4c, 0xfe, 0x37, 0x0e, 0x34, 0x0d, 0xf9, 0xec, 0x3c, 0xfc, 0x5f, 0x50, 0xff, 0xad, 0x03, 0x35,
-       0x45, 0x74, 0xc4, 0xa8, 0xd7, 0xcf, 0xb8, 0xbe, 0x0e, 0x6b, 0x9c, 0x32, 0x46, 0x8e, 0x23, 0x36,
-       0xec, 0xda, 0x37, 0x86, 0x1a, 0x6e, 0xa4, 0x82, 0x47, 0x3a, 0xeb, 0xfe, 0x37, 0xdc, 0xff, 0xe9,
-       0xc0, 0x12, 0xa6, 0xc4, 0x37, 0xf9, 0xe2, 0xf9, 0x05, 0x5d, 0x7d, 0x1b, 0x96, 0x7b, 0x23, 0xc6,
-       0xc4, 0x2d, 0x53, 0x25, 0xf9, 0x39, 0xac, 0x97, 0x34, 0x5a, 0x1d, 0x98, 0xb1, 0xc5, 0xfd, 0x8b,
-       0x50, 0x0b, 0xe9, 0xf3, 0x62, 0x47, 0xa5, 0x1a, 0xd2, 0xe7, 0x97, 0x3c, 0x25, 0xbf, 0x2e, 0x03,
-       0x3a, 0x1a, 0x90, 0xd0, 0xec, 0x78, 0xf7, 0x29, 0x09, 0xfb, 0xd4, 0xfb, 0x8f, 0x53, 0x70, 0xe3,
-       0xef, 0x40, 0x3d, 0x66, 0x41, 0xc4, 0x8a, 0x6d, 0x1b, 0x24, 0x56, 0x51, 0xde, 0x03, 0x14, 0xb3,
-       0x28, 0x8e, 0x12, 0xea, 0x77, 0xb3, 0x1d, 0xbb, 0x67, 0x2b, 0x68, 0x98, 0x25, 0x87, 0x66, 0xe7,
-       0x59, 0xa2, 0x94, 0x0b, 0x25, 0x0a, 0xfa, 0x2c, 0x2c, 0x2b, 0xc6, 0x31, 0x0b, 0x4e, 0x84, 0xc9,
-       0x8a, 0xbc, 0xfe, 0x2d, 0xc9, 0xc9, 0x23, 0x35, 0xe7, 0xfd, 0xbc, 0x64, 0x85, 0xe4, 0x36, 0x2c,
-       0xc7, 0x03, 0x12, 0x86, 0x45, 0x2b, 0xd8, 0x92, 0x46, 0x2b, 0x82, 0xbb, 0xe2, 0xda, 0x20, 0xef,
-       0x87, 0x49, 0x97, 0xd1, 0x78, 0x40, 0x7a, 0x54, 0xc7, 0x67, 0xfe, 0xcb, 0x6c, 0xd5, 0xac, 0xc0,
-       0x6a, 0x01, 0xda, 0x80, 0x55, 0x43, 0xc1, 0xd0, 0x76, 0x25, 0xed, 0x15, 0x3d, 0xad, 0x89, 0x5f,
-       0xb8, 0x9f, 0xa3, 0x37, 0x00, 0x0d, 0x68, 0x9f, 0xf4, 0xc6, 0xf2, 0xbe, 0xdd, 0x4d, 0xc6, 0x09,
-       0xa7, 0x43, 0x7d, 0x89, 0x6d, 0x28, 0x89, 0xa8, 0x9e, 0x1d, 0x39, 0xdf, 0xfa, 0xa3, 0x0b, 0x57,
-       0x77, 0xe2, 0x78, 0x30, 0x9e, 0xca, 0x9b, 0x7f, 0x7f, 0xfc, 0x79, 0x33, 0x13, 0x0d, 0xf7, 0x45,
-       0xa2, 0xf1, 0xc2, 0xe9, 0x92, 0xe3, 0xf9, 0x4a, 0x9e, 0xe7, 0xbd, 0xdf, 0x3b, 0x97, 0x3e, 0xc5,
-       0x4d, 0x58, 0x34, 0x36, 0xd4, 0x9b, 0xc4, 0x0c, 0xa7, 0xc3, 0xea, 0x5e, 0x32, 0xac, 0xe5, 0x39,
-       0x61, 0xfd, 0x47, 0x09, 0xae, 0x1e, 0x0c, 0xe3, 0x88, 0xf1, 0xc9, 0x5b, 0xc4, 0x5b, 0x05, 0xa3,
-       0xba, 0x02, 0xa5, 0xc0, 0xd7, 0xef, 0xcf, 0x52, 0xe0, 0x7b, 0xa7, 0xd0, 0x50, 0xea, 0x68, 0x5a,
-       0x52, 0xcf, 0x7d, 0xbd, 0x14, 0x4a, 0x08, 0x85, 0xb2, 0x1d, 0xe6, 0x4e, 0x38, 0xcc, 0xfb, 0xa5,
-       0x1d, 0x8d, 0x0f, 0x00, 0x05, 0x9a, 0x46, 0xd7, 0x5c, 0xb7, 0x4d, 0x5b, 0xb8, 0x69, 0x99, 0xc8,
-       0xd9, 0x7a, 0x7b, 0x9a, 0x3f, 0x5e, 0x0b, 0xa6, 0x66, 0x92, 0x8b, 0x57, 0xdf, 0xbf, 0x38, 0xb0,
-       0x22, 0xfa, 0x4d, 0xd6, 0xe2, 0x3f, 0xbe, 0xe6, 0xce, 0x26, 0x5e, 0x3e, 0x95, 0x42, 0xa9, 0xa9,
-       0xdd, 0x7c, 0xe1, 0xfd, 0xfd, 0xd4, 0x81, 0x6b, 0xe6, 0x99, 0x22, 0xda, 0x7a, 0xde, 0x93, 0xec,
-       0xd4, 0xe2, 0x75, 0x4b, 0x54, 0x85, 0x14, 0x3b, 0xff, 0x51, 0x66, 0xa3, 0x2e, 0xce, 0xee, 0x67,
-       0x0e, 0x7c, 0xd2, 0x5c, 0xb2, 0x2c, 0x8a, 0x1f, 0xc1, 0xb3, 0xe0, 0x23, 0xb9, 0x8c, 0xfc, 0xcd,
-       0x81, 0xb5, 0x94, 0x56, 0x7a, 0x23, 0x49, 0x2e, 0x4e, 0x0b, 0xbd, 0x0d, 0xd0, 0x8b, 0xc2, 0x90,
-       0xf6, 0xb8, 0xb9, 0xe7, 0x9f, 0x55, 0x73, 0x33, 0xa8, 0xf7, 0x2d, 0x6b, 0x3f, 0xd7, 0x61, 0x21,
-       0x1a, 0xf1, 0x78, 0xc4, 0x75, 0x4a, 0xea, 0xd1, 0x85, 0xc3, 0xb0, 0xf5, 0xe3, 0x1a, 0x54, 0xcd,
-       0x93, 0x0c, 0x7d, 0x13, 0x6a, 0xfb, 0x94, 0xeb, 0x1f, 0xab, 0x3e, 0x77, 0xce, 0x6b, 0x57, 0x25,
-       0xd0, 0xe7, 0x0b, 0xbd, 0x89, 0xd1, 0x60, 0xce, 0xfb, 0x0f, 0x6d, 0x5a, 0xeb, 0x73, 0x11, 0xa9,
-       0xa5, 0xd7, 0x0a, 0x20, 0xb5, 0xb5, 0xef, 0x9c, 0xf5, 0xf8, 0x40, 0x37, 0x2c, 0x45, 0xf3, 0x61,
-       0xa9, 0xdd, 0x76, 0x51, 0xb8, 0x36, 0x3e, 0x9a, 0xff, 0x78, 0x40, 0xaf, 0xe7, 0xe8, 0x9a, 0x06,
-       0xa5, 0x86, 0xdf, 0x28, 0x06, 0xd6, 0x66, 0x83, 0xfc, 0x37, 0x28, 0xda, 0xb0, 0xb4, 0xe4, 0x01,
-       0x52, 0x73, 0x9b, 0xe7, 0x03, 0xb5, 0xa9, 0xbb, 0xd6, 0x1b, 0x03, 0xbd, 0x62, 0x2d, 0x4b, 0x67,
-       0x53, 0xa5, 0xaf, 0xce, 0x91, 0x6a, 0x4d, 0xdf, 0x98, 0xbc, 0xf1, 0xa3, 0x4f, 0xdb, 0x6f, 0x5b,
-       0x4b, 0x90, 0xea, 0x5b, 0x9f, 0x0f, 0xd0, 0x2a, 0x7b, 0x79, 0x57, 0x6a, 0x64, 0xa7, 0xe9, 0xac,
-       0x38, 0x55, 0xff, 0x85, 0xf3, 0x60, 0xda, 0xc8, 0x71, 0xee, 0x05, 0x0c, 0xd9, 0xcb, 0x73, 0xe4,
-       0xa9, 0x99, 0x8d, 0x73, 0x71, 0x99, 0x9d, 0x9c, 0xb6, 0x38, 0x61, 0x27, 0xaf, 0x6d, 0xe6, 0xd9,
-       0xc9, 0xc7, 0x69, 0x3b, 0x8f, 0xa7, 0x3b, 0x21, 0xfa, 0xcc, 0x94, 0xa3, 0x33, 0x51, 0xaa, 0xbd,
-       0x75, 0x16, 0x44, 0x2b, 0xfe, 0xb2, 0xfa, 0x29, 0x1f, 0x4d, 0xfc, 0x12, 0xca, 0xa3, 0x38, 0x55,
-       0xd2, 0x9c, 0x15, 0xa8, 0xa5, 0x5b, 0xdf, 0x77, 0xa1, 0x6e, 0x35, 0x06, 0xf4, 0x81, 0x5d, 0x9c,
-       0x36, 0x72, 0xca, 0x8e, 0xdd, 0xe3, 0x72, 0xb3, 0x7a, 0x0e, 0x50, 0x53, 0x3d, 0x3d, 0xa3, 0x1f,
-       0xa1, 0xbc, 0xb3, 0x38, 0x83, 0x4a, 0x8d, 0xde, 0x28, 0x88, 0xd6, 0x96, 0x9f, 0xe4, 0xb4, 0x9a,
-       0x89, 0xf2, 0x3b, 0x23, 0xcd, 0x2d, 0xbf, 0x79, 0x28, 0x65, 0xe1, 0x4d, 0xe7, 0x12, 0x81, 0x78,
-       0xb2, 0x20, 0xff, 0xa3, 0xbb, 0xf5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x16, 0x0b, 0x32,
-       0xb6, 0x1b, 0x00, 0x00,
-}
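
The regenerated init functions above register the tfplugin5 enums, message types, and the gzipped file descriptor with the golang/protobuf runtime. As a minimal sketch of what those registrations expose (illustrative only: the tfplugin5 package is internal to Terraform, so the blank import below would not compile outside that module):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	// Illustrative import; internal packages are not importable from
	// outside the Terraform module.
	_ "github.com/hashicorp/terraform/internal/tfplugin5"
)

func main() {
	// proto.RegisterFile stored the gzipped FileDescriptorProto under
	// the proto file name.
	fd := proto.FileDescriptor("tfplugin5.proto")
	fmt.Printf("descriptor: %d gzipped bytes\n", len(fd))

	// proto.RegisterEnum made the severity mapping queryable by name.
	sev := proto.EnumValueMap("tfplugin5.Diagnostic_Severity")
	fmt.Println(sev["ERROR"]) // 1
}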
diff --git a/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto b/vendor/github.com/hashicorp/terraform/internal/tfplugin5/tfplugin5.proto
deleted file mode 100644
index 370faf7..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-// Terraform Plugin RPC protocol version 5.0
-//
-// This file defines version 5.0 of the RPC protocol. To implement a plugin
-// against this protocol, copy this definition into your own codebase and
-// use protoc to generate stubs for your target language.
-//
-// This file will be updated in-place in the source Terraform repository for
-// any minor versions of protocol 5, but later minor versions will always be
-// backwards compatible. Breaking changes, if any are required, will come
-// in a subsequent major version with its own separate proto definition.
-//
-// Note that only the proto files included in a release tag of Terraform are
-// official protocol releases. Proto files taken from other commits may include
-// incomplete changes or features that did not make it into a final release.
-// In all reasonable cases, plugin developers should take the proto file from
-// the tag of the most recent release of Terraform, and not from the master
-// branch or any other development branch.
-//
-syntax = "proto3";
-
-package tfplugin5;
-
-// DynamicValue is an opaque encoding of terraform data, with the field name
-// indicating the encoding scheme used.
-message DynamicValue {
-    bytes msgpack = 1;
-    bytes json = 2;
-}
-
-message Diagnostic {
-    enum Severity {
-        INVALID = 0;
-        ERROR = 1;
-        WARNING = 2;
-    }
-    Severity severity = 1;
-    string summary = 2;
-    string detail = 3;
-    AttributePath attribute = 4;
-}
-
-message AttributePath {
-    message Step {
-        oneof selector {
-            // Set "attribute_name" to represent looking up an attribute
-            // in the current object value.
-            string attribute_name = 1;
-            // Set "element_key_*" to represent looking up an element in
-            // an indexable collection type.
-            string element_key_string = 2;
-            int64 element_key_int = 3;
-        }
-    }
-    repeated Step steps = 1;
-}
-
-message Stop {
-    message Request {
-    }
-    message Response {
-               string Error = 1;
-    }
-}
-
-// RawState holds the stored state for a resource to be upgraded by the
-// provider. It can be in one of two formats, the current json encoded format
-// in bytes, or the legacy flatmap format as a map of strings.
-message RawState {
-    bytes json = 1;
-    map<string, string> flatmap = 2;
-}
-
-// Schema is the configuration schema for a Resource, Provider, or Provisioner.
-message Schema {
-    message Block {
-        int64 version = 1;
-        repeated Attribute attributes = 2;
-        repeated NestedBlock block_types = 3;
-    }
-
-    message Attribute {
-        string name = 1;
-        bytes type = 2;
-        string description = 3;
-        bool required = 4;
-        bool optional = 5;
-        bool computed = 6;
-        bool sensitive = 7;
-    }
-
-    message NestedBlock {
-        enum NestingMode {
-            INVALID = 0;
-            SINGLE = 1;
-            LIST = 2;
-            SET = 3;
-            MAP = 4;
-            GROUP = 5;
-        }
-
-        string type_name = 1;
-        Block block = 2;
-        NestingMode nesting = 3;
-        int64 min_items = 4;
-        int64 max_items = 5;
-    }
-
-    // The version of the schema.
-    // Schemas are versioned, so that providers can upgrade a saved resource
-    // state when the schema is changed. 
-    int64 version = 1;
-
-    // Block is the top level configuration block for this schema.
-    Block block = 2;
-}
-
-service Provider {
-    //////// Information about what a provider supports/expects
-    rpc GetSchema(GetProviderSchema.Request) returns (GetProviderSchema.Response);
-    rpc PrepareProviderConfig(PrepareProviderConfig.Request) returns (PrepareProviderConfig.Response);
-    rpc ValidateResourceTypeConfig(ValidateResourceTypeConfig.Request) returns (ValidateResourceTypeConfig.Response);
-    rpc ValidateDataSourceConfig(ValidateDataSourceConfig.Request) returns (ValidateDataSourceConfig.Response);
-    rpc UpgradeResourceState(UpgradeResourceState.Request) returns (UpgradeResourceState.Response);
-
-    //////// One-time initialization, called before other functions below
-    rpc Configure(Configure.Request) returns (Configure.Response);
-
-    //////// Managed Resource Lifecycle
-    rpc ReadResource(ReadResource.Request) returns (ReadResource.Response);
-    rpc PlanResourceChange(PlanResourceChange.Request) returns (PlanResourceChange.Response);
-    rpc ApplyResourceChange(ApplyResourceChange.Request) returns (ApplyResourceChange.Response);
-    rpc ImportResourceState(ImportResourceState.Request) returns (ImportResourceState.Response);
-
-    rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
-
-    //////// Graceful Shutdown
-    rpc Stop(Stop.Request) returns (Stop.Response);
-}
-
-message GetProviderSchema {
-    message Request {
-    }
-    message Response {
-        Schema provider = 1;
-        map<string, Schema> resource_schemas = 2;
-        map<string, Schema> data_source_schemas = 3;
-        repeated Diagnostic diagnostics = 4;
-    }
-}
-
-message PrepareProviderConfig {
-    message Request {
-        DynamicValue config = 1;
-    }
-    message Response {
-        DynamicValue prepared_config = 1;
-        repeated Diagnostic diagnostics = 2;
-    }
-}
-
-message UpgradeResourceState {
-    message Request {
-        string type_name = 1;
-
-        // version is the schema_version number recorded in the state file
-        int64 version = 2;
-
-        // raw_state is the raw states as stored for the resource.  Core does
-        // not have access to the schema of prior_version, so it's the
-        // provider's responsibility to interpret this value using the
-        // appropriate older schema. The raw_state will be the json encoded
-        // state, or a legacy flat-mapped format.
-        RawState raw_state = 3;
-    }
-    message Response {
-        // new_state is a msgpack-encoded data structure that, when interpreted with
-        // the _current_ schema for this resource type, is functionally equivalent to
-        // that which was given in prior_state_raw.
-        DynamicValue upgraded_state = 1;
-
-        // diagnostics describes any errors encountered during migration that could not
-        // be safely resolved, and warnings about any possibly-risky assumptions made
-        // in the upgrade process.
-        repeated Diagnostic diagnostics = 2;
-    }
-}
-
-message ValidateResourceTypeConfig {
-    message Request {
-        string type_name = 1;
-        DynamicValue config = 2;
-    }
-    message Response {
-        repeated Diagnostic diagnostics = 1;
-    }
-}
-
-message ValidateDataSourceConfig {
-    message Request {
-        string type_name = 1;
-        DynamicValue config = 2;
-    }
-    message Response {
-        repeated Diagnostic diagnostics = 1;
-    }
-}
-
-message Configure {
-    message Request {
-        string terraform_version = 1;
-        DynamicValue config = 2;
-    }
-    message Response {
-        repeated Diagnostic diagnostics = 1;
-    }
-}
-
-message ReadResource {
-    message Request {
-        string type_name = 1;
-        DynamicValue current_state = 2;
-    }
-    message Response {
-        DynamicValue new_state = 1;
-        repeated Diagnostic diagnostics = 2;
-    }
-}
-
-message PlanResourceChange {
-    message Request {
-        string type_name = 1;
-        DynamicValue prior_state = 2;
-        DynamicValue proposed_new_state = 3;
-        DynamicValue config = 4;
-        bytes prior_private = 5; 
-    }
-
-    message Response {
-        DynamicValue planned_state = 1;
-        repeated AttributePath requires_replace = 2;
-        bytes planned_private = 3; 
-        repeated Diagnostic diagnostics = 4;
-
-
-        // This may be set only by the helper/schema "SDK" in the main Terraform
-        // repository, to request that Terraform Core >=0.12 permit additional
-        // inconsistencies that can result from the legacy SDK type system
-        // and its imprecise mapping to the >=0.12 type system.
-        // The change in behavior implied by this flag makes sense only for the
-        // specific details of the legacy SDK type system, and are not a general
-        // mechanism to avoid proper type handling in providers.
-        //
-        //     ====              DO NOT USE THIS              ====
-        //     ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
-        //     ====              DO NOT USE THIS              ====
-        bool legacy_type_system = 5;
-    }
-}
-
-message ApplyResourceChange {
-    message Request {
-        string type_name = 1;
-        DynamicValue prior_state = 2;
-        DynamicValue planned_state = 3;
-        DynamicValue config = 4;
-        bytes planned_private = 5; 
-    }
-    message Response {
-        DynamicValue new_state = 1;
-        bytes private = 2; 
-        repeated Diagnostic diagnostics = 3;
-
-        // This may be set only by the helper/schema "SDK" in the main Terraform
-        // repository, to request that Terraform Core >=0.12 permit additional
-        // inconsistencies that can result from the legacy SDK type system
-        // and its imprecise mapping to the >=0.12 type system.
-        // The change in behavior implied by this flag makes sense only for the
-        // specific details of the legacy SDK type system, and are not a general
-        // mechanism to avoid proper type handling in providers.
-        //
-        //     ====              DO NOT USE THIS              ====
-        //     ==== THIS MUST BE LEFT UNSET IN ALL OTHER SDKS ====
-        //     ====              DO NOT USE THIS              ====
-        bool legacy_type_system = 4;
-    }
-}
-
-message ImportResourceState {
-    message Request {
-        string type_name = 1;
-        string id = 2;
-    }
-
-    message ImportedResource {
-        string type_name = 1;
-        DynamicValue state = 2;
-        bytes private = 3;
-    }
-
-    message Response {
-        repeated ImportedResource imported_resources = 1;
-        repeated Diagnostic diagnostics = 2;
-    }
-}
-
-message ReadDataSource {
-    message Request {
-        string type_name = 1;
-        DynamicValue config = 2;
-    }
-    message Response {
-        DynamicValue state = 1;
-        repeated Diagnostic diagnostics = 2;
-    }
-}
-
-service Provisioner {
-    rpc GetSchema(GetProvisionerSchema.Request) returns (GetProvisionerSchema.Response);
-    rpc ValidateProvisionerConfig(ValidateProvisionerConfig.Request) returns (ValidateProvisionerConfig.Response);
-    rpc ProvisionResource(ProvisionResource.Request) returns (stream ProvisionResource.Response);
-    rpc Stop(Stop.Request) returns (Stop.Response);
-}
-
-message GetProvisionerSchema {
-    message Request {
-    }
-    message Response {
-        Schema provisioner = 1;
-        repeated Diagnostic diagnostics = 2;
-    }
-}
-
-message ValidateProvisionerConfig {
-    message Request {
-        DynamicValue config = 1;
-    }
-    message Response {
-        repeated Diagnostic diagnostics = 1;
-    }
-}
-
-message ProvisionResource {
-    message Request {
-        DynamicValue config = 1;
-        DynamicValue connection = 2;
-    }
-    message Response {
-        string output  = 1;
-        repeated Diagnostic diagnostics = 2;
-    }   
-}
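
Although this copy of the .proto definition is removed from the vendor tree, the generated Go package above still implements it. Below is a skeletal sketch of serving one RPC of the Provider service, assuming the standard protoc-gen-go/grpc identifiers for this file (ProviderServer, RegisterProviderServer, and the request/response types shown earlier); a real plugin would implement every RPC and speak go-plugin's handshake rather than a bare gRPC listener:

package main

import (
	"context"
	"net"

	"google.golang.org/grpc"

	tfplugin5 "github.com/hashicorp/terraform/internal/tfplugin5"
)

// provider embeds the generated interface so this sketch compiles
// without implementing every RPC (calling an unimplemented one would
// panic on the nil embedded value).
type provider struct {
	tfplugin5.ProviderServer
}

func (p *provider) GetSchema(ctx context.Context, req *tfplugin5.GetProviderSchema_Request) (*tfplugin5.GetProviderSchema_Response, error) {
	return &tfplugin5.GetProviderSchema_Response{
		Provider: &tfplugin5.Schema{
			Version: 1,
			Block:   &tfplugin5.Schema_Block{},
		},
	}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	tfplugin5.RegisterProviderServer(srv, &provider{})
	_ = srv.Serve(lis)
}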
index 2f2463a5cdb4f4c27288652bd7165a1ccdca92d4..47a02565922899b2f9967d9f144aecf54f66a307 100644
@@ -55,10 +55,11 @@ func effectiveSchema(given *hcl.BodySchema, body hcl.Body, ambiguousNames map[st
                                },
                        }
                        content, _, _ = body.PartialContent(&probeSchema)
-                       if len(content.Blocks) > 0 {
-                               // No attribute present and at least one block present, so
-                               // we'll need to rewrite this one as a block for a successful
-                               // result.
+                       if len(content.Blocks) > 0 || dynamicExpanded {
+                               // A dynamic block with an empty iterator returns nothing.
+                               // If there's no attribute and we have either a block or a
+                               // dynamic expansion, we need to rewrite this one as a
+                               // block for a successful result.
                                appearsAsBlock[name] = struct{}{}
                        }
                }
index e123b8aab73a636c339370787c77a9d3beb2f1bd..b172805a0b6964506cdd8cc00f22f9e0c9cd1d55 100644
@@ -33,7 +33,7 @@ func walkVariables(node dynblock.WalkVariablesNode, body hcl.Body, schema *confi
        for _, child := range children {
                if blockS, exists := schema.BlockTypes[child.BlockTypeName]; exists {
                        vars = append(vars, walkVariables(child.Node, child.Body(), &blockS.Block)...)
-               } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists {
+               } else if attrS, exists := schema.Attributes[child.BlockTypeName]; exists && attrS.Type.ElementType().IsObjectType() {
                        synthSchema := SchemaForCtyElementType(attrS.Type.ElementType())
                        vars = append(vars, walkVariables(child.Node, child.Body(), synthSchema)...)
                }
index 80313d6c0ba88d03bb25680d264f3eb4880c0577..eca588ecba04b485042aa049f69addd7e8e715ff 100644
@@ -23,6 +23,7 @@ type Data interface {
        StaticValidateReferences(refs []*addrs.Reference, self addrs.Referenceable) tfdiags.Diagnostics
 
        GetCountAttr(addrs.CountAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
+       GetForEachAttr(addrs.ForEachAttr, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
        GetResourceInstance(addrs.ResourceInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
        GetLocalValue(addrs.LocalValue, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
        GetModuleInstance(addrs.ModuleCallInstance, tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics)
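
A hypothetical implementation of the new method is sketched below; the evalData type and its fields are invented for illustration, and only the key and value attributes exist for "each" in the language:

package example

import (
	"github.com/hashicorp/terraform/addrs"
	"github.com/hashicorp/terraform/tfdiags"
	"github.com/zclconf/go-cty/cty"
)

// evalData is a stand-in for a concrete Data implementation that knows
// which for_each instance is currently being evaluated.
type evalData struct {
	eachKey string
	eachVal cty.Value
}

func (d *evalData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
	var diags tfdiags.Diagnostics
	switch addr.Name {
	case "key":
		return cty.StringVal(d.eachKey), diags
	case "value":
		return d.eachVal, diags
	default:
		diags = diags.Append(tfdiags.Sourceless(
			tfdiags.Error,
			`Invalid "each" attribute`,
			`Only each.key and each.value are supported.`,
		))
		return cty.DynamicVal, diags
	}
}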
index a3fb363ec70682dc92659132038e50c83110bea8..a8fe8b662de33edc222860d1ddaebadec4ddbaf5 100644
@@ -203,6 +203,7 @@ func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceabl
        pathAttrs := map[string]cty.Value{}
        terraformAttrs := map[string]cty.Value{}
        countAttrs := map[string]cty.Value{}
+       forEachAttrs := map[string]cty.Value{}
        var self cty.Value
 
        for _, ref := range refs {
@@ -334,6 +335,14 @@ func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceabl
                                self = val
                        }
 
+               case addrs.ForEachAttr:
+                       val, valDiags := normalizeRefValue(s.Data.GetForEachAttr(subj, rng))
+                       diags = diags.Append(valDiags)
+                       forEachAttrs[subj.Name] = val
+                       if isSelf {
+                               self = val
+                       }
+
                default:
                        // Should never happen
                        panic(fmt.Errorf("Scope.buildEvalContext cannot handle address type %T", rawSubj))
@@ -350,6 +359,7 @@ func (s *Scope) evalContext(refs []*addrs.Reference, selfAddr addrs.Referenceabl
        vals["path"] = cty.ObjectVal(pathAttrs)
        vals["terraform"] = cty.ObjectVal(terraformAttrs)
        vals["count"] = cty.ObjectVal(countAttrs)
+       vals["each"] = cty.ObjectVal(forEachAttrs)
        if self != cty.NilVal {
                vals["self"] = self
        }
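
With the hunks above, expressions evaluated through Scope can refer to each.key and each.value. A standalone sketch of that eval context from HCL's side, using the hcl2 packages this tree vendors (the expression and values are made up):

package main

import (
	"fmt"

	"github.com/hashicorp/hcl2/hcl"
	"github.com/hashicorp/hcl2/hclparse"
	"github.com/zclconf/go-cty/cty"
)

func main() {
	src := []byte(`name = "instance-${each.key}"`)

	f, diags := hclparse.NewParser().ParseHCL(src, "example.hcl")
	if diags.HasErrors() {
		panic(diags.Error())
	}

	// The equivalent of vals["each"] = cty.ObjectVal(forEachAttrs).
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"each": cty.ObjectVal(map[string]cty.Value{
				"key":   cty.StringVal("web"),
				"value": cty.StringVal("10.0.0.1"),
			}),
		},
	}

	attrs, moreDiags := f.Body.JustAttributes()
	if moreDiags.HasErrors() {
		panic(moreDiags.Error())
	}
	val, valDiags := attrs["name"].Expr.Value(ctx)
	if valDiags.HasErrors() {
		panic(valDiags.Error())
	}
	fmt.Println(val.AsString()) // instance-web
}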
index 71b7a846667fa4e6411621313e4c97500999ea83..bcccc1fd2994fa4ac9707564d176573d3074dc1b 100644
@@ -246,7 +246,7 @@ var CompactFunc = function.New(&function.Spec{
 
                for it := listVal.ElementIterator(); it.Next(); {
                        _, v := it.Element()
-                       if v.AsString() == "" {
+                       if v.IsNull() || v.AsString() == "" {
                                continue
                        }
                        outputList = append(outputList, v)
@@ -363,6 +363,9 @@ var DistinctFunc = function.New(&function.Spec{
                        }
                }
 
+               if len(list) == 0 {
+                       return cty.ListValEmpty(retType.ElementType()), nil
+               }
                return cty.ListVal(list), nil
        },
 })
@@ -389,6 +392,10 @@ var ChunklistFunc = function.New(&function.Spec{
                        return cty.UnknownVal(retType), nil
                }
 
+               if listVal.LengthInt() == 0 {
+                       return cty.ListValEmpty(listVal.Type()), nil
+               }
+
                var size int
                err = gocty.FromCtyValue(args[1], &size)
                if err != nil {
@@ -686,8 +693,10 @@ var LookupFunc = function.New(&function.Spec{
                                        return cty.StringVal(v.AsString()), nil
                                case ty.Equals(cty.Number):
                                        return cty.NumberVal(v.AsBigFloat()), nil
+                               case ty.Equals(cty.Bool):
+                                       return cty.BoolVal(v.True()), nil
                                default:
-                                       return cty.NilVal, errors.New("lookup() can only be used with flat lists")
+                                       return cty.NilVal, errors.New("lookup() can only be used with maps of primitive types")
                                }
                        }
                }
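With the new cty.Bool case, lookup() now covers all three primitive types rather than failing with the misleading "flat lists" error. A sketch (the map contents are hypothetical):

    m := cty.MapVal(map[string]cty.Value{"enabled": cty.True})
    v, _ := funcs.LookupFunc.Call([]cty.Value{m, cty.StringVal("enabled"), cty.False})
    // v is cty.True; before this change a bool value fell through to the error case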
@@ -797,10 +806,12 @@ var MatchkeysFunc = function.New(&function.Spec{
                },
        },
        Type: func(args []cty.Value) (cty.Type, error) {
-               if !args[1].Type().Equals(args[2].Type()) {
-                       return cty.NilType, errors.New("lists must be of the same type")
+               ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()})
+               if ty == cty.NilType {
+                       return cty.NilType, errors.New("keys and searchset must be of the same type")
                }
 
+               // the return type is based on args[0] (values)
                return args[0].Type(), nil
        },
        Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
@@ -813,10 +824,14 @@ var MatchkeysFunc = function.New(&function.Spec{
                }
 
                output := make([]cty.Value, 0)
-
                values := args[0]
-               keys := args[1]
-               searchset := args[2]
+
+               // Keys and searchset must be the same type.
+               // We can skip error checking here because we've already verified that
+               // they can be unified in the Type function
+               ty, _ := convert.UnifyUnsafe([]cty.Type{args[1].Type(), args[2].Type()})
+               keys, _ := convert.Convert(args[1], ty)
+               searchset, _ := convert.Convert(args[2], ty)
 
                // if searchset is empty, return an empty list.
                if searchset.LengthInt() == 0 {
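convert.UnifyUnsafe finds a single type that both arguments can convert to (or cty.NilType when none exists), which is why the Impl can convert again without re-checking errors. The unification step in isolation, as a sketch:

    ty, _ := convert.UnifyUnsafe([]cty.Type{
        cty.List(cty.String),
        cty.List(cty.Number),
    })
    // ty is list(string): numbers convert losslessly to strings.
    keys, _ := convert.Convert(cty.ListVal([]cty.Value{cty.NumberIntVal(1)}), ty)
    // keys is now ["1"], comparable element-by-element with a list(string) searchset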
@@ -867,7 +882,6 @@ var MergeFunc = function.New(&function.Spec{
                Name:             "maps",
                Type:             cty.DynamicPseudoType,
                AllowDynamicType: true,
-               AllowNull:        true,
        },
        Type: function.StaticReturnType(cty.DynamicPseudoType),
        Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
index 5cb4bc5c146cc28ed1865fe6f211b5155f551418..be006f821c7d7bd84e7f164f1478da6027ec2028 100644 (file)
@@ -14,6 +14,7 @@ import (
        "hash"
 
        uuid "github.com/hashicorp/go-uuid"
+       uuidv5 "github.com/satori/go.uuid"
        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/function"
        "github.com/zclconf/go-cty/cty/gocty"
@@ -32,6 +33,39 @@ var UUIDFunc = function.New(&function.Spec{
        },
 })
 
+var UUIDV5Func = function.New(&function.Spec{
+       Params: []function.Parameter{
+               {
+                       Name: "namespace",
+                       Type: cty.String,
+               },
+               {
+                       Name: "name",
+                       Type: cty.String,
+               },
+       },
+       Type: function.StaticReturnType(cty.String),
+       Impl: func(args []cty.Value, retType cty.Type) (ret cty.Value, err error) {
+               var namespace uuidv5.UUID
+               switch {
+               case args[0].AsString() == "dns":
+                       namespace = uuidv5.NamespaceDNS
+               case args[0].AsString() == "url":
+                       namespace = uuidv5.NamespaceURL
+               case args[0].AsString() == "oid":
+                       namespace = uuidv5.NamespaceOID
+               case args[0].AsString() == "x500":
+                       namespace = uuidv5.NamespaceX500
+               default:
+                       if namespace, err = uuidv5.FromString(args[0].AsString()); err != nil {
+                               return cty.UnknownVal(cty.String), fmt.Errorf("uuidv5() doesn't support namespace %s (%v)", args[0].AsString(), err)
+                       }
+               }
+               val := args[1].AsString()
+               return cty.StringVal(uuidv5.NewV5(namespace, val).String()), nil
+       },
+})
+
 // Base64Sha256Func constructs a function that computes the SHA256 hash of a given string
 // and encodes it with Base64.
 var Base64Sha256Func = makeStringHashFunction(sha256.New, base64.StdEncoding.EncodeToString)
@@ -228,6 +262,12 @@ func UUID() (cty.Value, error) {
        return UUIDFunc.Call(nil)
 }
 
+// UUIDV5 generates and returns a Type-5 UUID in the standard hexadecimal string
+// format.
+func UUIDV5(namespace cty.Value, name cty.Value) (cty.Value, error) {
+       return UUIDV5Func.Call([]cty.Value{namespace, name})
+}
+
 // Base64Sha256 computes the SHA256 hash of a given string and encodes it with
 // Base64.
 //
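A usage sketch of the new wrapper; the expected output is the well-known RFC 4122 name-based (SHA-1) UUID for www.example.com under the DNS namespace:

    v, err := funcs.UUIDV5(cty.StringVal("dns"), cty.StringVal("www.example.com"))
    if err == nil {
        fmt.Println(v.AsString()) // 2ed6657d-e927-568b-95e1-2665a8aea6a2
    }

Unlike uuid(), the result is deterministic for a given namespace and name, which makes it usable in configurations that must converge.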
index 7dfc9058758b413715383131e7874cbdcd0837de..016b102d946b9e98405188d78236bb623fcf4e56 100644 (file)
@@ -237,6 +237,21 @@ var DirnameFunc = function.New(&function.Spec{
        },
 })
 
+// AbsPathFunc constructs a function that converts a filesystem path to an absolute path
+var AbsPathFunc = function.New(&function.Spec{
+       Params: []function.Parameter{
+               {
+                       Name: "path",
+                       Type: cty.String,
+               },
+       },
+       Type: function.StaticReturnType(cty.String),
+       Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+               absPath, err := filepath.Abs(args[0].AsString())
+               return cty.StringVal(filepath.ToSlash(absPath)), err
+       },
+})
+
 // PathExpandFunc constructs a function that expands a leading ~ character to the current user's home directory.
 var PathExpandFunc = function.New(&function.Spec{
        Params: []function.Parameter{
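A usage sketch for the new function; the result depends on the process working directory, and filepath.ToSlash guarantees forward slashes even on Windows:

    v, _ := funcs.AbsPathFunc.Call([]cty.Value{cty.StringVal("./modules/vpc")})
    // with a working directory of /work (hypothetical), v is "/work/modules/vpc"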
index 2c7b5482b839242b81eba6bc18b3e0bdbbd61926..b77a55fdea2154f743e31dd593064a518d2046af 100644 (file)
@@ -3,6 +3,7 @@ package lang
 import (
        "fmt"
 
+       ctyyaml "github.com/zclconf/go-cty-yaml"
        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/function"
        "github.com/zclconf/go-cty/cty/function/stdlib"
@@ -30,6 +31,7 @@ func (s *Scope) Functions() map[string]function.Function {
 
                s.funcs = map[string]function.Function{
                        "abs":              stdlib.AbsoluteFunc,
+                       "abspath":          funcs.AbsPathFunc,
                        "basename":         funcs.BasenameFunc,
                        "base64decode":     funcs.Base64DecodeFunc,
                        "base64encode":     funcs.Base64EncodeFunc,
@@ -85,6 +87,7 @@ func (s *Scope) Functions() map[string]function.Function {
                        "min":              stdlib.MinFunc,
                        "pathexpand":       funcs.PathExpandFunc,
                        "pow":              funcs.PowFunc,
+                       "range":            stdlib.RangeFunc,
                        "replace":          funcs.ReplaceFunc,
                        "reverse":          funcs.ReverseFunc,
                        "rsadecrypt":       funcs.RsaDecryptFunc,
@@ -114,7 +117,10 @@ func (s *Scope) Functions() map[string]function.Function {
                        "upper":            stdlib.UpperFunc,
                        "urlencode":        funcs.URLEncodeFunc,
                        "uuid":             funcs.UUIDFunc,
+                       "uuidv5":           funcs.UUIDV5Func,
                        "values":           funcs.ValuesFunc,
+                       "yamldecode":       ctyyaml.YAMLDecodeFunc,
+                       "yamlencode":       ctyyaml.YAMLEncodeFunc,
                        "zipmap":           funcs.ZipmapFunc,
                }
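Once registered, the new entries are callable through the Scope like any other builtin. A hedged sketch for yamldecode, assuming the vendored go-cty-yaml converter:

    fns := (&lang.Scope{}).Functions()
    v, err := fns["yamldecode"].Call([]cty.Value{cty.StringVal("a: 1\nb: hello")})
    // on success v is an object value {a = 1, b = "hello"}; scalar types are inferred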
 
index 8b7ef43fddfa0d27120cae228b7ba6cd0dd8de70..d85086c97c184282c5b7ba79fdbcc480b853107f 100644 (file)
@@ -84,7 +84,7 @@ func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Valu
                        // whether there are dynamically-typed attributes inside. However,
                        // both support a similar-enough API that we can treat them the
                        // same for our purposes here.
-                       if !plannedV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
+                       if !plannedV.IsKnown() || !actualV.IsKnown() || plannedV.IsNull() || actualV.IsNull() {
                                continue
                        }
 
@@ -169,6 +169,16 @@ func assertObjectCompatible(schema *configschema.Block, planned, actual cty.Valu
                        })
                        errs = append(errs, setErrs...)
 
+                       if maybeUnknownBlocks {
+                               // When unknown blocks are present the final number of blocks
+                               // may be different, either because the unknown set values
+                               // become equal and are collapsed, or the count is unknown due
+                               // to a dynamic block. Unfortunately this means we can't do our
+                               // usual checks in this case without generating false
+                               // negatives.
+                               continue
+                       }
+
                        // There can be fewer elements in a set after its elements are all
                        // known (values that turn out to be equal will coalesce) but the
                        // number of elements must never get larger.
index b1d01fb9ada1459e05242c6558e2e96d09c4e143..c1d9e3bed667cc2c804992715966d21353d6d77a 100644 (file)
@@ -204,6 +204,9 @@ func (i *ProviderInstaller) Get(provider string, req Constraints) (PluginMeta, t
        }
 
        downloadURLs, err := i.listProviderDownloadURLs(providerSource, versionMeta.Version)
+       if err != nil {
+               return PluginMeta{}, diags, err
+       }
        providerURL := downloadURLs.DownloadURL
 
        if !i.SkipVerify {
index ae9a4002dd75dc1ebbcc9404f54f5c51c85794ad..5b190e2c187b10ec8127a7cf6f168d39a9598d19 100644 (file)
@@ -330,6 +330,7 @@ func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp provi
        protoReq := &proto.ReadResource_Request{
                TypeName:     r.TypeName,
                CurrentState: &proto.DynamicValue{Msgpack: mp},
+               Private:      r.Private,
        }
 
        protoResp, err := p.client.ReadResource(p.ctx, protoReq)
@@ -348,6 +349,7 @@ func (p *GRPCProvider) ReadResource(r providers.ReadResourceRequest) (resp provi
                }
        }
        resp.NewState = state
+       resp.Private = protoResp.Private
 
        return resp
 }
index 1aa08c271d16df0e9951fb082abe4255e707a13a..7e0a74c58e96160a963eb23a5937c5187100d611 100644 (file)
@@ -176,6 +176,10 @@ type ReadResourceRequest struct {
 
        // PriorState contains the previously saved state value for this resource.
        PriorState cty.Value
+
+       // Private is an opaque blob that will be stored in state along with the
+       // resource. It is intended only for interpretation by the provider itself.
+       Private []byte
 }
 
 type ReadResourceResponse struct {
@@ -184,6 +188,10 @@ type ReadResourceResponse struct {
 
        // Diagnostics contains any warnings or errors from the method call.
        Diagnostics tfdiags.Diagnostics
+
+       // Private is an opaque blob that will be stored in state along with the
+       // resource. It is intended only for interpretation by the provider itself.
+       Private []byte
 }
 
 type PlanResourceChangeRequest struct {
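Together with the GRPCProvider change above, these fields let a provider round-trip opaque, provider-internal metadata through ReadResource without Terraform interpreting it. A minimal sketch of a provider honoring that contract (the pass-through body is hypothetical):

    func readResource(req providers.ReadResourceRequest) providers.ReadResourceResponse {
        var resp providers.ReadResourceResponse
        // Echo the opaque blob back so it is preserved in state; a real
        // provider might decode, refresh, and re-encode it instead.
        resp.Private = req.Private
        resp.NewState = req.PriorState
        return resp
    }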
index ea717d00ec27a96b14e2469b3a48391f79897b80..8664f3bea296f6ee11da56060d3218fe4ccf9541 100644 (file)
@@ -147,7 +147,7 @@ func (obj *ResourceInstanceObjectSrc) DeepCopy() *ResourceInstanceObjectSrc {
 
        var private []byte
        if obj.Private != nil {
-               private := make([]byte, len(obj.Private))
+               private = make([]byte, len(obj.Private))
                copy(private, obj.Private)
        }
 
@@ -181,14 +181,17 @@ func (obj *ResourceInstanceObject) DeepCopy() *ResourceInstanceObject {
 
        var private []byte
        if obj.Private != nil {
-               private := make([]byte, len(obj.Private))
+               private = make([]byte, len(obj.Private))
                copy(private, obj.Private)
        }
 
-       // Some addrs.Referencable implementations are technically mutable, but
+       // Some addrs.Referenceable implementations are technically mutable, but
        // we treat them as immutable by convention and so we don't deep-copy here.
-       dependencies := make([]addrs.Referenceable, len(obj.Dependencies))
-       copy(dependencies, obj.Dependencies)
+       var dependencies []addrs.Referenceable
+       if obj.Dependencies != nil {
+               dependencies = make([]addrs.Referenceable, len(obj.Dependencies))
+               copy(dependencies, obj.Dependencies)
+       }
 
        return &ResourceInstanceObject{
                Value:        obj.Value,
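The two one-character fixes above repair a classic Go shadowing bug: ":=" inside the if block declared a fresh "private", so the copy was filled and then discarded, leaving the outer variable nil. A minimal reproduction:

    var private []byte
    if true {
        private := make([]byte, 3) // BUG: ':=' declares a new, shadowed variable
        copy(private, []byte{1, 2, 3})
    }
    fmt.Println(private == nil) // true: the outer slice was never assigned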
index 6fe2ab8ef8ac79b5da723b236166e871efa3277f..be93924a7a804ee9eead5fab56fe1e2e485f3d49 100644 (file)
@@ -205,5 +205,5 @@ type instanceStateV2 struct {
 type backendStateV2 struct {
        Type      string          `json:"type"`   // Backend type
        ConfigRaw json.RawMessage `json:"config"` // Backend raw config
-       Hash      int             `json:"hash"`   // Hash of portion of configuration from config files
+       Hash      uint64          `json:"hash"`   // Hash of portion of configuration from config files
 }
index 2cbe8a53c9ba998b84080eed505508a46bb5472b..fbec5477cab5e573ec67c86f53f0965e0bc805bf 100644 (file)
@@ -79,7 +79,7 @@ func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {
                                case addrs.DataResourceMode:
                                        modeStr = "data"
                                default:
-                                       return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode", resAddr)
+                                       return nil, fmt.Errorf("state contains resource %s with an unsupported resource mode %#v", resAddr, resAddr.Mode)
                                }
 
                                // In state versions prior to 4 we allowed each instance of a
@@ -98,7 +98,7 @@ func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {
                                        var diags tfdiags.Diagnostics
                                        providerAddr, diags = addrs.ParseAbsProviderConfigStr(oldProviderAddr)
                                        if diags.HasErrors() {
-                                               return nil, diags.Err()
+                                               return nil, fmt.Errorf("invalid provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err())
                                        }
                                } else {
                                        // Smells like an old-style module-local provider address,
@@ -109,7 +109,7 @@ func upgradeStateV3ToV4(old *stateV3) (*stateV4, error) {
                                        if oldProviderAddr != "" {
                                                localAddr, diags := addrs.ParseProviderConfigCompactStr(oldProviderAddr)
                                                if diags.HasErrors() {
-                                                       return nil, diags.Err()
+                                                       return nil, fmt.Errorf("invalid legacy provider config reference %q for %s: %s", oldProviderAddr, instAddr, diags.Err())
                                                }
                                                providerAddr = localAddr.Absolute(moduleAddr)
                                        } else {
@@ -272,7 +272,7 @@ func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2,
                instKeyRaw = string(tk)
        default:
                if instKeyRaw != nil {
-                       return nil, fmt.Errorf("insupported instance key: %#v", instKey)
+                       return nil, fmt.Errorf("unsupported instance key: %#v", instKey)
                }
        }
 
@@ -301,7 +301,11 @@ func upgradeInstanceObjectV3ToV4(rsOld *resourceStateV2, isOld *instanceStateV2,
 
        dependencies := make([]string, len(rsOld.Dependencies))
        for i, v := range rsOld.Dependencies {
-               dependencies[i] = parseLegacyDependency(v)
+               depStr, err := parseLegacyDependency(v)
+               if err != nil {
+                       return nil, fmt.Errorf("invalid dependency reference %q: %s", v, err)
+               }
+               dependencies[i] = depStr
        }
 
        return &instanceObjectStateV4{
@@ -414,7 +418,7 @@ func simplifyImpliedValueType(ty cty.Type) cty.Type {
        }
 }
 
-func parseLegacyDependency(s string) string {
+func parseLegacyDependency(s string) (string, error) {
        parts := strings.Split(s, ".")
        ret := parts[0]
        for _, part := range parts[1:] {
@@ -427,5 +431,14 @@ func parseLegacyDependency(s string) string {
                }
                ret = ret + "." + part
        }
-       return ret
+
+       // The result must parse as a reference, or else we'll create an invalid
+       // state file.
+       var diags tfdiags.Diagnostics
+       _, diags = addrs.ParseRefStr(ret)
+       if diags.HasErrors() {
+               return "", diags.Err()
+       }
+
+       return ret, nil
 }
index 7a6ef3d3282fb739fd224b707f5d4a056ff864ce..323462f0b15b4884f3b9f492ca0a8a0b7fed630d 100644 (file)
@@ -13,7 +13,6 @@ import (
        "sync"
 
        "github.com/hashicorp/terraform/addrs"
-       "github.com/hashicorp/terraform/config"
        "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/hashicorp/terraform/configs/configschema"
        "github.com/zclconf/go-cty/cty"
@@ -665,7 +664,7 @@ func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]strin
        old, exists := attrs[currentKey]
 
        if diff != nil && diff.NewComputed {
-               result[attr] = config.UnknownVariableValue
+               result[attr] = hcl2shim.UnknownVariableValue
                return result, nil
        }
 
@@ -673,7 +672,7 @@ func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]strin
        // This only applied to top-level "id" fields.
        if attr == "id" && len(path) == 1 {
                if old == "" {
-                       result[attr] = config.UnknownVariableValue
+                       result[attr] = hcl2shim.UnknownVariableValue
                } else {
                        result[attr] = old
                }
@@ -704,8 +703,8 @@ func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]strin
        // check for mismatched diff values
        if exists &&
                old != diff.Old &&
-               old != config.UnknownVariableValue &&
-               diff.Old != config.UnknownVariableValue {
+               old != hcl2shim.UnknownVariableValue &&
+               diff.Old != hcl2shim.UnknownVariableValue {
                return result, fmt.Errorf("diff apply conflict for %s: diff expects %q, but prior value has %q", attr, diff.Old, old)
        }
 
@@ -723,7 +722,7 @@ func (d *InstanceDiff) applySingleAttrDiff(path []string, attrs map[string]strin
        }
 
        if attrSchema.Computed && diff.NewComputed {
-               result[attr] = config.UnknownVariableValue
+               result[attr] = hcl2shim.UnknownVariableValue
                return result, nil
        }
 
@@ -756,7 +755,7 @@ func (d *InstanceDiff) applyCollectionDiff(path []string, attrs map[string]strin
                        }
 
                        if diff.NewComputed {
-                               result[k[len(prefix):]] = config.UnknownVariableValue
+                               result[k[len(prefix):]] = hcl2shim.UnknownVariableValue
                                return result, nil
                        }
 
index 09313f7fc8b67acc4bc51e44614d27684ab85b93..422f372c435d0bd85c81dcc61ea18cbcaeaa9031 100644 (file)
@@ -61,7 +61,8 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
        configVal := cty.NullVal(cty.DynamicPseudoType)
        if n.Config != nil {
                var configDiags tfdiags.Diagnostics
-               keyData := EvalDataForInstanceKey(n.Addr.Key)
+               forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
+               keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
                configVal, _, configDiags = ctx.EvaluateBlock(n.Config.Config, schema, nil, keyData)
                diags = diags.Append(configDiags)
                if configDiags.HasErrors() {
@@ -548,7 +549,8 @@ func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*configs.Provisio
                provisioner := ctx.Provisioner(prov.Type)
                schema := ctx.ProvisionerSchema(prov.Type)
 
-               keyData := EvalDataForInstanceKey(instanceAddr.Key)
+               // TODO: the for_each value is not added here, which might cause issues with provisioners
+               keyData := EvalDataForInstanceKey(instanceAddr.Key, nil)
 
                // Evaluate the main provisioner configuration.
                config, _, configDiags := ctx.EvaluateBlock(prov.Config, schema, instanceAddr, keyData)
index b7acfb06dcbb400b5abf054d5f4b758fe750c01f..695b5fe0e132bd26140ddfafab1a2615fceafff8 100644 (file)
@@ -4,7 +4,6 @@ import (
        "bytes"
        "fmt"
        "log"
-       "reflect"
        "strings"
 
        "github.com/hashicorp/hcl2/hcl"
@@ -134,7 +133,8 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
                // Should be caught during validation, so we don't bother with a pretty error here
                return nil, fmt.Errorf("provider does not support resource type %q", n.Addr.Resource.Type)
        }
-       keyData := EvalDataForInstanceKey(n.Addr.Key)
+       forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
+       keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
        configVal, _, configDiags := ctx.EvaluateBlock(config.Config, schema, nil, keyData)
        diags = diags.Append(configDiags)
        if configDiags.HasErrors() {
@@ -174,6 +174,20 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
                }
        }
 
+       log.Printf("[TRACE] Re-validating config for %q", n.Addr.Absolute(ctx.Path()))
+       // Allow the provider to validate the final set of values.
+       // The config was statically validated early on, but there may have been
+       // unknown values which the provider could not validate at the time.
+       validateResp := provider.ValidateResourceTypeConfig(
+               providers.ValidateResourceTypeConfigRequest{
+                       TypeName: n.Addr.Resource.Type,
+                       Config:   configVal,
+               },
+       )
+       if validateResp.Diagnostics.HasErrors() {
+               return nil, validateResp.Diagnostics.InConfigBody(config.Config).Err()
+       }
+
        // The provider gets an opportunity to customize the proposed new value,
        // which in turn produces the _planned_ new value.
        resp := provider.PlanResourceChange(providers.PlanResourceChangeRequest{
@@ -448,8 +462,9 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
                        // must _also_ record the returned change in the active plan,
                        // which the expression evaluator will use in preference to this
                        // incomplete value recorded in the state.
-                       Status: states.ObjectPlanned,
-                       Value:  plannedNewVal,
+                       Status:  states.ObjectPlanned,
+                       Value:   plannedNewVal,
+                       Private: plannedPrivate,
                }
        }
 
@@ -517,7 +532,7 @@ func processIgnoreChangesIndividual(prior, proposed cty.Value, ignoreChanges []h
                // away any deeper values we already produced at that point.
                var ignoreTraversal hcl.Traversal
                for i, candidate := range ignoreChangesPath {
-                       if reflect.DeepEqual(path, candidate) {
+                       if path.Equals(candidate) {
                                ignoreTraversal = ignoreChanges[i]
                        }
                }
@@ -790,6 +805,7 @@ func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
                        Before: state.Value,
                        After:  cty.NullVal(cty.DynamicPseudoType),
                },
+               Private:      state.Private,
                ProviderAddr: n.ProviderAddr,
        }
 
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go b/vendor/github.com/hashicorp/terraform/terraform/eval_for_each.go
new file mode 100644 (file)
index 0000000..b86bf37
--- /dev/null
@@ -0,0 +1,85 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/hcl2/hcl"
+       "github.com/hashicorp/terraform/tfdiags"
+       "github.com/zclconf/go-cty/cty"
+)
+
+// evaluateResourceForEachExpression interprets a "for_each" argument on a resource.
+//
+// It returns a map of cty.Value, plus any diagnostics. A nil map is returned
+// when the expression itself is nil, which distinguishes an unset for_each
+// from an empty map.
+func evaluateResourceForEachExpression(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, diags tfdiags.Diagnostics) {
+       forEachMap, known, diags := evaluateResourceForEachExpressionKnown(expr, ctx)
+       if !known {
+               // Attach a diag as we do with count, with the same downsides
+               diags = diags.Append(&hcl.Diagnostic{
+                       Severity: hcl.DiagError,
+                       Summary:  "Invalid forEach argument",
+                       Detail:   `The "for_each" value depends on resource attributes that cannot be determined until apply, so Terraform cannot predict how many instances will be created. To work around this, use the -target argument to first apply only the resources that the for_each depends on.`,
+               })
+       }
+       return forEachMap, diags
+}
+
+// evaluateResourceForEachExpressionKnown is like evaluateResourceForEachExpression
+// except that it handles an unknown result by returning an empty map and
+// a known = false, rather than by reporting the unknown value as an error
+// diagnostic.
+func evaluateResourceForEachExpressionKnown(expr hcl.Expression, ctx EvalContext) (forEach map[string]cty.Value, known bool, diags tfdiags.Diagnostics) {
+       if expr == nil {
+               return nil, true, nil
+       }
+
+       forEachVal, forEachDiags := ctx.EvaluateExpr(expr, cty.DynamicPseudoType, nil)
+       diags = diags.Append(forEachDiags)
+       if diags.HasErrors() {
+               return nil, true, diags
+       }
+
+       switch {
+       case forEachVal.IsNull():
+               diags = diags.Append(&hcl.Diagnostic{
+                       Severity: hcl.DiagError,
+                       Summary:  "Invalid for_each argument",
+                       Detail:   `The given "for_each" argument value is unsuitable: the value must not be null. A map, or set of strings, is allowed.`,
+                       Subject:  expr.Range().Ptr(),
+               })
+               return nil, true, diags
+       case !forEachVal.IsKnown():
+               return map[string]cty.Value{}, false, diags
+       }
+
+       if !forEachVal.CanIterateElements() || forEachVal.Type().IsListType() {
+               diags = diags.Append(&hcl.Diagnostic{
+                       Severity: hcl.DiagError,
+                       Summary:  "Invalid for_each argument",
+                       Detail:   fmt.Sprintf(`The given "for_each" argument value is unsuitable: the "for_each" argument must be a map, or set of strings, and you have provided a value of type %s.`, forEachVal.Type().FriendlyName()),
+                       Subject:  expr.Range().Ptr(),
+               })
+               return nil, true, diags
+       }
+
+       if forEachVal.Type().IsSetType() {
+               if forEachVal.Type().ElementType() != cty.String {
+                       diags = diags.Append(&hcl.Diagnostic{
+                               Severity: hcl.DiagError,
+                               Summary:  "Invalid for_each set argument",
+                               Detail:   fmt.Sprintf(`The given "for_each" argument value is unsuitable: "for_each" supports maps and sets of strings, but you have provided a set containing type %s.`, forEachVal.Type().ElementType().FriendlyName()),
+                               Subject:  expr.Range().Ptr(),
+                       })
+                       return nil, true, diags
+               }
+       }
+
+       // If the map is empty ({}), return an empty map, because cty will return nil when representing {} AsValueMap
+       if forEachVal.LengthInt() == 0 {
+               return map[string]cty.Value{}, true, diags
+       }
+
+       return forEachVal.AsValueMap(), true, nil
+}
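The validation rules above accept maps and sets of strings and reject everything else, including lists, since an int-keyed "each" would be ambiguous with count.index. In terms of plain cty values:

    okMap := cty.MapVal(map[string]cty.Value{"a": cty.StringVal("x")}) // accepted
    okSet := cty.SetVal([]cty.Value{cty.StringVal("a")})               // accepted: set of strings
    badList := cty.ListVal([]cty.Value{cty.StringVal("a")})            // rejected: Type().IsListType()
    badSet := cty.SetVal([]cty.Value{cty.NumberIntVal(1)})             // rejected: set element type is not string
    _, _, _, _ = okMap, okSet, badList, badSet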
index 34f2d60adec83bc2104cbc36bfccce77cd1d9511..4999480f5df24ef361c414a62ffaba9a4200e67d 100644 (file)
@@ -95,7 +95,8 @@ func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) {
        objTy := schema.ImpliedType()
        priorVal := cty.NullVal(objTy) // for data resources, prior is always null because we start fresh every time
 
-       keyData := EvalDataForInstanceKey(n.Addr.Key)
+       forEach, _ := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
+       keyData := EvalDataForInstanceKey(n.Addr.Key, forEach)
 
        var configDiags tfdiags.Diagnostics
        configVal, _, configDiags = ctx.EvaluateBlock(config.Config, schema, nil, keyData)
@@ -179,6 +180,17 @@ func (n *EvalReadData) Eval(ctx EvalContext) (interface{}, error) {
                )
        }
 
+       log.Printf("[TRACE] Re-validating config for %s", absAddr)
+       validateResp := provider.ValidateDataSourceConfig(
+               providers.ValidateDataSourceConfigRequest{
+                       TypeName: n.Addr.Resource.Type,
+                       Config:   configVal,
+               },
+       )
+       if validateResp.Diagnostics.HasErrors() {
+               return nil, validateResp.Diagnostics.InConfigBody(n.Config.Config).Err()
+       }
+
        // If we get down here then our configuration is complete and we're read
        // to actually call the provider to read the data.
        log.Printf("[TRACE] EvalReadData: %s configuration is complete, so reading from provider", absAddr)
index 03bc948115d963d9ebf1a339e25ae6bd48fe57bc..4dfb5b4e93d5c10fec9b604dffbcd50af05c436f 100644 (file)
@@ -55,6 +55,7 @@ func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
        req := providers.ReadResourceRequest{
                TypeName:   n.Addr.Resource.Type,
                PriorState: priorVal,
+               Private:    state.Private,
        }
 
        provider := *n.Provider
@@ -87,6 +88,7 @@ func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
 
        newState := state.DeepCopy()
        newState.Value = resp.NewState
+       newState.Private = resp.Private
 
        // Call post-refresh hook
        err = ctx.Hook(func(h Hook) (HookAction, error) {
index d506ce3fe1a3b0cb341db728564caf3729d8100f..b611113e3c09838383f8776d63863956ba6fba51 100644 (file)
@@ -424,15 +424,21 @@ func (n *EvalWriteResourceState) Eval(ctx EvalContext) (interface{}, error) {
                return nil, diags.Err()
        }
 
-       // Currently we ony support NoEach and EachList, because for_each support
-       // is not fully wired up across Terraform. Once for_each support is added,
-       // we'll need to handle that here too, setting states.EachMap if the
-       // assigned expression is a map.
        eachMode := states.NoEach
        if count >= 0 { // -1 signals "count not set"
                eachMode = states.EachList
        }
 
+       forEach, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
+       diags = diags.Append(forEachDiags)
+       if forEachDiags.HasErrors() {
+               return nil, diags.Err()
+       }
+
+       if forEach != nil {
+               eachMode = states.EachMap
+       }
+
        // This method takes care of all of the business logic of updating this
        // while ensuring that any existing instances are preserved, etc.
        state.SetResourceMeta(absAddr, eachMode, n.ProviderAddr)
index 0033e01ac2f3f11a13335579c09c52dde5cb1b1f..6b809a281bc7f78eacd14c25c128dfd93ad790c0 100644 (file)
@@ -112,11 +112,12 @@ func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
 // the configuration of a provisioner belonging to a resource. The provisioner
 // config is expected to contain the merged connection configurations.
 type EvalValidateProvisioner struct {
-       ResourceAddr     addrs.Resource
-       Provisioner      *provisioners.Interface
-       Schema           **configschema.Block
-       Config           *configs.Provisioner
-       ResourceHasCount bool
+       ResourceAddr       addrs.Resource
+       Provisioner        *provisioners.Interface
+       Schema             **configschema.Block
+       Config             *configs.Provisioner
+       ResourceHasCount   bool
+       ResourceHasForEach bool
 }
 
 func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
@@ -198,6 +199,19 @@ func (n *EvalValidateProvisioner) evaluateBlock(ctx EvalContext, body hcl.Body,
                // expected type since none of these elements are known at this
                // point anyway.
                selfAddr = n.ResourceAddr.Instance(addrs.IntKey(0))
+       } else if n.ResourceHasForEach {
+               // For a resource that has for_each, we allow each.value and each.key
+               // but don't know at this stage what it will return.
+               keyData = InstanceKeyEvalData{
+                       EachKey:   cty.UnknownVal(cty.String),
+                       EachValue: cty.DynamicVal,
+               }
+
+               // "self" can't point to an unknown key, but we'll force it to be
+               // key "" here, which should return an unknown value of the
+               // expected type since none of these elements are known at
+               // this point anyway.
+               selfAddr = n.ResourceAddr.Instance(addrs.StringKey(""))
        }
 
        return ctx.EvaluateBlock(body, schema, selfAddr, keyData)
@@ -370,10 +384,21 @@ func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
                diags = diags.Append(countDiags)
        }
 
+       if n.Config.ForEach != nil {
+               keyData = InstanceKeyEvalData{
+                       EachKey:   cty.UnknownVal(cty.String),
+                       EachValue: cty.UnknownVal(cty.DynamicPseudoType),
+               }
+
+               // Evaluate the for_each expression here so we can expose the diagnostics
+               forEachDiags := n.validateForEach(ctx, n.Config.ForEach)
+               diags = diags.Append(forEachDiags)
+       }
+
        for _, traversal := range n.Config.DependsOn {
                ref, refDiags := addrs.ParseRef(traversal)
                diags = diags.Append(refDiags)
-               if len(ref.Remaining) != 0 {
+               if !refDiags.HasErrors() && len(ref.Remaining) != 0 {
                        diags = diags.Append(&hcl.Diagnostic{
                                Severity: hcl.DiagError,
                                Summary:  "Invalid depends_on reference",
@@ -542,3 +567,18 @@ func (n *EvalValidateResource) validateCount(ctx EvalContext, expr hcl.Expressio
 
        return diags
 }
+
+func (n *EvalValidateResource) validateForEach(ctx EvalContext, expr hcl.Expression) (diags tfdiags.Diagnostics) {
+       _, known, forEachDiags := evaluateResourceForEachExpressionKnown(expr, ctx)
+       // If the value isn't known then that's the best we can do for now, but
+       // we'll check more thoroughly during the plan walk
+       if !known {
+               return diags
+       }
+
+       if forEachDiags.HasErrors() {
+               diags = diags.Append(forEachDiags)
+       }
+
+       return diags
+}
index 68adf764df468fe0fe0f9879461eefe6d282a409..ea4697398b24c7d747c2765e83042f638edd1854 100644 (file)
@@ -12,6 +12,7 @@ import (
        "github.com/hashicorp/terraform/addrs"
 
        "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/hashicorp/terraform/config/module"
        "github.com/zclconf/go-cty/cty"
        "github.com/zclconf/go-cty/cty/convert"
@@ -60,7 +61,7 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
                        continue
                }
 
-               if proposedValue == config.UnknownVariableValue {
+               if proposedValue == hcl2shim.UnknownVariableValue {
                        continue
                }
 
index ab65d475b1e73a569aadaa3a6ee4c257197161f9..9bb600945f976b1f1768ec5ca4863e1f23da0005 100644 (file)
@@ -120,20 +120,24 @@ type InstanceKeyEvalData struct {
 
 // EvalDataForInstanceKey constructs a suitable InstanceKeyEvalData for
 // evaluating in a context that has the given instance key.
-func EvalDataForInstanceKey(key addrs.InstanceKey) InstanceKeyEvalData {
-       // At the moment we don't actually implement for_each, so we only
-       // ever populate CountIndex.
-       // (When we implement for_each later we may need to reorganize this some,
-       // so that we can resolve the ambiguity that an int key may either be
-       // a count.index or an each.key where for_each is over a list.)
-
+func EvalDataForInstanceKey(key addrs.InstanceKey, forEachMap map[string]cty.Value) InstanceKeyEvalData {
        var countIdx cty.Value
+       var eachKey cty.Value
+       var eachVal cty.Value
+
        if intKey, ok := key.(addrs.IntKey); ok {
                countIdx = cty.NumberIntVal(int64(intKey))
        }
 
+       if stringKey, ok := key.(addrs.StringKey); ok {
+               eachKey = cty.StringVal(string(stringKey))
+               eachVal = forEachMap[string(stringKey)]
+       }
+
        return InstanceKeyEvalData{
                CountIndex: countIdx,
+               EachKey:    eachKey,
+               EachValue:  eachVal,
        }
 }
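A sketch of the new call shape: an addrs.StringKey now yields each.key and each.value, while an addrs.IntKey still populates only count.index (map contents hypothetical):

    forEach := map[string]cty.Value{"a": cty.NumberIntVal(1)}
    data := EvalDataForInstanceKey(addrs.StringKey("a"), forEach)
    // data.EachKey is cty.StringVal("a"), data.EachValue is cty.NumberIntVal(1),
    // and data.CountIndex remains the zero cty.Value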
 
@@ -173,6 +177,37 @@ func (d *evaluationStateData) GetCountAttr(addr addrs.CountAttr, rng tfdiags.Sou
        }
 }
 
+func (d *evaluationStateData) GetForEachAttr(addr addrs.ForEachAttr, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
+       var diags tfdiags.Diagnostics
+       var returnVal cty.Value
+       switch addr.Name {
+
+       case "key":
+               returnVal = d.InstanceKeyData.EachKey
+       case "value":
+               returnVal = d.InstanceKeyData.EachValue
+       default:
+               diags = diags.Append(&hcl.Diagnostic{
+                       Severity: hcl.DiagError,
+                       Summary:  `Invalid "each" attribute`,
+                       Detail:   fmt.Sprintf(`The "each" object does not have an attribute named %q. The supported attributes are each.key and each.value, the current key and value pair of the "for_each" attribute set.`, addr.Name),
+                       Subject:  rng.ToHCL().Ptr(),
+               })
+               return cty.DynamicVal, diags
+       }
+
+       if returnVal == cty.NilVal {
+               diags = diags.Append(&hcl.Diagnostic{
+                       Severity: hcl.DiagError,
+                       Summary:  `Reference to "each" in context without for_each`,
+                       Detail:   fmt.Sprintf(`The "each" object can be used only in "resource" blocks, and only when the "for_each" argument is set.`),
+                       Subject:  rng.ToHCL().Ptr(),
+               })
+               return cty.UnknownVal(cty.DynamicPseudoType), diags
+       }
+       return returnVal, diags
+}
+
 func (d *evaluationStateData) GetInputVariable(addr addrs.InputVariable, rng tfdiags.SourceRange) (cty.Value, tfdiags.Diagnostics) {
        var diags tfdiags.Diagnostics
 
@@ -569,7 +604,7 @@ func (d *evaluationStateData) GetResourceInstance(addr addrs.ResourceInstance, r
                }
        case states.EachMap:
                multi = key == addrs.NoKey
-               if _, ok := addr.Key.(addrs.IntKey); !multi && !ok {
+               if _, ok := addr.Key.(addrs.StringKey); !multi && !ok {
                        diags = diags.Append(&hcl.Diagnostic{
                                Severity: hcl.DiagError,
                                Summary:  "Invalid resource index",
@@ -696,7 +731,7 @@ func (d *evaluationStateData) getResourceInstancesAll(addr addrs.Resource, rng t
                        ty := schema.ImpliedType()
                        key := addrs.IntKey(i)
                        is, exists := rs.Instances[key]
-                       if exists {
+                       if exists && is.Current != nil {
                                instAddr := addr.Instance(key).Absolute(d.ModulePath)
 
                                // Prefer pending value in plan if present. See getResourceInstanceSingle
index 26c185751a69a14e99efe7d335c376acfd93e934..97bb1f6998f0deb354c9c1bf9f062f052b905256 100644 (file)
@@ -11,16 +11,11 @@ import (
        "github.com/hashicorp/hil"
        "github.com/hashicorp/hil/ast"
        "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/hashicorp/terraform/config/module"
        "github.com/hashicorp/terraform/flatmap"
 )
 
-const (
-       // VarEnvPrefix is the prefix of variables that are read from
-       // the environment to set variables here.
-       VarEnvPrefix = "TF_VAR_"
-)
-
 // Interpolater is the structure responsible for determining the values
 // for interpolations such as `aws_instance.foo.bar`.
 type Interpolater struct {
@@ -71,7 +66,7 @@ func (i *Interpolater) valueCountVar(
 func unknownVariable() ast.Variable {
        return ast.Variable{
                Type:  ast.TypeUnknown,
-               Value: config.UnknownVariableValue,
+               Value: hcl2shim.UnknownVariableValue,
        }
 }
 
@@ -659,7 +654,7 @@ func (i *Interpolater) interpolateComplexTypeAttribute(
                // ".#" count field is marked as unknown to indicate "this whole list is
                // unknown". We must honor that meaning here so computed references can be
                // treated properly during the plan phase.
-               if lengthAttr == config.UnknownVariableValue {
+               if lengthAttr == hcl2shim.UnknownVariableValue {
                        return unknownVariable(), nil
                }
 
@@ -675,7 +670,7 @@ func (i *Interpolater) interpolateComplexTypeAttribute(
                // ".%" count field is marked as unknown to indicate "this whole list is
                // unknown". We must honor that meaning here so computed references can be
                // treated properly during the plan phase.
-               if lengthAttr == config.UnknownVariableValue {
+               if lengthAttr == hcl2shim.UnknownVariableValue {
                        return unknownVariable(), nil
                }
 
index ab8216341232702017e35363470acd65b77d0a92..dd9286648b28e0ed22bc1c708183c65bb6071dc7 100644 (file)
@@ -38,6 +38,16 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
                return nil, nil
        }
 
+       forEachMap, forEachKnown, forEachDiags := evaluateResourceForEachExpressionKnown(n.Config.ForEach, ctx)
+       if forEachDiags.HasErrors() {
+               return nil, forEachDiags.Err()
+       }
+       if !forEachKnown {
+               // If the for_each isn't known yet, we'll skip refreshing and try expansion
+               // again during the plan walk.
+               return nil, nil
+       }
+
        // Next we need to potentially rename an instance address in the state
        // if we're transitioning whether "count" is set at all.
        fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
@@ -77,6 +87,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
                        Concrete: concreteResource,
                        Schema:   n.Schema,
                        Count:    count,
+                       ForEach:  forEachMap,
                        Addr:     n.ResourceAddr(),
                },
 
@@ -85,6 +96,7 @@ func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, er
                &OrphanResourceCountTransformer{
                        Concrete: concreteResourceDestroyable,
                        Count:    count,
+                       ForEach:  forEachMap,
                        Addr:     n.ResourceAddr(),
                        State:    state,
                },
index 3a0570c5b62a2383bbd35a0b30f7cf078ae7227c..d147b42e48a373dfa9cfb15650fb636e90fc6fe7 100644 (file)
@@ -187,6 +187,8 @@ func (n *NodeAbstractResource) References() []*addrs.Reference {
 
                refs, _ := lang.ReferencesInExpr(c.Count)
                result = append(result, refs...)
+               refs, _ = lang.ReferencesInExpr(c.ForEach)
+               result = append(result, refs...)
                refs, _ = lang.ReferencesInBlock(c.Config, n.Schema)
                result = append(result, refs...)
                if c.Managed != nil {
@@ -238,21 +240,31 @@ func (n *NodeAbstractResourceInstance) References() []*addrs.Reference {
                        // need to do a little work here to massage this to the form we now
                        // want.
                        var result []*addrs.Reference
-                       for _, addr := range s.Current.Dependencies {
-                               if addr == nil {
-                                       // Should never happen; indicates a bug in the state loader
-                                       panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr()))
-                               }
 
-                               // This is a little weird: we need to manufacture an addrs.Reference
-                               // with a fake range here because the state isn't something we can
-                               // make source references into.
-                               result = append(result, &addrs.Reference{
-                                       Subject: addr,
-                                       SourceRange: tfdiags.SourceRange{
-                                               Filename: "(state file)",
-                                       },
-                               })
+                       // It is (apparently) possible for s.Current to be nil. This proved
+                       // difficult to reproduce, so we will fix the symptom here and hope
+                       // to find the root cause another time.
+                       //
+                       // https://github.com/hashicorp/terraform/issues/21407
+                       if s.Current == nil {
+                               log.Printf("[WARN] no current state found for %s", n.Name())
+                       } else {
+                               for _, addr := range s.Current.Dependencies {
+                                       if addr == nil {
+                                               // Should never happen; indicates a bug in the state loader
+                                               panic(fmt.Sprintf("dependencies for current object on %s contains nil address", n.ResourceInstanceAddr()))
+                                       }
+
+                                       // This is a little weird: we need to manufacture an addrs.Reference
+                                       // with a fake range here because the state isn't something we can
+                                       // make source references into.
+                                       result = append(result, &addrs.Reference{
+                                               Subject: addr,
+                                               SourceRange: tfdiags.SourceRange{
+                                                       Filename: "(state file)",
+                                               },
+                                       })
+                               }
                        }
                        return result
                }
index dad7bfc5fd98560054ac9abf6725eb9c3f0f01c4..d79532467f60f9f267c0c0f573b0e81c28c6eb41 100644 (file)
@@ -101,13 +101,6 @@ func (n *NodeApplyableResourceInstance) References() []*addrs.Reference {
 func (n *NodeApplyableResourceInstance) EvalTree() EvalNode {
        addr := n.ResourceInstanceAddr()
 
-       // State still uses legacy-style internal ids, so we need to shim to get
-       // a suitable key to use.
-       stateId := NewLegacyResourceInstanceAddress(addr).stateId()
-
-       // Determine the dependencies for the state.
-       stateDeps := n.StateReferences()
-
        if n.Config == nil {
                // This should not be possible, but we've got here in at least one
                // case as discussed in the following issue:
@@ -132,15 +125,15 @@ func (n *NodeApplyableResourceInstance) EvalTree() EvalNode {
        // Eval info is different depending on what kind of resource this is
        switch n.Config.Mode {
        case addrs.ManagedResourceMode:
-               return n.evalTreeManagedResource(addr, stateId, stateDeps)
+               return n.evalTreeManagedResource(addr)
        case addrs.DataResourceMode:
-               return n.evalTreeDataResource(addr, stateId, stateDeps)
+               return n.evalTreeDataResource(addr)
        default:
                panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
        }
 }
 
-func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
+func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode {
        var provider providers.Interface
        var providerSchema *ProviderSchema
        var change *plans.ResourceInstanceChange
@@ -206,7 +199,7 @@ func (n *NodeApplyableResourceInstance) evalTreeDataResource(addr addrs.AbsResou
        }
 }
 
-func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
+func (n *NodeApplyableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode {
        // Declare a bunch of variables that are used for state during
        // evaluation. Most of this are written to by-address below.
        var provider providers.Interface
index 633c1c466242889f152cfdbd60f1a5387e756a85..ec4aa9322a923bd461b36bb7176d9467af491327 100644 (file)
@@ -77,6 +77,11 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
                return nil, diags.Err()
        }
 
+       forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
+       if forEachDiags.HasErrors() {
+               return nil, forEachDiags.Err()
+       }
+
        // Next we need to potentially rename an instance address in the state
        // if we're transitioning whether "count" is set at all.
        fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
@@ -119,18 +124,20 @@ func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
 
        // Start creating the steps
        steps := []GraphTransformer{
-               // Expand the count.
+               // Expand the count or for_each (if present)
                &ResourceCountTransformer{
                        Concrete: concreteResource,
                        Schema:   n.Schema,
                        Count:    count,
+                       ForEach:  forEachMap,
                        Addr:     n.ResourceAddr(),
                },
 
-               // Add the count orphans
+               // Add the count/for_each orphans
                &OrphanResourceCountTransformer{
                        Concrete: concreteResourceOrphan,
                        Count:    count,
+                       ForEach:  forEachMap,
                        Addr:     n.ResourceAddr(),
                        State:    state,
                },
index 75e0bcd343b9014067bb2f781fbf82a022bf98a8..0f74bbe61c64e7244293aaf4e8b16c27e39d2c9b 100644 (file)
@@ -34,25 +34,18 @@ var (
 func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
        addr := n.ResourceInstanceAddr()
 
-       // State still uses legacy-style internal ids, so we need to shim to get
-       // a suitable key to use.
-       stateId := NewLegacyResourceInstanceAddress(addr).stateId()
-
-       // Determine the dependencies for the state.
-       stateDeps := n.StateReferences()
-
        // Eval info is different depending on what kind of resource this is
        switch addr.Resource.Resource.Mode {
        case addrs.ManagedResourceMode:
-               return n.evalTreeManagedResource(addr, stateId, stateDeps)
+               return n.evalTreeManagedResource(addr)
        case addrs.DataResourceMode:
-               return n.evalTreeDataResource(addr, stateId, stateDeps)
+               return n.evalTreeDataResource(addr)
        default:
                panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
        }
 }
 
-func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
+func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResourceInstance) EvalNode {
        config := n.Config
        var provider providers.Interface
        var providerSchema *ProviderSchema
@@ -147,7 +140,7 @@ func (n *NodePlannableResourceInstance) evalTreeDataResource(addr addrs.AbsResou
        }
 }
 
-func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance, stateId string, stateDeps []addrs.Referenceable) EvalNode {
+func (n *NodePlannableResourceInstance) evalTreeManagedResource(addr addrs.AbsResourceInstance) EvalNode {
        config := n.Config
        var provider providers.Interface
        var providerSchema *ProviderSchema
index 95060232068a9d922117d6dcf5a9bba9c2395700..9daeabfa69cbef77c2eb1c9d5e4db0b59affbaa6 100644 (file)
@@ -39,6 +39,11 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
                return nil, diags.Err()
        }
 
+       forEachMap, forEachDiags := evaluateResourceForEachExpression(n.Config.ForEach, ctx)
+       if forEachDiags.HasErrors() {
+               return nil, forEachDiags.Err()
+       }
+
        // Next we need to potentially rename an instance address in the state
        // if we're transitioning whether "count" is set at all.
        fixResourceCountSetTransition(ctx, n.ResourceAddr(), count != -1)
@@ -66,6 +71,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
                        Concrete: concreteResource,
                        Schema:   n.Schema,
                        Count:    count,
+                       ForEach:  forEachMap,
                        Addr:     n.ResourceAddr(),
                },
 
@@ -74,6 +80,7 @@ func (n *NodeRefreshableManagedResource) DynamicExpand(ctx EvalContext) (*Graph,
                &OrphanResourceCountTransformer{
                        Concrete: concreteResource,
                        Count:    count,
+                       ForEach:  forEachMap,
                        Addr:     n.ResourceAddr(),
                        State:    state,
                },
index 734ec9e2b7f38c99187f17910a51c88af9c92801..efa657bf0c99a5ffacc3448481aab24d19d220bc 100644 (file)
@@ -54,6 +54,7 @@ func (n *NodeValidatableResource) EvalTree() EvalNode {
 
        if managed := n.Config.Managed; managed != nil {
                hasCount := n.Config.Count != nil
+               hasForEach := n.Config.ForEach != nil
 
                // Validate all the provisioners
                for _, p := range managed.Provisioners {
@@ -74,11 +75,12 @@ func (n *NodeValidatableResource) EvalTree() EvalNode {
                                        Schema: &provisionerSchema,
                                },
                                &EvalValidateProvisioner{
-                                       ResourceAddr:     addr.Resource,
-                                       Provisioner:      &provisioner,
-                                       Schema:           &provisionerSchema,
-                                       Config:           p,
-                                       ResourceHasCount: hasCount,
+                                       ResourceAddr:       addr.Resource,
+                                       Provisioner:        &provisioner,
+                                       Schema:             &provisionerSchema,
+                                       Config:             p,
+                                       ResourceHasCount:   hasCount,
+                                       ResourceHasForEach: hasForEach,
                                },
                        )
                }
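
The new `ResourceHasForEach` flag exists so provisioner validation can reject `each.key`/`each.value` references on resources without `for_each`, the same way `ResourceHasCount` already gated `count.index`. A hedged, simplified sketch of that rule (hypothetical function, not the real `EvalValidateProvisioner` logic):

```go
package main

import (
	"errors"
	"fmt"
)

// validateRef rejects references that are only meaningful when the
// corresponding repetition argument is set on the resource.
func validateRef(ref string, hasCount, hasForEach bool) error {
	switch ref {
	case "count.index":
		if !hasCount {
			return errors.New(`"count" is not set on this resource`)
		}
	case "each.key", "each.value":
		if !hasForEach {
			return errors.New(`"for_each" is not set on this resource`)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateRef("each.key", false, true))  // <nil>
	fmt.Println(validateRef("each.key", false, false)) // error
}
```
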
index 4ae346d7d25b5d379dcc3cc4199ab864b4a91578..8eede48211d4543b45d134324555bceefa5083ab 100644 (file)
@@ -8,7 +8,6 @@ import (
        "github.com/zclconf/go-cty/cty"
        ctyjson "github.com/zclconf/go-cty/cty/json"
 
-       "github.com/hashicorp/terraform/config"
        "github.com/hashicorp/terraform/config/hcl2shim"
        "github.com/hashicorp/terraform/providers"
        "github.com/hashicorp/terraform/tfdiags"
@@ -391,7 +390,7 @@ func (p *MockProvider) ApplyResourceChange(r providers.ApplyResourceChangeReques
                        for k, new := range plannedMap {
                                old := priorMap[k]
                                newComputed := false
-                               if new == config.UnknownVariableValue {
+                               if new == hcl2shim.UnknownVariableValue {
                                        new = ""
                                        newComputed = true
                                }
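
This hunk only moves the "unknown value" sentinel from the removed `config` package to `hcl2shim`; the pattern itself is unchanged. A minimal sketch of how such a sentinel is used when merging legacy flatmap attributes (the constant below is a local stand-in, not the real `hcl2shim.UnknownVariableValue`):

```go
package main

import "fmt"

// unknownVariableValue is a stand-in sentinel; the real one lives in
// github.com/hashicorp/terraform/config/hcl2shim.
const unknownVariableValue = "<unknown>"

// mergePlanned drops attributes that are unknown at plan time so the
// provider can compute them at apply time, keeping prior values instead.
func mergePlanned(prior, planned map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range planned {
		if v == unknownVariableValue {
			continue
		}
		out[k] = v
	}
	for k, v := range prior {
		if _, ok := out[k]; !ok {
			out[k] = v
		}
	}
	return out
}

func main() {
	prior := map[string]string{"id": "i-123"}
	planned := map[string]string{"id": unknownVariableValue, "name": "web"}
	fmt.Println(mergePlanned(prior, planned)) // map[id:i-123 name:web]
}
```
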
index 156ecf5c0eafdcade14aedda3331db80511ca8d5..5d8261a6763d424b72540b94cab9664d197c2028 100644 (file)
@@ -365,6 +365,8 @@ func NewLegacyResourceInstanceAddress(addr addrs.AbsResourceInstance) *ResourceA
                ret.Index = -1
        } else if ik, ok := addr.Resource.Key.(addrs.IntKey); ok {
                ret.Index = int(ik)
+       } else if _, ok := addr.Resource.Key.(addrs.StringKey); ok {
+               ret.Index = -1
        } else {
                panic(fmt.Errorf("cannot shim resource instance with key %#v to legacy ResourceAddress.Index", addr.Resource.Key))
        }
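
The added branch shims `for_each` string keys into the legacy address type, which only carries an integer `Index`; string keys collapse to `-1` just like no key at all. A toy mirror of that rule with local mock key types (not the real `addrs` package):

```go
package main

import "fmt"

type instanceKey interface{ isKey() }

type intKey int

func (intKey) isKey() {}

type stringKey string

func (stringKey) isKey() {}

// legacyIndex mirrors NewLegacyResourceInstanceAddress: integer keys map
// through, string (for_each) keys have no legacy index and become -1.
func legacyIndex(k instanceKey) int {
	switch k := k.(type) {
	case nil:
		return -1
	case intKey:
		return int(k)
	case stringKey:
		return -1
	default:
		panic(fmt.Errorf("unsupported key %#v", k))
	}
}

func main() {
	fmt.Println(legacyIndex(intKey(2)))        // 2
	fmt.Println(legacyIndex(stringKey("web"))) // -1
	fmt.Println(legacyIndex(nil))              // -1
}
```
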
index 092b69079965a0335743d909526ff92c8d4cf484..6280fb0e4c057e4aa23cf168d8a48e3ad07b4f5e 100644 (file)
@@ -1201,7 +1201,7 @@ func (m *ModuleState) prune() {
        }
 
        for k, v := range m.Outputs {
-               if v.Value == config.UnknownVariableValue {
+               if v.Value == hcl2shim.UnknownVariableValue {
                        delete(m.Outputs, k)
                }
        }
@@ -1827,7 +1827,7 @@ func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
                                continue
                        }
                        if diff.NewComputed {
-                               result.Attributes[k] = config.UnknownVariableValue
+                               result.Attributes[k] = hcl2shim.UnknownVariableValue
                                continue
                        }
 
index eec762e55af644dd4ea820e8ea07bc15ccc3c0e1..4f323a7a0ed20aedf2c0c0ee9035b53dc2842757 100644 (file)
@@ -6,6 +6,7 @@ import (
        "github.com/hashicorp/terraform/addrs"
        "github.com/hashicorp/terraform/dag"
        "github.com/hashicorp/terraform/states"
+       "github.com/zclconf/go-cty/cty"
 )
 
 // OrphanResourceCountTransformer is a GraphTransformer that adds orphans
@@ -18,9 +19,10 @@ import (
 type OrphanResourceCountTransformer struct {
        Concrete ConcreteResourceInstanceNodeFunc
 
-       Count int               // Actual count of the resource, or -1 if count is not set at all
-       Addr  addrs.AbsResource // Addr of the resource to look for orphans
-       State *states.State     // Full global state
+       Count   int                  // Actual count of the resource, or -1 if count is not set at all
+       ForEach map[string]cty.Value // The ForEach map on the resource
+       Addr    addrs.AbsResource    // Addr of the resource to look for orphans
+       State   *states.State        // Full global state
 }
 
 func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
@@ -34,6 +36,10 @@ func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
                haveKeys[key] = struct{}{}
        }
 
+       // if for_each is set, use that transformer
+       if t.ForEach != nil {
+               return t.transformForEach(haveKeys, g)
+       }
        if t.Count < 0 {
                return t.transformNoCount(haveKeys, g)
        }
@@ -43,6 +49,25 @@ func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
        return t.transformCount(haveKeys, g)
 }
 
+func (t *OrphanResourceCountTransformer) transformForEach(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
+       for key := range haveKeys {
+               s, _ := key.(addrs.StringKey)
+               // If the key is present in our current for_each, carry on
+               if _, ok := t.ForEach[string(s)]; ok {
+                       continue
+               }
+
+               abstract := NewNodeAbstractResourceInstance(t.Addr.Instance(key))
+               var node dag.Vertex = abstract
+               if f := t.Concrete; f != nil {
+                       node = f(abstract)
+               }
+               log.Printf("[TRACE] OrphanResourceCount(non-zero): adding %s as %T", t.Addr, node)
+               g.Add(node)
+       }
+       return nil
+}
+
 func (t *OrphanResourceCountTransformer) transformCount(haveKeys map[addrs.InstanceKey]struct{}, g *Graph) error {
        // Due to the logic in Transform, we only get in here if our count is
        // at least one.
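
`transformForEach` above is the `for_each` analogue of the count orphan logic: any string key present in state but absent from the evaluated `for_each` map becomes an orphan node to destroy. A self-contained sketch of that set difference (simplified types, hypothetical names):

```go
package main

import (
	"fmt"
	"sort"
)

// orphanKeys returns the instance keys that exist in state (haveKeys) but
// are no longer present in the configured for_each map.
func orphanKeys(haveKeys, forEach map[string]struct{}) []string {
	var orphans []string
	for key := range haveKeys {
		if _, ok := forEach[key]; ok {
			continue // still configured, not an orphan
		}
		orphans = append(orphans, key)
	}
	sort.Strings(orphans)
	return orphans
}

func main() {
	state := map[string]struct{}{"a": {}, "b": {}, "c": {}}
	config := map[string]struct{}{"a": {}, "c": {}}
	fmt.Printf("%q\n", orphanKeys(state, config)) // ["b"]
}
```
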
index 11237909f33400741d8b5b4ce57ed00782e284ab..c70a3c14484ffce26bb3d0f91e379f492230eece 100644 (file)
@@ -4,6 +4,7 @@ import (
        "github.com/hashicorp/terraform/addrs"
        "github.com/hashicorp/terraform/configs/configschema"
        "github.com/hashicorp/terraform/dag"
+       "github.com/zclconf/go-cty/cty"
 )
 
 // ResourceCountTransformer is a GraphTransformer that expands the count
@@ -17,12 +18,13 @@ type ResourceCountTransformer struct {
        // Count is either the number of indexed instances to create, or -1 to
        // indicate that count is not set at all and thus a no-key instance should
        // be created.
-       Count int
-       Addr  addrs.AbsResource
+       Count   int
+       ForEach map[string]cty.Value
+       Addr    addrs.AbsResource
 }
 
 func (t *ResourceCountTransformer) Transform(g *Graph) error {
-       if t.Count < 0 {
+       if t.Count < 0 && t.ForEach == nil {
                // Negative count indicates that count is not set at all.
                addr := t.Addr.Instance(addrs.NoKey)
 
@@ -37,6 +39,19 @@ func (t *ResourceCountTransformer) Transform(g *Graph) error {
                return nil
        }
 
+       // Add nodes related to the for_each expression
+       for key := range t.ForEach {
+               addr := t.Addr.Instance(addrs.StringKey(key))
+               abstract := NewNodeAbstractResourceInstance(addr)
+               abstract.Schema = t.Schema
+               var node dag.Vertex = abstract
+               if f := t.Concrete; f != nil {
+                       node = f(abstract)
+               }
+
+               g.Add(node)
+       }
+
        // For each count, build and add the node
        for i := 0; i < t.Count; i++ {
                key := addrs.IntKey(i)
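
After this change the transformer can expand three shapes of resource: bare (no repetition), `count` with integer keys, and `for_each` with string keys. For intuition, a hypothetical address formatter showing what each instance key renders as (not the real `addrs` implementation):

```go
package main

import "fmt"

// instanceAddr renders the three instance-key shapes the transformer
// produces: nil (no key), int (count), and string (for_each).
func instanceAddr(resource string, key interface{}) string {
	switch k := key.(type) {
	case nil:
		return resource
	case int:
		return fmt.Sprintf("%s[%d]", resource, k)
	case string:
		return fmt.Sprintf("%s[%q]", resource, k)
	default:
		panic(fmt.Sprintf("unsupported key type %T", key))
	}
}

func main() {
	fmt.Println(instanceAddr("aws_instance.web", nil)) // aws_instance.web
	fmt.Println(instanceAddr("aws_instance.web", 0))   // aws_instance.web[0]
	fmt.Println(instanceAddr("aws_instance.web", "a")) // aws_instance.web["a"]
}
```
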
index 752241af1ee438d53b82bd3e1cc8045b56fa808f..5428cd5a0a1750eb55f46a145b3b57ab81b5a829 100644 (file)
@@ -2,8 +2,6 @@ package terraform
 
 import (
        "sort"
-
-       "github.com/hashicorp/terraform/config"
 )
 
 // Semaphore is a wrapper around a channel to provide
@@ -48,10 +46,6 @@ func (s Semaphore) Release() {
        }
 }
 
-func resourceProvider(resourceType, explicitProvider string) string {
-       return config.ResourceProviderFullName(resourceType, explicitProvider)
-}
-
 // strSliceContains checks if a given string is contained in a slice
 // When anybody asks why Go needs generics, here you go.
 func strSliceContains(haystack []string, needle string) bool {
index 30d72844a4a31cda04eee53121af124cf196b051..c30595c5cde42eb26629d2db595c23f4970fb850 100644 (file)
@@ -11,7 +11,7 @@ import (
 )
 
 // The main version number that is being run at the moment.
-var Version = "0.12.0"
+var Version = "0.12.6"
 
 // A pre-release marker for the version. If this is "" (empty string)
 // then it means that it is a final release. Otherwise, this is a pre-release
diff --git a/vendor/github.com/satori/go.uuid/.travis.yml b/vendor/github.com/satori/go.uuid/.travis.yml
new file mode 100644 (file)
index 0000000..20dd53b
--- /dev/null
@@ -0,0 +1,23 @@
+language: go
+sudo: false
+go:
+    - 1.2
+    - 1.3
+    - 1.4
+    - 1.5
+    - 1.6
+    - 1.7
+    - 1.8
+    - 1.9
+    - tip
+matrix:
+    allow_failures:
+        - go: tip
+    fast_finish: true
+before_install:
+    - go get github.com/mattn/goveralls
+    - go get golang.org/x/tools/cmd/cover
+script:
+    - $HOME/gopath/bin/goveralls -service=travis-ci
+notifications:
+    email: false
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
new file mode 100644 (file)
index 0000000..926d549
--- /dev/null
@@ -0,0 +1,20 @@
+Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
new file mode 100644 (file)
index 0000000..7b1a722
--- /dev/null
@@ -0,0 +1,65 @@
+# UUID package for Go language
+
+[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
+[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
+[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
+
+This package provides a pure Go implementation of Universally Unique Identifiers (UUID). Both creation and parsing of UUIDs are supported.
+
+It ships with 100% test coverage and benchmarks out of the box.
+
+Supported versions:
+* Version 1, based on timestamp and MAC address (RFC 4122)
+* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
+* Version 3, based on MD5 hashing (RFC 4122)
+* Version 4, based on random numbers (RFC 4122)
+* Version 5, based on SHA-1 hashing (RFC 4122)
+
+## Installation
+
+Use the `go` command:
+
+       $ go get github.com/satori/go.uuid
+
+## Requirements
+
+The UUID package requires Go >= 1.2.
+
+## Example
+
+```go
+package main
+
+import (
+       "fmt"
+       "github.com/satori/go.uuid"
+)
+
+func main() {
+       // Creating UUID Version 4
+       u1 := uuid.NewV4()
+       fmt.Printf("UUIDv4: %s\n", u1)
+
+       // Parsing UUID from string input
+       u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+       if err != nil {
+               fmt.Printf("Something gone wrong: %s", err)
+       }
+       fmt.Printf("Successfully parsed: %s", u2)
+}
+```
+
+## Documentation
+
+[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at GoDoc project.
+
+## Links
+* [RFC 4122](http://tools.ietf.org/html/rfc4122)
+* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
+
+## Copyright
+
+Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>.
+
+UUID package released under MIT License.
+See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go
new file mode 100644 (file)
index 0000000..656892c
--- /dev/null
@@ -0,0 +1,206 @@
+// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+       "bytes"
+       "encoding/hex"
+       "fmt"
+)
+
+// FromBytes returns UUID converted from raw byte slice input.
+// It will return an error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (u UUID, err error) {
+       err = u.UnmarshalBinary(input)
+       return
+}
+
+// FromBytesOrNil returns UUID converted from raw byte slice input.
+// Same behavior as FromBytes, but returns a Nil UUID on error.
+func FromBytesOrNil(input []byte) UUID {
+       uuid, err := FromBytes(input)
+       if err != nil {
+               return Nil
+       }
+       return uuid
+}
+
+// FromString returns UUID parsed from string input.
+// Input is expected in a form accepted by UnmarshalText.
+func FromString(input string) (u UUID, err error) {
+       err = u.UnmarshalText([]byte(input))
+       return
+}
+
+// FromStringOrNil returns UUID parsed from string input.
+// Same behavior as FromString, but returns a Nil UUID on error.
+func FromStringOrNil(input string) UUID {
+       uuid, err := FromString(input)
+       if err != nil {
+               return Nil
+       }
+       return uuid
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String.
+func (u UUID) MarshalText() (text []byte, err error) {
+       text = []byte(u.String())
+       return
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The following formats are supported:
+//   "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+//   "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+//   "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+//   "6ba7b8109dad11d180b400c04fd430c8"
+// ABNF for supported UUID text representation follows:
+//   uuid := canonical | hashlike | braced | urn
+//   plain := canonical | hashlike
+//   canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
+//   hashlike := 12hexoct
+//   braced := '{' plain '}'
+//   urn := URN ':' UUID-NID ':' plain
+//   URN := 'urn'
+//   UUID-NID := 'uuid'
+//   12hexoct := 6hexoct 6hexoct
+//   6hexoct := 4hexoct 2hexoct
+//   4hexoct := 2hexoct 2hexoct
+//   2hexoct := hexoct hexoct
+//   hexoct := hexdig hexdig
+//   hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
+//             'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
+//             'A' | 'B' | 'C' | 'D' | 'E' | 'F'
+func (u *UUID) UnmarshalText(text []byte) (err error) {
+       switch len(text) {
+       case 32:
+               return u.decodeHashLike(text)
+       case 36:
+               return u.decodeCanonical(text)
+       case 38:
+               return u.decodeBraced(text)
+       case 41:
+               fallthrough
+       case 45:
+               return u.decodeURN(text)
+       default:
+               return fmt.Errorf("uuid: incorrect UUID length: %s", text)
+       }
+}
+
+// decodeCanonical decodes UUID string in format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
+func (u *UUID) decodeCanonical(t []byte) (err error) {
+       if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
+               return fmt.Errorf("uuid: incorrect UUID format %s", t)
+       }
+
+       src := t[:]
+       dst := u[:]
+
+       for i, byteGroup := range byteGroups {
+               if i > 0 {
+                       src = src[1:] // skip dash
+               }
+               _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup])
+               if err != nil {
+                       return
+               }
+               src = src[byteGroup:]
+               dst = dst[byteGroup/2:]
+       }
+
+       return
+}
+
+// decodeHashLike decodes UUID string in format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeHashLike(t []byte) (err error) {
+       src := t[:]
+       dst := u[:]
+
+       if _, err = hex.Decode(dst, src); err != nil {
+               return err
+       }
+       return
+}
+
+// decodeBraced decodes UUID string in format
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format
+// "{6ba7b8109dad11d180b400c04fd430c8}".
+func (u *UUID) decodeBraced(t []byte) (err error) {
+       l := len(t)
+
+       if t[0] != '{' || t[l-1] != '}' {
+               return fmt.Errorf("uuid: incorrect UUID format %s", t)
+       }
+
+       return u.decodePlain(t[1 : l-1])
+}
+
+// decodeURN decodes UUID string in format
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format
+// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodeURN(t []byte) (err error) {
+       total := len(t)
+
+       urn_uuid_prefix := t[:9]
+
+       if !bytes.Equal(urn_uuid_prefix, urnPrefix) {
+               return fmt.Errorf("uuid: incorrect UUID format: %s", t)
+       }
+
+       return u.decodePlain(t[9:total])
+}
+
+// decodePlain decodes UUID string in canonical format
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
+// "6ba7b8109dad11d180b400c04fd430c8".
+func (u *UUID) decodePlain(t []byte) (err error) {
+       switch len(t) {
+       case 32:
+               return u.decodeHashLike(t)
+       case 36:
+               return u.decodeCanonical(t)
+       default:
+               return fmt.Errorf("uuid: incorrrect UUID length: %s", t)
+       }
+}
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() (data []byte, err error) {
+       data = u.Bytes()
+       return
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return an error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) (err error) {
+       if len(data) != Size {
+               err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+               return
+       }
+       copy(u[:], data)
+
+       return
+}
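
A short usage example for the codec above: `FromString` accepts all four text forms listed in the `UnmarshalText` comment, and each decodes to the same UUID. This uses only the vendored API shown here:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	forms := []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",          // canonical
		"6ba7b8109dad11d180b400c04fd430c8",              // hash-like
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",        // braced
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // URN
	}
	for _, f := range forms {
		u, err := uuid.FromString(f)
		if err != nil {
			fmt.Printf("%s -> error: %s\n", f, err)
			continue
		}
		fmt.Printf("%-47s -> %s\n", f, u)
	}
}
```
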
diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go
new file mode 100644 (file)
index 0000000..3f2f1da
--- /dev/null
@@ -0,0 +1,239 @@
+// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+       "crypto/md5"
+       "crypto/rand"
+       "crypto/sha1"
+       "encoding/binary"
+       "hash"
+       "net"
+       "os"
+       "sync"
+       "time"
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
+
+var (
+       global = newDefaultGenerator()
+
+       epochFunc = unixTimeFunc
+       posixUID  = uint32(os.Getuid())
+       posixGID  = uint32(os.Getgid())
+)
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+func NewV1() UUID {
+       return global.NewV1()
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+func NewV2(domain byte) UUID {
+       return global.NewV2(domain)
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+func NewV3(ns UUID, name string) UUID {
+       return global.NewV3(ns, name)
+}
+
+// NewV4 returns a randomly generated UUID.
+func NewV4() UUID {
+       return global.NewV4()
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+func NewV5(ns UUID, name string) UUID {
+       return global.NewV5(ns, name)
+}
+
+// Generator provides interface for generating UUIDs.
+type Generator interface {
+       NewV1() UUID
+       NewV2(domain byte) UUID
+       NewV3(ns UUID, name string) UUID
+       NewV4() UUID
+       NewV5(ns UUID, name string) UUID
+}
+
+// Default generator implementation.
+type generator struct {
+       storageOnce  sync.Once
+       storageMutex sync.Mutex
+
+       lastTime      uint64
+       clockSequence uint16
+       hardwareAddr  [6]byte
+}
+
+func newDefaultGenerator() Generator {
+       return &generator{}
+}
+
+// NewV1 returns UUID based on current timestamp and MAC address.
+func (g *generator) NewV1() UUID {
+       u := UUID{}
+
+       timeNow, clockSeq, hardwareAddr := g.getStorage()
+
+       binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+       binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+       binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+       binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+       copy(u[10:], hardwareAddr)
+
+       u.SetVersion(V1)
+       u.SetVariant(VariantRFC4122)
+
+       return u
+}
+
+// NewV2 returns DCE Security UUID based on POSIX UID/GID.
+func (g *generator) NewV2(domain byte) UUID {
+       u := UUID{}
+
+       timeNow, clockSeq, hardwareAddr := g.getStorage()
+
+       switch domain {
+       case DomainPerson:
+               binary.BigEndian.PutUint32(u[0:], posixUID)
+       case DomainGroup:
+               binary.BigEndian.PutUint32(u[0:], posixGID)
+       }
+
+       binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+       binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+       binary.BigEndian.PutUint16(u[8:], clockSeq)
+       u[9] = domain
+
+       copy(u[10:], hardwareAddr)
+
+       u.SetVersion(V2)
+       u.SetVariant(VariantRFC4122)
+
+       return u
+}
+
+// NewV3 returns UUID based on MD5 hash of namespace UUID and name.
+func (g *generator) NewV3(ns UUID, name string) UUID {
+       u := newFromHash(md5.New(), ns, name)
+       u.SetVersion(V3)
+       u.SetVariant(VariantRFC4122)
+
+       return u
+}
+
+// NewV4 returns a randomly generated UUID.
+func (g *generator) NewV4() UUID {
+       u := UUID{}
+       g.safeRandom(u[:])
+       u.SetVersion(V4)
+       u.SetVariant(VariantRFC4122)
+
+       return u
+}
+
+// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name.
+func (g *generator) NewV5(ns UUID, name string) UUID {
+       u := newFromHash(sha1.New(), ns, name)
+       u.SetVersion(V5)
+       u.SetVariant(VariantRFC4122)
+
+       return u
+}
+
+func (g *generator) initStorage() {
+       g.initClockSequence()
+       g.initHardwareAddr()
+}
+
+func (g *generator) initClockSequence() {
+       buf := make([]byte, 2)
+       g.safeRandom(buf)
+       g.clockSequence = binary.BigEndian.Uint16(buf)
+}
+
+func (g *generator) initHardwareAddr() {
+       interfaces, err := net.Interfaces()
+       if err == nil {
+               for _, iface := range interfaces {
+                       if len(iface.HardwareAddr) >= 6 {
+                               copy(g.hardwareAddr[:], iface.HardwareAddr)
+                               return
+                       }
+               }
+       }
+
+       // Initialize hardwareAddr randomly in case
+       // no real network interface is available.
+       g.safeRandom(g.hardwareAddr[:])
+
+       // Set multicast bit as recommended in RFC 4122
+       g.hardwareAddr[0] |= 0x01
+}
+
+func (g *generator) safeRandom(dest []byte) {
+       if _, err := rand.Read(dest); err != nil {
+               panic(err)
+       }
+}
+
+// Returns UUID v1/v2 storage state.
+// Returns epoch timestamp, clock sequence, and hardware address.
+func (g *generator) getStorage() (uint64, uint16, []byte) {
+       g.storageOnce.Do(g.initStorage)
+
+       g.storageMutex.Lock()
+       defer g.storageMutex.Unlock()
+
+       timeNow := epochFunc()
+       // Clock changed backwards since last UUID generation.
+       // Should increase clock sequence.
+       if timeNow <= g.lastTime {
+               g.clockSequence++
+       }
+       g.lastTime = timeNow
+
+       return timeNow, g.clockSequence, g.hardwareAddr[:]
+}
+
+// Returns difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and current time.
+// This is the default epoch calculation function.
+func unixTimeFunc() uint64 {
+       return epochStart + uint64(time.Now().UnixNano()/100)
+}
+
+// Returns UUID based on hashing of namespace UUID and name.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+       u := UUID{}
+       h.Write(ns[:])
+       h.Write([]byte(name))
+       copy(u[:], h.Sum(nil))
+
+       return u
+}
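
A quick demonstration of the generators above: `NewV4` is random on every call, while `NewV3`/`NewV5` hash a namespace and name and are therefore deterministic. Again using only the vendored API:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	fmt.Println(uuid.NewV4()) // different on every run
	fmt.Println(uuid.NewV4())

	a := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	b := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	fmt.Println(a)
	fmt.Println("deterministic:", uuid.Equal(a, b)) // true
}
```
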
diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go
new file mode 100644 (file)
index 0000000..56759d3
--- /dev/null
@@ -0,0 +1,78 @@
+// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+package uuid
+
+import (
+       "database/sql/driver"
+       "fmt"
+)
+
+// Value implements the driver.Valuer interface.
+func (u UUID) Value() (driver.Value, error) {
+       return u.String(), nil
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice is handled by UnmarshalBinary, while
+// a longer byte slice or a string is handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+       switch src := src.(type) {
+       case []byte:
+               if len(src) == Size {
+                       return u.UnmarshalBinary(src)
+               }
+               return u.UnmarshalText(src)
+
+       case string:
+               return u.UnmarshalText([]byte(src))
+       }
+
+       return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+}
+
+// NullUUID can be used with the standard sql package to represent a
+// UUID value that can be NULL in the database
+type NullUUID struct {
+       UUID  UUID
+       Valid bool
+}
+
+// Value implements the driver.Valuer interface.
+func (u NullUUID) Value() (driver.Value, error) {
+       if !u.Valid {
+               return nil, nil
+       }
+       // Delegate to UUID Value function
+       return u.UUID.Value()
+}
+
+// Scan implements the sql.Scanner interface.
+func (u *NullUUID) Scan(src interface{}) error {
+       if src == nil {
+               u.UUID, u.Valid = Nil, false
+               return nil
+       }
+
+       // Delegate to UUID Scan function
+       u.Valid = true
+       return u.UUID.Scan(src)
+}
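
`NullUUID` distinguishes SQL `NULL` from a real value. Its `Scan` method can be exercised directly, without a database, to see both cases:

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	var n uuid.NullUUID

	_ = n.Scan(nil)              // NULL column
	fmt.Println(n.Valid, n.UUID) // false 00000000-0000-0000-0000-000000000000

	_ = n.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8") // text column
	fmt.Println(n.Valid, n.UUID)                       // true 6ba7b810-9dad-11d1-80b4-00c04fd430c8
}
```
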
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
new file mode 100644 (file)
index 0000000..a2b8e2c
--- /dev/null
@@ -0,0 +1,161 @@
+// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Package uuid provides an implementation of Universally Unique Identifiers (UUID).
+// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
+// version 2 (as specified in DCE 1.1).
+package uuid
+
+import (
+       "bytes"
+       "encoding/hex"
+)
+
+// Size of a UUID in bytes.
+const Size = 16
+
+// UUID representation compliant with specification
+// described in RFC 4122.
+type UUID [Size]byte
+
+// UUID versions
+const (
+       _ byte = iota
+       V1
+       V2
+       V3
+       V4
+       V5
+)
+
+// UUID layout variants.
+const (
+       VariantNCS byte = iota
+       VariantRFC4122
+       VariantMicrosoft
+       VariantFuture
+)
+
+// UUID DCE domains.
+const (
+       DomainPerson = iota
+       DomainGroup
+       DomainOrg
+)
+
+// String parse helpers.
+var (
+       urnPrefix  = []byte("urn:uuid:")
+       byteGroups = []int{8, 4, 4, 4, 12}
+)
+
+// Nil is a special form of UUID that is specified to have all
+// 128 bits set to zero.
+var Nil = UUID{}
+
+// Predefined namespace UUIDs.
+var (
+       NamespaceDNS  = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+       NamespaceURL  = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+       NamespaceOID  = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+       NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+)
+
+// Equal returns true if u1 and u2 are equal, otherwise it returns false.
+func Equal(u1 UUID, u2 UUID) bool {
+       return bytes.Equal(u1[:], u2[:])
+}
+
+// Version returns algorithm version used to generate UUID.
+func (u UUID) Version() byte {
+       return u[6] >> 4
+}
+
+// Variant returns UUID layout variant.
+func (u UUID) Variant() byte {
+       switch {
+       case (u[8] >> 7) == 0x00:
+               return VariantNCS
+       case (u[8] >> 6) == 0x02:
+               return VariantRFC4122
+       case (u[8] >> 5) == 0x06:
+               return VariantMicrosoft
+       case (u[8] >> 5) == 0x07:
+               fallthrough
+       default:
+               return VariantFuture
+       }
+}
+
+// Bytes returns bytes slice representation of UUID.
+func (u UUID) Bytes() []byte {
+       return u[:]
+}
+
+// String returns the canonical string representation of UUID:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (u UUID) String() string {
+       buf := make([]byte, 36)
+
+       hex.Encode(buf[0:8], u[0:4])
+       buf[8] = '-'
+       hex.Encode(buf[9:13], u[4:6])
+       buf[13] = '-'
+       hex.Encode(buf[14:18], u[6:8])
+       buf[18] = '-'
+       hex.Encode(buf[19:23], u[8:10])
+       buf[23] = '-'
+       hex.Encode(buf[24:], u[10:])
+
+       return string(buf)
+}
+
+// SetVersion sets version bits.
+func (u *UUID) SetVersion(v byte) {
+       u[6] = (u[6] & 0x0f) | (v << 4)
+}
+
+// SetVariant sets variant bits.
+func (u *UUID) SetVariant(v byte) {
+       switch v {
+       case VariantNCS:
+               u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
+       case VariantRFC4122:
+               u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
+       case VariantMicrosoft:
+               u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
+       case VariantFuture:
+               fallthrough
+       default:
+               u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
+       }
+}
+
+// Must is a helper that wraps a call to a function returning (UUID, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initializations such as
+//     var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
+func Must(u UUID, err error) UUID {
+       if err != nil {
+               panic(err)
+       }
+       return u
+}
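
The accessors defined above can be checked against a freshly generated value: a V4 UUID reports version 4 and the RFC 4122 variant.

```go
package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	u := uuid.NewV4()
	fmt.Println(u)
	fmt.Println("version:", u.Version())                        // 4
	fmt.Println("rfc4122:", u.Variant() == uuid.VariantRFC4122) // true
}
```
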
diff --git a/vendor/github.com/zclconf/go-cty-yaml/.travis.yml b/vendor/github.com/zclconf/go-cty-yaml/.travis.yml
new file mode 100644 (file)
index 0000000..13ff998
--- /dev/null
@@ -0,0 +1,5 @@
+language: go
+
+go:
+    - 1.12
+
diff --git a/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md b/vendor/github.com/zclconf/go-cty-yaml/CHANGELOG.md
new file mode 100644 (file)
index 0000000..b3bc3b6
--- /dev/null
@@ -0,0 +1,10 @@
+# 1.0.1 (July 30, 2019)
+
+* The YAML decoder is now correctly treating quoted scalars as verbatim literal
+  strings rather than using the fuzzy type selection rules for them. Fuzzy
+  type selection rules still apply to unquoted scalars.
+  ([#4](https://github.com/zclconf/go-cty-yaml/pull/4))
+
+# 1.0.0 (May 26, 2019)
+
+Initial release.
diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE b/vendor/github.com/zclconf/go-cty-yaml/LICENSE
new file mode 100644 (file)
index 0000000..8dada3e
--- /dev/null
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml b/vendor/github.com/zclconf/go-cty-yaml/LICENSE.libyaml
new file mode 100644 (file)
index 0000000..8da58fb
--- /dev/null
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+    apic.go
+    emitterc.go
+    parserc.go
+    readerc.go
+    scannerc.go
+    writerc.go
+    yamlh.go
+    yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/zclconf/go-cty-yaml/NOTICE b/vendor/github.com/zclconf/go-cty-yaml/NOTICE
new file mode 100644 (file)
index 0000000..4e6c00a
--- /dev/null
@@ -0,0 +1,20 @@
+This package is derived from gopkg.in/yaml.v2, which is copyright
+2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Includes mechanical ports of code from libyaml, distributed under its original
+license. See LICENSE.libyaml for more information.
+
+Modifications for cty interfacing copyright 2019 Martin Atkins, and
+distributed under the same license terms.
diff --git a/vendor/github.com/zclconf/go-cty-yaml/apic.go b/vendor/github.com/zclconf/go-cty-yaml/apic.go
new file mode 100644 (file)
index 0000000..1f7e87e
--- /dev/null
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+       "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+       //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+       // Check if we can move the queue at the beginning of the buffer.
+       if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+               if parser.tokens_head != len(parser.tokens) {
+                       copy(parser.tokens, parser.tokens[parser.tokens_head:])
+               }
+               parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+               parser.tokens_head = 0
+       }
+       parser.tokens = append(parser.tokens, *token)
+       if pos < 0 {
+               return
+       }
+       copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+       parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+       *parser = yaml_parser_t{
+               raw_buffer: make([]byte, 0, input_raw_buffer_size),
+               buffer:     make([]byte, 0, input_buffer_size),
+       }
+       return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+       *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+       if parser.input_pos == len(parser.input) {
+               return 0, io.EOF
+       }
+       n = copy(buffer, parser.input[parser.input_pos:])
+       parser.input_pos += n
+       return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+       return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+       if parser.read_handler != nil {
+               panic("must set the input source only once")
+       }
+       parser.read_handler = yaml_string_read_handler
+       parser.input = input
+       parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+       if parser.read_handler != nil {
+               panic("must set the input source only once")
+       }
+       parser.read_handler = yaml_reader_read_handler
+       parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+       if parser.encoding != yaml_ANY_ENCODING {
+               panic("must set the encoding only once")
+       }
+       parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+       *emitter = yaml_emitter_t{
+               buffer:     make([]byte, output_buffer_size),
+               raw_buffer: make([]byte, 0, output_raw_buffer_size),
+               states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
+               events:     make([]yaml_event_t, 0, initial_queue_size),
+       }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+       *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+       *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+       return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+       _, err := emitter.output_writer.Write(buffer)
+       return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+       if emitter.write_handler != nil {
+               panic("must set the output target only once")
+       }
+       emitter.write_handler = yaml_string_write_handler
+       emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+       if emitter.write_handler != nil {
+               panic("must set the output target only once")
+       }
+       emitter.write_handler = yaml_writer_write_handler
+       emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+       if emitter.encoding != yaml_ANY_ENCODING {
+               panic("must set the output encoding only once")
+       }
+       emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+       emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+       if indent < 2 || indent > 9 {
+               indent = 2
+       }
+       emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+       if width < 0 {
+               width = -1
+       }
+       emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+       emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+       emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+//    assert(token);  // Non-NULL token object expected.
+//
+//    switch (token.type)
+//    {
+//        case YAML_TAG_DIRECTIVE_TOKEN:
+//            yaml_free(token.data.tag_directive.handle);
+//            yaml_free(token.data.tag_directive.prefix);
+//            break;
+//
+//        case YAML_ALIAS_TOKEN:
+//            yaml_free(token.data.alias.value);
+//            break;
+//
+//        case YAML_ANCHOR_TOKEN:
+//            yaml_free(token.data.anchor.value);
+//            break;
+//
+//        case YAML_TAG_TOKEN:
+//            yaml_free(token.data.tag.handle);
+//            yaml_free(token.data.tag.suffix);
+//            break;
+//
+//        case YAML_SCALAR_TOKEN:
+//            yaml_free(token.data.scalar.value);
+//            break;
+//
+//        default:
+//            break;
+//    }
+//
+//    memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+//    yaml_char_t *end = start+length;
+//    yaml_char_t *pointer = start;
+//
+//    while (pointer < end) {
+//        unsigned char octet;
+//        unsigned int width;
+//        unsigned int value;
+//        size_t k;
+//
+//        octet = pointer[0];
+//        width = (octet & 0x80) == 0x00 ? 1 :
+//                (octet & 0xE0) == 0xC0 ? 2 :
+//                (octet & 0xF0) == 0xE0 ? 3 :
+//                (octet & 0xF8) == 0xF0 ? 4 : 0;
+//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+//        if (!width) return 0;
+//        if (pointer+width > end) return 0;
+//        for (k = 1; k < width; k ++) {
+//            octet = pointer[k];
+//            if ((octet & 0xC0) != 0x80) return 0;
+//            value = (value << 6) + (octet & 0x3F);
+//        }
+//        if (!((width == 1) ||
+//            (width == 2 && value >= 0x80) ||
+//            (width == 3 && value >= 0x800) ||
+//            (width == 4 && value >= 0x10000))) return 0;
+//
+//        pointer += width;
+//    }
+//
+//    return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+       *event = yaml_event_t{
+               typ:      yaml_STREAM_START_EVENT,
+               encoding: encoding,
+       }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+       *event = yaml_event_t{
+               typ: yaml_STREAM_END_EVENT,
+       }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+       event *yaml_event_t,
+       version_directive *yaml_version_directive_t,
+       tag_directives []yaml_tag_directive_t,
+       implicit bool,
+) {
+       *event = yaml_event_t{
+               typ:               yaml_DOCUMENT_START_EVENT,
+               version_directive: version_directive,
+               tag_directives:    tag_directives,
+               implicit:          implicit,
+       }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+       *event = yaml_event_t{
+               typ:      yaml_DOCUMENT_END_EVENT,
+               implicit: implicit,
+       }
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    anchor_copy *yaml_char_t = NULL
+//
+//    assert(event) // Non-NULL event object is expected.
+//    assert(anchor) // Non-NULL anchor is expected.
+//
+//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+//    anchor_copy = yaml_strdup(anchor)
+//    if (!anchor_copy)
+//        return 0
+//
+//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+//    return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+       *event = yaml_event_t{
+               typ:             yaml_SCALAR_EVENT,
+               anchor:          anchor,
+               tag:             tag,
+               value:           value,
+               implicit:        plain_implicit,
+               quoted_implicit: quoted_implicit,
+               style:           yaml_style_t(style),
+       }
+       return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+       *event = yaml_event_t{
+               typ:      yaml_SEQUENCE_START_EVENT,
+               anchor:   anchor,
+               tag:      tag,
+               implicit: implicit,
+               style:    yaml_style_t(style),
+       }
+       return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+       *event = yaml_event_t{
+               typ: yaml_SEQUENCE_END_EVENT,
+       }
+       return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+       *event = yaml_event_t{
+               typ:      yaml_MAPPING_START_EVENT,
+               anchor:   anchor,
+               tag:      tag,
+               implicit: implicit,
+               style:    yaml_style_t(style),
+       }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+       *event = yaml_event_t{
+               typ: yaml_MAPPING_END_EVENT,
+       }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+       *event = yaml_event_t{}
+}
+
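+// Illustrative sketch (not part of the upstream file): the event sequence
+// for a stream carrying one document with a single scalar, fed through
+// yaml_emitter_emit (defined in emitterc.go) on an emitter prepared as in
+// the sketch further above. Error handling is elided, and the
+// yaml_PLAIN_SCALAR_STYLE constant is assumed from the style definitions.
+//
+//     var evt yaml_event_t
+//     yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING)
+//     yaml_emitter_emit(&emitter, &evt)
+//     yaml_document_start_event_initialize(&evt, nil, nil, true)
+//     yaml_emitter_emit(&emitter, &evt)
+//     yaml_scalar_event_initialize(&evt, nil, nil, []byte("hello"), true, false, yaml_PLAIN_SCALAR_STYLE)
+//     yaml_emitter_emit(&emitter, &evt)
+//     yaml_document_end_event_initialize(&evt, true)
+//     yaml_emitter_emit(&emitter, &evt)
+//     yaml_stream_end_event_initialize(&evt)
+//     yaml_emitter_emit(&emitter, &evt)
+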
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+//        version_directive *yaml_version_directive_t,
+//        tag_directives_start *yaml_tag_directive_t,
+//        tag_directives_end *yaml_tag_directive_t,
+//        start_implicit int, end_implicit int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    struct {
+//        start *yaml_node_t
+//        end *yaml_node_t
+//        top *yaml_node_t
+//    } nodes = { NULL, NULL, NULL }
+//    version_directive_copy *yaml_version_directive_t = NULL
+//    struct {
+//        start *yaml_tag_directive_t
+//        end *yaml_tag_directive_t
+//        top *yaml_tag_directive_t
+//    } tag_directives_copy = { NULL, NULL, NULL }
+//    value yaml_tag_directive_t = { NULL, NULL }
+//    mark yaml_mark_t = { 0, 0, 0 }
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert((tag_directives_start && tag_directives_end) ||
+//            (tag_directives_start == tag_directives_end))
+//                            // Valid tag directives are expected.
+//
+//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+//    if (version_directive) {
+//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+//        if (!version_directive_copy) goto error
+//        version_directive_copy.major = version_directive.major
+//        version_directive_copy.minor = version_directive.minor
+//    }
+//
+//    if (tag_directives_start != tag_directives_end) {
+//        tag_directive *yaml_tag_directive_t
+//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+//            goto error
+//        for (tag_directive = tag_directives_start
+//                tag_directive != tag_directives_end; tag_directive ++) {
+//            assert(tag_directive.handle)
+//            assert(tag_directive.prefix)
+//            if (!yaml_check_utf8(tag_directive.handle,
+//                        strlen((char *)tag_directive.handle)))
+//                goto error
+//            if (!yaml_check_utf8(tag_directive.prefix,
+//                        strlen((char *)tag_directive.prefix)))
+//                goto error
+//            value.handle = yaml_strdup(tag_directive.handle)
+//            value.prefix = yaml_strdup(tag_directive.prefix)
+//            if (!value.handle || !value.prefix) goto error
+//            if (!PUSH(&context, tag_directives_copy, value))
+//                goto error
+//            value.handle = NULL
+//            value.prefix = NULL
+//        }
+//    }
+//
+//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+//            tag_directives_copy.start, tag_directives_copy.top,
+//            start_implicit, end_implicit, mark, mark)
+//
+//    return 1
+//
+//error:
+//    STACK_DEL(&context, nodes)
+//    yaml_free(version_directive_copy)
+//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
+//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+//        yaml_free(value.handle)
+//        yaml_free(value.prefix)
+//    }
+//    STACK_DEL(&context, tag_directives_copy)
+//    yaml_free(value.handle)
+//    yaml_free(value.prefix)
+//
+//    return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    tag_directive *yaml_tag_directive_t
+//
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    while (!STACK_EMPTY(&context, document.nodes)) {
+//        node yaml_node_t = POP(&context, document.nodes)
+//        yaml_free(node.tag)
+//        switch (node.type) {
+//            case YAML_SCALAR_NODE:
+//                yaml_free(node.data.scalar.value)
+//                break
+//            case YAML_SEQUENCE_NODE:
+//                STACK_DEL(&context, node.data.sequence.items)
+//                break
+//            case YAML_MAPPING_NODE:
+//                STACK_DEL(&context, node.data.mapping.pairs)
+//                break
+//            default:
+//                assert(0) // Should not happen.
+//        }
+//    }
+//    STACK_DEL(&context, document.nodes)
+//
+//    yaml_free(document.version_directive)
+//    for (tag_directive = document.tag_directives.start
+//            tag_directive != document.tag_directives.end
+//            tag_directive++) {
+//        yaml_free(tag_directive.handle)
+//        yaml_free(tag_directive.prefix)
+//    }
+//    yaml_free(document.tag_directives.start)
+//
+//    memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+//        return document.nodes.start + index - 1
+//    }
+//    return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (document.nodes.top != document.nodes.start) {
+//        return document.nodes.start
+//    }
+//    return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+//        tag *yaml_char_t, value *yaml_char_t, length int,
+//        style yaml_scalar_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    value_copy *yaml_char_t = NULL
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert(value) // Non-NULL value is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (length < 0) {
+//        length = strlen((char *)value)
+//    }
+//
+//    if (!yaml_check_utf8(value, length)) goto error
+//    value_copy = yaml_malloc(length+1)
+//    if (!value_copy) goto error
+//    memcpy(value_copy, value, length)
+//    value_copy[length] = '\0'
+//
+//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    yaml_free(tag_copy)
+//    yaml_free(value_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_item_t
+//        end *yaml_node_item_t
+//        top *yaml_node_item_t
+//    } items = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, items)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_pair_t
+//        end *yaml_node_pair_t
+//        top *yaml_node_pair_t
+//    } pairs = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, pairs)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+//        sequence int, item int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(sequence > 0
+//            && document.nodes.start + sequence <= document.nodes.top)
+//                            // Valid sequence id is required.
+//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+//                            // A sequence node is required.
+//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+//                            // Valid item id is required.
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[sequence-1].data.sequence.items, item))
+//        return 0
+//
+//    return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+//        mapping int, key int, value int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    pair yaml_node_pair_t
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(mapping > 0
+//            && document.nodes.start + mapping <= document.nodes.top)
+//                            // Valid mapping id is required.
+//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+//                            // A mapping node is required.
+//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+//                            // Valid key id is required.
+//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+//                            // Valid value id is required.
+//
+//    pair.key = key
+//    pair.value = value
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
+//        return 0
+//
+//    return 1
+//}
+//
+//
diff --git a/vendor/github.com/zclconf/go-cty-yaml/converter.go b/vendor/github.com/zclconf/go-cty-yaml/converter.go
new file mode 100644 (file)
index 0000000..a73b34a
--- /dev/null
@@ -0,0 +1,69 @@
+package yaml
+
+import (
+       "github.com/zclconf/go-cty/cty"
+)
+
+// ConverterConfig is used to configure a new converter, using NewConverter.
+type ConverterConfig struct {
+       // EncodeAsFlow, when set to true, causes Marshal to produce flow-style
+       // mapping and sequence serializations.
+       EncodeAsFlow bool
+}
+
+// A Converter can marshal and unmarshal between cty values and YAML bytes.
+//
+// Because there are many different ways to map cty to YAML and vice-versa,
+// a converter is configurable using the settings in ConverterConfig, which
+// allow for a few different permutations of mapping to YAML.
+//
+// If you are just trying to work with generic, standard YAML, the predefined
+// converter in Standard should be good enough.
+type Converter struct {
+       encodeAsFlow bool
+}
+
+// NewConverter creates a new Converter with the given configuration.
+func NewConverter(config *ConverterConfig) *Converter {
+       return &Converter{
+               encodeAsFlow: config.EncodeAsFlow,
+       }
+}
+
+// Standard is a predefined Converter that produces and consumes generic YAML
+// using only built-in constructs that any other YAML implementation ought to
+// understand.
+var Standard *Converter = NewConverter(&ConverterConfig{})
+
+// ImpliedType analyzes the given source code and returns a suitable type that
+// it could be decoded into.
+//
+// For a converter that is using standard YAML rather than cty-specific custom
+// tags, only a subset of cty types can be produced: strings, numbers, bools,
+// tuple types, and object types.
+func (c *Converter) ImpliedType(src []byte) (cty.Type, error) {
+       return c.impliedType(src)
+}
+
+// Marshal serializes the given value into a YAML document, using a fixed
+// mapping from cty types to YAML constructs.
+//
+// Note that unlike the function of the same name in the cty JSON package,
+// this does not take a type constraint, so the YAML serialization cannot
+// preserve late-bound type information for Unmarshal to recover. Instead,
+// any cty.DynamicPseudoType in the type constraint given to Unmarshal is
+// decoded as if the corresponding portion of the input were processed
+// with ImpliedType to find a target type.
+func (c *Converter) Marshal(v cty.Value) ([]byte, error) {
+       return c.marshal(v)
+}
+
+// Unmarshal reads the document found within the given source buffer
+// and attempts to convert it into a value conforming to the given type
+// constraint.
+//
+// An error is returned if the given source contains any YAML document
+// delimiters.
+func (c *Converter) Unmarshal(src []byte, ty cty.Type) (cty.Value, error) {
+       return c.unmarshal(src, ty)
+}
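+
+// Illustrative usage (not part of the upstream file): round-tripping a
+// small document through the Standard converter, or building a flow-style
+// converter with NewConverter.
+//
+//     src := []byte("name: example\ncount: 2\n")
+//     ty, err := Standard.ImpliedType(src) // an object type with "name" and "count"
+//     v, err := Standard.Unmarshal(src, ty)
+//     out, err := Standard.Marshal(v) // re-encodes v as YAML
+//
+//     flow := NewConverter(&ConverterConfig{EncodeAsFlow: true})
+//     out, err = flow.Marshal(v) // emits a flow-style mapping instead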
diff --git a/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go b/vendor/github.com/zclconf/go-cty-yaml/cty_funcs.go
new file mode 100644 (file)
index 0000000..b91141c
--- /dev/null
@@ -0,0 +1,57 @@
+package yaml
+
+import (
+       "github.com/zclconf/go-cty/cty"
+       "github.com/zclconf/go-cty/cty/function"
+)
+
+// YAMLDecodeFunc is a cty function for decoding arbitrary YAML source code
+// into a cty Value, using the ImpliedType and Unmarshal methods of the
+// Standard pre-defined converter.
+var YAMLDecodeFunc = function.New(&function.Spec{
+       Params: []function.Parameter{
+               {
+                       Name: "src",
+                       Type: cty.String,
+               },
+       },
+       Type: func(args []cty.Value) (cty.Type, error) {
+               if !args[0].IsKnown() {
+                       return cty.DynamicPseudoType, nil
+               }
+               if args[0].IsNull() {
+                       return cty.NilType, function.NewArgErrorf(0, "YAML source code cannot be null")
+               }
+               return Standard.ImpliedType([]byte(args[0].AsString()))
+       },
+       Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+               if retType == cty.DynamicPseudoType {
+                       return cty.DynamicVal, nil
+               }
+               return Standard.Unmarshal([]byte(args[0].AsString()), retType)
+       },
+})
+
+// YAMLEncodeFunc is a cty function for encoding an arbitrary cty value
+// into YAML.
+var YAMLEncodeFunc = function.New(&function.Spec{
+       Params: []function.Parameter{
+               {
+                       Name:             "value",
+                       Type:             cty.DynamicPseudoType,
+                       AllowNull:        true,
+                       AllowDynamicType: true,
+               },
+       },
+       Type: function.StaticReturnType(cty.String),
+       Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
+               if !args[0].IsWhollyKnown() {
+                       return cty.UnknownVal(retType), nil
+               }
+               raw, err := Standard.Marshal(args[0])
+               if err != nil {
+                       return cty.NilVal, err
+               }
+               return cty.StringVal(string(raw)), nil
+       },
+})
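+
+// Illustrative usage (not part of the upstream file): these functions are
+// meant to be registered in a cty/HCL evaluation context, but they can
+// also be invoked directly through the go-cty function API.
+//
+//     v, err := YAMLDecodeFunc.Call([]cty.Value{
+//             cty.StringVal("a: 1\nb: [true, false]\n"),
+//     })
+//     s, err := YAMLEncodeFunc.Call([]cty.Value{v})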
diff --git a/vendor/github.com/zclconf/go-cty-yaml/decode.go b/vendor/github.com/zclconf/go-cty-yaml/decode.go
new file mode 100644 (file)
index 0000000..e369ff2
--- /dev/null
@@ -0,0 +1,261 @@
+package yaml
+
+import (
+       "errors"
+       "fmt"
+
+       "github.com/zclconf/go-cty/cty"
+       "github.com/zclconf/go-cty/cty/convert"
+)
+
+func (c *Converter) unmarshal(src []byte, ty cty.Type) (cty.Value, error) {
+       p := &yaml_parser_t{}
+       if !yaml_parser_initialize(p) {
+               return cty.NilVal, errors.New("failed to initialize YAML parser")
+       }
+       if len(src) == 0 {
+               src = []byte{'\n'}
+       }
+
+       an := &valueAnalysis{
+               anchorsPending: map[string]int{},
+               anchorVals:     map[string]cty.Value{},
+       }
+
+       yaml_parser_set_input_string(p, src)
+
+       var evt yaml_event_t
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilVal, parserError(p)
+       }
+       if evt.typ != yaml_STREAM_START_EVENT {
+               return cty.NilVal, parseEventErrorf(&evt, "missing stream start token")
+       }
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilVal, parserError(p)
+       }
+       if evt.typ != yaml_DOCUMENT_START_EVENT {
+               return cty.NilVal, parseEventErrorf(&evt, "missing start of document")
+       }
+
+       v, err := c.unmarshalParse(an, p)
+       if err != nil {
+               return cty.NilVal, err
+       }
+
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilVal, parserError(p)
+       }
+       if evt.typ == yaml_DOCUMENT_START_EVENT {
+               return cty.NilVal, parseEventErrorf(&evt, "only a single document is allowed")
+       }
+       if evt.typ != yaml_DOCUMENT_END_EVENT {
+               return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String())
+       }
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilVal, parserError(p)
+       }
+       if evt.typ != yaml_STREAM_END_EVENT {
+               return cty.NilVal, parseEventErrorf(&evt, "unexpected extra content after value")
+       }
+
+       return convert.Convert(v, ty)
+}
+
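+// Note: the only stream shape accepted above is
+//
+//     STREAM-START, DOCUMENT-START, <value events>, DOCUMENT-END, STREAM-END
+//
+// Empty input is padded to "\n" so that it decodes as a single empty
+// document, and multi-document streams are rejected outright.
+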
+func (c *Converter) unmarshalParse(an *valueAnalysis, p *yaml_parser_t) (cty.Value, error) {
+       var evt yaml_event_t
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilVal, parserError(p)
+       }
+       return c.unmarshalParseRemainder(an, &evt, p)
+}
+
+func (c *Converter) unmarshalParseRemainder(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
+       switch evt.typ {
+       case yaml_SCALAR_EVENT:
+               return c.unmarshalScalar(an, evt, p)
+       case yaml_ALIAS_EVENT:
+               return c.unmarshalAlias(an, evt, p)
+       case yaml_MAPPING_START_EVENT:
+               return c.unmarshalMapping(an, evt, p)
+       case yaml_SEQUENCE_START_EVENT:
+               return c.unmarshalSequence(an, evt, p)
+       case yaml_DOCUMENT_START_EVENT:
+               return cty.NilVal, parseEventErrorf(evt, "only a single document is allowed")
+       case yaml_STREAM_END_EVENT:
+               // Decoding an empty buffer, probably
+               return cty.NilVal, parseEventErrorf(evt, "expecting value but found end of stream")
+       default:
+               // Should never happen; the above should be comprehensive
+               return cty.NilVal, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String())
+       }
+}
+
+func (c *Converter) unmarshalScalar(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
+       src := evt.value
+       tag := string(evt.tag)
+       anchor := string(evt.anchor)
+
+       if len(anchor) > 0 {
+               an.beginAnchor(anchor)
+       }
+
+       val, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style))
+       if err != nil {
+               return cty.NilVal, parseEventErrorWrap(evt, err)
+       }
+
+       if val.RawEquals(mergeMappingVal) {
+               // In any context other than a mapping key, this is just a plain string
+               val = cty.StringVal("<<")
+       }
+
+       if len(anchor) > 0 {
+               an.completeAnchor(anchor, val)
+       }
+       return val, nil
+}
+
+func (c *Converter) unmarshalMapping(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
+       tag := string(evt.tag)
+       anchor := string(evt.anchor)
+
+       if tag != "" && tag != yaml_MAP_TAG {
+               return cty.NilVal, parseEventErrorf(evt, "can't interpret mapping as %s", tag)
+       }
+
+       if anchor != "" {
+               an.beginAnchor(anchor)
+       }
+
+       vals := make(map[string]cty.Value)
+       for {
+               var nextEvt yaml_event_t
+               if !yaml_parser_parse(p, &nextEvt) {
+                       return cty.NilVal, parserError(p)
+               }
+               if nextEvt.typ == yaml_MAPPING_END_EVENT {
+                       v := cty.ObjectVal(vals)
+                       if anchor != "" {
+                               an.completeAnchor(anchor, v)
+                       }
+                       return v, nil
+               }
+
+               if nextEvt.typ != yaml_SCALAR_EVENT {
+                       return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
+               }
+               keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style))
+               if err != nil {
+                       return cty.NilVal, err
+               }
+               if keyVal.RawEquals(mergeMappingVal) {
+                       // A merge key: fold the following value (which must
+                       // be a mapping) into this mapping. See the note after
+                       // this function.
+                       val, err := c.unmarshalParse(an, p)
+                       if err != nil {
+                               return cty.NilVal, err
+                       }
+                       ty := val.Type()
+                       if !(ty.IsObjectType() || ty.IsMapType()) {
+                               return cty.NilVal, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName())
+                       }
+                       for it := val.ElementIterator(); it.Next(); {
+                               k, v := it.Element()
+                               vals[k.AsString()] = v
+                       }
+                       continue
+               }
+               if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil {
+                       keyVal = keyValStr
+               } else {
+                       return cty.NilVal, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
+               }
+               if keyVal.IsNull() {
+                       return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key cannot be null")
+               }
+               if !keyVal.IsKnown() {
+                       return cty.NilVal, parseEventErrorf(&nextEvt, "mapping key must be known")
+               }
+               val, err := c.unmarshalParse(an, p)
+               if err != nil {
+                       return cty.NilVal, err
+               }
+
+               vals[keyVal.AsString()] = val
+       }
+}
+
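+// For reference: the merge handling in unmarshalMapping implements the
+// conventional YAML merge key, for example
+//
+//     base: &defaults
+//       retries: 3
+//     task:
+//       <<: *defaults
+//       name: build
+//
+// where the entries of the aliased mapping are folded into "task".
+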
+func (c *Converter) unmarshalSequence(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
+       tag := string(evt.tag)
+       anchor := string(evt.anchor)
+
+       if tag != "" && tag != yaml_SEQ_TAG {
+               return cty.NilVal, parseEventErrorf(evt, "can't interpret sequence as %s", tag)
+       }
+
+       if anchor != "" {
+               an.beginAnchor(anchor)
+       }
+
+       var vals []cty.Value
+       for {
+               var nextEvt yaml_event_t
+               if !yaml_parser_parse(p, &nextEvt) {
+                       return cty.NilVal, parserError(p)
+               }
+               if nextEvt.typ == yaml_SEQUENCE_END_EVENT {
+                       v := cty.TupleVal(vals)
+                       if anchor != "" {
+                               an.completeAnchor(anchor, v)
+                       }
+                       return v, nil
+               }
+
+               val, err := c.unmarshalParseRemainder(an, &nextEvt, p)
+               if err != nil {
+                       return cty.NilVal, err
+               }
+
+               vals = append(vals, val)
+       }
+}
+
+func (c *Converter) unmarshalAlias(an *valueAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Value, error) {
+       v, err := an.anchorVal(string(evt.anchor))
+       if err != nil {
+               err = parseEventErrorWrap(evt, err)
+       }
+       return v, err
+}
+
+type valueAnalysis struct {
+       anchorsPending map[string]int
+       anchorVals     map[string]cty.Value
+}
+
+func (an *valueAnalysis) beginAnchor(name string) {
+       an.anchorsPending[name]++
+}
+
+func (an *valueAnalysis) completeAnchor(name string, v cty.Value) {
+       an.anchorsPending[name]--
+       if an.anchorsPending[name] == 0 {
+               delete(an.anchorsPending, name)
+       }
+       an.anchorVals[name] = v
+}
+
+func (an *valueAnalysis) anchorVal(name string) (cty.Value, error) {
+       if _, pending := an.anchorsPending[name]; pending {
+               // YAML normally allows self-referencing structures, but cty cannot
+               // represent them (it requires all structures to be finite) so we
+               // must fail here.
+               return cty.NilVal, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name)
+       }
+       v, ok := an.anchorVals[name]
+       if !ok {
+               return cty.NilVal, fmt.Errorf("reference to undefined anchor %q", name)
+       }
+       return v, nil
+}
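+
+// For reference: an alias may only refer to an anchor whose definition is
+// already complete, so
+//
+//     a: &x [1, 2]
+//     b: *x
+//
+// decodes with b equal to a, while a self-reference such as "a: &x [*x]"
+// is rejected by anchorVal above, because cty values must be finite.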
diff --git a/vendor/github.com/zclconf/go-cty-yaml/emitterc.go b/vendor/github.com/zclconf/go-cty-yaml/emitterc.go
new file mode 100644 (file)
index 0000000..a1c2cc5
--- /dev/null
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+       "bytes"
+       "fmt"
+)
+
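+// The buffer_pos+5 checks in the helpers below keep at least five bytes
+// free in the output buffer: enough headroom for the widest single write,
+// a 4-byte UTF-8 sequence or a two-byte CR LF break.
+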
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+       if emitter.buffer_pos+5 >= len(emitter.buffer) {
+               return yaml_emitter_flush(emitter)
+       }
+       return true
+}
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+       if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+               return false
+       }
+       emitter.buffer[emitter.buffer_pos] = value
+       emitter.buffer_pos++
+       emitter.column++
+       return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+       if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+               return false
+       }
+       switch emitter.line_break {
+       case yaml_CR_BREAK:
+               emitter.buffer[emitter.buffer_pos] = '\r'
+               emitter.buffer_pos += 1
+       case yaml_LN_BREAK:
+               emitter.buffer[emitter.buffer_pos] = '\n'
+               emitter.buffer_pos += 1
+       case yaml_CRLN_BREAK:
+               emitter.buffer[emitter.buffer_pos+0] = '\r'
+               emitter.buffer[emitter.buffer_pos+1] = '\n'
+               emitter.buffer_pos += 2
+       default:
+               panic("unknown line break setting")
+       }
+       emitter.column = 0
+       emitter.line++
+       return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+       if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+               return false
+       }
+       p := emitter.buffer_pos
+       w := width(s[*i])
+       switch w {
+       case 4:
+               emitter.buffer[p+3] = s[*i+3]
+               fallthrough
+       case 3:
+               emitter.buffer[p+2] = s[*i+2]
+               fallthrough
+       case 2:
+               emitter.buffer[p+1] = s[*i+1]
+               fallthrough
+       case 1:
+               emitter.buffer[p+0] = s[*i+0]
+       default:
+               panic("unknown character width")
+       }
+       emitter.column++
+       emitter.buffer_pos += w
+       *i += w
+       return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+       for i := 0; i < len(s); {
+               if !write(emitter, s, &i) {
+                       return false
+               }
+       }
+       return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+       if s[*i] == '\n' {
+               if !put_break(emitter) {
+                       return false
+               }
+               *i++
+       } else {
+               if !write(emitter, s, i) {
+                       return false
+               }
+               emitter.column = 0
+               emitter.line++
+       }
+       return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+       emitter.error = yaml_EMITTER_ERROR
+       emitter.problem = problem
+       return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+       emitter.events = append(emitter.events, *event)
+       for !yaml_emitter_need_more_events(emitter) {
+               event := &emitter.events[emitter.events_head]
+               if !yaml_emitter_analyze_event(emitter, event) {
+                       return false
+               }
+               if !yaml_emitter_state_machine(emitter, event) {
+                       return false
+               }
+               yaml_event_delete(event)
+               emitter.events_head++
+       }
+       return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+//  - 1 event for DOCUMENT-START
+//  - 2 events for SEQUENCE-START
+//  - 3 events for MAPPING-START
+//
+// so that lookahead checks such as yaml_emitter_check_empty_document,
+// yaml_emitter_check_empty_sequence and yaml_emitter_check_simple_key
+// have enough of the stream in hand to decide how to format the node.
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+       if emitter.events_head == len(emitter.events) {
+               return true
+       }
+       var accumulate int
+       switch emitter.events[emitter.events_head].typ {
+       case yaml_DOCUMENT_START_EVENT:
+               accumulate = 1
+       case yaml_SEQUENCE_START_EVENT:
+               accumulate = 2
+       case yaml_MAPPING_START_EVENT:
+               accumulate = 3
+       default:
+               return false
+       }
+       if len(emitter.events)-emitter.events_head > accumulate {
+               return false
+       }
+       var level int
+       for i := emitter.events_head; i < len(emitter.events); i++ {
+               switch emitter.events[i].typ {
+               case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+                       level++
+               case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+                       level--
+               }
+               if level == 0 {
+                       return false
+               }
+       }
+       return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+       for i := 0; i < len(emitter.tag_directives); i++ {
+               if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+                       if allow_duplicates {
+                               return true
+                       }
+                       return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+               }
+       }
+
+       // [Go] Do we actually need to copy this given garbage collection
+       // and the lack of deallocating destructors?
+       tag_copy := yaml_tag_directive_t{
+               handle: make([]byte, len(value.handle)),
+               prefix: make([]byte, len(value.prefix)),
+       }
+       copy(tag_copy.handle, value.handle)
+       copy(tag_copy.prefix, value.prefix)
+       emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+       return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+       emitter.indents = append(emitter.indents, emitter.indent)
+       if emitter.indent < 0 {
+               if flow {
+                       emitter.indent = emitter.best_indent
+               } else {
+                       emitter.indent = 0
+               }
+       } else if !indentless {
+               emitter.indent += emitter.best_indent
+       }
+       return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+       switch emitter.state {
+       default:
+       case yaml_EMIT_STREAM_START_STATE:
+               return yaml_emitter_emit_stream_start(emitter, event)
+
+       case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+               return yaml_emitter_emit_document_start(emitter, event, true)
+
+       case yaml_EMIT_DOCUMENT_START_STATE:
+               return yaml_emitter_emit_document_start(emitter, event, false)
+
+       case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+               return yaml_emitter_emit_document_content(emitter, event)
+
+       case yaml_EMIT_DOCUMENT_END_STATE:
+               return yaml_emitter_emit_document_end(emitter, event)
+
+       case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+               return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+       case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+               return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+       case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+               return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+       case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+               return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+       case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+               return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+       case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+               return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+       case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+               return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+       case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+               return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+       case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+               return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+       case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+               return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+       case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+               return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+       case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+               return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+       case yaml_EMIT_END_STATE:
+               return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+       }
+       panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+       if event.typ != yaml_STREAM_START_EVENT {
+               return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+       }
+       if emitter.encoding == yaml_ANY_ENCODING {
+               emitter.encoding = event.encoding
+               if emitter.encoding == yaml_ANY_ENCODING {
+                       emitter.encoding = yaml_UTF8_ENCODING
+               }
+       }
+       if emitter.best_indent < 2 || emitter.best_indent > 9 {
+               emitter.best_indent = 2
+       }
+       if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+               emitter.best_width = 80
+       }
+       if emitter.best_width < 0 {
+               emitter.best_width = 1<<31 - 1
+       }
+       if emitter.line_break == yaml_ANY_BREAK {
+               emitter.line_break = yaml_LN_BREAK
+       }
+
+       emitter.indent = -1
+       emitter.line = 0
+       emitter.column = 0
+       emitter.whitespace = true
+       emitter.indention = true
+
+       if emitter.encoding != yaml_UTF8_ENCODING {
+               if !yaml_emitter_write_bom(emitter) {
+                       return false
+               }
+       }
+       emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+       return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+       if event.typ == yaml_DOCUMENT_START_EVENT {
+
+               if event.version_directive != nil {
+                       if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+                               return false
+                       }
+               }
+
+               for i := 0; i < len(event.tag_directives); i++ {
+                       tag_directive := &event.tag_directives[i]
+                       if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+                               return false
+                       }
+                       if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+                               return false
+                       }
+               }
+
+               for i := 0; i < len(default_tag_directives); i++ {
+                       tag_directive := &default_tag_directives[i]
+                       if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+                               return false
+                       }
+               }
+
+               implicit := event.implicit
+               if !first || emitter.canonical {
+                       implicit = false
+               }
+
+               if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+                       if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+                               return false
+                       }
+                       if !yaml_emitter_write_indent(emitter) {
+                               return false
+                       }
+               }
+
+               if event.version_directive != nil {
+                       implicit = false
+                       if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+                               return false
+                       }
+                       if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+                               return false
+                       }
+                       if !yaml_emitter_write_indent(emitter) {
+                               return false
+                       }
+               }
+
+               if len(event.tag_directives) > 0 {
+                       implicit = false
+                       for i := 0; i < len(event.tag_directives); i++ {
+                               tag_directive := &event.tag_directives[i]
+                               if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+                                       return false
+                               }
+                               if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+                                       return false
+                               }
+                               if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+                                       return false
+                               }
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                       }
+               }
+
+               if yaml_emitter_check_empty_document(emitter) {
+                       implicit = false
+               }
+               if !implicit {
+                       if !yaml_emitter_write_indent(emitter) {
+                               return false
+                       }
+                       if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+                               return false
+                       }
+                       if emitter.canonical {
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                       }
+               }
+
+               emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+               return true
+       }
+
+       if event.typ == yaml_STREAM_END_EVENT {
+               if emitter.open_ended {
+                       if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+                               return false
+                       }
+                       if !yaml_emitter_write_indent(emitter) {
+                               return false
+                       }
+               }
+               if !yaml_emitter_flush(emitter) {
+                       return false
+               }
+               emitter.state = yaml_EMIT_END_STATE
+               return true
+       }
+
+       return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+       emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+       return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+       if event.typ != yaml_DOCUMENT_END_EVENT {
+               return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+       }
+       if !yaml_emitter_write_indent(emitter) {
+               return false
+       }
+       if !event.implicit {
+               // [Go] Allocate the slice elsewhere.
+               if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+                       return false
+               }
+               if !yaml_emitter_write_indent(emitter) {
+                       return false
+               }
+       }
+       if !yaml_emitter_flush(emitter) {
+               return false
+       }
+       emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+       emitter.tag_directives = emitter.tag_directives[:0]
+       return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+       if first {
+               if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+                       return false
+               }
+               if !yaml_emitter_increase_indent(emitter, true, false) {
+                       return false
+               }
+               emitter.flow_level++
+       }
+
+       if event.typ == yaml_SEQUENCE_END_EVENT {
+               emitter.flow_level--
+               emitter.indent = emitter.indents[len(emitter.indents)-1]
+               emitter.indents = emitter.indents[:len(emitter.indents)-1]
+               if emitter.canonical && !first {
+                       if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+                               return false
+                       }
+                       if !yaml_emitter_write_indent(emitter) {
+                               return false
+                       }
+               }
+               if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+                       return false
+               }
+               emitter.state = emitter.states[len(emitter.states)-1]
+               emitter.states = emitter.states[:len(emitter.states)-1]
+
+               return true
+       }
+
+       if !first {
+               if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+                       return false
+               }
+       }
+
+       if emitter.canonical || emitter.column > emitter.best_width {
+               if !yaml_emitter_write_indent(emitter) {
+                       return false
+               }
+       }
+       emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+       return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+       if first {
+               if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+                       return false
+               }
+               if !yaml_emitter_increase_indent(emitter, true, false) {
+                       return false
+               }
+               emitter.flow_level++
+       }
+
+       if event.typ == yaml_MAPPING_END_EVENT {
+               emitter.flow_level--
+               emitter.indent = emitter.indents[len(emitter.indents)-1]
+               emitter.indents = emitter.indents[:len(emitter.indents)-1]
+               if emitter.canonical && !first {
+                       if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+                               return false
+                       }
+                       if !yaml_emitter_write_indent(emitter) {
+                               return false
+                       }
+               }
+               if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+                       return false
+               }
+               emitter.state = emitter.states[len(emitter.states)-1]
+               emitter.states = emitter.states[:len(emitter.states)-1]
+               return true
+       }
+
+       if !first {
+               if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+                       return false
+               }
+       }
+       if emitter.canonical || emitter.column > emitter.best_width {
+               if !yaml_emitter_write_indent(emitter) {
+                       return false
+               }
+       }
+
+       if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+               emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+               return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+       }
+       if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+               return false
+       }
+       emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+       return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+       if simple {
+               if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+                       return false
+               }
+       } else {
+               if emitter.canonical || emitter.column > emitter.best_width {
+                       if !yaml_emitter_write_indent(emitter) {
+                               return false
+                       }
+               }
+               if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+                       return false
+               }
+       }
+       emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+       return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+       if first {
+               if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+                       return false
+               }
+       }
+       if event.typ == yaml_SEQUENCE_END_EVENT {
+               emitter.indent = emitter.indents[len(emitter.indents)-1]
+               emitter.indents = emitter.indents[:len(emitter.indents)-1]
+               emitter.state = emitter.states[len(emitter.states)-1]
+               emitter.states = emitter.states[:len(emitter.states)-1]
+               return true
+       }
+       if !yaml_emitter_write_indent(emitter) {
+               return false
+       }
+       if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+               return false
+       }
+       emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+       return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+       if first {
+               if !yaml_emitter_increase_indent(emitter, false, false) {
+                       return false
+               }
+       }
+       if event.typ == yaml_MAPPING_END_EVENT {
+               emitter.indent = emitter.indents[len(emitter.indents)-1]
+               emitter.indents = emitter.indents[:len(emitter.indents)-1]
+               emitter.state = emitter.states[len(emitter.states)-1]
+               emitter.states = emitter.states[:len(emitter.states)-1]
+               return true
+       }
+       if !yaml_emitter_write_indent(emitter) {
+               return false
+       }
+       if yaml_emitter_check_simple_key(emitter) {
+               emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+               return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+       }
+       if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+               return false
+       }
+       emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+       return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+       if simple {
+               if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+                       return false
+               }
+       } else {
+               if !yaml_emitter_write_indent(emitter) {
+                       return false
+               }
+               if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+                       return false
+               }
+       }
+       emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+       return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+       root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+       emitter.root_context = root
+       emitter.sequence_context = sequence
+       emitter.mapping_context = mapping
+       emitter.simple_key_context = simple_key
+
+       switch event.typ {
+       case yaml_ALIAS_EVENT:
+               return yaml_emitter_emit_alias(emitter, event)
+       case yaml_SCALAR_EVENT:
+               return yaml_emitter_emit_scalar(emitter, event)
+       case yaml_SEQUENCE_START_EVENT:
+               return yaml_emitter_emit_sequence_start(emitter, event)
+       case yaml_MAPPING_START_EVENT:
+               return yaml_emitter_emit_mapping_start(emitter, event)
+       default:
+               return yaml_emitter_set_emitter_error(emitter,
+                       fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+       }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+       if !yaml_emitter_process_anchor(emitter) {
+               return false
+       }
+       emitter.state = emitter.states[len(emitter.states)-1]
+       emitter.states = emitter.states[:len(emitter.states)-1]
+       return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+       if !yaml_emitter_select_scalar_style(emitter, event) {
+               return false
+       }
+       if !yaml_emitter_process_anchor(emitter) {
+               return false
+       }
+       if !yaml_emitter_process_tag(emitter) {
+               return false
+       }
+       if !yaml_emitter_increase_indent(emitter, true, false) {
+               return false
+       }
+       if !yaml_emitter_process_scalar(emitter) {
+               return false
+       }
+       emitter.indent = emitter.indents[len(emitter.indents)-1]
+       emitter.indents = emitter.indents[:len(emitter.indents)-1]
+       emitter.state = emitter.states[len(emitter.states)-1]
+       emitter.states = emitter.states[:len(emitter.states)-1]
+       return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+       if !yaml_emitter_process_anchor(emitter) {
+               return false
+       }
+       if !yaml_emitter_process_tag(emitter) {
+               return false
+       }
+       if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+               yaml_emitter_check_empty_sequence(emitter) {
+               emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+       } else {
+               emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+       }
+       return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+       if !yaml_emitter_process_anchor(emitter) {
+               return false
+       }
+       if !yaml_emitter_process_tag(emitter) {
+               return false
+       }
+       if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+               yaml_emitter_check_empty_mapping(emitter) {
+               emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+       } else {
+               emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+       }
+       return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+       return false // [Go] The empty-document check is stubbed out, so every document is treated as non-empty.
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+       if len(emitter.events)-emitter.events_head < 2 {
+               return false
+       }
+       return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+               emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+       if len(emitter.events)-emitter.events_head < 2 {
+               return false
+       }
+       return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+               emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+       length := 0
+       switch emitter.events[emitter.events_head].typ {
+       case yaml_ALIAS_EVENT:
+               length += len(emitter.anchor_data.anchor)
+       case yaml_SCALAR_EVENT:
+               if emitter.scalar_data.multiline {
+                       return false
+               }
+               length += len(emitter.anchor_data.anchor) +
+                       len(emitter.tag_data.handle) +
+                       len(emitter.tag_data.suffix) +
+                       len(emitter.scalar_data.value)
+       case yaml_SEQUENCE_START_EVENT:
+               if !yaml_emitter_check_empty_sequence(emitter) {
+                       return false
+               }
+               length += len(emitter.anchor_data.anchor) +
+                       len(emitter.tag_data.handle) +
+                       len(emitter.tag_data.suffix)
+       case yaml_MAPPING_START_EVENT:
+               if !yaml_emitter_check_empty_mapping(emitter) {
+                       return false
+               }
+               length += len(emitter.anchor_data.anchor) +
+                       len(emitter.tag_data.handle) +
+                       len(emitter.tag_data.suffix)
+       default:
+               return false
+       }
+       return length <= 128
+}
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+       no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+       if no_tag && !event.implicit && !event.quoted_implicit {
+               return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+       }
+
+       style := event.scalar_style()
+       if style == yaml_ANY_SCALAR_STYLE {
+               style = yaml_PLAIN_SCALAR_STYLE
+       }
+       if emitter.canonical {
+               style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+       }
+       if emitter.simple_key_context && emitter.scalar_data.multiline {
+               style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+       }
+
+       if style == yaml_PLAIN_SCALAR_STYLE {
+               if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+                       emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+                       style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+               }
+               if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+                       style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+               }
+               if no_tag && !event.implicit {
+                       style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+               }
+       }
+       if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+               if !emitter.scalar_data.single_quoted_allowed {
+                       style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+               }
+       }
+       if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+               if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+                       style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+               }
+       }
+
+       if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+               emitter.tag_data.handle = []byte{'!'}
+       }
+       emitter.scalar_data.style = style
+       return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+       if emitter.anchor_data.anchor == nil {
+               return true
+       }
+       c := []byte{'&'}
+       if emitter.anchor_data.alias {
+               c[0] = '*'
+       }
+       if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+               return false
+       }
+       return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+       if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+               return true
+       }
+       if len(emitter.tag_data.handle) > 0 {
+               if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+                       return false
+               }
+               if len(emitter.tag_data.suffix) > 0 {
+                       if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+                               return false
+                       }
+               }
+       } else {
+               // [Go] Allocate these slices elsewhere.
+               if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+                       return false
+               }
+               if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+                       return false
+               }
+               if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+                       return false
+               }
+       }
+       return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+       switch emitter.scalar_data.style {
+       case yaml_PLAIN_SCALAR_STYLE:
+               return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+       case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+               return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+       case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+               return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+       case yaml_LITERAL_SCALAR_STYLE:
+               return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+       case yaml_FOLDED_SCALAR_STYLE:
+               return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+       }
+       panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+       if version_directive.major != 1 || version_directive.minor != 1 {
+               return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+       }
+       return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+       handle := tag_directive.handle
+       prefix := tag_directive.prefix
+       if len(handle) == 0 {
+               return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+       }
+       if handle[0] != '!' {
+               return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+       }
+       if handle[len(handle)-1] != '!' {
+               return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+       }
+       for i := 1; i < len(handle)-1; i += width(handle[i]) {
+               if !is_alpha(handle, i) {
+                       return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+               }
+       }
+       if len(prefix) == 0 {
+               return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+       }
+       return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+       if len(anchor) == 0 {
+               problem := "anchor value must not be empty"
+               if alias {
+                       problem = "alias value must not be empty"
+               }
+               return yaml_emitter_set_emitter_error(emitter, problem)
+       }
+       for i := 0; i < len(anchor); i += width(anchor[i]) {
+               if !is_alpha(anchor, i) {
+                       problem := "anchor value must contain alphanumerical characters only"
+                       if alias {
+                               problem = "alias value must contain alphanumerical characters only"
+                       }
+                       return yaml_emitter_set_emitter_error(emitter, problem)
+               }
+       }
+       emitter.anchor_data.anchor = anchor
+       emitter.anchor_data.alias = alias
+       return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+       if len(tag) == 0 {
+               return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+       }
+       for i := 0; i < len(emitter.tag_directives); i++ {
+               tag_directive := &emitter.tag_directives[i]
+               if bytes.HasPrefix(tag, tag_directive.prefix) {
+                       emitter.tag_data.handle = tag_directive.handle
+                       emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+                       return true
+               }
+       }
+       emitter.tag_data.suffix = tag
+       return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+       var (
+               block_indicators   = false
+               flow_indicators    = false
+               line_breaks        = false
+               special_characters = false
+
+               leading_space  = false
+               leading_break  = false
+               trailing_space = false
+               trailing_break = false
+               break_space    = false
+               space_break    = false
+
+               preceded_by_whitespace = false
+               followed_by_whitespace = false
+               previous_space         = false
+               previous_break         = false
+       )
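+       // A single pass over the value records which characters and whitespace
+       // patterns appear; the flags then determine which scalar styles
+       // (plain, single-quoted, double-quoted, block) can represent it safely.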
+
+       emitter.scalar_data.value = value
+
+       if len(value) == 0 {
+               emitter.scalar_data.multiline = false
+               emitter.scalar_data.flow_plain_allowed = false
+               emitter.scalar_data.block_plain_allowed = true
+               emitter.scalar_data.single_quoted_allowed = true
+               emitter.scalar_data.block_allowed = false
+               return true
+       }
+
+       if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+               block_indicators = true
+               flow_indicators = true
+       }
+
+       preceded_by_whitespace = true
+       for i, w := 0, 0; i < len(value); i += w {
+               w = width(value[i])
+               followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+               if i == 0 {
+                       switch value[i] {
+                       case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+                               flow_indicators = true
+                               block_indicators = true
+                       case '?', ':':
+                               flow_indicators = true
+                               if followed_by_whitespace {
+                                       block_indicators = true
+                               }
+                       case '-':
+                               if followed_by_whitespace {
+                                       flow_indicators = true
+                                       block_indicators = true
+                               }
+                       }
+               } else {
+                       switch value[i] {
+                       case ',', '?', '[', ']', '{', '}':
+                               flow_indicators = true
+                       case ':':
+                               flow_indicators = true
+                               if followed_by_whitespace {
+                                       block_indicators = true
+                               }
+                       case '#':
+                               if preceded_by_whitespace {
+                                       flow_indicators = true
+                                       block_indicators = true
+                               }
+                       }
+               }
+
+               if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+                       special_characters = true
+               }
+               if is_space(value, i) {
+                       if i == 0 {
+                               leading_space = true
+                       }
+                       if i+width(value[i]) == len(value) {
+                               trailing_space = true
+                       }
+                       if previous_break {
+                               break_space = true
+                       }
+                       previous_space = true
+                       previous_break = false
+               } else if is_break(value, i) {
+                       line_breaks = true
+                       if i == 0 {
+                               leading_break = true
+                       }
+                       if i+width(value[i]) == len(value) {
+                               trailing_break = true
+                       }
+                       if previous_space {
+                               space_break = true
+                       }
+                       previous_space = false
+                       previous_break = true
+               } else {
+                       previous_space = false
+                       previous_break = false
+               }
+
+               // [Go] is_blankz would also match the end of the string, but the
+               // loop condition guarantees i < len(value), so only actual blanks
+               // and breaks are detected here.
+               preceded_by_whitespace = is_blankz(value, i)
+       }
+
+       emitter.scalar_data.multiline = line_breaks
+       emitter.scalar_data.flow_plain_allowed = true
+       emitter.scalar_data.block_plain_allowed = true
+       emitter.scalar_data.single_quoted_allowed = true
+       emitter.scalar_data.block_allowed = true
+
+       if leading_space || leading_break || trailing_space || trailing_break {
+               emitter.scalar_data.flow_plain_allowed = false
+               emitter.scalar_data.block_plain_allowed = false
+       }
+       if trailing_space {
+               emitter.scalar_data.block_allowed = false
+       }
+       if break_space {
+               emitter.scalar_data.flow_plain_allowed = false
+               emitter.scalar_data.block_plain_allowed = false
+               emitter.scalar_data.single_quoted_allowed = false
+       }
+       if space_break || special_characters {
+               emitter.scalar_data.flow_plain_allowed = false
+               emitter.scalar_data.block_plain_allowed = false
+               emitter.scalar_data.single_quoted_allowed = false
+               emitter.scalar_data.block_allowed = false
+       }
+       if line_breaks {
+               emitter.scalar_data.flow_plain_allowed = false
+               emitter.scalar_data.block_plain_allowed = false
+       }
+       if flow_indicators {
+               emitter.scalar_data.flow_plain_allowed = false
+       }
+       if block_indicators {
+               emitter.scalar_data.block_plain_allowed = false
+       }
+       return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+       emitter.anchor_data.anchor = nil
+       emitter.tag_data.handle = nil
+       emitter.tag_data.suffix = nil
+       emitter.scalar_data.value = nil
+
+       switch event.typ {
+       case yaml_ALIAS_EVENT:
+               if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+                       return false
+               }
+
+       case yaml_SCALAR_EVENT:
+               if len(event.anchor) > 0 {
+                       if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+                               return false
+                       }
+               }
+               if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+                       if !yaml_emitter_analyze_tag(emitter, event.tag) {
+                               return false
+                       }
+               }
+               if !yaml_emitter_analyze_scalar(emitter, event.value) {
+                       return false
+               }
+
+       case yaml_SEQUENCE_START_EVENT:
+               if len(event.anchor) > 0 {
+                       if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+                               return false
+                       }
+               }
+               if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+                       if !yaml_emitter_analyze_tag(emitter, event.tag) {
+                               return false
+                       }
+               }
+
+       case yaml_MAPPING_START_EVENT:
+               if len(event.anchor) > 0 {
+                       if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+                               return false
+                       }
+               }
+               if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+                       if !yaml_emitter_analyze_tag(emitter, event.tag) {
+                               return false
+                       }
+               }
+       }
+       return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+       if !flush(emitter) {
+               return false
+       }
+       pos := emitter.buffer_pos
+       emitter.buffer[pos+0] = '\xEF'
+       emitter.buffer[pos+1] = '\xBB'
+       emitter.buffer[pos+2] = '\xBF'
+       emitter.buffer_pos += 3
+       return true
+}
+
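+// Write a line break and/or indentation spaces so that the next write starts
+// at the current indentation level.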
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+       indent := emitter.indent
+       if indent < 0 {
+               indent = 0
+       }
+       if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+               if !put_break(emitter) {
+                       return false
+               }
+       }
+       for emitter.column < indent {
+               if !put(emitter, ' ') {
+                       return false
+               }
+       }
+       emitter.whitespace = true
+       emitter.indention = true
+       return true
+}
+
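+// Write an indicator such as '-', '?', ':' or a quote, inserting a leading
+// space when one is required, and update the whitespace/indentation state.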
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+       if need_whitespace && !emitter.whitespace {
+               if !put(emitter, ' ') {
+                       return false
+               }
+       }
+       if !write_all(emitter, indicator) {
+               return false
+       }
+       emitter.whitespace = is_whitespace
+       emitter.indention = (emitter.indention && is_indention)
+       emitter.open_ended = false
+       return true
+}
+
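+// Write an anchor or alias name (its '&' or '*' indicator is written
+// separately by yaml_emitter_process_anchor).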
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+       if !write_all(emitter, value) {
+               return false
+       }
+       emitter.whitespace = false
+       emitter.indention = false
+       return true
+}
+
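+// Write a tag handle such as "!!", preceded by a space if needed.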
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+       if !emitter.whitespace {
+               if !put(emitter, ' ') {
+                       return false
+               }
+       }
+       if !write_all(emitter, value) {
+               return false
+       }
+       emitter.whitespace = false
+       emitter.indention = false
+       return true
+}
+
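+// Write tag content, percent-encoding bytes that cannot appear verbatim in a
+// tag URI (for example, a space is written as "%20").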
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+       if need_whitespace && !emitter.whitespace {
+               if !put(emitter, ' ') {
+                       return false
+               }
+       }
+       for i := 0; i < len(value); {
+               var must_write bool
+               switch value[i] {
+               case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+                       must_write = true
+               default:
+                       must_write = is_alpha(value, i)
+               }
+               if must_write {
+                       if !write(emitter, value, &i) {
+                               return false
+                       }
+               } else {
+                       w := width(value[i])
+                       for k := 0; k < w; k++ {
+                               octet := value[i]
+                               i++
+                               if !put(emitter, '%') {
+                                       return false
+                               }
+
+                               c := octet >> 4
+                               if c < 10 {
+                                       c += '0'
+                               } else {
+                                       c += 'A' - 10
+                               }
+                               if !put(emitter, c) {
+                                       return false
+                               }
+
+                               c = octet & 0x0f
+                               if c < 10 {
+                                       c += '0'
+                               } else {
+                                       c += 'A' - 10
+                               }
+                               if !put(emitter, c) {
+                                       return false
+                               }
+                       }
+               }
+       }
+       emitter.whitespace = false
+       emitter.indention = false
+       return true
+}
+
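+// Write a plain (unquoted) scalar, folding long lines at spaces when
+// allow_breaks permits.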
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+       if !emitter.whitespace {
+               if !put(emitter, ' ') {
+                       return false
+               }
+       }
+
+       spaces := false
+       breaks := false
+       for i := 0; i < len(value); {
+               if is_space(value, i) {
+                       if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                               i += width(value[i])
+                       } else {
+                               if !write(emitter, value, &i) {
+                                       return false
+                               }
+                       }
+                       spaces = true
+               } else if is_break(value, i) {
+                       if !breaks && value[i] == '\n' {
+                               if !put_break(emitter) {
+                                       return false
+                               }
+                       }
+                       if !write_break(emitter, value, &i) {
+                               return false
+                       }
+                       emitter.indention = true
+                       breaks = true
+               } else {
+                       if breaks {
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                       }
+                       if !write(emitter, value, &i) {
+                               return false
+                       }
+                       emitter.indention = false
+                       spaces = false
+                       breaks = false
+               }
+       }
+
+       emitter.whitespace = false
+       emitter.indention = false
+       if emitter.root_context {
+               emitter.open_ended = true
+       }
+
+       return true
+}
+
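+// Write a single-quoted scalar, escaping embedded single quotes by doubling
+// them ('') and folding long lines at spaces when allow_breaks permits.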
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+       if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+               return false
+       }
+
+       spaces := false
+       breaks := false
+       for i := 0; i < len(value); {
+               if is_space(value, i) {
+                       if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                               i += width(value[i])
+                       } else {
+                               if !write(emitter, value, &i) {
+                                       return false
+                               }
+                       }
+                       spaces = true
+               } else if is_break(value, i) {
+                       if !breaks && value[i] == '\n' {
+                               if !put_break(emitter) {
+                                       return false
+                               }
+                       }
+                       if !write_break(emitter, value, &i) {
+                               return false
+                       }
+                       emitter.indention = true
+                       breaks = true
+               } else {
+                       if breaks {
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                       }
+                       if value[i] == '\'' {
+                               if !put(emitter, '\'') {
+                                       return false
+                               }
+                       }
+                       if !write(emitter, value, &i) {
+                               return false
+                       }
+                       emitter.indention = false
+                       spaces = false
+                       breaks = false
+               }
+       }
+       if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+               return false
+       }
+       emitter.whitespace = false
+       emitter.indention = false
+       return true
+}
+
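+// Write a double-quoted scalar, backslash-escaping line breaks, non-printable
+// bytes and, unless unicode output is enabled, all non-ASCII characters.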
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+       spaces := false
+       if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+               return false
+       }
+
+       for i := 0; i < len(value); {
+               if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+                       is_bom(value, i) || is_break(value, i) ||
+                       value[i] == '"' || value[i] == '\\' {
+
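+                       // Decode one UTF-8 sequence by hand so the whole rune
+                       // can be escaped as a single \xXX, \uXXXX, \UXXXXXXXX,
+                       // or named escape below.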
+                       octet := value[i]
+
+                       var w int
+                       var v rune
+                       switch {
+                       case octet&0x80 == 0x00:
+                               w, v = 1, rune(octet&0x7F)
+                       case octet&0xE0 == 0xC0:
+                               w, v = 2, rune(octet&0x1F)
+                       case octet&0xF0 == 0xE0:
+                               w, v = 3, rune(octet&0x0F)
+                       case octet&0xF8 == 0xF0:
+                               w, v = 4, rune(octet&0x07)
+                       }
+                       for k := 1; k < w; k++ {
+                               octet = value[i+k]
+                               v = (v << 6) + (rune(octet) & 0x3F)
+                       }
+                       i += w
+
+                       if !put(emitter, '\\') {
+                               return false
+                       }
+
+                       var ok bool
+                       switch v {
+                       case 0x00:
+                               ok = put(emitter, '0')
+                       case 0x07:
+                               ok = put(emitter, 'a')
+                       case 0x08:
+                               ok = put(emitter, 'b')
+                       case 0x09:
+                               ok = put(emitter, 't')
+                       case 0x0A:
+                               ok = put(emitter, 'n')
+                       case 0x0b:
+                               ok = put(emitter, 'v')
+                       case 0x0c:
+                               ok = put(emitter, 'f')
+                       case 0x0d:
+                               ok = put(emitter, 'r')
+                       case 0x1b:
+                               ok = put(emitter, 'e')
+                       case 0x22:
+                               ok = put(emitter, '"')
+                       case 0x5c:
+                               ok = put(emitter, '\\')
+                       case 0x85:
+                               ok = put(emitter, 'N')
+                       case 0xA0:
+                               ok = put(emitter, '_')
+                       case 0x2028:
+                               ok = put(emitter, 'L')
+                       case 0x2029:
+                               ok = put(emitter, 'P')
+                       default:
+                               if v <= 0xFF {
+                                       ok = put(emitter, 'x')
+                                       w = 2
+                               } else if v <= 0xFFFF {
+                                       ok = put(emitter, 'u')
+                                       w = 4
+                               } else {
+                                       ok = put(emitter, 'U')
+                                       w = 8
+                               }
+                               for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+                                       digit := byte((v >> uint(k)) & 0x0F)
+                                       if digit < 10 {
+                                               ok = put(emitter, digit+'0')
+                                       } else {
+                                               ok = put(emitter, digit+'A'-10)
+                                       }
+                               }
+                       }
+                       if !ok {
+                               return false
+                       }
+                       spaces = false
+               } else if is_space(value, i) {
+                       if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                               if is_space(value, i+1) {
+                                       if !put(emitter, '\\') {
+                                               return false
+                                       }
+                               }
+                               i += width(value[i])
+                       } else if !write(emitter, value, &i) {
+                               return false
+                       }
+                       spaces = true
+               } else {
+                       if !write(emitter, value, &i) {
+                               return false
+                       }
+                       spaces = false
+               }
+       }
+       if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+               return false
+       }
+       emitter.whitespace = false
+       emitter.indention = false
+       return true
+}
+
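+// Write the block scalar header hints: an explicit indentation indicator when
+// the value starts with whitespace, and a chomping indicator ('-' strips the
+// final line break, '+' keeps trailing breaks). Combined with the caller's
+// style indicator this yields headers such as "|2" or ">-".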
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+       if is_space(value, 0) || is_break(value, 0) {
+               indent_hint := []byte{'0' + byte(emitter.best_indent)}
+               if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+                       return false
+               }
+       }
+
+       emitter.open_ended = false
+
+       var chomp_hint [1]byte
+       if len(value) == 0 {
+               chomp_hint[0] = '-'
+       } else {
+               i := len(value) - 1
+               for value[i]&0xC0 == 0x80 {
+                       i--
+               }
+               if !is_break(value, i) {
+                       chomp_hint[0] = '-'
+               } else if i == 0 {
+                       chomp_hint[0] = '+'
+                       emitter.open_ended = true
+               } else {
+                       i--
+                       for value[i]&0xC0 == 0x80 {
+                               i--
+                       }
+                       if is_break(value, i) {
+                               chomp_hint[0] = '+'
+                               emitter.open_ended = true
+                       }
+               }
+       }
+       if chomp_hint[0] != 0 {
+               if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+                       return false
+               }
+       }
+       return true
+}
+
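+// Write a literal block scalar ('|'), which preserves line breaks verbatim.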
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+       if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+               return false
+       }
+       if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+               return false
+       }
+       if !put_break(emitter) {
+               return false
+       }
+       emitter.indention = true
+       emitter.whitespace = true
+       breaks := true
+       for i := 0; i < len(value); {
+               if is_break(value, i) {
+                       if !write_break(emitter, value, &i) {
+                               return false
+                       }
+                       emitter.indention = true
+                       breaks = true
+               } else {
+                       if breaks {
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                       }
+                       if !write(emitter, value, &i) {
+                               return false
+                       }
+                       emitter.indention = false
+                       breaks = false
+               }
+       }
+
+       return true
+}
+
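+// Write a folded block scalar ('>'), folding long lines at spaces while
+// leaving more-indented lines intact.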
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+       if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+               return false
+       }
+       if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+               return false
+       }
+
+       if !put_break(emitter) {
+               return false
+       }
+       emitter.indention = true
+       emitter.whitespace = true
+
+       breaks := true
+       leading_spaces := true
+       for i := 0; i < len(value); {
+               if is_break(value, i) {
+                       if !breaks && !leading_spaces && value[i] == '\n' {
+                               k := 0
+                               for is_break(value, k) {
+                                       k += width(value[k])
+                               }
+                               if !is_blankz(value, k) {
+                                       if !put_break(emitter) {
+                                               return false
+                                       }
+                               }
+                       }
+                       if !write_break(emitter, value, &i) {
+                               return false
+                       }
+                       emitter.indention = true
+                       breaks = true
+               } else {
+                       if breaks {
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                               leading_spaces = is_blank(value, i)
+                       }
+                       if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+                               if !yaml_emitter_write_indent(emitter) {
+                                       return false
+                               }
+                               i += width(value[i])
+                       } else {
+                               if !write(emitter, value, &i) {
+                                       return false
+                               }
+                       }
+                       emitter.indention = false
+                       breaks = false
+               }
+       }
+       return true
+}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/encode.go b/vendor/github.com/zclconf/go-cty-yaml/encode.go
new file mode 100644 (file)
index 0000000..daa1478
--- /dev/null
@@ -0,0 +1,189 @@
+package yaml
+
+import (
+       "bytes"
+       "fmt"
+       "strings"
+
+       "github.com/zclconf/go-cty/cty"
+)
+
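+// marshal encodes a single cty value as one complete YAML document,
+// surrounding it with the stream and document start/end events the emitter
+// expects.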
+func (c *Converter) marshal(v cty.Value) ([]byte, error) {
+       var buf bytes.Buffer
+
+       e := &yaml_emitter_t{}
+       yaml_emitter_initialize(e)
+       yaml_emitter_set_output_writer(e, &buf)
+       yaml_emitter_set_unicode(e, true)
+
+       var evt yaml_event_t
+       yaml_stream_start_event_initialize(&evt, yaml_UTF8_ENCODING)
+       if !yaml_emitter_emit(e, &evt) {
+               return nil, emitterError(e)
+       }
+       yaml_document_start_event_initialize(&evt, nil, nil, true)
+       if !yaml_emitter_emit(e, &evt) {
+               return nil, emitterError(e)
+       }
+
+       if err := c.marshalEmit(v, e); err != nil {
+               return nil, err
+       }
+
+       yaml_document_end_event_initialize(&evt, true)
+       if !yaml_emitter_emit(e, &evt) {
+               return nil, emitterError(e)
+       }
+       yaml_stream_end_event_initialize(&evt)
+       if !yaml_emitter_emit(e, &evt) {
+               return nil, emitterError(e)
+       }
+
+       return buf.Bytes(), nil
+}
+
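+// marshalEmit dispatches on the value's type: nulls and primitives become
+// scalars, tuples/lists/sets become sequences, and objects/maps become
+// mappings. Unknown values cannot be serialized.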
+func (c *Converter) marshalEmit(v cty.Value, e *yaml_emitter_t) error {
+       ty := v.Type()
+       switch {
+       case v.IsNull():
+               return c.marshalPrimitive(v, e)
+       case !v.IsKnown():
+               return fmt.Errorf("cannot serialize unknown value as YAML")
+       case ty.IsPrimitiveType():
+               return c.marshalPrimitive(v, e)
+       case ty.IsTupleType(), ty.IsListType(), ty.IsSetType():
+               return c.marshalSequence(v, e)
+       case ty.IsObjectType(), ty.IsMapType():
+               return c.marshalMapping(v, e)
+       default:
+               return fmt.Errorf("can't marshal %s as YAML", ty.FriendlyName())
+       }
+}
+
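+// marshalPrimitive emits a null, string, number, or bool as a scalar event.
+// Multi-line strings use the literal block style; all other strings are
+// double-quoted.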
+func (c *Converter) marshalPrimitive(v cty.Value, e *yaml_emitter_t) error {
+       var evt yaml_event_t
+
+       if v.IsNull() {
+               yaml_scalar_event_initialize(
+                       &evt,
+                       nil,
+                       nil,
+                       []byte("null"),
+                       true,
+                       true,
+                       yaml_PLAIN_SCALAR_STYLE,
+               )
+               if !yaml_emitter_emit(e, &evt) {
+                       return emitterError(e)
+               }
+               return nil
+       }
+
+       switch v.Type() {
+       case cty.String:
+               str := v.AsString()
+               style := yaml_DOUBLE_QUOTED_SCALAR_STYLE
+               if strings.Contains(str, "\n") {
+                       style = yaml_LITERAL_SCALAR_STYLE
+               }
+               yaml_scalar_event_initialize(
+                       &evt,
+                       nil,
+                       nil,
+                       []byte(str),
+                       true,
+                       true,
+                       style,
+               )
+       case cty.Number:
+               str := v.AsBigFloat().Text('f', -1)
+               yaml_scalar_event_initialize(
+                       &evt,
+                       nil,
+                       nil,
+                       []byte(str),
+                       true,
+                       true,
+                       yaml_PLAIN_SCALAR_STYLE,
+               )
+       case cty.Bool:
+               var str string
+               switch v {
+               case cty.True:
+                       str = "true"
+               case cty.False:
+                       str = "false"
+               }
+               yaml_scalar_event_initialize(
+                       &evt,
+                       nil,
+                       nil,
+                       []byte(str),
+                       true,
+                       true,
+                       yaml_PLAIN_SCALAR_STYLE,
+               )
+       }
+       if !yaml_emitter_emit(e, &evt) {
+               return emitterError(e)
+       }
+       return nil
+}
+
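+// marshalSequence emits each element between sequence start/end events,
+// using flow style ("[a, b]") when the converter is configured for it.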
+func (c *Converter) marshalSequence(v cty.Value, e *yaml_emitter_t) error {
+       style := yaml_BLOCK_SEQUENCE_STYLE
+       if c.encodeAsFlow {
+               style = yaml_FLOW_SEQUENCE_STYLE
+       }
+
+       var evt yaml_event_t
+       yaml_sequence_start_event_initialize(&evt, nil, nil, true, style)
+       if !yaml_emitter_emit(e, &evt) {
+               return emitterError(e)
+       }
+
+       for it := v.ElementIterator(); it.Next(); {
+               _, v := it.Element()
+               err := c.marshalEmit(v, e)
+               if err != nil {
+                       return err
+               }
+       }
+
+       yaml_sequence_end_event_initialize(&evt)
+       if !yaml_emitter_emit(e, &evt) {
+               return emitterError(e)
+       }
+       return nil
+}
+
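+// marshalMapping emits alternating key and value events between mapping
+// start/end events, using flow style ("{k: v}") when the converter is
+// configured for it.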
+func (c *Converter) marshalMapping(v cty.Value, e *yaml_emitter_t) error {
+       style := yaml_BLOCK_MAPPING_STYLE
+       if c.encodeAsFlow {
+               style = yaml_FLOW_MAPPING_STYLE
+       }
+
+       var evt yaml_event_t
+       yaml_mapping_start_event_initialize(&evt, nil, nil, true, style)
+       if !yaml_emitter_emit(e, &evt) {
+               return emitterError(e)
+       }
+
+       for it := v.ElementIterator(); it.Next(); {
+               k, v := it.Element()
+               err := c.marshalEmit(k, e)
+               if err != nil {
+                       return err
+               }
+               err = c.marshalEmit(v, e)
+               if err != nil {
+                       return err
+               }
+       }
+
+       yaml_mapping_end_event_initialize(&evt)
+       if !yaml_emitter_emit(e, &evt) {
+               return emitterError(e)
+       }
+       return nil
+}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/error.go b/vendor/github.com/zclconf/go-cty-yaml/error.go
new file mode 100644 (file)
index 0000000..ae41c48
--- /dev/null
@@ -0,0 +1,97 @@
+package yaml
+
+import (
+       "errors"
+       "fmt"
+)
+
+// Error is an error implementation used to report errors that correspond to
+// a particular position in an input buffer.
+type Error struct {
+       cause        error
+       Line, Column int
+}
+
+func (e Error) Error() string {
+       return fmt.Sprintf("on line %d, column %d: %s", e.Line, e.Column, e.cause.Error())
+}
+
+// Cause is an implementation of the interface used by
+// github.com/pkg/errors.Cause, returning the underlying error without the
+// position information.
+func (e Error) Cause() error {
+       return e.cause
+}
+
+// WrappedErrors is an implementation of github.com/hashicorp/errwrap.Wrapper
+// returning the underlying error without the position information.
+func (e Error) WrappedErrors() []error {
+       return []error{e.cause}
+}
+
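+// parserError converts the parser's recorded problem into an error carrying
+// the position at which parsing failed.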
+func parserError(p *yaml_parser_t) error {
+       var cause error
+       if len(p.problem) > 0 {
+               cause = errors.New(p.problem)
+       } else {
+               cause = errors.New("invalid YAML syntax") // generic fallback when the parser recorded no problem text
+       }
+
+       return parserErrorWrap(p, cause)
+}
+
+func parserErrorWrap(p *yaml_parser_t, cause error) error {
+       switch {
+       case p.problem_mark.line != 0:
+               line := p.problem_mark.line
+               column := p.problem_mark.column
+               // Scanner errors record their mark before the line counter has
+               // advanced, so adjust to report the position the user expects.
+               if p.error == yaml_SCANNER_ERROR {
+                       line++
+                       column = 0
+               }
+               return Error{
+                       cause:  cause,
+                       Line:   line,
+                       Column: column + 1,
+               }
+       case p.context_mark.line != 0:
+               return Error{
+                       cause:  cause,
+                       Line:   p.context_mark.line,
+                       Column: p.context_mark.column + 1,
+               }
+       default:
+               return cause
+       }
+}
+
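+// parserErrorf wraps a formatted error with the parser's current position.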
+func parserErrorf(p *yaml_parser_t, f string, vals ...interface{}) error {
+       return parserErrorWrap(p, fmt.Errorf(f, vals...))
+}
+
+func parseEventErrorWrap(evt *yaml_event_t, cause error) error {
+       if evt.start_mark.line == 0 {
+               // Event does not have a start mark, so we won't wrap the error at all
+               return cause
+       }
+       return Error{
+               cause:  cause,
+               Line:   evt.start_mark.line,
+               Column: evt.start_mark.column + 1,
+       }
+}
+
+func parseEventErrorf(evt *yaml_event_t, f string, vals ...interface{}) error {
+       return parseEventErrorWrap(evt, fmt.Errorf(f, vals...))
+}
+
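+// emitterError converts the emitter's recorded problem into an error. The
+// emitter does not track positions, so none is attached.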
+func emitterError(e *yaml_emitter_t) error {
+       var cause error
+       if len(e.problem) > 0 {
+               cause = errors.New(e.problem)
+       } else {
+               cause = errors.New("failed to write YAML token") // generic fallback when the emitter recorded no problem text
+       }
+       return cause
+}
diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.mod b/vendor/github.com/zclconf/go-cty-yaml/go.mod
new file mode 100644 (file)
index 0000000..3d52268
--- /dev/null
@@ -0,0 +1,3 @@
+module github.com/zclconf/go-cty-yaml
+
+require github.com/zclconf/go-cty v1.0.0
diff --git a/vendor/github.com/zclconf/go-cty-yaml/go.sum b/vendor/github.com/zclconf/go-cty-yaml/go.sum
new file mode 100644 (file)
index 0000000..841f7fc
--- /dev/null
@@ -0,0 +1,18 @@
+github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
+github.com/zclconf/go-cty v1.0.0 h1:EWtv3gKe2wPLIB9hQRQJa7k/059oIfAqcEkCNnaVckk=
+github.com/zclconf/go-cty v1.0.0/go.mod h1:xnAOWiHeOqg2nWS62VtQ7pbOu17FtxJNW8RLEih+O3s=
+golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/github.com/zclconf/go-cty-yaml/implied_type.go b/vendor/github.com/zclconf/go-cty-yaml/implied_type.go
new file mode 100644 (file)
index 0000000..5b7b068
--- /dev/null
@@ -0,0 +1,268 @@
+package yaml
+
+import (
+       "errors"
+       "fmt"
+
+       "github.com/zclconf/go-cty/cty"
+       "github.com/zclconf/go-cty/cty/convert"
+)
+
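+// impliedType parses a single YAML document and infers a cty type that can
+// represent its value, without performing the conversion itself.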
+func (c *Converter) impliedType(src []byte) (cty.Type, error) {
+       p := &yaml_parser_t{}
+       if !yaml_parser_initialize(p) {
+               return cty.NilType, errors.New("failed to initialize YAML parser")
+       }
+       if len(src) == 0 {
+               src = []byte{'\n'}
+       }
+
+       an := &typeAnalysis{
+               anchorsPending: map[string]int{},
+               anchorTypes:    map[string]cty.Type{},
+       }
+
+       yaml_parser_set_input_string(p, src)
+
+       var evt yaml_event_t
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilType, parserError(p)
+       }
+       if evt.typ != yaml_STREAM_START_EVENT {
+               return cty.NilType, parseEventErrorf(&evt, "missing stream start token")
+       }
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilType, parserError(p)
+       }
+       if evt.typ != yaml_DOCUMENT_START_EVENT {
+               return cty.NilType, parseEventErrorf(&evt, "missing start of document")
+       }
+
+       ty, err := c.impliedTypeParse(an, p)
+       if err != nil {
+               return cty.NilType, err
+       }
+
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilType, parserError(p)
+       }
+       if evt.typ == yaml_DOCUMENT_START_EVENT {
+               return cty.NilType, parseEventErrorf(&evt, "only a single document is allowed")
+       }
+       if evt.typ != yaml_DOCUMENT_END_EVENT {
+               return cty.NilType, parseEventErrorf(&evt, "unexpected extra content (%s) after value", evt.typ.String())
+       }
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilType, parserError(p)
+       }
+       if evt.typ != yaml_STREAM_END_EVENT {
+               return cty.NilType, parseEventErrorf(&evt, "unexpected extra content after value")
+       }
+
+       return ty, err
+}
+
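+// impliedTypeParse reads the next event from the parser and infers the type
+// of the value it begins.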
+func (c *Converter) impliedTypeParse(an *typeAnalysis, p *yaml_parser_t) (cty.Type, error) {
+       var evt yaml_event_t
+       if !yaml_parser_parse(p, &evt) {
+               return cty.NilType, parserError(p)
+       }
+       return c.impliedTypeParseRemainder(an, &evt, p)
+}
+
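+// impliedTypeParseRemainder infers a type from an event that has already been
+// read, dispatching on the event kind.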
+func (c *Converter) impliedTypeParseRemainder(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
+       switch evt.typ {
+       case yaml_SCALAR_EVENT:
+               return c.impliedTypeScalar(an, evt, p)
+       case yaml_ALIAS_EVENT:
+               return c.impliedTypeAlias(an, evt, p)
+       case yaml_MAPPING_START_EVENT:
+               return c.impliedTypeMapping(an, evt, p)
+       case yaml_SEQUENCE_START_EVENT:
+               return c.impliedTypeSequence(an, evt, p)
+       case yaml_DOCUMENT_START_EVENT:
+               return cty.NilType, parseEventErrorf(evt, "only a single document is allowed")
+       case yaml_STREAM_END_EVENT:
+               // Decoding an empty buffer, probably
+               return cty.NilType, parseEventErrorf(evt, "expecting value but found end of stream")
+       default:
+               // Should never happen; the above should be comprehensive
+               return cty.NilType, parseEventErrorf(evt, "unexpected parser event %s", evt.typ.String())
+       }
+}
+
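+// impliedTypeScalar resolves a scalar event to a cty type, recording any
+// anchor so later aliases can reuse the inferred type.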
+func (c *Converter) impliedTypeScalar(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
+       src := evt.value
+       tag := string(evt.tag)
+       anchor := string(evt.anchor)
+       implicit := evt.implicit
+
+       if len(anchor) > 0 {
+               an.beginAnchor(anchor)
+       }
+
+       var ty cty.Type
+       switch {
+       case tag == "" && !implicit:
+               // Untagged explicit string
+               ty = cty.String
+       default:
+               v, err := c.resolveScalar(tag, string(src), yaml_scalar_style_t(evt.style))
+               if err != nil {
+                       return cty.NilType, parseEventErrorWrap(evt, err)
+               }
+               if v.RawEquals(mergeMappingVal) {
+                       // In any context other than a mapping key, this is just a plain string
+                       ty = cty.String
+               } else {
+                       ty = v.Type()
+               }
+       }
+
+       if len(anchor) > 0 {
+               an.completeAnchor(anchor, ty)
+       }
+       return ty, nil
+}
+
+func (c *Converter) impliedTypeMapping(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
+       tag := string(evt.tag)
+       anchor := string(evt.anchor)
+
+       if tag != "" && tag != yaml_MAP_TAG {
+               return cty.NilType, parseEventErrorf(evt, "can't interpret mapping as %s", tag)
+       }
+
+       if anchor != "" {
+               an.beginAnchor(anchor)
+       }
+
+       atys := make(map[string]cty.Type)
+       for {
+               var nextEvt yaml_event_t
+               if !yaml_parser_parse(p, &nextEvt) {
+                       return cty.NilType, parserError(p)
+               }
+               if nextEvt.typ == yaml_MAPPING_END_EVENT {
+                       ty := cty.Object(atys)
+                       if anchor != "" {
+                               an.completeAnchor(anchor, ty)
+                       }
+                       return ty, nil
+               }
+
+               if nextEvt.typ != yaml_SCALAR_EVENT {
+                       return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
+               }
+               keyVal, err := c.resolveScalar(string(nextEvt.tag), string(nextEvt.value), yaml_scalar_style_t(nextEvt.style))
+               if err != nil {
+                       return cty.NilType, err
+               }
+               if keyVal.RawEquals(mergeMappingVal) {
+                       // Merge the value (which must itself be a mapping)
+                       // into our mapping.
+                       ty, err := c.impliedTypeParse(an, p)
+                       if err != nil {
+                               return cty.NilType, err
+                       }
+                       if !ty.IsObjectType() {
+                               return cty.NilType, parseEventErrorf(&nextEvt, "cannot merge %s into mapping", ty.FriendlyName())
+                       }
+                       for name, aty := range ty.AttributeTypes() {
+                               atys[name] = aty
+                       }
+                       continue
+               }
+               if keyValStr, err := convert.Convert(keyVal, cty.String); err == nil {
+                       keyVal = keyValStr
+               } else {
+                       return cty.NilType, parseEventErrorf(&nextEvt, "only strings are allowed as mapping keys")
+               }
+               if keyVal.IsNull() {
+                       return cty.NilType, parseEventErrorf(&nextEvt, "mapping key cannot be null")
+               }
+               if !keyVal.IsKnown() {
+                       return cty.NilType, parseEventErrorf(&nextEvt, "mapping key must be known")
+               }
+               valTy, err := c.impliedTypeParse(an, p)
+               if err != nil {
+                       return cty.NilType, err
+               }
+
+               atys[keyVal.AsString()] = valTy
+       }
+}
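+
+// Informal example: with an anchored mapping "base: &b {a: 1}", a later
+// "child: {<<: *b, c: 2}" takes the merge-key branch above, folding the
+// anchored object's attribute types into the child, whose implied type
+// then has both "a" and "c" attributes.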
+
+func (c *Converter) impliedTypeSequence(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
+       tag := string(evt.tag)
+       anchor := string(evt.anchor)
+
+       if tag != "" && tag != yaml_SEQ_TAG {
+               return cty.NilType, parseEventErrorf(evt, "can't interpret sequence as %s", tag)
+       }
+
+       if anchor != "" {
+               an.beginAnchor(anchor)
+       }
+
+       var atys []cty.Type
+       for {
+               var nextEvt yaml_event_t
+               if !yaml_parser_parse(p, &nextEvt) {
+                       return cty.NilType, parserError(p)
+               }
+               if nextEvt.typ == yaml_SEQUENCE_END_EVENT {
+                       ty := cty.Tuple(atys)
+                       if anchor != "" {
+                               an.completeAnchor(anchor, ty)
+                       }
+                       return ty, nil
+               }
+
+               valTy, err := c.impliedTypeParseRemainder(an, &nextEvt, p)
+               if err != nil {
+                       return cty.NilType, err
+               }
+
+               atys = append(atys, valTy)
+       }
+}
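+
+// Informal example: the sequence ["a", 1] is typed element by element,
+// so its implied type is cty.Tuple([]cty.Type{cty.String, cty.Number})
+// rather than a uniform list type.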
+
+func (c *Converter) impliedTypeAlias(an *typeAnalysis, evt *yaml_event_t, p *yaml_parser_t) (cty.Type, error) {
+       ty, err := an.anchorType(string(evt.anchor))
+       if err != nil {
+               err = parseEventErrorWrap(evt, err)
+       }
+       return ty, err
+}
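+
+// Informal example: given "first: &ids [1, 2]" and later "second: *ids",
+// the alias event for *ids resolves here to the tuple type that
+// completeAnchor recorded for "ids".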
+
+type typeAnalysis struct {
+       anchorsPending map[string]int
+       anchorTypes    map[string]cty.Type
+}
+
+func (an *typeAnalysis) beginAnchor(name string) {
+       an.anchorsPending[name]++
+}
+
+func (an *typeAnalysis) completeAnchor(name string, ty cty.Type) {
+       an.anchorsPending[name]--
+       if an.anchorsPending[name] == 0 {
+               delete(an.anchorsPending, name)
+       }
+       an.anchorTypes[name] = ty
+}
+
+func (an *typeAnalysis) anchorType(name string) (cty.Type, error) {
+       if _, pending := an.anchorsPending[name]; pending {
+               // YAML normally allows self-referencing structures, but cty cannot
+               // represent them (it requires all structures to be finite) so we
+               // must fail here.
+               return cty.NilType, fmt.Errorf("cannot refer to anchor %q from inside its own definition", name)
+       }
+       ty, ok := an.anchorTypes[name]
+       if !ok {
+               return cty.NilType, fmt.Errorf("reference to undefined anchor %q", name)
+       }
+       return ty, nil
+}
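+
+// Informal example: a self-referencing document such as "a: &x [*x]"
+// reaches anchorType while "x" is still pending, so the conversion
+// fails instead of trying to build an infinite type.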
diff --git a/vendor/github.com/zclconf/go-cty-yaml/parserc.go b/vendor/github.com/zclconf/go-cty-yaml/parserc.go
new file mode 100644
index 0000000..81d05df
--- /dev/null
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+       "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document    ::= block_node DOCUMENT-END*
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          | properties (block_content | indentless_block_sequence)?
+//                          | block_content
+//                          | indentless_block_sequence
+// block_node           ::= ALIAS
+//                          | properties block_content?
+//                          | block_content
+// flow_node            ::= ALIAS
+//                          | properties flow_content?
+//                          | flow_content
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content        ::= block_collection | flow_collection | SCALAR
+// flow_content         ::= flow_collection | SCALAR
+// block_collection     ::= block_sequence | block_mapping
+// flow_collection      ::= flow_sequence | flow_mapping
+// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+// block_mapping        ::= BLOCK-MAPPING-START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                          flow_sequence_entry?
+//                          FLOW-SEQUENCE-END
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                          flow_mapping_entry?
+//                          FLOW-MAPPING-END
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
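+// As an informal illustration of the grammar above, the document
+// "- a\n- b\n" is scanned as roughly the token stream STREAM-START,
+// BLOCK-SEQUENCE-START, BLOCK-ENTRY, SCALAR("a"), BLOCK-ENTRY,
+// SCALAR("b"), BLOCK-END, STREAM-END, which the functions below fold
+// into events.
+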
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+       if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+               return &parser.tokens[parser.tokens_head]
+       }
+       return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+       parser.token_available = false
+       parser.tokens_parsed++
+       parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+       parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+       // Erase the event object.
+       *event = yaml_event_t{}
+
+       // No events after the end of the stream or error.
+       if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+               return true
+       }
+
+       // Generate the next event.
+       return yaml_parser_state_machine(parser, event)
+}
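+
+// Informal usage sketch, mirroring the callers in this package: events
+// are pulled in a loop until the stream ends or parsing fails, e.g.
+//
+//     var evt yaml_event_t
+//     for yaml_parser_parse(parser, &evt) && evt.typ != yaml_STREAM_END_EVENT {
+//             // ... handle evt ...
+//     }
+//
+// with parser.error consulted if the loop exits early.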
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+       parser.error = yaml_PARSER_ERROR
+       parser.problem = problem
+       parser.problem_mark = problem_mark
+       return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+       parser.error = yaml_PARSER_ERROR
+       parser.context = context
+       parser.context_mark = context_mark
+       parser.problem = problem
+       parser.problem_mark = problem_mark
+       return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+       //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+       switch parser.state {
+       case yaml_PARSE_STREAM_START_STATE:
+               return yaml_parser_parse_stream_start(parser, event)
+
+       case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+               return yaml_parser_parse_document_start(parser, event, true)
+
+       case yaml_PARSE_DOCUMENT_START_STATE:
+               return yaml_parser_parse_document_start(parser, event, false)
+
+       case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+               return yaml_parser_parse_document_content(parser, event)
+
+       case yaml_PARSE_DOCUMENT_END_STATE:
+               return yaml_parser_parse_document_end(parser, event)
+
+       case yaml_PARSE_BLOCK_NODE_STATE:
+               return yaml_parser_parse_node(parser, event, true, false)
+
+       case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+               return yaml_parser_parse_node(parser, event, true, true)
+
+       case yaml_PARSE_FLOW_NODE_STATE:
+               return yaml_parser_parse_node(parser, event, false, false)
+
+       case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+               return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+       case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+               return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+       case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+               return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+       case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+               return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+       case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+               return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+       case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+               return yaml_parser_parse_block_mapping_value(parser, event)
+
+       case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+               return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+       case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+               return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+       case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+               return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+       case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+               return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+       case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+               return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+       case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+               return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+       case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+               return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+       case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+               return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+       case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+               return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+       default:
+               panic("invalid parser state")
+       }
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+       if token.typ != yaml_STREAM_START_TOKEN {
+               return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+       }
+       parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+       *event = yaml_event_t{
+               typ:        yaml_STREAM_START_EVENT,
+               start_mark: token.start_mark,
+               end_mark:   token.end_mark,
+               encoding:   token.encoding,
+       }
+       skip_token(parser)
+       return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+
+       // Parse extra document end indicators.
+       if !implicit {
+               for token.typ == yaml_DOCUMENT_END_TOKEN {
+                       skip_token(parser)
+                       token = peek_token(parser)
+                       if token == nil {
+                               return false
+                       }
+               }
+       }
+
+       if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+               token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+               token.typ != yaml_DOCUMENT_START_TOKEN &&
+               token.typ != yaml_STREAM_END_TOKEN {
+               // Parse an implicit document.
+               if !yaml_parser_process_directives(parser, nil, nil) {
+                       return false
+               }
+               parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+               parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+               *event = yaml_event_t{
+                       typ:        yaml_DOCUMENT_START_EVENT,
+                       start_mark: token.start_mark,
+                       end_mark:   token.end_mark,
+               }
+
+       } else if token.typ != yaml_STREAM_END_TOKEN {
+               // Parse an explicit document.
+               var version_directive *yaml_version_directive_t
+               var tag_directives []yaml_tag_directive_t
+               start_mark := token.start_mark
+               if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+                       return false
+               }
+               token = peek_token(parser)
+               if token == nil {
+                       return false
+               }
+               if token.typ != yaml_DOCUMENT_START_TOKEN {
+                       yaml_parser_set_parser_error(parser,
+                               "did not find expected <document start>", token.start_mark)
+                       return false
+               }
+               parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+               parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+               end_mark := token.end_mark
+
+               *event = yaml_event_t{
+                       typ:               yaml_DOCUMENT_START_EVENT,
+                       start_mark:        start_mark,
+                       end_mark:          end_mark,
+                       version_directive: version_directive,
+                       tag_directives:    tag_directives,
+                       implicit:          false,
+               }
+               skip_token(parser)
+
+       } else {
+               // Parse the stream end.
+               parser.state = yaml_PARSE_END_STATE
+               *event = yaml_event_t{
+                       typ:        yaml_STREAM_END_EVENT,
+                       start_mark: token.start_mark,
+                       end_mark:   token.end_mark,
+               }
+               skip_token(parser)
+       }
+
+       return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                                                    ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+       if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+               token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+               token.typ == yaml_DOCUMENT_START_TOKEN ||
+               token.typ == yaml_DOCUMENT_END_TOKEN ||
+               token.typ == yaml_STREAM_END_TOKEN {
+               parser.state = parser.states[len(parser.states)-1]
+               parser.states = parser.states[:len(parser.states)-1]
+               return yaml_parser_process_empty_scalar(parser, event,
+                       token.start_mark)
+       }
+       return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                                     *************
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+
+       start_mark := token.start_mark
+       end_mark := token.start_mark
+
+       implicit := true
+       if token.typ == yaml_DOCUMENT_END_TOKEN {
+               end_mark = token.end_mark
+               skip_token(parser)
+               implicit = false
+       }
+
+       parser.tag_directives = parser.tag_directives[:0]
+
+       parser.state = yaml_PARSE_DOCUMENT_START_STATE
+       *event = yaml_event_t{
+               typ:        yaml_DOCUMENT_END_EVENT,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+               implicit:   implicit,
+       }
+       return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          *****
+//                          | properties (block_content | indentless_block_sequence)?
+//                            **********  *
+//                          | block_content | indentless_block_sequence
+//                            *
+// block_node           ::= ALIAS
+//                          *****
+//                          | properties block_content?
+//                            ********** *
+//                          | block_content
+//                            *
+// flow_node            ::= ALIAS
+//                          *****
+//                          | properties flow_content?
+//                            ********** *
+//                          | flow_content
+//                            *
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+//                          *************************
+// block_content        ::= block_collection | flow_collection | SCALAR
+//                                                               ******
+// flow_content         ::= flow_collection | SCALAR
+//                                            ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+       //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+
+       if token.typ == yaml_ALIAS_TOKEN {
+               parser.state = parser.states[len(parser.states)-1]
+               parser.states = parser.states[:len(parser.states)-1]
+               *event = yaml_event_t{
+                       typ:        yaml_ALIAS_EVENT,
+                       start_mark: token.start_mark,
+                       end_mark:   token.end_mark,
+                       anchor:     token.value,
+               }
+               skip_token(parser)
+               return true
+       }
+
+       start_mark := token.start_mark
+       end_mark := token.start_mark
+
+       var tag_token bool
+       var tag_handle, tag_suffix, anchor []byte
+       var tag_mark yaml_mark_t
+       if token.typ == yaml_ANCHOR_TOKEN {
+               anchor = token.value
+               start_mark = token.start_mark
+               end_mark = token.end_mark
+               skip_token(parser)
+               token = peek_token(parser)
+               if token == nil {
+                       return false
+               }
+               if token.typ == yaml_TAG_TOKEN {
+                       tag_token = true
+                       tag_handle = token.value
+                       tag_suffix = token.suffix
+                       tag_mark = token.start_mark
+                       end_mark = token.end_mark
+                       skip_token(parser)
+                       token = peek_token(parser)
+                       if token == nil {
+                               return false
+                       }
+               }
+       } else if token.typ == yaml_TAG_TOKEN {
+               tag_token = true
+               tag_handle = token.value
+               tag_suffix = token.suffix
+               start_mark = token.start_mark
+               tag_mark = token.start_mark
+               end_mark = token.end_mark
+               skip_token(parser)
+               token = peek_token(parser)
+               if token == nil {
+                       return false
+               }
+               if token.typ == yaml_ANCHOR_TOKEN {
+                       anchor = token.value
+                       end_mark = token.end_mark
+                       skip_token(parser)
+                       token = peek_token(parser)
+                       if token == nil {
+                               return false
+                       }
+               }
+       }
+
+       var tag []byte
+       if tag_token {
+               if len(tag_handle) == 0 {
+                       tag = tag_suffix
+                       tag_suffix = nil
+               } else {
+                       for i := range parser.tag_directives {
+                               if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+                                       tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+                                       tag = append(tag, tag_suffix...)
+                                       break
+                               }
+                       }
+                       if len(tag) == 0 {
+                               yaml_parser_set_parser_error_context(parser,
+                                       "while parsing a node", start_mark,
+                                       "found undefined tag handle", tag_mark)
+                               return false
+                       }
+               }
+       }
+
+       implicit := len(tag) == 0
+       if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+               end_mark = token.end_mark
+               parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+               *event = yaml_event_t{
+                       typ:        yaml_SEQUENCE_START_EVENT,
+                       start_mark: start_mark,
+                       end_mark:   end_mark,
+                       anchor:     anchor,
+                       tag:        tag,
+                       implicit:   implicit,
+                       style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+               }
+               return true
+       }
+       if token.typ == yaml_SCALAR_TOKEN {
+               var plain_implicit, quoted_implicit bool
+               end_mark = token.end_mark
+               if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+                       plain_implicit = true
+               } else if len(tag) == 0 {
+                       quoted_implicit = true
+               }
+               parser.state = parser.states[len(parser.states)-1]
+               parser.states = parser.states[:len(parser.states)-1]
+
+               *event = yaml_event_t{
+                       typ:             yaml_SCALAR_EVENT,
+                       start_mark:      start_mark,
+                       end_mark:        end_mark,
+                       anchor:          anchor,
+                       tag:             tag,
+                       value:           token.value,
+                       implicit:        plain_implicit,
+                       quoted_implicit: quoted_implicit,
+                       style:           yaml_style_t(token.style),
+               }
+               skip_token(parser)
+               return true
+       }
+       if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+               // [Go] Some of the events below can be merged as they differ only on style.
+               end_mark = token.end_mark
+               parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+               *event = yaml_event_t{
+                       typ:        yaml_SEQUENCE_START_EVENT,
+                       start_mark: start_mark,
+                       end_mark:   end_mark,
+                       anchor:     anchor,
+                       tag:        tag,
+                       implicit:   implicit,
+                       style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+               }
+               return true
+       }
+       if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+               end_mark = token.end_mark
+               parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+               *event = yaml_event_t{
+                       typ:        yaml_MAPPING_START_EVENT,
+                       start_mark: start_mark,
+                       end_mark:   end_mark,
+                       anchor:     anchor,
+                       tag:        tag,
+                       implicit:   implicit,
+                       style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+               }
+               return true
+       }
+       if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+               end_mark = token.end_mark
+               parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+               *event = yaml_event_t{
+                       typ:        yaml_SEQUENCE_START_EVENT,
+                       start_mark: start_mark,
+                       end_mark:   end_mark,
+                       anchor:     anchor,
+                       tag:        tag,
+                       implicit:   implicit,
+                       style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+               }
+               return true
+       }
+       if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+               end_mark = token.end_mark
+               parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+               *event = yaml_event_t{
+                       typ:        yaml_MAPPING_START_EVENT,
+                       start_mark: start_mark,
+                       end_mark:   end_mark,
+                       anchor:     anchor,
+                       tag:        tag,
+                       implicit:   implicit,
+                       style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+               }
+               return true
+       }
+       if len(anchor) > 0 || len(tag) > 0 {
+               parser.state = parser.states[len(parser.states)-1]
+               parser.states = parser.states[:len(parser.states)-1]
+
+               *event = yaml_event_t{
+                       typ:             yaml_SCALAR_EVENT,
+                       start_mark:      start_mark,
+                       end_mark:        end_mark,
+                       anchor:          anchor,
+                       tag:             tag,
+                       implicit:        implicit,
+                       quoted_implicit: false,
+                       style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+               }
+               return true
+       }
+
+       context := "while parsing a flow node"
+       if block {
+               context = "while parsing a block node"
+       }
+       yaml_parser_set_parser_error_context(parser, context, start_mark,
+               "did not find expected node content", token.start_mark)
+       return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//                    ********************  *********** *             *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+       if first {
+               token := peek_token(parser)
+               parser.marks = append(parser.marks, token.start_mark)
+               skip_token(parser)
+       }
+
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+
+       if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+               mark := token.end_mark
+               skip_token(parser)
+               token = peek_token(parser)
+               if token == nil {
+                       return false
+               }
+               if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+                       parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+                       return yaml_parser_parse_node(parser, event, true, false)
+               } else {
+                       parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+                       return yaml_parser_process_empty_scalar(parser, event, mark)
+               }
+       }
+       if token.typ == yaml_BLOCK_END_TOKEN {
+               parser.state = parser.states[len(parser.states)-1]
+               parser.states = parser.states[:len(parser.states)-1]
+               parser.marks = parser.marks[:len(parser.marks)-1]
+
+               *event = yaml_event_t{
+                       typ:        yaml_SEQUENCE_END_EVENT,
+                       start_mark: token.start_mark,
+                       end_mark:   token.end_mark,
+               }
+
+               skip_token(parser)
+               return true
+       }
+
+       context_mark := parser.marks[len(parser.marks)-1]
+       parser.marks = parser.marks[:len(parser.marks)-1]
+       return yaml_parser_set_parser_error_context(parser,
+               "while parsing a block collection", context_mark,
+               "did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+//                           *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+
+       if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+               mark := token.end_mark
+               skip_token(parser)
+               token = peek_token(parser)
+               if token == nil {
+                       return false
+               }
+               if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+                       token.typ != yaml_KEY_TOKEN &&
+                       token.typ != yaml_VALUE_TOKEN &&
+                       token.typ != yaml_BLOCK_END_TOKEN {
+                       parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+                       return yaml_parser_parse_node(parser, event, true, false)
+               }
+               parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+               return yaml_parser_process_empty_scalar(parser, event, mark)
+       }
+       parser.state = parser.states[len(parser.states)-1]
+       parser.states = parser.states[:len(parser.states)-1]
+
+       *event = yaml_event_t{
+               typ:        yaml_SEQUENCE_END_EVENT,
+               start_mark: token.start_mark,
+               end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+       }
+       return true
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING-START
+//                          *******************
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                            *** *
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//
+//                          BLOCK-END
+//                          *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+       if first {
+               token := peek_token(parser)
+               parser.marks = append(parser.marks, token.start_mark)
+               skip_token(parser)
+       }
+
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+
+       if token.typ == yaml_KEY_TOKEN {
+               mark := token.end_mark
+               skip_token(parser)
+               token = peek_token(parser)
+               if token == nil {
+                       return false
+               }
+               if token.typ != yaml_KEY_TOKEN &&
+                       token.typ != yaml_VALUE_TOKEN &&
+                       token.typ != yaml_BLOCK_END_TOKEN {
+                       parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+                       return yaml_parser_parse_node(parser, event, true, true)
+               } else {
+                       parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+                       return yaml_parser_process_empty_scalar(parser, event, mark)
+               }
+       } else if token.typ == yaml_BLOCK_END_TOKEN {
+               parser.state = parser.states[len(parser.states)-1]
+               parser.states = parser.states[:len(parser.states)-1]
+               parser.marks = parser.marks[:len(parser.marks)-1]
+               *event = yaml_event_t{
+                       typ:        yaml_MAPPING_END_EVENT,
+                       start_mark: token.start_mark,
+                       end_mark:   token.end_mark,
+               }
+               skip_token(parser)
+               return true
+       }
+
+       context_mark := parser.marks[len(parser.marks)-1]
+       parser.marks = parser.marks[:len(parser.marks)-1]
+       return yaml_parser_set_parser_error_context(parser,
+               "while parsing a block mapping", context_mark,
+               "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING-START
+//
+//                          ((KEY block_node_or_indentless_sequence?)?
+//
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                           ***** *
+//                          BLOCK-END
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+       if token.typ == yaml_VALUE_TOKEN {
+               mark := token.end_mark
+               skip_token(parser)
+               token = peek_token(parser)
+               if token == nil {
+                       return false
+               }
+               if token.typ != yaml_KEY_TOKEN &&
+                       token.typ != yaml_VALUE_TOKEN &&
+                       token.typ != yaml_BLOCK_END_TOKEN {
+                       parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+                       return yaml_parser_parse_node(parser, event, true, true)
+               }
+               parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+               return yaml_parser_process_empty_scalar(parser, event, mark)
+       }
+       parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+       return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          *******************
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                           *                   **********
+//                          flow_sequence_entry?
+//                          *
+//                          FLOW-SEQUENCE-END
+//                          *****************
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+       if first {
+               token := peek_token(parser)
+               parser.marks = append(parser.marks, token.start_mark)
+               skip_token(parser)
+       }
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+       if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+               if !first {
+                       if token.typ == yaml_FLOW_ENTRY_TOKEN {
+                               skip_token(parser)
+                               token = peek_token(parser)
+                               if token == nil {
+                                       return false
+                               }
+                       } else {
+                               context_mark := parser.marks[len(parser.marks)-1]
+                               parser.marks = parser.marks[:len(parser.marks)-1]
+                               return yaml_parser_set_parser_error_context(parser,
+                                       "while parsing a flow sequence", context_mark,
+                                       "did not find expected ',' or ']'", token.start_mark)
+                       }
+               }
+
+               if token.typ == yaml_KEY_TOKEN {
+                       parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+                       *event = yaml_event_t{
+                               typ:        yaml_MAPPING_START_EVENT,
+                               start_mark: token.start_mark,
+                               end_mark:   token.end_mark,
+                               implicit:   true,
+                               style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+                       }
+                       skip_token(parser)
+                       return true
+               } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+                       parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+                       return yaml_parser_parse_node(parser, event, false, false)
+               }
+       }
+
+       parser.state = parser.states[len(parser.states)-1]
+       parser.states = parser.states[:len(parser.states)-1]
+       parser.marks = parser.marks[:len(parser.marks)-1]
+
+       *event = yaml_event_t{
+               typ:        yaml_SEQUENCE_END_EVENT,
+               start_mark: token.start_mark,
+               end_mark:   token.end_mark,
+       }
+
+       skip_token(parser)
+       return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                      *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+       if token.typ != yaml_VALUE_TOKEN &&
+               token.typ != yaml_FLOW_ENTRY_TOKEN &&
+               token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+               parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+               return yaml_parser_parse_node(parser, event, false, false)
+       }
+       mark := token.end_mark
+       skip_token(parser)
+       parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+       return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                      ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+       if token.typ == yaml_VALUE_TOKEN {
+               skip_token(parser)
+               token := peek_token(parser)
+               if token == nil {
+                       return false
+               }
+               if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+                       parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+                       return yaml_parser_parse_node(parser, event, false, false)
+               }
+       }
+       parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+       return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                                      *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+       parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+       *event = yaml_event_t{
+               typ:        yaml_MAPPING_END_EVENT,
+               start_mark: token.start_mark,
+               end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+       }
+       return true
+}
+
+// Parse the productions:
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          ******************
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                           *                  **********
+//                          flow_mapping_entry?
+//                          ******************
+//                          FLOW-MAPPING-END
+//                          ****************
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *           *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+       if first {
+               token := peek_token(parser)
+               parser.marks = append(parser.marks, token.start_mark)
+               skip_token(parser)
+       }
+
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+
+       if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+               if !first {
+                       if token.typ == yaml_FLOW_ENTRY_TOKEN {
+                               skip_token(parser)
+                               token = peek_token(parser)
+                               if token == nil {
+                                       return false
+                               }
+                       } else {
+                               context_mark := parser.marks[len(parser.marks)-1]
+                               parser.marks = parser.marks[:len(parser.marks)-1]
+                               return yaml_parser_set_parser_error_context(parser,
+                                       "while parsing a flow mapping", context_mark,
+                                       "did not find expected ',' or '}'", token.start_mark)
+                       }
+               }
+
+               if token.typ == yaml_KEY_TOKEN {
+                       skip_token(parser)
+                       token = peek_token(parser)
+                       if token == nil {
+                               return false
+                       }
+                       if token.typ != yaml_VALUE_TOKEN &&
+                               token.typ != yaml_FLOW_ENTRY_TOKEN &&
+                               token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+                               parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+                               return yaml_parser_parse_node(parser, event, false, false)
+                       } else {
+                               parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+                               return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+                       }
+               } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+                       parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+                       return yaml_parser_parse_node(parser, event, false, false)
+               }
+       }
+
+       parser.state = parser.states[len(parser.states)-1]
+       parser.states = parser.states[:len(parser.states)-1]
+       parser.marks = parser.marks[:len(parser.marks)-1]
+       *event = yaml_event_t{
+               typ:        yaml_MAPPING_END_EVENT,
+               start_mark: token.start_mark,
+               end_mark:   token.end_mark,
+       }
+       skip_token(parser)
+       return true
+}
+
+// Parse the productions:
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                   *                  ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+       if empty {
+               parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+               return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+       }
+       if token.typ == yaml_VALUE_TOKEN {
+               skip_token(parser)
+               token = peek_token(parser)
+               if token == nil {
+                       return false
+               }
+               if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+                       parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+                       return yaml_parser_parse_node(parser, event, false, false)
+               }
+       }
+       parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+       return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+       *event = yaml_event_t{
+               typ:        yaml_SCALAR_EVENT,
+               start_mark: mark,
+               end_mark:   mark,
+               value:      nil, // Empty
+               implicit:   true,
+               style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+       }
+       return true
+}
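+
+// Informal example: for input like "key:" with no value, the parser
+// emits one of these empty scalar events in place of the missing value,
+// so consumers always observe a complete key/value pair.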
+
+var default_tag_directives = []yaml_tag_directive_t{
+       {[]byte("!"), []byte("!")},
+       {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
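+
+// Informal example: via the default "!!" handle above, the shorthand
+// tag "!!str" expands to "tag:yaml.org,2002:str" during node parsing.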
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+       version_directive_ref **yaml_version_directive_t,
+       tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+       var version_directive *yaml_version_directive_t
+       var tag_directives []yaml_tag_directive_t
+
+       token := peek_token(parser)
+       if token == nil {
+               return false
+       }
+
+       for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+               if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+                       if version_directive != nil {
+                               yaml_parser_set_parser_error(parser,
+                                       "found duplicate %YAML directive", token.start_mark)
+                               return false
+                       }
+                       if token.major != 1 || token.minor != 1 {
+                               yaml_parser_set_parser_error(parser,
+                                       "found incompatible YAML document", token.start_mark)
+                               return false
+                       }
+                       version_directive = &yaml_version_directive_t{
+                               major: token.major,
+                               minor: token.minor,
+                       }
+               } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+                       value := yaml_tag_directive_t{
+                               handle: token.value,
+                               prefix: token.prefix,
+                       }
+                       if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+                               return false
+                       }
+                       tag_directives = append(tag_directives, value)
+               }
+
+               skip_token(parser)
+               token = peek_token(parser)
+               if token == nil {
+                       return false
+               }
+       }
+
+       for i := range default_tag_directives {
+               if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+                       return false
+               }
+       }
+
+       if version_directive_ref != nil {
+               *version_directive_ref = version_directive
+       }
+       if tag_directives_ref != nil {
+               *tag_directives_ref = tag_directives
+       }
+       return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+       for i := range parser.tag_directives {
+               if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+                       if allow_duplicates {
+                               return true
+                       }
+                       return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+               }
+       }
+
+       // [Go] I suspect the copy is unnecessary. This was likely done
+       // because there was no way to track ownership of the data.
+       value_copy := yaml_tag_directive_t{
+               handle: make([]byte, len(value.handle)),
+               prefix: make([]byte, len(value.prefix)),
+       }
+       copy(value_copy.handle, value.handle)
+       copy(value_copy.prefix, value.prefix)
+       parser.tag_directives = append(parser.tag_directives, value_copy)
+       return true
+}
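+
+// Informal example: declaring the same "%TAG !e! tag:example.com,2019:"
+// handle twice in one document trips the duplicate check above, while
+// re-adding the built-in defaults is tolerated via allow_duplicates.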
diff --git a/vendor/github.com/zclconf/go-cty-yaml/readerc.go b/vendor/github.com/zclconf/go-cty-yaml/readerc.go
new file mode 100644
index 0000000..7c1f5fa
--- /dev/null
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+       "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+       parser.error = yaml_READER_ERROR
+       parser.problem = problem
+       parser.problem_offset = offset
+       parser.problem_value = value
+       return false
+}
+
+// Byte order marks.
+const (
+       bom_UTF8    = "\xef\xbb\xbf"
+       bom_UTF16LE = "\xff\xfe"
+       bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+       // Ensure that we had enough bytes in the raw buffer.
+       for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+               if !yaml_parser_update_raw_buffer(parser) {
+                       return false
+               }
+       }
+
+       // Determine the encoding.
+       buf := parser.raw_buffer
+       pos := parser.raw_buffer_pos
+       avail := len(buf) - pos
+       if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+               parser.encoding = yaml_UTF16LE_ENCODING
+               parser.raw_buffer_pos += 2
+               parser.offset += 2
+       } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+               parser.encoding = yaml_UTF16BE_ENCODING
+               parser.raw_buffer_pos += 2
+               parser.offset += 2
+       } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+               parser.encoding = yaml_UTF8_ENCODING
+               parser.raw_buffer_pos += 3
+               parser.offset += 3
+       } else {
+               parser.encoding = yaml_UTF8_ENCODING
+       }
+       return true
+}
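+
+// Informal example: a stream beginning with the bytes 0xEF 0xBB 0xBF is
+// classified as UTF-8 and those three BOM bytes are skipped before any
+// scanning happens.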
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+       size_read := 0
+
+       // Return if the raw buffer is full.
+       if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+               return true
+       }
+
+       // Return on EOF.
+       if parser.eof {
+               return true
+       }
+
+       // Move the remaining bytes in the raw buffer to the beginning.
+       if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+               copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+       }
+       parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+       parser.raw_buffer_pos = 0
+
+       // Call the read handler to fill the buffer.
+       size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+       parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+       if err == io.EOF {
+               parser.eof = true
+       } else if err != nil {
+               return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+       }
+       return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+       if parser.read_handler == nil {
+               panic("read handler must be set")
+       }
+
+       // [Go] This function was changed to guarantee the requested length at EOF.
+       // The fact we need to do this is pretty awful, but the description above
+       // implies that it must be the case, and there are tests that rely on it.
+
+       // If the EOF flag is set and the raw buffer is empty, do nothing.
+       if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+               // [Go] ACTUALLY! Read the documentation of this function above.
+               // This is just broken. To return true, we need to have the
+               // given length in the buffer. Not doing that means every single
+               // check that calls this function to make sure the buffer has a
+               // given length is (in Go) panicking, or (in C) accessing invalid memory.
+               //return true
+       }
+
+       // Return if the buffer contains enough characters.
+       if parser.unread >= length {
+               return true
+       }
+
+       // Determine the input encoding if it is not known yet.
+       if parser.encoding == yaml_ANY_ENCODING {
+               if !yaml_parser_determine_encoding(parser) {
+                       return false
+               }
+       }
+
+       // Move the unread characters to the beginning of the buffer.
+       buffer_len := len(parser.buffer)
+       if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+               copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+               buffer_len -= parser.buffer_pos
+               parser.buffer_pos = 0
+       } else if parser.buffer_pos == buffer_len {
+               buffer_len = 0
+               parser.buffer_pos = 0
+       }
+
+       // Open the whole buffer for writing, and cut it before returning.
+       parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+       // Fill the buffer until it has enough characters.
+       first := true
+       for parser.unread < length {
+
+               // Fill the raw buffer if necessary.
+               if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+                       if !yaml_parser_update_raw_buffer(parser) {
+                               parser.buffer = parser.buffer[:buffer_len]
+                               return false
+                       }
+               }
+               first = false
+
+               // Decode the raw buffer.
+       inner:
+               for parser.raw_buffer_pos != len(parser.raw_buffer) {
+                       var value rune
+                       var width int
+
+                       raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+                       // Decode the next character.
+                       switch parser.encoding {
+                       case yaml_UTF8_ENCODING:
+                               // Decode a UTF-8 character.  Check RFC 3629
+                               // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+                               //
+                               // The following table (taken from the RFC) is used for
+                               // decoding.
+                               //
+                               //    Char. number range |        UTF-8 octet sequence
+                               //      (hexadecimal)    |              (binary)
+                               //   --------------------+------------------------------------
+                               //   0000 0000-0000 007F | 0xxxxxxx
+                               //   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+                               //   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+                               //   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+                               //
+                               // Additionally, the characters in the range 0xD800-0xDFFF
+                               // are prohibited as they are reserved for use with UTF-16
+                               // surrogate pairs.
+
+                               // Determine the length of the UTF-8 sequence.
+                               octet := parser.raw_buffer[parser.raw_buffer_pos]
+                               switch {
+                               case octet&0x80 == 0x00:
+                                       width = 1
+                               case octet&0xE0 == 0xC0:
+                                       width = 2
+                               case octet&0xF0 == 0xE0:
+                                       width = 3
+                               case octet&0xF8 == 0xF0:
+                                       width = 4
+                               default:
+                                       // The leading octet is invalid.
+                                       return yaml_parser_set_reader_error(parser,
+                                               "invalid leading UTF-8 octet",
+                                               parser.offset, int(octet))
+                               }
+
+                               // Check if the raw buffer contains an incomplete character.
+                               if width > raw_unread {
+                                       if parser.eof {
+                                               return yaml_parser_set_reader_error(parser,
+                                                       "incomplete UTF-8 octet sequence",
+                                                       parser.offset, -1)
+                                       }
+                                       break inner
+                               }
+
+                               // Decode the leading octet.
+                               switch {
+                               case octet&0x80 == 0x00:
+                                       value = rune(octet & 0x7F)
+                               case octet&0xE0 == 0xC0:
+                                       value = rune(octet & 0x1F)
+                               case octet&0xF0 == 0xE0:
+                                       value = rune(octet & 0x0F)
+                               case octet&0xF8 == 0xF0:
+                                       value = rune(octet & 0x07)
+                               default:
+                                       value = 0
+                               }
+
+                               // Check and decode the trailing octets.
+                               for k := 1; k < width; k++ {
+                                       octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+                                       // Check if the octet is valid.
+                                       if (octet & 0xC0) != 0x80 {
+                                               return yaml_parser_set_reader_error(parser,
+                                                       "invalid trailing UTF-8 octet",
+                                                       parser.offset+k, int(octet))
+                                       }
+
+                                       // Decode the octet.
+                                       value = (value << 6) + rune(octet&0x3F)
+                               }
+
+                               // Check the length of the sequence against the value.
+                               switch {
+                               case width == 1:
+                               case width == 2 && value >= 0x80:
+                               case width == 3 && value >= 0x800:
+                               case width == 4 && value >= 0x10000:
+                               default:
+                                       return yaml_parser_set_reader_error(parser,
+                                               "invalid length of a UTF-8 sequence",
+                                               parser.offset, -1)
+                               }
+
+                               // Check the range of the value.
+                               if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+                                       return yaml_parser_set_reader_error(parser,
+                                               "invalid Unicode character",
+                                               parser.offset, int(value))
+                               }
+
+                       case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+                               var low, high int
+                               if parser.encoding == yaml_UTF16LE_ENCODING {
+                                       low, high = 0, 1
+                               } else {
+                                       low, high = 1, 0
+                               }
+
+                               // The UTF-16 encoding is not as simple as one might
+                               // naively think.  Check RFC 2781
+                               // (http://www.ietf.org/rfc/rfc2781.txt).
+                               //
+                               // Normally, two subsequent bytes describe a Unicode
+                               // character.  However, a special technique (called a
+                               // surrogate pair) is used for specifying character
+                               // values larger than 0xFFFF.
+                               //
+                               // A surrogate pair consists of two pseudo-characters:
+                               //      high surrogate area (0xD800-0xDBFF)
+                               //      low surrogate area (0xDC00-0xDFFF)
+                               //
+                               // The following formulas are used for decoding
+                               // and encoding characters using surrogate pairs:
+                               //
+                               //  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+                               //  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+                               //  W1 = 110110yyyyyyyyyy
+                               //  W2 = 110111xxxxxxxxxx
+                               //
+                               // where U is the character value, W1 is the high surrogate
+                               // area, W2 is the low surrogate area.
+
+                               // Check for incomplete UTF-16 character.
+                               if raw_unread < 2 {
+                                       if parser.eof {
+                                               return yaml_parser_set_reader_error(parser,
+                                                       "incomplete UTF-16 character",
+                                                       parser.offset, -1)
+                                       }
+                                       break inner
+                               }
+
+                               // Get the character.
+                               value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+                                       (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+                               // Check for unexpected low surrogate area.
+                               if value&0xFC00 == 0xDC00 {
+                                       return yaml_parser_set_reader_error(parser,
+                                               "unexpected low surrogate area",
+                                               parser.offset, int(value))
+                               }
+
+                               // Check for a high surrogate area.
+                               if value&0xFC00 == 0xD800 {
+                                       width = 4
+
+                                       // Check for incomplete surrogate pair.
+                                       if raw_unread < 4 {
+                                               if parser.eof {
+                                                       return yaml_parser_set_reader_error(parser,
+                                                               "incomplete UTF-16 surrogate pair",
+                                                               parser.offset, -1)
+                                               }
+                                               break inner
+                                       }
+
+                                       // Get the next character.
+                                       value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+                                               (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+                                       // Check for a low surrogate area.
+                                       if value2&0xFC00 != 0xDC00 {
+                                               return yaml_parser_set_reader_error(parser,
+                                                       "expected low surrogate area",
+                                                       parser.offset+2, int(value2))
+                                       }
+
+                                       // Generate the value of the surrogate pair.
+                                       value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+                               } else {
+                                       width = 2
+                               }
+
+                       default:
+                               panic("impossible")
+                       }
+
+                       // Check if the character is in the allowed range:
+                       //      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+                       //      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+                       //      | [#x10000-#x10FFFF]                        (32 bit)
+                       switch {
+                       case value == 0x09:
+                       case value == 0x0A:
+                       case value == 0x0D:
+                       case value >= 0x20 && value <= 0x7E:
+                       case value == 0x85:
+                       case value >= 0xA0 && value <= 0xD7FF:
+                       case value >= 0xE000 && value <= 0xFFFD:
+                       case value >= 0x10000 && value <= 0x10FFFF:
+                       default:
+                               return yaml_parser_set_reader_error(parser,
+                                       "control characters are not allowed",
+                                       parser.offset, int(value))
+                       }
+
+                       // Move the raw pointers.
+                       parser.raw_buffer_pos += width
+                       parser.offset += width
+
+                       // Finally put the character into the buffer.
+                       if value <= 0x7F {
+                               // 0000 0000-0000 007F . 0xxxxxxx
+                               parser.buffer[buffer_len+0] = byte(value)
+                               buffer_len += 1
+                       } else if value <= 0x7FF {
+                               // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+                               parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+                               parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+                               buffer_len += 2
+                       } else if value <= 0xFFFF {
+                               // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+                               parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+                               parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+                               parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+                               buffer_len += 3
+                       } else {
+                               // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+                               parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+                               parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+                               parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+                               parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+                               buffer_len += 4
+                       }
+
+                       parser.unread++
+               }
+
+               // On EOF, put NUL into the buffer and return.
+               if parser.eof {
+                       parser.buffer[buffer_len] = 0
+                       buffer_len++
+                       parser.unread++
+                       break
+               }
+       }
+       // [Go] Read the documentation of this function above. To return true,
+       // we need to have the given length in the buffer. Not doing that means
+       // every single check that calls this function to make sure the buffer
+       // has a given length is (in Go) panicking, or (in C) accessing invalid memory.
+       // This happens here due to the EOF above breaking early.
+       for buffer_len < length {
+               parser.buffer[buffer_len] = 0
+               buffer_len++
+       }
+       parser.buffer = parser.buffer[:buffer_len]
+       return true
+}
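+
+// Callers use this function behind a guard of the following shape, for some
+// required length n (this pattern recurs in the scanner code later in this
+// commit):
+//
+//      if parser.unread < n && !yaml_parser_update_buffer(parser, n) {
+//              return false
+//      }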
diff --git a/vendor/github.com/zclconf/go-cty-yaml/resolve.go b/vendor/github.com/zclconf/go-cty-yaml/resolve.go
new file mode 100644 (file)
index 0000000..0f64383
--- /dev/null
@@ -0,0 +1,288 @@
+package yaml
+
+import (
+       "encoding/base64"
+       "fmt"
+       "reflect"
+       "regexp"
+       "strconv"
+       "strings"
+       "time"
+
+       "github.com/zclconf/go-cty/cty"
+)
+
+type resolveMapItem struct {
+       value cty.Value
+       tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+       t := resolveTable
+       t[int('+')] = 'S' // Sign
+       t[int('-')] = 'S'
+       for _, c := range "0123456789" {
+               t[int(c)] = 'D' // Digit
+       }
+       for _, c := range "yYnNtTfFoO~" {
+               t[int(c)] = 'M' // In map
+       }
+       t[int('.')] = '.' // Float (potentially in map)
+
+       var resolveMapList = []struct {
+               v   cty.Value
+               tag string
+               l   []string
+       }{
+               {cty.True, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+               {cty.True, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+               {cty.True, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+               {cty.False, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+               {cty.False, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+               {cty.False, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+               {cty.NullVal(cty.DynamicPseudoType), yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+               {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+               {cty.PositiveInfinity, yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+               {cty.NegativeInfinity, yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+       }
+
+       m := resolveMap
+       for _, item := range resolveMapList {
+               for _, s := range item.l {
+                       m[s] = resolveMapItem{item.v, item.tag}
+               }
+       }
+}
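+
+// Illustrative lookups against the tables built above (the results shown in
+// comments are what the code computes):
+//
+//      resolveTable['y']  // 'M': candidate for resolveMap ("y", "yes", ...)
+//      resolveTable['3']  // 'D': a digit; try number or timestamp parsing
+//      resolveTable['.']  // '.': possibly a float such as ".inf"
+//      resolveMap["yes"]  // {cty.True, yaml_BOOL_TAG}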
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+       // TODO This can easily be made faster and produce less garbage.
+       if strings.HasPrefix(tag, longTagPrefix) {
+               return "!!" + tag[len(longTagPrefix):]
+       }
+       return tag
+}
+
+func longTag(tag string) string {
+       if strings.HasPrefix(tag, "!!") {
+               return longTagPrefix + tag[2:]
+       }
+       return tag
+}
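+
+// For example:
+//
+//      shortTag("tag:yaml.org,2002:str") // "!!str"
+//      longTag("!!bool")                 // "tag:yaml.org,2002:bool"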
+
+func resolvableTag(tag string) bool {
+       switch tag {
+       case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG, yaml_BINARY_TAG:
+               return true
+       }
+       return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func (c *Converter) resolveScalar(tag string, src string, style yaml_scalar_style_t) (cty.Value, error) {
+       if !resolvableTag(tag) {
+               return cty.NilVal, fmt.Errorf("unsupported tag %q", tag)
+       }
+
+       // Any data is accepted as a !!str or !!binary.
+       // Otherwise, the prefix is enough of a hint about what it might be.
+       hint := byte('N')
+       if src != "" {
+               hint = resolveTable[src[0]]
+       }
+       if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+               if style == yaml_SINGLE_QUOTED_SCALAR_STYLE || style == yaml_DOUBLE_QUOTED_SCALAR_STYLE {
+                       return cty.StringVal(src), nil
+               }
+
+               // Handle things we can lookup in a map.
+               if item, ok := resolveMap[src]; ok {
+                       return item.value, nil
+               }
+
+               if tag == "" {
+                       for _, nan := range []string{".nan", ".NaN", ".NAN"} {
+                               if src == nan {
+                                       // cty cannot represent NaN, so this is an error
+                                       return cty.NilVal, fmt.Errorf("floating point NaN is not supported")
+                               }
+                       }
+               }
+
+               // Base 60 floats are intentionally not supported.
+
+               switch hint {
+               case 'M':
+                       // We've already checked the map above.
+
+               case '.':
+                       // Not in the map, so maybe a normal float.
+                       if numberVal, err := cty.ParseNumberVal(src); err == nil {
+                               return numberVal, nil
+                       }
+
+               case 'D', 'S':
+                       // Int, float, or timestamp.
+                       // Only try values as a timestamp if the value is unquoted or there's an explicit
+                       // !!timestamp tag.
+                       if tag == "" || tag == yaml_TIMESTAMP_TAG {
+                               t, ok := parseTimestamp(src)
+                               if ok {
+                                       // cty has no timestamp type, but its functions stdlib
+                                       // conventionally uses strings in an RFC3339 encoding
+                                       // to represent time, so we'll follow that convention here.
+                                       return cty.StringVal(t.Format(time.RFC3339)), nil
+                               }
+                       }
+
+                       plain := strings.Replace(src, "_", "", -1)
+                       if numberVal, err := cty.ParseNumberVal(plain); err == nil {
+                               return numberVal, nil
+                       }
+                       if strings.HasPrefix(plain, "0b") || strings.HasPrefix(plain, "-0b") {
+                               tag = yaml_INT_TAG // will handle parsing below in our tag switch
+                       }
+               default:
+                       panic(fmt.Sprintf("cannot resolve tag %q with source %q", tag, src))
+               }
+       }
+
+       if tag == "" && src == "<<" {
+               return mergeMappingVal, nil
+       }
+
+       switch tag {
+       case yaml_STR_TAG, yaml_BINARY_TAG:
+               // If it's binary then we want to keep the base64 representation, because
+               // cty has no binary type, but we will check that it's actually base64.
+               if tag == yaml_BINARY_TAG {
+                       _, err := base64.StdEncoding.DecodeString(src)
+                       if err != nil {
+                               return cty.NilVal, fmt.Errorf("cannot parse %q as %s: not valid base64", src, tag)
+                       }
+               }
+               return cty.StringVal(src), nil
+       case yaml_BOOL_TAG:
+               item, ok := resolveMap[src]
+               if !ok || item.tag != yaml_BOOL_TAG {
+                       return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
+               }
+               return item.value, nil
+       case yaml_FLOAT_TAG, yaml_INT_TAG:
+               // Note: We don't actually check that a value tagged INT is a whole
+               // number here. We could, but cty generally doesn't care about the
+               // int/float distinction, so we'll just be generous and accept it.
+               plain := strings.Replace(src, "_", "", -1)
+               if numberVal, err := cty.ParseNumberVal(plain); err == nil { // handles decimal integers and floats
+                       return numberVal, nil
+               }
+               if intv, err := strconv.ParseInt(plain, 0, 64); err == nil { // handles 0x and 00 prefixes
+                       return cty.NumberIntVal(intv), nil
+               }
+               if uintv, err := strconv.ParseUint(plain, 0, 64); err == nil { // handles 0x and 00 prefixes
+                       return cty.NumberUIntVal(uintv), nil
+               }
+               if strings.HasPrefix(plain, "0b") {
+                       intv, err := strconv.ParseInt(plain[2:], 2, 64)
+                       if err == nil {
+                               return cty.NumberIntVal(intv), nil
+                       }
+                       uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+                       if err == nil {
+                               return cty.NumberUIntVal(uintv), nil
+                       }
+               } else if strings.HasPrefix(plain, "-0b") {
+                       intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+                       if err == nil {
+                               return cty.NumberIntVal(intv), nil
+                       }
+               }
+               return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
+       case yaml_TIMESTAMP_TAG:
+               t, ok := parseTimestamp(src)
+               if ok {
+                       // cty has no timestamp type, but its functions stdlib
+                       // conventionally uses strings in an RFC3339 encoding
+                       // to represent time, so we'll follow that convention here.
+                       return cty.StringVal(t.Format(time.RFC3339)), nil
+               }
+               return cty.NilVal, fmt.Errorf("cannot parse %q as %s", src, tag)
+       case yaml_NULL_TAG:
+               return cty.NullVal(cty.DynamicPseudoType), nil
+       case "":
+               return cty.StringVal(src), nil
+       default:
+               return cty.NilVal, fmt.Errorf("unsupported tag %q", tag)
+       }
+}
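+
+// Illustrative resolutions (hypothetical calls, with c some *Converter):
+//
+//      c.resolveScalar("", "true", yaml_PLAIN_SCALAR_STYLE)         // cty.True
+//      c.resolveScalar("", "true", yaml_SINGLE_QUOTED_SCALAR_STYLE) // cty.StringVal("true")
+//      c.resolveScalar("", "1_000", yaml_PLAIN_SCALAR_STYLE)        // a cty number equal to 1000
+//      c.resolveScalar(yaml_BINARY_TAG, "aGk=", yaml_PLAIN_SCALAR_STYLE) // cty.StringVal("aGk=")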
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+       const lineLen = 70
+       encLen := base64.StdEncoding.EncodedLen(len(s))
+       lines := encLen/lineLen + 1
+       buf := make([]byte, encLen*2+lines)
+       in := buf[0:encLen]
+       out := buf[encLen:]
+       base64.StdEncoding.Encode(in, []byte(s))
+       k := 0
+       for i := 0; i < len(in); i += lineLen {
+               j := i + lineLen
+               if j > len(in) {
+                       j = len(in)
+               }
+               k += copy(out[k:], in[i:j])
+               if lines > 1 {
+                       out[k] = '\n'
+                       k++
+               }
+       }
+       return string(out[:k])
+}
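+
+// For example, a 100-byte input encodes to 136 base64 characters, which the
+// function above returns as a 70-character line and a 66-character line,
+// each terminated by '\n'; inputs that fit on a single line get no newline.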
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+       "2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+       "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+       "2006-1-2 15:4:5.999999999",       // space separated with no time zone
+       "2006-1-2",                        // date only
+       // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+       // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and whether parsing succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+       // TODO write code to check all the formats supported by
+       // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+       // Quick check: all date formats start with YYYY-.
+       i := 0
+       for ; i < len(s); i++ {
+               if c := s[i]; c < '0' || c > '9' {
+                       break
+               }
+       }
+       if i != 4 || i == len(s) || s[i] != '-' {
+               return time.Time{}, false
+       }
+       for _, format := range allowedTimestampFormats {
+               if t, err := time.Parse(format, s); err == nil {
+                       return t, true
+               }
+       }
+       return time.Time{}, false
+}
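+
+// For example:
+//
+//      parseTimestamp("2001-12-14")                   // ok (date-only form)
+//      parseTimestamp("2001-12-14t21:59:43.10-05:00") // ok (lower-case 't' form)
+//      parseTimestamp("14-12-2001")                   // not ok: must start with "YYYY-"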
+
+type mergeMapping struct{}
+
+var mergeMappingTy = cty.Capsule("merge mapping", reflect.TypeOf(mergeMapping{}))
+var mergeMappingVal = cty.CapsuleVal(mergeMappingTy, &mergeMapping{})
diff --git a/vendor/github.com/zclconf/go-cty-yaml/scannerc.go b/vendor/github.com/zclconf/go-cty-yaml/scannerc.go
new file mode 100644 (file)
index 0000000..077fd1d
--- /dev/null
@@ -0,0 +1,2696 @@
+package yaml
+
+import (
+       "bytes"
+       "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html).  We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually, only two parts of Scanning might be called "clever"; the
+// rest is quite straightforward.  These parts are "block collection start" and
+// "simple keys".  Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented.  We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+//      1. An implicit document:
+//
+//          'a scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          STREAM-END
+//
+//      2. An explicit document:
+//
+//          ---
+//          'a scalar'
+//          ...
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-END
+//          STREAM-END
+//
+//      3. Several documents in a stream:
+//
+//          'a scalar'
+//          ---
+//          'another scalar'
+//          ---
+//          'yet another scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("another scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("yet another scalar",single-quoted)
+//          STREAM-END
+//
+// We have already introduced the SCALAR token above.  The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+//      1. A recursive sequence:
+//
+//          &A [ *A ]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          ANCHOR("A")
+//          FLOW-SEQUENCE-START
+//          ALIAS("A")
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A tagged scalar:
+//
+//          !!float "3.14"  # A good approximation.
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          TAG("!!","float")
+//          SCALAR("3.14",double-quoted)
+//          STREAM-END
+//
+//      3. Various scalar styles:
+//
+//          --- # Implicit empty plain scalars do not produce tokens.
+//          --- a plain scalar
+//          --- 'a single-quoted scalar'
+//          --- "a double-quoted scalar"
+//          --- |-
+//            a literal scalar
+//          --- >-
+//            a folded
+//            scalar
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          DOCUMENT-START
+//          SCALAR("a plain scalar",plain)
+//          DOCUMENT-START
+//          SCALAR("a single-quoted scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("a double-quoted scalar",double-quoted)
+//          DOCUMENT-START
+//          SCALAR("a literal scalar",literal)
+//          DOCUMENT-START
+//          SCALAR("a folded scalar",folded)
+//          STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively.  FLOW-ENTRY represents the ',' indicator.  Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+//      1. A flow sequence:
+//
+//          [item 1, item 2, item 3]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-SEQUENCE-START
+//          SCALAR("item 1",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 2",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 3",plain)
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A flow mapping:
+//
+//          {
+//              a simple key: a value,  # Note that the KEY token is produced.
+//              ? a complex key: another value,
+//          }
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          FLOW-ENTRY
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          FLOW-ENTRY
+//          FLOW-MAPPING-END
+//          STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator.  Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python).  However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line.  If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line.  The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key")
+//          VALUE
+//          SCALAR("complex value")
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? a sequence
+//          : - item 1
+//            - item 2
+//          ? a mapping
+//          : key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+//      - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key",plain)
+//      VALUE
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+       // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+       return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
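+
+// Equivalently, written out (a sketch of what the inlined expression does):
+//
+//      if parser.unread >= length {
+//              return true // enough decoded characters are already buffered
+//      }
+//      return yaml_parser_update_buffer(parser, length)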
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+       parser.mark.index++
+       parser.mark.column++
+       parser.unread--
+       parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+       if is_crlf(parser.buffer, parser.buffer_pos) {
+               parser.mark.index += 2
+               parser.mark.column = 0
+               parser.mark.line++
+               parser.unread -= 2
+               parser.buffer_pos += 2
+       } else if is_break(parser.buffer, parser.buffer_pos) {
+               parser.mark.index++
+               parser.mark.column = 0
+               parser.mark.line++
+               parser.unread--
+               parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+       }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+       w := width(parser.buffer[parser.buffer_pos])
+       if w == 0 {
+               panic("invalid character sequence")
+       }
+       if len(s) == 0 {
+               s = make([]byte, 0, 32)
+       }
+       if w == 1 && len(s)+w <= cap(s) {
+               s = s[:len(s)+1]
+               s[len(s)-1] = parser.buffer[parser.buffer_pos]
+               parser.buffer_pos++
+       } else {
+               s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+               parser.buffer_pos += w
+       }
+       parser.mark.index++
+       parser.mark.column++
+       parser.unread--
+       return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+       buf := parser.buffer
+       pos := parser.buffer_pos
+       switch {
+       case buf[pos] == '\r' && buf[pos+1] == '\n':
+               // CR LF . LF
+               s = append(s, '\n')
+               parser.buffer_pos += 2
+               parser.mark.index++
+               parser.unread--
+       case buf[pos] == '\r' || buf[pos] == '\n':
+               // CR|LF . LF
+               s = append(s, '\n')
+               parser.buffer_pos += 1
+       case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+               // NEL . LF
+               s = append(s, '\n')
+               parser.buffer_pos += 2
+       case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+               // LS|PS . LS|PS
+               s = append(s, buf[parser.buffer_pos:pos+3]...)
+               parser.buffer_pos += 3
+       default:
+               return s
+       }
+       parser.mark.index++
+       parser.mark.column = 0
+       parser.mark.line++
+       parser.unread--
+       return s
+}
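+
+// The cases above normalize line breaks as follows:
+//
+//      "\r\n" (CRLF)             -> '\n'
+//      "\r" or "\n" (CR, LF)     -> '\n'
+//      "\xC2\x85" (NEL, U+0085)  -> '\n'
+//      U+2028, U+2029 (LS, PS)   -> copied through unchanged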
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+       // Erase the token object.
+       *token = yaml_token_t{} // [Go] Is this necessary?
+
+       // No tokens after STREAM-END or error.
+       if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+               return true
+       }
+
+       // Ensure that the tokens queue contains enough tokens.
+       if !parser.token_available {
+               if !yaml_parser_fetch_more_tokens(parser) {
+                       return false
+               }
+       }
+
+       // Fetch the next token from the queue.
+       *token = parser.tokens[parser.tokens_head]
+       parser.tokens_head++
+       parser.tokens_parsed++
+       parser.token_available = false
+
+       if token.typ == yaml_STREAM_END_TOKEN {
+               parser.stream_end_produced = true
+       }
+       return true
+}
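+
+// A typical driving loop over a stream might look like this (an illustrative
+// sketch; the surrounding machinery is assumed, not shown):
+//
+//      var token yaml_token_t
+//      for {
+//              if !yaml_parser_scan(parser, &token) {
+//                      break // scanner error, recorded in parser.error/parser.problem
+//              }
+//              if token.typ == yaml_STREAM_END_TOKEN {
+//                      break
+//              }
+//              // ... consume the token ...
+//      }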
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+       parser.error = yaml_SCANNER_ERROR
+       parser.context = context
+       parser.context_mark = context_mark
+       parser.problem = problem
+       parser.problem_mark = parser.mark
+       return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+       context := "while parsing a tag"
+       if directive {
+               context = "while parsing a %TAG directive"
+       }
+       return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+       pargs := append([]interface{}{"+++"}, args...)
+       fmt.Println(pargs...)
+       pargs = append([]interface{}{"---"}, args...)
+       return func() { fmt.Println(pargs...) }
+}
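+
+// trace is a debugging helper intended to be used with defer, so that both
+// entry ("+++") and exit ("---") are printed, e.g. (illustrative):
+//
+//      defer trace("fetch_next_token", parser.mark)()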
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+       // While we need more tokens to fetch, do it.
+       for {
+               // Check if we really need to fetch more tokens.
+               need_more_tokens := false
+
+               if parser.tokens_head == len(parser.tokens) {
+                       // Queue is empty.
+                       need_more_tokens = true
+               } else {
+                       // Check if any potential simple key may occupy the head position.
+                       if !yaml_parser_stale_simple_keys(parser) {
+                               return false
+                       }
+
+                       for i := range parser.simple_keys {
+                               simple_key := &parser.simple_keys[i]
+                               if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+                                       need_more_tokens = true
+                                       break
+                               }
+                       }
+               }
+
+               // We are finished.
+               if !need_more_tokens {
+                       break
+               }
+               // Fetch the next token.
+               if !yaml_parser_fetch_next_token(parser) {
+                       return false
+               }
+       }
+
+       parser.token_available = true
+       return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+       // Ensure that the buffer is initialized.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+
+       // Check if we just started scanning.  Fetch STREAM-START then.
+       if !parser.stream_start_produced {
+               return yaml_parser_fetch_stream_start(parser)
+       }
+
+       // Eat whitespaces and comments until we reach the next token.
+       if !yaml_parser_scan_to_next_token(parser) {
+               return false
+       }
+
+       // Remove obsolete potential simple keys.
+       if !yaml_parser_stale_simple_keys(parser) {
+               return false
+       }
+
+       // Check the indentation level against the current column.
+       if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+               return false
+       }
+
+       // Ensure that the buffer contains at least 4 characters.  4 is the length
+       // of the longest indicators ('--- ' and '... ').
+       if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+               return false
+       }
+
+       // Is it the end of the stream?
+       if is_z(parser.buffer, parser.buffer_pos) {
+               return yaml_parser_fetch_stream_end(parser)
+       }
+
+       // Is it a directive?
+       if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+               return yaml_parser_fetch_directive(parser)
+       }
+
+       buf := parser.buffer
+       pos := parser.buffer_pos
+
+       // Is it the document start indicator?
+       if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+               return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+       }
+
+       // Is it the document end indicator?
+       if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+               return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+       }
+
+       // Is it the flow sequence start indicator?
+       if buf[pos] == '[' {
+               return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+       }
+
+       // Is it the flow mapping start indicator?
+       if parser.buffer[parser.buffer_pos] == '{' {
+               return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+       }
+
+       // Is it the flow sequence end indicator?
+       if parser.buffer[parser.buffer_pos] == ']' {
+               return yaml_parser_fetch_flow_collection_end(parser,
+                       yaml_FLOW_SEQUENCE_END_TOKEN)
+       }
+
+       // Is it the flow mapping end indicator?
+       if parser.buffer[parser.buffer_pos] == '}' {
+               return yaml_parser_fetch_flow_collection_end(parser,
+                       yaml_FLOW_MAPPING_END_TOKEN)
+       }
+
+       // Is it the flow entry indicator?
+       if parser.buffer[parser.buffer_pos] == ',' {
+               return yaml_parser_fetch_flow_entry(parser)
+       }
+
+       // Is it the block entry indicator?
+       if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+               return yaml_parser_fetch_block_entry(parser)
+       }
+
+       // Is it the key indicator?
+       if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+               return yaml_parser_fetch_key(parser)
+       }
+
+       // Is it the value indicator?
+       if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+               return yaml_parser_fetch_value(parser)
+       }
+
+       // Is it an alias?
+       if parser.buffer[parser.buffer_pos] == '*' {
+               return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+       }
+
+       // Is it an anchor?
+       if parser.buffer[parser.buffer_pos] == '&' {
+               return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+       }
+
+       // Is it a tag?
+       if parser.buffer[parser.buffer_pos] == '!' {
+               return yaml_parser_fetch_tag(parser)
+       }
+
+       // Is it a literal scalar?
+       if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+               return yaml_parser_fetch_block_scalar(parser, true)
+       }
+
+       // Is it a folded scalar?
+       if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+               return yaml_parser_fetch_block_scalar(parser, false)
+       }
+
+       // Is it a single-quoted scalar?
+       if parser.buffer[parser.buffer_pos] == '\'' {
+               return yaml_parser_fetch_flow_scalar(parser, true)
+       }
+
+       // Is it a double-quoted scalar?
+       if parser.buffer[parser.buffer_pos] == '"' {
+               return yaml_parser_fetch_flow_scalar(parser, false)
+       }
+
+       // Is it a plain scalar?
+       //
+       // A plain scalar may start with any non-blank characters except
+       //
+       //      '-', '?', ':', ',', '[', ']', '{', '}',
+       //      '#', '&', '*', '!', '|', '>', '\'', '\"',
+       //      '%', '@', '`'.
+       //
+       // In the block context (and, for the '-' indicator, in the flow context
+       // too), it may also start with the characters
+       //
+       //      '-', '?', ':'
+       //
+       // if it is followed by a non-space character.
+       //
+       // The last rule is more restrictive than the specification requires.
+       // [Go] Make this logic more reasonable.
+       //switch parser.buffer[parser.buffer_pos] {
+       //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+       //}
+       if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+               parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+               parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+               parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+               parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+               parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+               parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+               parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+               parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+               parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+               (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+               (parser.flow_level == 0 &&
+                       (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+                       !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+               return yaml_parser_fetch_plain_scalar(parser)
+       }
+
+       // If we haven't determined the token type so far, it is an error.
+       return yaml_parser_set_scanner_error(parser,
+               "while scanning for the next token", parser.mark,
+               "found character that cannot start any token")
+}
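+
+// Illustrative note (editor's example, not part of the upstream source):
+// under the rules above, "-1" scans as a plain scalar because the '-' is
+// followed by a non-blank character, while "- 1" starts a block entry:
+//
+//      - 1     # BLOCK-ENTRY token, then plain scalar "1"
+//      -1      # plain scalar "-1"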
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+       // Check for a potential simple key for each flow level.
+       for i := range parser.simple_keys {
+               simple_key := &parser.simple_keys[i]
+
+               // The specification requires that a simple key
+               //
+               //  - is limited to a single line,
+               //  - is shorter than 1024 characters.
+               if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+                       // Check if the potential simple key to be removed is required.
+                       if simple_key.required {
+                               return yaml_parser_set_scanner_error(parser,
+                                       "while scanning a simple key", simple_key.mark,
+                                       "could not find expected ':'")
+                       }
+                       simple_key.possible = false
+               }
+       }
+       return true
+}
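+
+// Illustrative note (editor's example): a potential simple key goes stale
+// once the scanner passes the end of its line or moves more than 1024
+// characters beyond its mark, so a plain key and its ':' must share a line.
+// Only when a required candidate (block context, at the indentation column)
+// goes stale does the scanner raise "could not find expected ':'";
+// optional candidates are silently dropped.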
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+       // A simple key is required at the current position if the scanner is in
+       // the block context and the current column coincides with the indentation
+       // level.
+
+       required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+       // If the current position may start a simple key, save it.
+       if parser.simple_key_allowed {
+               simple_key := yaml_simple_key_t{
+                       possible:     true,
+                       required:     required,
+                       token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+               }
+               simple_key.mark = parser.mark
+
+               if !yaml_parser_remove_simple_key(parser) {
+                       return false
+               }
+               parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+       }
+       return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+       i := len(parser.simple_keys) - 1
+       if parser.simple_keys[i].possible {
+               // If the key is required, it is an error.
+               if parser.simple_keys[i].required {
+                       return yaml_parser_set_scanner_error(parser,
+                               "while scanning a simple key", parser.simple_keys[i].mark,
+                               "could not find expected ':'")
+               }
+       }
+       // Remove the key from the stack.
+       parser.simple_keys[i].possible = false
+       return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+       // Reset the simple key on the next level.
+       parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+       // Increase the flow level.
+       parser.flow_level++
+       return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+       if parser.flow_level > 0 {
+               parser.flow_level--
+               parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+       }
+       return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level.  In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+       // In the flow context, do nothing.
+       if parser.flow_level > 0 {
+               return true
+       }
+
+       if parser.indent < column {
+               // Push the current indentation level to the stack and set the new
+               // indentation level.
+               parser.indents = append(parser.indents, parser.indent)
+               parser.indent = column
+
+               // Create a token and insert it into the queue.
+               token := yaml_token_t{
+                       typ:        typ,
+                       start_mark: mark,
+                       end_mark:   mark,
+               }
+               if number > -1 {
+                       number -= parser.tokens_parsed
+               }
+               yaml_insert_token(parser, number, &token)
+       }
+       return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column.  For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+       // In the flow context, do nothing.
+       if parser.flow_level > 0 {
+               return true
+       }
+
+       // Loop through the indentation levels in the stack.
+       for parser.indent > column {
+               // Create a token and append it to the queue.
+               token := yaml_token_t{
+                       typ:        yaml_BLOCK_END_TOKEN,
+                       start_mark: parser.mark,
+                       end_mark:   parser.mark,
+               }
+               yaml_insert_token(parser, -1, &token)
+
+               // Pop the indentation level.
+               parser.indent = parser.indents[len(parser.indents)-1]
+               parser.indents = parser.indents[:len(parser.indents)-1]
+       }
+       return true
+}
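+
+// Illustrative note (editor's example): for the document
+//
+//      a:
+//        b: 1
+//      c: 2
+//
+// roll_indent pushes -1 and then 0 onto the indents stack as the mappings at
+// columns 0 and 2 open; when "c" is reached back at column 0, unroll_indent
+// pops the level 2 and emits one BLOCK-END token before the next KEY token.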
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+       // Set the initial indentation.
+       parser.indent = -1
+
+       // Initialize the simple key stack.
+       parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+       // A simple key is allowed at the beginning of the stream.
+       parser.simple_key_allowed = true
+
+       // We have started.
+       parser.stream_start_produced = true
+
+       // Create the STREAM-START token and append it to the queue.
+       token := yaml_token_t{
+               typ:        yaml_STREAM_START_TOKEN,
+               start_mark: parser.mark,
+               end_mark:   parser.mark,
+               encoding:   parser.encoding,
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+       // Force new line.
+       if parser.mark.column != 0 {
+               parser.mark.column = 0
+               parser.mark.line++
+       }
+
+       // Reset the indentation level.
+       if !yaml_parser_unroll_indent(parser, -1) {
+               return false
+       }
+
+       // Reset simple keys.
+       if !yaml_parser_remove_simple_key(parser) {
+               return false
+       }
+
+       parser.simple_key_allowed = false
+
+       // Create the STREAM-END token and append it to the queue.
+       token := yaml_token_t{
+               typ:        yaml_STREAM_END_TOKEN,
+               start_mark: parser.mark,
+               end_mark:   parser.mark,
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+       // Reset the indentation level.
+       if !yaml_parser_unroll_indent(parser, -1) {
+               return false
+       }
+
+       // Reset simple keys.
+       if !yaml_parser_remove_simple_key(parser) {
+               return false
+       }
+
+       parser.simple_key_allowed = false
+
+       // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+       token := yaml_token_t{}
+       if !yaml_parser_scan_directive(parser, &token) {
+               return false
+       }
+       // Append the token to the queue.
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+       // Reset the indentation level.
+       if !yaml_parser_unroll_indent(parser, -1) {
+               return false
+       }
+
+       // Reset simple keys.
+       if !yaml_parser_remove_simple_key(parser) {
+               return false
+       }
+
+       parser.simple_key_allowed = false
+
+       // Consume the token.
+       start_mark := parser.mark
+
+       skip(parser)
+       skip(parser)
+       skip(parser)
+
+       end_mark := parser.mark
+
+       // Create the DOCUMENT-START or DOCUMENT-END token.
+       token := yaml_token_t{
+               typ:        typ,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+       }
+       // Append the token to the queue.
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+       // The indicators '[' and '{' may start a simple key.
+       if !yaml_parser_save_simple_key(parser) {
+               return false
+       }
+
+       // Increase the flow level.
+       if !yaml_parser_increase_flow_level(parser) {
+               return false
+       }
+
+       // A simple key may follow the indicators '[' and '{'.
+       parser.simple_key_allowed = true
+
+       // Consume the token.
+       start_mark := parser.mark
+       skip(parser)
+       end_mark := parser.mark
+
+       // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+       token := yaml_token_t{
+               typ:        typ,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+       }
+       // Append the token to the queue.
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+       // Reset any potential simple key on the current flow level.
+       if !yaml_parser_remove_simple_key(parser) {
+               return false
+       }
+
+       // Decrease the flow level.
+       if !yaml_parser_decrease_flow_level(parser) {
+               return false
+       }
+
+       // No simple keys after the indicators ']' and '}'.
+       parser.simple_key_allowed = false
+
+       // Consume the token.
+
+       start_mark := parser.mark
+       skip(parser)
+       end_mark := parser.mark
+
+       // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+       token := yaml_token_t{
+               typ:        typ,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+       }
+       // Append the token to the queue.
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+       // Reset any potential simple keys on the current flow level.
+       if !yaml_parser_remove_simple_key(parser) {
+               return false
+       }
+
+       // Simple keys are allowed after ','.
+       parser.simple_key_allowed = true
+
+       // Consume the token.
+       start_mark := parser.mark
+       skip(parser)
+       end_mark := parser.mark
+
+       // Create the FLOW-ENTRY token and append it to the queue.
+       token := yaml_token_t{
+               typ:        yaml_FLOW_ENTRY_TOKEN,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+       // Check if the scanner is in the block context.
+       if parser.flow_level == 0 {
+               // Check if we are allowed to start a new entry.
+               if !parser.simple_key_allowed {
+                       return yaml_parser_set_scanner_error(parser, "", parser.mark,
+                               "block sequence entries are not allowed in this context")
+               }
+               // Add the BLOCK-SEQUENCE-START token if needed.
+               if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+                       return false
+               }
+       } else {
+               // It is an error for the '-' indicator to occur in the flow context,
+               // but we let the Parser detect and report it, because the Parser
+               // is able to point to the context.
+       }
+
+       // Reset any potential simple keys on the current flow level.
+       if !yaml_parser_remove_simple_key(parser) {
+               return false
+       }
+
+       // Simple keys are allowed after '-'.
+       parser.simple_key_allowed = true
+
+       // Consume the token.
+       start_mark := parser.mark
+       skip(parser)
+       end_mark := parser.mark
+
+       // Create the BLOCK-ENTRY token and append it to the queue.
+       token := yaml_token_t{
+               typ:        yaml_BLOCK_ENTRY_TOKEN,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+       // In the block context, additional checks are required.
+       if parser.flow_level == 0 {
+               // Check if we are allowed to start a new key (not necessarily simple).
+               if !parser.simple_key_allowed {
+                       return yaml_parser_set_scanner_error(parser, "", parser.mark,
+                               "mapping keys are not allowed in this context")
+               }
+               // Add the BLOCK-MAPPING-START token if needed.
+               if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+                       return false
+               }
+       }
+
+       // Reset any potential simple keys on the current flow level.
+       if !yaml_parser_remove_simple_key(parser) {
+               return false
+       }
+
+       // Simple keys are allowed after '?' in the block context.
+       parser.simple_key_allowed = parser.flow_level == 0
+
+       // Consume the token.
+       start_mark := parser.mark
+       skip(parser)
+       end_mark := parser.mark
+
+       // Create the KEY token and append it to the queue.
+       token := yaml_token_t{
+               typ:        yaml_KEY_TOKEN,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+       simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+       // Have we found a simple key?
+       if simple_key.possible {
+               // Create the KEY token and insert it into the queue.
+               token := yaml_token_t{
+                       typ:        yaml_KEY_TOKEN,
+                       start_mark: simple_key.mark,
+                       end_mark:   simple_key.mark,
+               }
+               yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+               // In the block context, we may need to add the BLOCK-MAPPING-START token.
+               if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+                       simple_key.token_number,
+                       yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+                       return false
+               }
+
+               // Remove the simple key.
+               simple_key.possible = false
+
+               // A simple key cannot follow another simple key.
+               parser.simple_key_allowed = false
+
+       } else {
+               // The ':' indicator follows a complex key.
+
+               // In the block context, extra checks are required.
+               if parser.flow_level == 0 {
+
+                       // Check if we are allowed to start a complex value.
+                       if !parser.simple_key_allowed {
+                               return yaml_parser_set_scanner_error(parser, "", parser.mark,
+                                       "mapping values are not allowed in this context")
+                       }
+
+                       // Add the BLOCK-MAPPING-START token if needed.
+                       if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+                               return false
+                       }
+               }
+
+               // Simple keys after ':' are allowed in the block context.
+               parser.simple_key_allowed = parser.flow_level == 0
+       }
+
+       // Consume the token.
+       start_mark := parser.mark
+       skip(parser)
+       end_mark := parser.mark
+
+       // Create the VALUE token and append it to the queue.
+       token := yaml_token_t{
+               typ:        yaml_VALUE_TOKEN,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
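+
+// Illustrative note (editor's example): for "foo: bar" no KEY token is
+// emitted while "foo" is scanned; it is only saved as a potential simple
+// key. When the ':' arrives, the branch above inserts the KEY token (and,
+// via roll_indent, a BLOCK-MAPPING-START token) back at the saved
+// token_number, so the parser still observes the order
+// BLOCK-MAPPING-START, KEY, SCALAR("foo"), VALUE, SCALAR("bar").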
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+       // An anchor or an alias could be a simple key.
+       if !yaml_parser_save_simple_key(parser) {
+               return false
+       }
+
+       // A simple key cannot follow an anchor or an alias.
+       parser.simple_key_allowed = false
+
+       // Create the ALIAS or ANCHOR token and append it to the queue.
+       var token yaml_token_t
+       if !yaml_parser_scan_anchor(parser, &token, typ) {
+               return false
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+       // A tag could be a simple key.
+       if !yaml_parser_save_simple_key(parser) {
+               return false
+       }
+
+       // A simple key cannot follow a tag.
+       parser.simple_key_allowed = false
+
+       // Create the TAG token and append it to the queue.
+       var token yaml_token_t
+       if !yaml_parser_scan_tag(parser, &token) {
+               return false
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+       // Remove any potential simple keys.
+       if !yaml_parser_remove_simple_key(parser) {
+               return false
+       }
+
+       // A simple key may follow a block scalar.
+       parser.simple_key_allowed = true
+
+       // Create the SCALAR token and append it to the queue.
+       var token yaml_token_t
+       if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+               return false
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+       // A flow scalar could be a simple key.
+       if !yaml_parser_save_simple_key(parser) {
+               return false
+       }
+
+       // A simple key cannot follow a flow scalar.
+       parser.simple_key_allowed = false
+
+       // Create the SCALAR token and append it to the queue.
+       var token yaml_token_t
+       if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+               return false
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+       // A plain scalar could be a simple key.
+       if !yaml_parser_save_simple_key(parser) {
+               return false
+       }
+
+       // A simple key cannot follow a plain scalar.
+       parser.simple_key_allowed = false
+
+       // Create the SCALAR token and append it to the queue.
+       var token yaml_token_t
+       if !yaml_parser_scan_plain_scalar(parser, &token) {
+               return false
+       }
+       yaml_insert_token(parser, -1, &token)
+       return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+       // Loop until the next token is found.
+       for {
+               // Allow the BOM mark to start a line.
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+               if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+                       skip(parser)
+               }
+
+               // Eat whitespaces.
+               // Tabs are allowed:
+               //  - in the flow context
+               //  - in the block context, but not at the beginning of the line or
+               //  after '-', '?', or ':' (complex value).
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+
+               for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+                       skip(parser)
+                       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                               return false
+                       }
+               }
+
+               // Eat a comment until a line break.
+               if parser.buffer[parser.buffer_pos] == '#' {
+                       for !is_breakz(parser.buffer, parser.buffer_pos) {
+                               skip(parser)
+                               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                                       return false
+                               }
+                       }
+               }
+
+               // If it is a line break, eat it.
+               if is_break(parser.buffer, parser.buffer_pos) {
+                       if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                               return false
+                       }
+                       skip_line(parser)
+
+                       // In the block context, a new line may start a simple key.
+                       if parser.flow_level == 0 {
+                               parser.simple_key_allowed = true
+                       }
+               } else {
+                       break // We have found a token.
+               }
+       }
+
+       return true
+}
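+
+// Illustrative note (editor's example): per the loop above, tabs count as
+// skippable whitespace only in the flow context or where a simple key is
+// not allowed, so "key:<TAB>value" is accepted, while a tab used as block
+// indentation before a potential key is left in place and later rejected by
+// yaml_parser_fetch_next_token as a character that cannot start any token.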
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+       // Eat '%'.
+       start_mark := parser.mark
+       skip(parser)
+
+       // Scan the directive name.
+       var name []byte
+       if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+               return false
+       }
+
+       // Is it a YAML directive?
+       if bytes.Equal(name, []byte("YAML")) {
+               // Scan the VERSION directive value.
+               var major, minor int8
+               if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+                       return false
+               }
+               end_mark := parser.mark
+
+               // Create a VERSION-DIRECTIVE token.
+               *token = yaml_token_t{
+                       typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+                       start_mark: start_mark,
+                       end_mark:   end_mark,
+                       major:      major,
+                       minor:      minor,
+               }
+
+               // Is it a TAG directive?
+       } else if bytes.Equal(name, []byte("TAG")) {
+               // Scan the TAG directive value.
+               var handle, prefix []byte
+               if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+                       return false
+               }
+               end_mark := parser.mark
+
+               // Create a TAG-DIRECTIVE token.
+               *token = yaml_token_t{
+                       typ:        yaml_TAG_DIRECTIVE_TOKEN,
+                       start_mark: start_mark,
+                       end_mark:   end_mark,
+                       value:      handle,
+                       prefix:     prefix,
+               }
+
+               // Unknown directive.
+       } else {
+               yaml_parser_set_scanner_error(parser, "while scanning a directive",
+                       start_mark, "found unknown directive name")
+               return false
+       }
+
+       // Eat the rest of the line including any comments.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+
+       for is_blank(parser.buffer, parser.buffer_pos) {
+               skip(parser)
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+       }
+
+       if parser.buffer[parser.buffer_pos] == '#' {
+               for !is_breakz(parser.buffer, parser.buffer_pos) {
+                       skip(parser)
+                       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                               return false
+                       }
+               }
+       }
+
+       // Check if we are at the end of the line.
+       if !is_breakz(parser.buffer, parser.buffer_pos) {
+               yaml_parser_set_scanner_error(parser, "while scanning a directive",
+                       start_mark, "did not find expected comment or line break")
+               return false
+       }
+
+       // Eat a line break.
+       if is_break(parser.buffer, parser.buffer_pos) {
+               if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                       return false
+               }
+               skip_line(parser)
+       }
+
+       return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+       // Consume the directive name.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+
+       var s []byte
+       for is_alpha(parser.buffer, parser.buffer_pos) {
+               s = read(parser, s)
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+       }
+
+       // Check if the name is empty.
+       if len(s) == 0 {
+               yaml_parser_set_scanner_error(parser, "while scanning a directive",
+                       start_mark, "could not find expected directive name")
+               return false
+       }
+
+       // Check for a blank character after the name.
+       if !is_blankz(parser.buffer, parser.buffer_pos) {
+               yaml_parser_set_scanner_error(parser, "while scanning a directive",
+                       start_mark, "found unexpected non-alphabetical character")
+               return false
+       }
+       *name = s
+       return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+       // Eat whitespaces.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+       for is_blank(parser.buffer, parser.buffer_pos) {
+               skip(parser)
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+       }
+
+       // Consume the major version number.
+       if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+               return false
+       }
+
+       // Eat '.'.
+       if parser.buffer[parser.buffer_pos] != '.' {
+               return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+                       start_mark, "did not find expected digit or '.' character")
+       }
+
+       skip(parser)
+
+       // Consume the minor version number.
+       if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+               return false
+       }
+       return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+       // Repeat while the next character is a digit.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+       var value, length int8
+       for is_digit(parser.buffer, parser.buffer_pos) {
+               // Check if the number is too long.
+               length++
+               if length > max_number_length {
+                       return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+                               start_mark, "found extremely long version number")
+               }
+               value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+               skip(parser)
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+       }
+
+       // Check if the number was present.
+       if length == 0 {
+               return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+                       start_mark, "did not find expected version number")
+       }
+       *number = value
+       return true
+}
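+
+// Illustrative note (editor's example): max_number_length caps each version
+// component at two digits, so "%YAML 1.1" scans as major=1, minor=1, while
+// "%YAML 1.100" fails with "found extremely long version number".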
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+       var handle_value, prefix_value []byte
+
+       // Eat whitespaces.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+
+       for is_blank(parser.buffer, parser.buffer_pos) {
+               skip(parser)
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+       }
+
+       // Scan a handle.
+       if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+               return false
+       }
+
+       // Expect a whitespace.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+       if !is_blank(parser.buffer, parser.buffer_pos) {
+               yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+                       start_mark, "did not find expected whitespace")
+               return false
+       }
+
+       // Eat whitespaces.
+       for is_blank(parser.buffer, parser.buffer_pos) {
+               skip(parser)
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+       }
+
+       // Scan a prefix.
+       if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+               return false
+       }
+
+       // Expect a whitespace or line break.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+       if !is_blankz(parser.buffer, parser.buffer_pos) {
+               yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+                       start_mark, "did not find expected whitespace or line break")
+               return false
+       }
+
+       *handle = handle_value
+       *prefix = prefix_value
+       return true
+}
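+
+// Illustrative note (editor's example): "%TAG !e! tag:example.com,2000:app/"
+// yields handle "!e!" and prefix "tag:example.com,2000:app/", which the
+// parser later uses to resolve tags such as "!e!foo" against that prefix.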
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+       var s []byte
+
+       // Eat the indicator character.
+       start_mark := parser.mark
+       skip(parser)
+
+       // Consume the value.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+
+       for is_alpha(parser.buffer, parser.buffer_pos) {
+               s = read(parser, s)
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+       }
+
+       end_mark := parser.mark
+
+       // Check if the length of the anchor is greater than 0 and it is
+       // followed by a whitespace character or one of the indicators:
+       //
+       //      '?', ':', ',', ']', '}', '%', '@', '`'.
+
+       if len(s) == 0 ||
+               !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+                       parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+                       parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+                       parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+                       parser.buffer[parser.buffer_pos] == '`') {
+               context := "while scanning an alias"
+               if typ == yaml_ANCHOR_TOKEN {
+                       context = "while scanning an anchor"
+               }
+               yaml_parser_set_scanner_error(parser, context, start_mark,
+                       "did not find expected alphabetic or numeric character")
+               return false
+       }
+
+       // Create a token.
+       *token = yaml_token_t{
+               typ:        typ,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+               value:      s,
+       }
+
+       return true
+}
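+
+// Illustrative note (editor's example): "&id value" and "*id" produce ANCHOR
+// and ALIAS tokens whose value is "id"; the name must be non-empty and be
+// followed by a blank or one of the indicators listed above, so "&id!" fails
+// with "did not find expected alphabetic or numeric character".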
+
+// Scan a TAG token.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+       var handle, suffix []byte
+
+       start_mark := parser.mark
+
+       // Check if the tag is in the canonical form.
+       if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+               return false
+       }
+
+       if parser.buffer[parser.buffer_pos+1] == '<' {
+               // Keep the handle as ''
+
+               // Eat '!<'
+               skip(parser)
+               skip(parser)
+
+               // Consume the tag value.
+               if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+                       return false
+               }
+
+               // Check for '>' and eat it.
+               if parser.buffer[parser.buffer_pos] != '>' {
+                       yaml_parser_set_scanner_error(parser, "while scanning a tag",
+                               start_mark, "did not find the expected '>'")
+                       return false
+               }
+
+               skip(parser)
+       } else {
+               // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+               // First, try to scan a handle.
+               if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+                       return false
+               }
+
+               // Check if it is, indeed, a handle.
+               if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+                       // Scan the suffix now.
+                       if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+                               return false
+                       }
+               } else {
+                       // It wasn't a handle after all.  Scan the rest of the tag.
+                       if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+                               return false
+                       }
+
+                       // Set the handle to '!'.
+                       handle = []byte{'!'}
+
+                       // A special case: the '!' tag.  Set the handle to '' and the
+                       // suffix to '!'.
+                       if len(suffix) == 0 {
+                               handle, suffix = suffix, handle
+                       }
+               }
+       }
+
+       // Check the character which ends the tag.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+       if !is_blankz(parser.buffer, parser.buffer_pos) {
+               yaml_parser_set_scanner_error(parser, "while scanning a tag",
+                       start_mark, "did not find expected whitespace or line break")
+               return false
+       }
+
+       end_mark := parser.mark
+
+       // Create a token.
+       *token = yaml_token_t{
+               typ:        yaml_TAG_TOKEN,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+               value:      handle,
+               suffix:     suffix,
+       }
+       return true
+}
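+
+// Illustrative note (editor's example): the three tag forms scan as follows,
+// with value holding the handle and suffix holding the remainder:
+//
+//      !!str                     handle "!!", suffix "str"
+//      !local                    handle "!",  suffix "local"
+//      !<tag:yaml.org,2002:str>  handle "",   suffix "tag:yaml.org,2002:str"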
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+       // Check the initial '!' character.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+       if parser.buffer[parser.buffer_pos] != '!' {
+               yaml_parser_set_scanner_tag_error(parser, directive,
+                       start_mark, "did not find expected '!'")
+               return false
+       }
+
+       var s []byte
+
+       // Copy the '!' character.
+       s = read(parser, s)
+
+       // Copy all subsequent alphabetical and numerical characters.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+       for is_alpha(parser.buffer, parser.buffer_pos) {
+               s = read(parser, s)
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+       }
+
+       // Check if the trailing character is '!' and copy it.
+       if parser.buffer[parser.buffer_pos] == '!' {
+               s = read(parser, s)
+       } else {
+               // It's either the '!' tag or not really a tag handle.  If it's a %TAG
+               // directive, it's an error.  If it's a tag token, it must be part of a URI.
+               if directive && string(s) != "!" {
+                       yaml_parser_set_scanner_tag_error(parser, directive,
+                               start_mark, "did not find expected '!'")
+                       return false
+               }
+       }
+
+       *handle = s
+       return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+       //size_t length = head ? strlen((char *)head) : 0
+       var s []byte
+       hasTag := len(head) > 0
+
+       // Copy the head if needed.
+       //
+       // Note that we don't copy the leading '!' character.
+       if len(head) > 1 {
+               s = append(s, head[1:]...)
+       }
+
+       // Scan the tag.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+
+       // The set of characters that may appear in a URI is as follows:
+       //
+       //      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+       //      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+       //      '%'.
+       // [Go] Convert this into more reasonable logic.
+       for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+               parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+               parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+               parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+               parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+               parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+               parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+               parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+               parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+               parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+               parser.buffer[parser.buffer_pos] == '%' {
+               // Check if it is a URI-escape sequence.
+               if parser.buffer[parser.buffer_pos] == '%' {
+                       if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+                               return false
+                       }
+               } else {
+                       s = read(parser, s)
+               }
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+               hasTag = true
+       }
+
+       if !hasTag {
+               yaml_parser_set_scanner_tag_error(parser, directive,
+                       start_mark, "did not find expected tag URI")
+               return false
+       }
+       *uri = s
+       return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+       // Decode the required number of characters.
+       w := 1024
+       for w > 0 {
+               // Check for a URI-escaped octet.
+               if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+                       return false
+               }
+
+               if !(parser.buffer[parser.buffer_pos] == '%' &&
+                       is_hex(parser.buffer, parser.buffer_pos+1) &&
+                       is_hex(parser.buffer, parser.buffer_pos+2)) {
+                       return yaml_parser_set_scanner_tag_error(parser, directive,
+                               start_mark, "did not find URI escaped octet")
+               }
+
+               // Get the octet.
+               octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+               // If it is the leading octet, determine the length of the UTF-8 sequence.
+               if w == 1024 {
+                       w = width(octet)
+                       if w == 0 {
+                               return yaml_parser_set_scanner_tag_error(parser, directive,
+                                       start_mark, "found an incorrect leading UTF-8 octet")
+                       }
+               } else {
+                       // Check if the trailing octet is correct.
+                       if octet&0xC0 != 0x80 {
+                               return yaml_parser_set_scanner_tag_error(parser, directive,
+                                       start_mark, "found an incorrect trailing UTF-8 octet")
+                       }
+               }
+
+               // Copy the octet and move the pointers.
+               *s = append(*s, octet)
+               skip(parser)
+               skip(parser)
+               skip(parser)
+               w--
+       }
+       return true
+}
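+
+// Illustrative note (editor's example): w starts at the sentinel 1024 so the
+// first octet's UTF-8 width can set the real count; "%20" decodes to a
+// single space, while "%C3%A9" consumes two escaped octets that together
+// encode U+00E9.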
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+       // Eat the indicator '|' or '>'.
+       start_mark := parser.mark
+       skip(parser)
+
+       // Scan the additional block scalar indicators.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+
+       // Check for a chomping indicator.
+       var chomping, increment int
+       if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+               // Set the chomping method and eat the indicator.
+               if parser.buffer[parser.buffer_pos] == '+' {
+                       chomping = +1
+               } else {
+                       chomping = -1
+               }
+               skip(parser)
+
+               // Check for an indentation indicator.
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+               if is_digit(parser.buffer, parser.buffer_pos) {
+                       // Check that the indentation is greater than 0.
+                       if parser.buffer[parser.buffer_pos] == '0' {
+                               yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+                                       start_mark, "found an indentation indicator equal to 0")
+                               return false
+                       }
+
+                       // Get the indentation level and eat the indicator.
+                       increment = as_digit(parser.buffer, parser.buffer_pos)
+                       skip(parser)
+               }
+
+       } else if is_digit(parser.buffer, parser.buffer_pos) {
+               // Do the same as above, but in the opposite order.
+
+               if parser.buffer[parser.buffer_pos] == '0' {
+                       yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+                               start_mark, "found an indentation indicator equal to 0")
+                       return false
+               }
+               increment = as_digit(parser.buffer, parser.buffer_pos)
+               skip(parser)
+
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+               if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+                       if parser.buffer[parser.buffer_pos] == '+' {
+                               chomping = +1
+                       } else {
+                               chomping = -1
+                       }
+                       skip(parser)
+               }
+       }
+
+       // Eat whitespaces and comments to the end of the line.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+       for is_blank(parser.buffer, parser.buffer_pos) {
+               skip(parser)
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+       }
+       if parser.buffer[parser.buffer_pos] == '#' {
+               for !is_breakz(parser.buffer, parser.buffer_pos) {
+                       skip(parser)
+                       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                               return false
+                       }
+               }
+       }
+
+       // Check if we are at the end of the line.
+       if !is_breakz(parser.buffer, parser.buffer_pos) {
+               yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+                       start_mark, "did not find expected comment or line break")
+               return false
+       }
+
+       // Eat a line break.
+       if is_break(parser.buffer, parser.buffer_pos) {
+               if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                       return false
+               }
+               skip_line(parser)
+       }
+
+       end_mark := parser.mark
+
+       // Set the indentation level if it was specified.
+       var indent int
+       if increment > 0 {
+               if parser.indent >= 0 {
+                       indent = parser.indent + increment
+               } else {
+                       indent = increment
+               }
+       }
+
+       // Scan the leading line breaks and determine the indentation level if needed.
+       var s, leading_break, trailing_breaks []byte
+       if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+               return false
+       }
+
+       // Scan the block scalar content.
+       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+               return false
+       }
+       var leading_blank, trailing_blank bool
+       for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+               // We are at the beginning of a non-empty line.
+
+               // Is it a trailing whitespace?
+               trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+               // Check if we need to fold the leading line break.
+               if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+               // Do we need to join the lines with a space?
+                       if len(trailing_breaks) == 0 {
+                               s = append(s, ' ')
+                       }
+               } else {
+                       s = append(s, leading_break...)
+               }
+               leading_break = leading_break[:0]
+
+               // Append the remaining line breaks.
+               s = append(s, trailing_breaks...)
+               trailing_breaks = trailing_breaks[:0]
+
+               // Is it a leading whitespace?
+               leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+               // Consume the current line.
+               for !is_breakz(parser.buffer, parser.buffer_pos) {
+                       s = read(parser, s)
+                       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                               return false
+                       }
+               }
+
+               // Consume the line break.
+               if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                       return false
+               }
+
+               leading_break = read_line(parser, leading_break)
+
+               // Eat the following indentation spaces and line breaks.
+               if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+                       return false
+               }
+       }
+
+       // Chomp the tail.
+       if chomping != -1 {
+               s = append(s, leading_break...)
+       }
+       if chomping == 1 {
+               s = append(s, trailing_breaks...)
+       }
+
+       // Create a token.
+       *token = yaml_token_t{
+               typ:        yaml_SCALAR_TOKEN,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+               value:      s,
+               style:      yaml_LITERAL_SCALAR_STYLE,
+       }
+       if !literal {
+               token.style = yaml_FOLDED_SCALAR_STYLE
+       }
+       return true
+}
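+
+// Illustrative note (editor's example): the scalar header selects both style
+// and chomping. For content lines "text" followed by one empty line:
+//
+//      |       clip:  "text\n"   (default; keep a single trailing break)
+//      |-      strip: "text"     (chomping == -1 drops the final break)
+//      |+      keep:  "text\n\n" (chomping == +1 keeps trailing breaks)
+//
+// '>' behaves the same for chomping but folds single line breaks between
+// non-blank lines into spaces.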
+
+// Scan indentation spaces and line breaks for a block scalar.  Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+       *end_mark = parser.mark
+
+       // Eat the indentation spaces and line breaks.
+       max_indent := 0
+       for {
+               // Eat the indentation spaces.
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+               for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+                       skip(parser)
+                       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                               return false
+                       }
+               }
+               if parser.mark.column > max_indent {
+                       max_indent = parser.mark.column
+               }
+
+               // Check for a tab character messing the indentation.
+               if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+                       return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+                               start_mark, "found a tab character where an indentation space is expected")
+               }
+
+               // Have we found a non-empty line?
+               if !is_break(parser.buffer, parser.buffer_pos) {
+                       break
+               }
+
+               // Consume the line break.
+               if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                       return false
+               }
+               // [Go] Should really be returning breaks instead.
+               *breaks = read_line(parser, *breaks)
+               *end_mark = parser.mark
+       }
+
+       // Determine the indentation level if needed.
+       if *indent == 0 {
+               *indent = max_indent
+               if *indent < parser.indent+1 {
+                       *indent = parser.indent + 1
+               }
+               if *indent < 1 {
+                       *indent = 1
+               }
+       }
+       return true
+}
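+
+// Illustrative note (editor's example): when the header carries no explicit
+// indentation indicator, *indent arrives as 0 and is derived from the
+// deepest column seen here, floored at parser.indent+1, so for
+//
+//      key: |
+//          text
+//
+// the scalar's indentation is detected as 4 from the first non-empty line.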
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+       // Eat the left quote.
+       start_mark := parser.mark
+       skip(parser)
+
+       // Consume the content of the quoted scalar.
+       var s, leading_break, trailing_breaks, whitespaces []byte
+       for {
+               // Check that there are no document indicators at the beginning of the line.
+               if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+                       return false
+               }
+
+               if parser.mark.column == 0 &&
+                       ((parser.buffer[parser.buffer_pos+0] == '-' &&
+                               parser.buffer[parser.buffer_pos+1] == '-' &&
+                               parser.buffer[parser.buffer_pos+2] == '-') ||
+                               (parser.buffer[parser.buffer_pos+0] == '.' &&
+                                       parser.buffer[parser.buffer_pos+1] == '.' &&
+                                       parser.buffer[parser.buffer_pos+2] == '.')) &&
+                       is_blankz(parser.buffer, parser.buffer_pos+3) {
+                       yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+                               start_mark, "found unexpected document indicator")
+                       return false
+               }
+
+               // Check for EOF.
+               if is_z(parser.buffer, parser.buffer_pos) {
+                       yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+                               start_mark, "found unexpected end of stream")
+                       return false
+               }
+
+               // Consume non-blank characters.
+               leading_blanks := false
+               for !is_blankz(parser.buffer, parser.buffer_pos) {
+                       if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+                               // It is an escaped single quote.
+                               s = append(s, '\'')
+                               skip(parser)
+                               skip(parser)
+
+                       } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+                               // It is a right single quote.
+                               break
+                       } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+                               // It is a right double quote.
+                               break
+
+                       } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+                               // It is an escaped line break.
+                               if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+                                       return false
+                               }
+                               skip(parser)
+                               skip_line(parser)
+                               leading_blanks = true
+                               break
+
+                       } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+                               // It is an escape sequence.
+                               code_length := 0
+
+                               // Check the escape character.
+                               switch parser.buffer[parser.buffer_pos+1] {
+                               case '0':
+                                       s = append(s, 0)
+                               case 'a':
+                                       s = append(s, '\x07')
+                               case 'b':
+                                       s = append(s, '\x08')
+                               case 't', '\t':
+                                       s = append(s, '\x09')
+                               case 'n':
+                                       s = append(s, '\x0A')
+                               case 'v':
+                                       s = append(s, '\x0B')
+                               case 'f':
+                                       s = append(s, '\x0C')
+                               case 'r':
+                                       s = append(s, '\x0D')
+                               case 'e':
+                                       s = append(s, '\x1B')
+                               case ' ':
+                                       s = append(s, '\x20')
+                               case '"':
+                                       s = append(s, '"')
+                               case '\'':
+                                       s = append(s, '\'')
+                               case '\\':
+                                       s = append(s, '\\')
+                               case 'N': // NEL (#x85)
+                                       s = append(s, '\xC2')
+                                       s = append(s, '\x85')
+                               case '_': // #xA0
+                                       s = append(s, '\xC2')
+                                       s = append(s, '\xA0')
+                               case 'L': // LS (#x2028)
+                                       s = append(s, '\xE2')
+                                       s = append(s, '\x80')
+                                       s = append(s, '\xA8')
+                               case 'P': // PS (#x2029)
+                                       s = append(s, '\xE2')
+                                       s = append(s, '\x80')
+                                       s = append(s, '\xA9')
+                               case 'x':
+                                       code_length = 2
+                               case 'u':
+                                       code_length = 4
+                               case 'U':
+                                       code_length = 8
+                               default:
+                                       yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+                                               start_mark, "found unknown escape character")
+                                       return false
+                               }
+
+                               skip(parser)
+                               skip(parser)
+
+                               // Consume an arbitrary escape code.
+                               if code_length > 0 {
+                                       var value int
+
+                                       // Scan the character value.
+                                       if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+                                               return false
+                                       }
+                                       for k := 0; k < code_length; k++ {
+                                               if !is_hex(parser.buffer, parser.buffer_pos+k) {
+                                                       yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+                                                               start_mark, "did not find expected hexadecimal number")
+                                                       return false
+                                               }
+                                               value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+                                       }
+
+                                       // Check the value and write the character.
+                                       if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+                                               yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+                                                       start_mark, "found invalid Unicode character escape code")
+                                               return false
+                                       }
+                                       if value <= 0x7F {
+                                               s = append(s, byte(value))
+                                       } else if value <= 0x7FF {
+                                               s = append(s, byte(0xC0+(value>>6)))
+                                               s = append(s, byte(0x80+(value&0x3F)))
+                                       } else if value <= 0xFFFF {
+                                               s = append(s, byte(0xE0+(value>>12)))
+                                               s = append(s, byte(0x80+((value>>6)&0x3F)))
+                                               s = append(s, byte(0x80+(value&0x3F)))
+                                       } else {
+                                               s = append(s, byte(0xF0+(value>>18)))
+                                               s = append(s, byte(0x80+((value>>12)&0x3F)))
+                                               s = append(s, byte(0x80+((value>>6)&0x3F)))
+                                               s = append(s, byte(0x80+(value&0x3F)))
+                                       }
+
+                                       // Advance the pointer.
+                                       for k := 0; k < code_length; k++ {
+                                               skip(parser)
+                                       }
+                               }
+                       } else {
+                               // It is a non-escaped non-blank character.
+                               s = read(parser, s)
+                       }
+                       if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                               return false
+                       }
+               }
+
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+
+               // Check if we are at the end of the scalar.
+               if single {
+                       if parser.buffer[parser.buffer_pos] == '\'' {
+                               break
+                       }
+               } else {
+                       if parser.buffer[parser.buffer_pos] == '"' {
+                               break
+                       }
+               }
+
+               // Consume blank characters.
+               for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+                       if is_blank(parser.buffer, parser.buffer_pos) {
+                               // Consume a space or a tab character.
+                               if !leading_blanks {
+                                       whitespaces = read(parser, whitespaces)
+                               } else {
+                                       skip(parser)
+                               }
+                       } else {
+                               if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                                       return false
+                               }
+
+                               // Check if it is a first line break.
+                               if !leading_blanks {
+                                       whitespaces = whitespaces[:0]
+                                       leading_break = read_line(parser, leading_break)
+                                       leading_blanks = true
+                               } else {
+                                       trailing_breaks = read_line(parser, trailing_breaks)
+                               }
+                       }
+                       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                               return false
+                       }
+               }
+
+               // Join the whitespaces or fold line breaks.
+               if leading_blanks {
+                       // Do we need to fold line breaks?
+                       if len(leading_break) > 0 && leading_break[0] == '\n' {
+                               if len(trailing_breaks) == 0 {
+                                       s = append(s, ' ')
+                               } else {
+                                       s = append(s, trailing_breaks...)
+                               }
+                       } else {
+                               s = append(s, leading_break...)
+                               s = append(s, trailing_breaks...)
+                       }
+                       trailing_breaks = trailing_breaks[:0]
+                       leading_break = leading_break[:0]
+               } else {
+                       s = append(s, whitespaces...)
+                       whitespaces = whitespaces[:0]
+               }
+       }
+
+       // Eat the right quote.
+       skip(parser)
+       end_mark := parser.mark
+
+       // Create a token.
+       *token = yaml_token_t{
+               typ:        yaml_SCALAR_TOKEN,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+               value:      s,
+               style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+       }
+       if !single {
+               token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+       }
+       return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+       var s, leading_break, trailing_breaks, whitespaces []byte
+       var leading_blanks bool
+       var indent = parser.indent + 1
+
+       start_mark := parser.mark
+       end_mark := parser.mark
+
+       // Consume the content of the plain scalar.
+       for {
+               // Check for a document indicator.
+               if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+                       return false
+               }
+               if parser.mark.column == 0 &&
+                       ((parser.buffer[parser.buffer_pos+0] == '-' &&
+                               parser.buffer[parser.buffer_pos+1] == '-' &&
+                               parser.buffer[parser.buffer_pos+2] == '-') ||
+                               (parser.buffer[parser.buffer_pos+0] == '.' &&
+                                       parser.buffer[parser.buffer_pos+1] == '.' &&
+                                       parser.buffer[parser.buffer_pos+2] == '.')) &&
+                       is_blankz(parser.buffer, parser.buffer_pos+3) {
+                       break
+               }
+
+               // Check for a comment.
+               if parser.buffer[parser.buffer_pos] == '#' {
+                       break
+               }
+
+               // Consume non-blank characters.
+               for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+                       // Check for indicators that may end a plain scalar.
+                       if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+                               (parser.flow_level > 0 &&
+                                       (parser.buffer[parser.buffer_pos] == ',' ||
+                                               parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+                                               parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+                                               parser.buffer[parser.buffer_pos] == '}')) {
+                               break
+                       }
+
+                       // Check if we need to join whitespaces and breaks.
+                       if leading_blanks || len(whitespaces) > 0 {
+                               if leading_blanks {
+                                       // Do we need to fold line breaks?
+                                       if leading_break[0] == '\n' {
+                                               if len(trailing_breaks) == 0 {
+                                                       s = append(s, ' ')
+                                               } else {
+                                                       s = append(s, trailing_breaks...)
+                                               }
+                                       } else {
+                                               s = append(s, leading_break...)
+                                               s = append(s, trailing_breaks...)
+                                       }
+                                       trailing_breaks = trailing_breaks[:0]
+                                       leading_break = leading_break[:0]
+                                       leading_blanks = false
+                               } else {
+                                       s = append(s, whitespaces...)
+                                       whitespaces = whitespaces[:0]
+                               }
+                       }
+
+                       // Copy the character.
+                       s = read(parser, s)
+
+                       end_mark = parser.mark
+                       if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                               return false
+                       }
+               }
+
+               // Is it the end?
+               if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+                       break
+               }
+
+               // Consume blank characters.
+               if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                       return false
+               }
+
+               for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+                       if is_blank(parser.buffer, parser.buffer_pos) {
+
+                               // Check for tab characters that abuse indentation.
+                               if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+                                       yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+                                               start_mark, "found a tab character that violates indentation")
+                                       return false
+                               }
+
+                               // Consume a space or a tab character.
+                               if !leading_blanks {
+                                       whitespaces = read(parser, whitespaces)
+                               } else {
+                                       skip(parser)
+                               }
+                       } else {
+                               if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                                       return false
+                               }
+
+                               // Check if it is a first line break.
+                               if !leading_blanks {
+                                       whitespaces = whitespaces[:0]
+                                       leading_break = read_line(parser, leading_break)
+                                       leading_blanks = true
+                               } else {
+                                       trailing_breaks = read_line(parser, trailing_breaks)
+                               }
+                       }
+                       if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                               return false
+                       }
+               }
+
+               // Check indentation level.
+               if parser.flow_level == 0 && parser.mark.column < indent {
+                       break
+               }
+       }
+
+       // Create a token.
+       *token = yaml_token_t{
+               typ:        yaml_SCALAR_TOKEN,
+               start_mark: start_mark,
+               end_mark:   end_mark,
+               value:      s,
+               style:      yaml_PLAIN_SCALAR_STYLE,
+       }
+
+       // Note that we change the 'simple_key_allowed' flag.
+       if leading_blanks {
+               parser.simple_key_allowed = true
+       }
+       return true
+}
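
The \x, \u and \U escape branch of yaml_parser_scan_flow_scalar above encodes the validated code point into UTF-8 by hand (the value <= 0x7F .. value > 0xFFFF ladder) rather than via the standard library. Below is a minimal standalone sketch of that same encoding ladder, with all names invented for illustration, cross-checked against unicode/utf8:

package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeRune appends the UTF-8 bytes for a code point using the same
// 1/2/3/4-byte ladder as the scanner's escape handling.
func encodeRune(s []byte, value int) []byte {
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s
}

func main() {
	// U+2028 (LS) is the same code point the scanner emits for the \L escape.
	for _, v := range []int{'A', 0xE9, 0x2028, 0x1F600} {
		got := encodeRune(nil, v)
		fmt.Printf("U+%04X -> % X valid=%v stdlib-match=%v\n",
			v, got, utf8.Valid(got), string(got) == string(rune(v)))
	}
}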
diff --git a/vendor/github.com/zclconf/go-cty-yaml/writerc.go b/vendor/github.com/zclconf/go-cty-yaml/writerc.go
new file mode 100644 (file)
index 0000000..a2dde60
--- /dev/null
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+       emitter.error = yaml_WRITER_ERROR
+       emitter.problem = problem
+       return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+       if emitter.write_handler == nil {
+               panic("write handler not set")
+       }
+
+       // Check if the buffer is empty.
+       if emitter.buffer_pos == 0 {
+               return true
+       }
+
+       if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+               return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+       }
+       emitter.buffer_pos = 0
+       return true
+}
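
yaml_emitter_flush above funnels all output through emitter.write_handler. The concrete handlers live elsewhere in the package (apic.go, which this diff does not include); given the output_buffer and output_writer fields declared on yaml_emitter_t later in this commit, plausible sketches look like the following, with the sketch_ names invented:

package yaml

// sketch_string_write_handler appends emitted bytes to the slice that
// output_buffer points at (in-memory output).
func sketch_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
	return nil
}

// sketch_writer_write_handler forwards emitted bytes to an io.Writer.
func sketch_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	_, err := emitter.output_writer.Write(buffer)
	return err
}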
diff --git a/vendor/github.com/zclconf/go-cty-yaml/yaml.go b/vendor/github.com/zclconf/go-cty-yaml/yaml.go
new file mode 100644 (file)
index 0000000..2c314cc
--- /dev/null
@@ -0,0 +1,215 @@
+// Package yaml can marshal and unmarshal cty values in YAML format.
+package yaml
+
+import (
+       "errors"
+       "fmt"
+       "reflect"
+       "strings"
+       "sync"
+
+       "github.com/zclconf/go-cty/cty"
+)
+
+// Unmarshal reads the document found within the given source buffer
+// and attempts to convert it into a value conforming to the given type
+// constraint.
+//
+// This is an alias for Unmarshal on the predefined Converter in "Standard".
+//
+// An error is returned if the given source contains any YAML document
+// delimiters.
+func Unmarshal(src []byte, ty cty.Type) (cty.Value, error) {
+       return Standard.Unmarshal(src, ty)
+}
+
+// Marshal serializes the given value into a YAML document, using a fixed
+// mapping from cty types to YAML constructs.
+//
+// This is an alias for Marshal on the predefined Converter in "Standard".
+//
+// Note that unlike the function of the same name in the cty JSON package,
+// this does not take a type constraint and therefore the YAML serialization
+// cannot preserve late-bound type information in the serialization to be
+// recovered from Unmarshal. Instead, any cty.DynamicPseudoType in the type
+// constraint given to Unmarshal will be decoded as if the corresponding portion
+// of the input were processed with ImpliedType to find a target type.
+func Marshal(v cty.Value) ([]byte, error) {
+       return Standard.Marshal(v)
+}
+
+// ImpliedType analyzes the given source code and returns a suitable type that
+// it could be decoded into.
+//
+// For a converter that is using standard YAML rather than cty-specific custom
+// tags, only a subset of cty types can be produced: strings, numbers, bools,
+// tuple types, and object types.
+//
+// This is an alias for ImpliedType on the predefined Converter in "Standard".
+func ImpliedType(src []byte) (cty.Type, error) {
+       return Standard.ImpliedType(src)
+}
+
+func handleErr(err *error) {
+       if v := recover(); v != nil {
+               if e, ok := v.(yamlError); ok {
+                       *err = e.err
+               } else {
+                       panic(v)
+               }
+       }
+}
+
+type yamlError struct {
+       err error
+}
+
+func fail(err error) {
+       panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+       panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+       FieldsMap  map[string]fieldInfo
+       FieldsList []fieldInfo
+
+       // InlineMap is the number of the field in the struct that
+       // contains an ,inline map, or -1 if there's none.
+       InlineMap int
+}
+
+type fieldInfo struct {
+       Key       string
+       Num       int
+       OmitEmpty bool
+       Flow      bool
+       // Id holds the unique field identifier, so we can cheaply
+       // check for field duplicates without maintaining an extra map.
+       Id int
+
+       // Inline holds the field index if the field is part of an inlined struct.
+       Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+       fieldMapMutex.RLock()
+       sinfo, found := structMap[st]
+       fieldMapMutex.RUnlock()
+       if found {
+               return sinfo, nil
+       }
+
+       n := st.NumField()
+       fieldsMap := make(map[string]fieldInfo)
+       fieldsList := make([]fieldInfo, 0, n)
+       inlineMap := -1
+       for i := 0; i != n; i++ {
+               field := st.Field(i)
+               if field.PkgPath != "" && !field.Anonymous {
+                       continue // Private field
+               }
+
+               info := fieldInfo{Num: i}
+
+               tag := field.Tag.Get("yaml")
+               if tag == "" && !strings.Contains(string(field.Tag), ":") {
+                       tag = string(field.Tag)
+               }
+               if tag == "-" {
+                       continue
+               }
+
+               inline := false
+               fields := strings.Split(tag, ",")
+               if len(fields) > 1 {
+                       for _, flag := range fields[1:] {
+                               switch flag {
+                               case "omitempty":
+                                       info.OmitEmpty = true
+                               case "flow":
+                                       info.Flow = true
+                               case "inline":
+                                       inline = true
+                               default:
+                                       return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+                               }
+                       }
+                       tag = fields[0]
+               }
+
+               if inline {
+                       switch field.Type.Kind() {
+                       case reflect.Map:
+                               if inlineMap >= 0 {
+                                       return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+                               }
+                               if field.Type.Key() != reflect.TypeOf("") {
+                                       return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+                               }
+                               inlineMap = info.Num
+                       case reflect.Struct:
+                               sinfo, err := getStructInfo(field.Type)
+                               if err != nil {
+                                       return nil, err
+                               }
+                               for _, finfo := range sinfo.FieldsList {
+                                       if _, found := fieldsMap[finfo.Key]; found {
+                                               msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+                                               return nil, errors.New(msg)
+                                       }
+                                       if finfo.Inline == nil {
+                                               finfo.Inline = []int{i, finfo.Num}
+                                       } else {
+                                               finfo.Inline = append([]int{i}, finfo.Inline...)
+                                       }
+                                       finfo.Id = len(fieldsList)
+                                       fieldsMap[finfo.Key] = finfo
+                                       fieldsList = append(fieldsList, finfo)
+                               }
+                       default:
+                               //return nil, errors.New("Option ,inline needs a struct value or map field")
+                               return nil, errors.New("Option ,inline needs a struct value field")
+                       }
+                       continue
+               }
+
+               if tag != "" {
+                       info.Key = tag
+               } else {
+                       info.Key = strings.ToLower(field.Name)
+               }
+
+               if _, found = fieldsMap[info.Key]; found {
+                       msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+                       return nil, errors.New(msg)
+               }
+
+               info.Id = len(fieldsList)
+               fieldsList = append(fieldsList, info)
+               fieldsMap[info.Key] = info
+       }
+
+       sinfo = &structInfo{
+               FieldsMap:  fieldsMap,
+               FieldsList: fieldsList,
+               InlineMap:  inlineMap,
+       }
+
+       fieldMapMutex.Lock()
+       structMap[st] = sinfo
+       fieldMapMutex.Unlock()
+       return sinfo, nil
+}
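
A minimal usage sketch for the package-level Unmarshal, Marshal and ImpliedType helpers defined above; the sample document and the commented output are assumptions for illustration:

package main

import (
	"fmt"

	yaml "github.com/zclconf/go-cty-yaml"
)

func main() {
	src := []byte("name: example\ncount: 2\n")

	// Infer a cty object type from the document, then decode into it.
	ty, err := yaml.ImpliedType(src)
	if err != nil {
		panic(err)
	}
	v, err := yaml.Unmarshal(src, ty)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetAttr("name")) // cty.StringVal("example")

	// Round-trip the value back to YAML.
	out, err := yaml.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)
}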
diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlh.go
new file mode 100644 (file)
index 0000000..e25cee5
--- /dev/null
@@ -0,0 +1,738 @@
+package yaml
+
+import (
+       "fmt"
+       "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+       major int8 // The major version number.
+       minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+       handle []byte // The tag handle.
+       prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+       // Let the parser choose the encoding.
+       yaml_ANY_ENCODING yaml_encoding_t = iota
+
+       yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+       yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+       yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+       // Let the parser choose the break type.
+       yaml_ANY_BREAK yaml_break_t = iota
+
+       yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+       yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+       yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+       // No error is produced.
+       yaml_NO_ERROR yaml_error_type_t = iota
+
+       yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+       yaml_READER_ERROR   // Cannot read or decode the input stream.
+       yaml_SCANNER_ERROR  // Cannot scan the input stream.
+       yaml_PARSER_ERROR   // Cannot parse the input stream.
+       yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+       yaml_WRITER_ERROR   // Cannot write to the output stream.
+       yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+       index  int // The position index.
+       line   int // The position line.
+       column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+       // Let the emitter choose the style.
+       yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+       yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+       yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+       yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+       yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+       yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+       // Let the emitter choose the style.
+       yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+       yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+       yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+       // Let the emitter choose the style.
+       yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+       yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+       yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+       // An empty token.
+       yaml_NO_TOKEN yaml_token_type_t = iota
+
+       yaml_STREAM_START_TOKEN // A STREAM-START token.
+       yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+       yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+       yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+       yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+       yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+       yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+       yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+       yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+       yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+       yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+       yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+       yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+       yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+       yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+       yaml_KEY_TOKEN         // A KEY token.
+       yaml_VALUE_TOKEN       // A VALUE token.
+
+       yaml_ALIAS_TOKEN  // An ALIAS token.
+       yaml_ANCHOR_TOKEN // An ANCHOR token.
+       yaml_TAG_TOKEN    // A TAG token.
+       yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+       switch tt {
+       case yaml_NO_TOKEN:
+               return "yaml_NO_TOKEN"
+       case yaml_STREAM_START_TOKEN:
+               return "yaml_STREAM_START_TOKEN"
+       case yaml_STREAM_END_TOKEN:
+               return "yaml_STREAM_END_TOKEN"
+       case yaml_VERSION_DIRECTIVE_TOKEN:
+               return "yaml_VERSION_DIRECTIVE_TOKEN"
+       case yaml_TAG_DIRECTIVE_TOKEN:
+               return "yaml_TAG_DIRECTIVE_TOKEN"
+       case yaml_DOCUMENT_START_TOKEN:
+               return "yaml_DOCUMENT_START_TOKEN"
+       case yaml_DOCUMENT_END_TOKEN:
+               return "yaml_DOCUMENT_END_TOKEN"
+       case yaml_BLOCK_SEQUENCE_START_TOKEN:
+               return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+       case yaml_BLOCK_MAPPING_START_TOKEN:
+               return "yaml_BLOCK_MAPPING_START_TOKEN"
+       case yaml_BLOCK_END_TOKEN:
+               return "yaml_BLOCK_END_TOKEN"
+       case yaml_FLOW_SEQUENCE_START_TOKEN:
+               return "yaml_FLOW_SEQUENCE_START_TOKEN"
+       case yaml_FLOW_SEQUENCE_END_TOKEN:
+               return "yaml_FLOW_SEQUENCE_END_TOKEN"
+       case yaml_FLOW_MAPPING_START_TOKEN:
+               return "yaml_FLOW_MAPPING_START_TOKEN"
+       case yaml_FLOW_MAPPING_END_TOKEN:
+               return "yaml_FLOW_MAPPING_END_TOKEN"
+       case yaml_BLOCK_ENTRY_TOKEN:
+               return "yaml_BLOCK_ENTRY_TOKEN"
+       case yaml_FLOW_ENTRY_TOKEN:
+               return "yaml_FLOW_ENTRY_TOKEN"
+       case yaml_KEY_TOKEN:
+               return "yaml_KEY_TOKEN"
+       case yaml_VALUE_TOKEN:
+               return "yaml_VALUE_TOKEN"
+       case yaml_ALIAS_TOKEN:
+               return "yaml_ALIAS_TOKEN"
+       case yaml_ANCHOR_TOKEN:
+               return "yaml_ANCHOR_TOKEN"
+       case yaml_TAG_TOKEN:
+               return "yaml_TAG_TOKEN"
+       case yaml_SCALAR_TOKEN:
+               return "yaml_SCALAR_TOKEN"
+       }
+       return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+       // The token type.
+       typ yaml_token_type_t
+
+       // The start/end of the token.
+       start_mark, end_mark yaml_mark_t
+
+       // The stream encoding (for yaml_STREAM_START_TOKEN).
+       encoding yaml_encoding_t
+
+       // The alias/anchor/scalar value or tag/tag directive handle
+       // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+       value []byte
+
+       // The tag suffix (for yaml_TAG_TOKEN).
+       suffix []byte
+
+       // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+       prefix []byte
+
+       // The scalar style (for yaml_SCALAR_TOKEN).
+       style yaml_scalar_style_t
+
+       // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+       major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+       // An empty event.
+       yaml_NO_EVENT yaml_event_type_t = iota
+
+       yaml_STREAM_START_EVENT   // A STREAM-START event.
+       yaml_STREAM_END_EVENT     // A STREAM-END event.
+       yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+       yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+       yaml_ALIAS_EVENT          // An ALIAS event.
+       yaml_SCALAR_EVENT         // A SCALAR event.
+       yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+       yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+       yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+       yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+       yaml_NO_EVENT:             "none",
+       yaml_STREAM_START_EVENT:   "stream start",
+       yaml_STREAM_END_EVENT:     "stream end",
+       yaml_DOCUMENT_START_EVENT: "document start",
+       yaml_DOCUMENT_END_EVENT:   "document end",
+       yaml_ALIAS_EVENT:          "alias",
+       yaml_SCALAR_EVENT:         "scalar",
+       yaml_SEQUENCE_START_EVENT: "sequence start",
+       yaml_SEQUENCE_END_EVENT:   "sequence end",
+       yaml_MAPPING_START_EVENT:  "mapping start",
+       yaml_MAPPING_END_EVENT:    "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+       if e < 0 || int(e) >= len(eventStrings) {
+               return fmt.Sprintf("unknown event %d", e)
+       }
+       return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+       // The event type.
+       typ yaml_event_type_t
+
+       // The start and end of the event.
+       start_mark, end_mark yaml_mark_t
+
+       // The document encoding (for yaml_STREAM_START_EVENT).
+       encoding yaml_encoding_t
+
+       // The version directive (for yaml_DOCUMENT_START_EVENT).
+       version_directive *yaml_version_directive_t
+
+       // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+       tag_directives []yaml_tag_directive_t
+
+       // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+       anchor []byte
+
+       // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+       tag []byte
+
+       // The scalar value (for yaml_SCALAR_EVENT).
+       value []byte
+
+       // Is the document start/end indicator implicit, or the tag optional?
+       // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+       implicit bool
+
+       // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+       quoted_implicit bool
+
+       // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+       style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+       yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+       yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+       yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+       yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+       yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+       yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+       yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+       yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+       // Not in original libyaml.
+       yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+       yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+       yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+       yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+       yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+       // An empty node.
+       yaml_NO_NODE yaml_node_type_t = iota
+
+       yaml_SCALAR_NODE   // A scalar node.
+       yaml_SEQUENCE_NODE // A sequence node.
+       yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+       key   int // The key of the element.
+       value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+       typ yaml_node_type_t // The node type.
+       tag []byte           // The node tag.
+
+       // The node data.
+
+       // The scalar parameters (for yaml_SCALAR_NODE).
+       scalar struct {
+               value  []byte              // The scalar value.
+               length int                 // The length of the scalar value.
+               style  yaml_scalar_style_t // The scalar style.
+       }
+
+       // The sequence parameters (for yaml_SEQUENCE_NODE).
+       sequence struct {
+               items_data []yaml_node_item_t    // The stack of sequence items.
+               style      yaml_sequence_style_t // The sequence style.
+       }
+
+       // The mapping parameters (for yaml_MAPPING_NODE).
+       mapping struct {
+               pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+               pairs_start *yaml_node_pair_t    // The beginning of the stack.
+               pairs_end   *yaml_node_pair_t    // The end of the stack.
+               pairs_top   *yaml_node_pair_t    // The top of the stack.
+               style       yaml_mapping_style_t // The mapping style.
+       }
+
+       start_mark yaml_mark_t // The beginning of the node.
+       end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+       // The document nodes.
+       nodes []yaml_node_t
+
+       // The version directive.
+       version_directive *yaml_version_directive_t
+
+       // The list of tag directives.
+       tag_directives_data  []yaml_tag_directive_t
+       tag_directives_start int // The beginning of the tag directives list.
+       tag_directives_end   int // The end of the tag directives list.
+
+       start_implicit int // Is the document start indicator implicit?
+       end_implicit   int // Is the document end indicator implicit?
+
+       // The start/end of the document.
+       start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from
+// the source. It should write no more than len(buffer) bytes into buffer and
+// return the number of bytes written. End of stream is signaled by returning
+// n == 0 together with err == io.EOF; any other non-nil error aborts parsing.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+       possible     bool        // Is a simple key possible?
+       required     bool        // Is a simple key required?
+       token_number int         // The number of the token.
+       mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+       yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+       yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+       yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+       yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+       yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+       yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+       yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+       yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+       yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+       yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+       yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+       yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+       yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+       yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+       yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+       yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+       yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+       yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+       yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+       yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+       yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+       yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+       yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+       yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+       switch ps {
+       case yaml_PARSE_STREAM_START_STATE:
+               return "yaml_PARSE_STREAM_START_STATE"
+       case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+               return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+       case yaml_PARSE_DOCUMENT_START_STATE:
+               return "yaml_PARSE_DOCUMENT_START_STATE"
+       case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+               return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+       case yaml_PARSE_DOCUMENT_END_STATE:
+               return "yaml_PARSE_DOCUMENT_END_STATE"
+       case yaml_PARSE_BLOCK_NODE_STATE:
+               return "yaml_PARSE_BLOCK_NODE_STATE"
+       case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+               return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+       case yaml_PARSE_FLOW_NODE_STATE:
+               return "yaml_PARSE_FLOW_NODE_STATE"
+       case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+               return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+       case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+               return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+       case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+               return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+       case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+               return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+       case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+               return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+       case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+               return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+       case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+               return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+       case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+               return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+       case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+               return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+       case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+               return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+       case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+               return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+       case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+               return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+       case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+               return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+       case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+               return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+       case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+               return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+       case yaml_PARSE_END_STATE:
+               return "yaml_PARSE_END_STATE"
+       }
+       return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+       anchor []byte      // The anchor.
+       index  int         // The node id.
+       mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+       // Error handling
+
+       error yaml_error_type_t // Error type.
+
+       problem string // Error description.
+
+       // The byte about which the problem occurred.
+       problem_offset int
+       problem_value  int
+       problem_mark   yaml_mark_t
+
+       // The error context.
+       context      string
+       context_mark yaml_mark_t
+
+       // Reader stuff
+
+       read_handler yaml_read_handler_t // Read handler.
+
+       input_reader io.Reader // File input data.
+       input        []byte    // String input data.
+       input_pos    int
+
+       eof bool // EOF flag
+
+       buffer     []byte // The working buffer.
+       buffer_pos int    // The current position of the buffer.
+
+       unread int // The number of unread characters in the buffer.
+
+       raw_buffer     []byte // The raw buffer.
+       raw_buffer_pos int    // The current position of the raw buffer.
+
+       encoding yaml_encoding_t // The input encoding.
+
+       offset int         // The offset of the current position (in bytes).
+       mark   yaml_mark_t // The mark of the current position.
+
+       // Scanner stuff
+
+       stream_start_produced bool // Have we started to scan the input stream?
+       stream_end_produced   bool // Have we reached the end of the input stream?
+
+       flow_level int // The number of unclosed '[' and '{' indicators.
+
+       tokens          []yaml_token_t // The tokens queue.
+       tokens_head     int            // The head of the tokens queue.
+       tokens_parsed   int            // The number of tokens fetched from the queue.
+       token_available bool           // Does the tokens queue contain a token ready for dequeueing.
+
+       indent  int   // The current indentation level.
+       indents []int // The indentation levels stack.
+
+       simple_key_allowed bool                // May a simple key occur at the current position?
+       simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+       // Parser stuff
+
+       state          yaml_parser_state_t    // The current parser state.
+       states         []yaml_parser_state_t  // The parser states stack.
+       marks          []yaml_mark_t          // The stack of marks.
+       tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+       // Dumper stuff
+
+       aliases []yaml_alias_data_t // The alias data.
+
+       document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. It should write all of buffer to the output and
+// return nil on success, or a non-nil error on failure.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+       // Expect STREAM-START.
+       yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+       yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+       yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+       yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+       yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+       yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+       yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+       yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+       yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+       yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+       yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+       yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+       yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+       yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+       yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+       yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+       yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+       yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal.  Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+       // Error handling
+
+       error   yaml_error_type_t // Error type.
+       problem string            // Error description.
+
+       // Writer stuff
+
+       write_handler yaml_write_handler_t // Write handler.
+
+       output_buffer *[]byte   // String output data.
+       output_writer io.Writer // File output data.
+
+       buffer     []byte // The working buffer.
+       buffer_pos int    // The current position of the buffer.
+
+       raw_buffer     []byte // The raw buffer.
+       raw_buffer_pos int    // The current position of the raw buffer.
+
+       encoding yaml_encoding_t // The stream encoding.
+
+       // Emitter stuff
+
+       canonical   bool         // Is the output in the canonical style?
+       best_indent int          // The number of indentation spaces.
+       best_width  int          // The preferred width of the output lines.
+       unicode     bool         // Allow unescaped non-ASCII characters?
+       line_break  yaml_break_t // The preferred line break.
+
+       state  yaml_emitter_state_t   // The current emitter state.
+       states []yaml_emitter_state_t // The stack of states.
+
+       events      []yaml_event_t // The event queue.
+       events_head int            // The head of the event queue.
+
+       indents []int // The stack of indentation levels.
+
+       tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+       indent int // The current indentation level.
+
+       flow_level int // The current flow level.
+
+       root_context       bool // Is it the document root context?
+       sequence_context   bool // Is it a sequence context?
+       mapping_context    bool // Is it a mapping context?
+       simple_key_context bool // Is it a simple mapping key context?
+
+       line       int  // The current line.
+       column     int  // The current column.
+       whitespace bool // Was the last character whitespace?
+       indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+       open_ended bool // Is an explicit document end required?
+
+       // Anchor analysis.
+       anchor_data struct {
+               anchor []byte // The anchor value.
+               alias  bool   // Is it an alias?
+       }
+
+       // Tag analysis.
+       tag_data struct {
+               handle []byte // The tag handle.
+               suffix []byte // The tag suffix.
+       }
+
+       // Scalar analysis.
+       scalar_data struct {
+               value                 []byte              // The scalar value.
+               multiline             bool                // Does the scalar contain line breaks?
+               flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+               block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+               single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+               block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+               style                 yaml_scalar_style_t // The output style.
+       }
+
+       // Dumper stuff
+
+       opened bool // If the stream was already opened?
+       closed bool // If the stream was already closed?
+
+       // The information associated with the document nodes.
+       anchors *struct {
+               references int  // The number of references.
+               anchor     int  // The anchor id.
+               serialized bool // If the node has been emitted?
+       }
+
+       last_anchor_id int // The last assigned anchor id.
+
+       document *yaml_document_t // The currently emitted document.
+}
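
The state and states fields above implement the emitter's state machine: state holds the current expectation, and states is a stack of expectations to resume once a nested node closes. A minimal standalone sketch of that push/pop pattern, with illustrative names that only loosely mirror the vendored types (none of this is the library's code):

package main

import "fmt"

// emitterState stands in for yaml_emitter_state_t in this sketch.
type emitterState int

const (
	emitStreamStart   emitterState = iota // expect STREAM-START
	emitDocumentStart                     // expect DOCUMENT-START
	emitEnd                               // expect nothing
)

// emitter keeps the current state plus a stack of saved states,
// mirroring the state/states pair in yaml_emitter_t.
type emitter struct {
	state  emitterState
	states []emitterState
}

// push saves the state to return to after a nested construct is emitted.
func (e *emitter) push(s emitterState) { e.states = append(e.states, s) }

// pop resumes the most recently saved state.
func (e *emitter) pop() {
	e.state = e.states[len(e.states)-1]
	e.states = e.states[:len(e.states)-1]
}

func main() {
	e := &emitter{state: emitStreamStart}
	e.push(emitEnd)             // after the document, expect the end state
	e.state = emitDocumentStart // descend into the document
	e.pop()                     // document done; resume the saved state
	fmt.Println(e.state == emitEnd) // true
}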
diff --git a/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go b/vendor/github.com/zclconf/go-cty-yaml/yamlprivateh.go
new file mode 100644 (file)
index 0000000..8110ce3
--- /dev/null
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+       // The size of the input raw buffer.
+       input_raw_buffer_size = 512
+
+       // The size of the input buffer.
+       // It should be possible to decode the whole raw buffer.
+       input_buffer_size = input_raw_buffer_size * 3
+
+       // The size of the output buffer.
+       output_buffer_size = 128
+
+       // The size of the output raw buffer.
+       // It should be possible to encode the whole output buffer.
+       output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+       // The size of other stacks and queues.
+       initial_stack_size  = 16
+       initial_queue_size  = 16
+       initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+       return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+       return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+       return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+       return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+       bi := b[i]
+       if bi >= 'A' && bi <= 'F' {
+               return int(bi) - 'A' + 10
+       }
+       if bi >= 'a' && bi <= 'f' {
+               return int(bi) - 'a' + 10
+       }
+       return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+       return b[i] <= 0x7F
+}
+
+// Check if the character at the specified position can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+       return ((b[i] == 0x0A) || // . == #x0A
+               (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+               (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+               (b[i] > 0xC2 && b[i] < 0xED) ||
+               (b[i] == 0xED && b[i+1] < 0xA0) ||
+               (b[i] == 0xEE) ||
+               (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+                       !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+                       !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+       return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+       return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is a space.
+func is_space(b []byte, i int) bool {
+       return b[i] == ' '
+}
+
+// Check if the character at the specified position is a tab.
+func is_tab(b []byte, i int) bool {
+       return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+       //return is_space(b, i) || is_tab(b, i)
+       return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+       return (b[i] == '\r' || // CR (#xD)
+               b[i] == '\n' || // LF (#xA)
+               b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+               b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+               b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+       return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+       //return is_break(b, i) || is_z(b, i)
+       return (        // is_break:
+       b[i] == '\r' || // CR (#xD)
+               b[i] == '\n' || // LF (#xA)
+               b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+               b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+               b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+               // is_z:
+               b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+       //return is_space(b, i) || is_breakz(b, i)
+       return ( // is_space:
+       b[i] == ' ' ||
+               // is_breakz:
+               b[i] == '\r' || // CR (#xD)
+               b[i] == '\n' || // LF (#xA)
+               b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+               b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+               b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+               b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+       //return is_blank(b, i) || is_breakz(b, i)
+       return ( // is_blank:
+       b[i] == ' ' || b[i] == '\t' ||
+               // is_breakz:
+               b[i] == '\r' || // CR (#xD)
+               b[i] == '\n' || // LF (#xA)
+               b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+               b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+               b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+               b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+       // Don't replace these by a switch without first
+       // confirming that it is being inlined.
+       if b&0x80 == 0x00 {
+               return 1
+       }
+       if b&0xE0 == 0xC0 {
+               return 2
+       }
+       if b&0xF0 == 0xE0 {
+               return 3
+       }
+       if b&0xF8 == 0xF0 {
+               return 4
+       }
+       return 0
+
+}
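
width classifies a UTF-8 sequence by its leading byte (high bits 0xxxxxxx, 110xxxxx, 1110xxxx, 11110xxx) and returns 0 for continuation or invalid bytes. A quick standalone check of that classification; the width copy below duplicates the function above only so the sketch runs on its own:

package main

import "fmt"

// width reports the byte length of the UTF-8 sequence starting with b,
// or 0 if b is a continuation or invalid leading byte.
func width(b byte) int {
	if b&0x80 == 0x00 {
		return 1 // 0xxxxxxx: ASCII
	}
	if b&0xE0 == 0xC0 {
		return 2 // 110xxxxx: two-byte sequence
	}
	if b&0xF0 == 0xE0 {
		return 3 // 1110xxxx: three-byte sequence
	}
	if b&0xF8 == 0xF0 {
		return 4 // 11110xxx: four-byte sequence
	}
	return 0 // 10xxxxxx continuation byte, or invalid
}

func main() {
	// 'a' is ASCII; 0xC2 leads NEL (#x85); 0xE2 leads LS/PS (#x2028/#x2029);
	// 0xF0 leads a four-byte sequence; 0xBF is a bare continuation byte.
	for _, b := range []byte{'a', 0xC2, 0xE2, 0xF0, 0xBF} {
		fmt.Printf("%#02x -> %d\n", b, width(b))
	}
}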
index bf1a7c15aa017eff10017eea9dca77715a8c65ae..b31444954d3a1235065068721a050e6db78d0414 100644 (file)
@@ -71,6 +71,48 @@ func (p Path) GetAttr(name string) Path {
        return ret
 }
 
+// Equals compares two Paths for exact equality.
+func (p Path) Equals(other Path) bool {
+       if len(p) != len(other) {
+               return false
+       }
+
+       for i := range p {
+               pv := p[i]
+               switch pv := pv.(type) {
+               case GetAttrStep:
+                       ov, ok := other[i].(GetAttrStep)
+                       if !ok || pv != ov {
+                               return false
+                       }
+               case IndexStep:
+                       ov, ok := other[i].(IndexStep)
+                       if !ok {
+                               return false
+                       }
+
+                       if !pv.Key.RawEquals(ov.Key) {
+                               return false
+                       }
+               default:
+               // Any step of an unrecognized type compares as unequal.
+                       return false
+               }
+       }
+
+       return true
+
+}
+
+// HasPrefix reports whether the path p begins with the provided prefix.
+func (p Path) HasPrefix(prefix Path) bool {
+       if len(prefix) > len(p) {
+               return false
+       }
+
+       return p[:len(prefix)].Equals(prefix)
+}
+
 // GetAttrPath is a convenience method to start a new Path with a GetAttrStep.
 func GetAttrPath(name string) Path {
        return Path{}.GetAttr(name)
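
Equals walks both paths step by step, comparing GetAttrStep values directly and IndexStep keys with RawEquals, and HasPrefix builds on it. A short usage sketch against the public go-cty API; the path shape here is made up for illustration:

package main

import (
	"fmt"

	"github.com/zclconf/go-cty/cty"
)

func main() {
	// Build the path network[0].cidr step by step.
	p := cty.GetAttrPath("network").Index(cty.NumberIntVal(0)).GetAttr("cidr")

	fmt.Println(p.HasPrefix(cty.GetAttrPath("network"))) // true
	fmt.Println(p.Equals(cty.GetAttrPath("network")))    // false: lengths differ

	// Index keys are compared with RawEquals, so network[1] is not a prefix.
	fmt.Println(p.HasPrefix(cty.GetAttrPath("network").Index(cty.NumberIntVal(1)))) // false
}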
index 3e2518600e29e9e736125ed119cd5ea0bc99539c..faa2fb3693e9ffca87efa98284581be86fef6941 100644 (file)
@@ -504,7 +504,7 @@ const defaultRSAKeyBits = 2048
 // which may be empty but must not contain any of "()<>\x00".
 // If config is nil, sensible defaults will be used.
 func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) {
-       currentTime := config.Now()
+       creationTime := config.Now()
 
        bits := defaultRSAKeyBits
        if config != nil && config.RSABits != 0 {
@@ -525,8 +525,8 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
        }
 
        e := &Entity{
-               PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey),
-               PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv),
+               PrimaryKey: packet.NewRSAPublicKey(creationTime, &signingPriv.PublicKey),
+               PrivateKey: packet.NewRSAPrivateKey(creationTime, signingPriv),
                Identities: make(map[string]*Identity),
        }
        isPrimaryId := true
@@ -534,7 +534,7 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
                Name:   uid.Id,
                UserId: uid,
                SelfSignature: &packet.Signature{
-                       CreationTime: currentTime,
+                       CreationTime: creationTime,
                        SigType:      packet.SigTypePositiveCert,
                        PubKeyAlgo:   packet.PubKeyAlgoRSA,
                        Hash:         config.Hash(),
@@ -563,10 +563,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err
 
        e.Subkeys = make([]Subkey, 1)
        e.Subkeys[0] = Subkey{
-               PublicKey:  packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey),
-               PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv),
+               PublicKey:  packet.NewRSAPublicKey(creationTime, &encryptingPriv.PublicKey),
+               PrivateKey: packet.NewRSAPrivateKey(creationTime, encryptingPriv),
                Sig: &packet.Signature{
-                       CreationTime:              currentTime,
+                       CreationTime:              creationTime,
                        SigType:                   packet.SigTypeSubkeyBinding,
                        PubKeyAlgo:                packet.PubKeyAlgoRSA,
                        Hash:                      config.Hash(),
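
The change above only renames the local variable (currentTime to creationTime) for accuracy; the exported API is untouched. A usage sketch for openpgp.NewEntity, relying on the documented behavior that a nil config falls back to sensible defaults (the identity strings are illustrative):

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// With a nil *packet.Config, NewEntity uses 2048-bit RSA keys and the
	// current time as the key creation time.
	e, err := openpgp.NewEntity("Alice Example", "demo key", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(e.PrimaryKey.KeyIdString())
}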
index bd31cceac62e595679b4c11d4ea379eafefd582d..6f8ec0938416eab11f4a021e0982337ecbd3b445 100644 (file)
@@ -36,49 +36,49 @@ type PrivateKey struct {
        iv            []byte
 }
 
-func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
+func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey {
        pk := new(PrivateKey)
-       pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey)
+       pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey)
        pk.PrivateKey = priv
        return pk
 }
 
-func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
+func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey {
        pk := new(PrivateKey)
-       pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey)
+       pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey)
        pk.PrivateKey = priv
        return pk
 }
 
-func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
+func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey {
        pk := new(PrivateKey)
-       pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey)
+       pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey)
        pk.PrivateKey = priv
        return pk
 }
 
-func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
+func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey {
        pk := new(PrivateKey)
-       pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey)
+       pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey)
        pk.PrivateKey = priv
        return pk
 }
 
 // NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that
 // implements RSA or ECDSA.
-func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey {
+func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey {
        pk := new(PrivateKey)
        // In general, the public keys should be used as pointers. We still
        // type-switch on the values for backwards compatibility.
        switch pubkey := signer.Public().(type) {
        case *rsa.PublicKey:
-               pk.PublicKey = *NewRSAPublicKey(currentTime, pubkey)
+               pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey)
        case rsa.PublicKey:
-               pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey)
+               pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey)
        case *ecdsa.PublicKey:
-               pk.PublicKey = *NewECDSAPublicKey(currentTime, pubkey)
+               pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey)
        case ecdsa.PublicKey:
-               pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey)
+               pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey)
        default:
                panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey")
        }
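
As the type switch shows, NewSignerPrivateKey accepts any crypto.Signer whose public key is RSA or ECDSA, by pointer or by value, and panics otherwise. A runnable sketch with a freshly generated ECDSA key (the curve choice is only for illustration):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	// *ecdsa.PrivateKey implements crypto.Signer, so it can back an
	// OpenPGP private key directly.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	pk := packet.NewSignerPrivateKey(time.Now(), priv)
	fmt.Println(pk.PubKeyAlgo == packet.PubKeyAlgoECDSA) // true
}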
index b1009a70de7b6e502d9196234bbd775adbb04ba8..7ad0c027496145378b483bd004d4be053e612f18 100644 (file)
@@ -6,7 +6,7 @@ cloud.google.com/go/internal/optional
 cloud.google.com/go/internal/trace
 cloud.google.com/go/internal/version
 cloud.google.com/go/compute/metadata
-# github.com/DreamItGetIT/statuscake v0.0.0-20190218105717-471b24d8edfb
+# github.com/DreamItGetIT/statuscake v0.0.0-20190809134845-9d26ad75405b
 github.com/DreamItGetIT/statuscake
 # github.com/agext/levenshtein v1.2.2
 github.com/agext/levenshtein
@@ -16,7 +16,7 @@ github.com/apparentlymart/go-cidr/cidr
 github.com/apparentlymart/go-textseg/textseg
 # github.com/armon/go-radix v1.0.0
 github.com/armon/go-radix
-# github.com/aws/aws-sdk-go v1.19.18
+# github.com/aws/aws-sdk-go v1.21.7
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/credentials
 github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds
@@ -29,10 +29,10 @@ github.com/aws/aws-sdk-go/internal/sdkio
 github.com/aws/aws-sdk-go/internal/ini
 github.com/aws/aws-sdk-go/internal/shareddefaults
 github.com/aws/aws-sdk-go/aws/client
+github.com/aws/aws-sdk-go/aws/request
 github.com/aws/aws-sdk-go/internal/sdkuri
 github.com/aws/aws-sdk-go/aws/client/metadata
 github.com/aws/aws-sdk-go/aws/corehandlers
-github.com/aws/aws-sdk-go/aws/request
 github.com/aws/aws-sdk-go/aws/credentials/processcreds
 github.com/aws/aws-sdk-go/aws/credentials/stscreds
 github.com/aws/aws-sdk-go/aws/csm
@@ -45,11 +45,13 @@ github.com/aws/aws-sdk-go/private/protocol/eventstream
 github.com/aws/aws-sdk-go/private/protocol/eventstream/eventstreamapi
 github.com/aws/aws-sdk-go/private/protocol/rest
 github.com/aws/aws-sdk-go/private/protocol/restxml
+github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
 github.com/aws/aws-sdk-go/internal/sdkrand
 github.com/aws/aws-sdk-go/service/sts
+github.com/aws/aws-sdk-go/service/sts/stsiface
 github.com/aws/aws-sdk-go/aws/credentials/endpointcreds
 github.com/aws/aws-sdk-go/private/protocol/query
-github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
+github.com/aws/aws-sdk-go/private/protocol/json/jsonutil
 github.com/aws/aws-sdk-go/private/protocol/query/queryutil
 # github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d
 github.com/bgentry/go-netrc/netrc
@@ -68,25 +70,28 @@ github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
 github.com/golang/protobuf/protoc-gen-go/descriptor
-# github.com/google/go-cmp v0.2.0
+# github.com/google/go-cmp v0.3.0
 github.com/google/go-cmp/cmp
 github.com/google/go-cmp/cmp/internal/diff
+github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
+# github.com/google/go-querystring v1.0.0
+github.com/google/go-querystring/query
 # github.com/googleapis/gax-go/v2 v2.0.3
 github.com/googleapis/gax-go/v2
 # github.com/hashicorp/errwrap v1.0.0
 github.com/hashicorp/errwrap
 # github.com/hashicorp/go-cleanhttp v0.5.0
 github.com/hashicorp/go-cleanhttp
-# github.com/hashicorp/go-getter v1.3.0
+# github.com/hashicorp/go-getter v1.3.1-0.20190627223108-da0323b9545e
 github.com/hashicorp/go-getter
 github.com/hashicorp/go-getter/helper/url
 # github.com/hashicorp/go-hclog v0.0.0-20181001195459-61d530d6c27f
 github.com/hashicorp/go-hclog
 # github.com/hashicorp/go-multierror v1.0.0
 github.com/hashicorp/go-multierror
-# github.com/hashicorp/go-plugin v1.0.1-0.20190430211030-5692942914bb
+# github.com/hashicorp/go-plugin v1.0.1-0.20190610192547-a1bc61569a26
 github.com/hashicorp/go-plugin
 github.com/hashicorp/go-plugin/internal/plugin
 # github.com/hashicorp/go-safetemp v1.0.0
@@ -105,7 +110,7 @@ github.com/hashicorp/hcl/hcl/scanner
 github.com/hashicorp/hcl/hcl/strconv
 github.com/hashicorp/hcl/json/scanner
 github.com/hashicorp/hcl/json/token
-# github.com/hashicorp/hcl2 v0.0.0-20190515223218-4b22149b7cef
+# github.com/hashicorp/hcl2 v0.0.0-20190725010614-0c3fe388e450
 github.com/hashicorp/hcl2/hcl
 github.com/hashicorp/hcl2/hcl/hclsyntax
 github.com/hashicorp/hcl2/hcldec
@@ -123,7 +128,7 @@ github.com/hashicorp/hil/parser
 github.com/hashicorp/hil/scanner
 # github.com/hashicorp/logutils v1.0.0
 github.com/hashicorp/logutils
-# github.com/hashicorp/terraform v0.12.0
+# github.com/hashicorp/terraform v0.12.6
 github.com/hashicorp/terraform/plugin
 github.com/hashicorp/terraform/helper/schema
 github.com/hashicorp/terraform/terraform
@@ -204,6 +209,8 @@ github.com/posener/complete
 github.com/posener/complete/cmd/install
 github.com/posener/complete/cmd
 github.com/posener/complete/match
+# github.com/satori/go.uuid v1.2.0
+github.com/satori/go.uuid
 # github.com/spf13/afero v1.2.1
 github.com/spf13/afero
 github.com/spf13/afero/mem
@@ -215,7 +222,7 @@ github.com/ulikunitz/xz/internal/hash
 # github.com/vmihailenco/msgpack v4.0.1+incompatible
 github.com/vmihailenco/msgpack
 github.com/vmihailenco/msgpack/codes
-# github.com/zclconf/go-cty v0.0.0-20190516203816-4fecf87372ec
+# github.com/zclconf/go-cty v1.0.1-0.20190708163926-19588f92a98f
 github.com/zclconf/go-cty/cty
 github.com/zclconf/go-cty/cty/msgpack
 github.com/zclconf/go-cty/cty/convert
@@ -224,6 +231,8 @@ github.com/zclconf/go-cty/cty/gocty
 github.com/zclconf/go-cty/cty/set
 github.com/zclconf/go-cty/cty/function
 github.com/zclconf/go-cty/cty/function/stdlib
+# github.com/zclconf/go-cty-yaml v1.0.1
+github.com/zclconf/go-cty-yaml
 # go.opencensus.io v0.18.0
 go.opencensus.io/trace
 go.opencensus.io/plugin/ochttp
@@ -239,7 +248,7 @@ go.opencensus.io/trace/propagation
 go.opencensus.io
 go.opencensus.io/stats/internal
 go.opencensus.io/internal/tagencoding
-# golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734
+# golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
 golang.org/x/crypto/openpgp
 golang.org/x/crypto/bcrypt
 golang.org/x/crypto/openpgp/armor