git.immae.eu Git - github/fretlink/terraform-provider-statuscake.git/commitdiff
Initial transfer of provider code
author    Jake Champlin <jake.champlin.27@gmail.com>
          Tue, 6 Jun 2017 16:40:07 +0000 (12:40 -0400)
committer Jake Champlin <jake.champlin.27@gmail.com>
          Tue, 6 Jun 2017 16:40:07 +0000 (12:40 -0400)
576 files changed:
main.go
vendor/github.com/apparentlymart/go-cidr/LICENSE [new file with mode: 0644]
vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go [new file with mode: 0644]
vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/LICENSE.txt [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/NOTICE.txt [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/client/client.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/config.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/context.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/convert_types.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/doc.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/errors.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/logger.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/request.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/validation.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/session/doc.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/session/session.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/types.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/url.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/aws/version.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/api.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/doc.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/errors.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/service.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/sse.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/sts/api.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/sts/doc.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/sts/errors.go [new file with mode: 0644]
vendor/github.com/aws/aws-sdk-go/service/sts/service.go [new file with mode: 0644]
vendor/github.com/bgentry/go-netrc/LICENSE [new file with mode: 0644]
vendor/github.com/bgentry/go-netrc/netrc/netrc.go [new file with mode: 0644]
vendor/github.com/davecgh/go-spew/LICENSE [new file with mode: 0644]
vendor/github.com/davecgh/go-spew/spew/bypass.go [new file with mode: 0644]
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go [new file with mode: 0644]
vendor/github.com/davecgh/go-spew/spew/common.go [new file with mode: 0644]
vendor/github.com/davecgh/go-spew/spew/config.go [new file with mode: 0644]
vendor/github.com/davecgh/go-spew/spew/doc.go [new file with mode: 0644]
vendor/github.com/davecgh/go-spew/spew/dump.go [new file with mode: 0644]
vendor/github.com/davecgh/go-spew/spew/format.go [new file with mode: 0644]
vendor/github.com/davecgh/go-spew/spew/spew.go [new file with mode: 0644]
vendor/github.com/go-ini/ini/LICENSE [new file with mode: 0644]
vendor/github.com/go-ini/ini/Makefile [new file with mode: 0644]
vendor/github.com/go-ini/ini/README.md [new file with mode: 0644]
vendor/github.com/go-ini/ini/README_ZH.md [new file with mode: 0644]
vendor/github.com/go-ini/ini/error.go [new file with mode: 0644]
vendor/github.com/go-ini/ini/ini.go [new file with mode: 0644]
vendor/github.com/go-ini/ini/key.go [new file with mode: 0644]
vendor/github.com/go-ini/ini/parser.go [new file with mode: 0644]
vendor/github.com/go-ini/ini/section.go [new file with mode: 0644]
vendor/github.com/go-ini/ini/struct.go [new file with mode: 0644]
vendor/github.com/hashicorp/errwrap/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/errwrap/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/errwrap/errwrap.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/appveyor.yml [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/client.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/client_mode.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/copy_dir.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/decompress.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/decompress_bzip2.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/decompress_gzip.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/decompress_tbz2.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/decompress_testing.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/decompress_tgz.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/decompress_zip.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/detect.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/detect_bitbucket.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/detect_file.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/detect_github.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/detect_s3.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/folder_storage.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/get.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/get_file.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/get_file_unix.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/get_file_windows.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/get_git.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/get_hg.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/get_http.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/get_mock.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/get_s3.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/helper/url/url.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/netrc.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/source.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-getter/storage.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-multierror/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/go-multierror/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/go-multierror/append.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-multierror/flatten.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-multierror/format.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-multierror/multierror.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-multierror/prefix.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/client.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/discover.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/error.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/mux_broker.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/plugin.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/process.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/process_posix.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/process_windows.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/rpc_client.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/rpc_server.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/server.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/server_mux.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/stream.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-plugin/testing.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-uuid/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/go-uuid/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/go-uuid/uuid.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-version/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/go-version/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/go-version/constraint.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-version/version.go [new file with mode: 0644]
vendor/github.com/hashicorp/go-version/version_collection.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/Makefile [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/appveyor.yml [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/decoder.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/hcl.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/hcl/ast/ast.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/hcl/ast/walk.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/hcl/parser/error.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/hcl/parser/parser.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/hcl/token/position.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/hcl/token/token.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/json/parser/flatten.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/json/parser/parser.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/json/scanner/scanner.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/json/token/position.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/json/token/token.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/lex.go [new file with mode: 0644]
vendor/github.com/hashicorp/hcl/parse.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/hil/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/hil/appveyor.yml [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/arithmetic.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/arithmetic_op.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/ast.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/call.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/conditional.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/index.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/literal.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/output.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/scope.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/stack.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/type_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/unknown.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/variable_access.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/ast/variables_helper.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/builtins.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/check_identifier.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/check_types.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/convert.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/eval.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/eval_type.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/evaltype_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/parse.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/parser/binary_op.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/parser/error.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/parser/fuzz.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/parser/parser.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/scanner/peeker.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/scanner/scanner.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/scanner/token.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/scanner/tokentype_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/transform_fixed.go [new file with mode: 0644]
vendor/github.com/hashicorp/hil/walk.go [new file with mode: 0644]
vendor/github.com/hashicorp/logutils/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/logutils/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/logutils/level.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/append.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/config.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/config_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/config_terraform.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/config_tree.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/import_tree.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/interpolate.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/interpolate_walk.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/lang.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/loader.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/loader_hcl.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/merge.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/copy_dir.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/get.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/inode.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/inode_windows.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/module.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/testing.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/tree.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/tree_gob.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/provisioner_enums.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/raw_config.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/resource_mode.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/resource_mode_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/config/testing.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/dag/dag.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/dag/dot.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/dag/edge.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/dag/graph.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/dag/marshal.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/dag/set.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/dag/tarjan.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/dag/walk.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/flatmap/expand.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/flatmap/flatten.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/flatmap/map.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/acctest/random.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/config/decode.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/config/validator.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/experiment/id.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/logging/logging.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/logging/transport.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/resource/error.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/resource/id.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/resource/map.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/resource/resource.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/resource/state.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/resource/testing.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/resource/wait.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/backend.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/equal.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/resource.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/schema.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/serialize.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/set.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/testing.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/shadow/closer.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/shadow/value.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/helper/validation/validation.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/plugin/plugin.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/plugin/resource_provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/plugin/serve.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/plugin/ui_input.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/plugin/ui_output.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/context.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/context_components.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/context_import.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/debug.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/diff.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_apply.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_context.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_count.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_diff.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_error.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_filter.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_if.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_noop.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_output.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_resource.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_state.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_validate.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/eval_variable.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_builder.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_dot.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_walk.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/hook.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/hook_mock.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/hook_stop.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/instancetype.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/interpolate.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_output.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/path.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/plan.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/resource.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/resource_address.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/resource_provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/semantics.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/shadow.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/shadow_components.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/shadow_context.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/state.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/state_add.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/state_filter.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/state_v1.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/testing.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_config.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_diff.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_expand.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_output.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_provider.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_reference.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_root.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_state.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_targets.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_variable.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/ui_input.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/ui_output.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/util.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/variables.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/version.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/version_required.go [new file with mode: 0644]
vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go [new file with mode: 0644]
vendor/github.com/hashicorp/yamux/LICENSE [new file with mode: 0644]
vendor/github.com/hashicorp/yamux/README.md [new file with mode: 0644]
vendor/github.com/hashicorp/yamux/addr.go [new file with mode: 0644]
vendor/github.com/hashicorp/yamux/const.go [new file with mode: 0644]
vendor/github.com/hashicorp/yamux/mux.go [new file with mode: 0644]
vendor/github.com/hashicorp/yamux/session.go [new file with mode: 0644]
vendor/github.com/hashicorp/yamux/spec.md [new file with mode: 0644]
vendor/github.com/hashicorp/yamux/stream.go [new file with mode: 0644]
vendor/github.com/hashicorp/yamux/util.go [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/LICENSE [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/Makefile [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/README.md [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/api.go [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/astnodetype_string.go [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/functions.go [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/interpreter.go [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/lexer.go [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/parser.go [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/toktype_string.go [new file with mode: 0644]
vendor/github.com/jmespath/go-jmespath/util.go [new file with mode: 0644]
vendor/github.com/mitchellh/copystructure/LICENSE [new file with mode: 0644]
vendor/github.com/mitchellh/copystructure/README.md [new file with mode: 0644]
vendor/github.com/mitchellh/copystructure/copier_time.go [new file with mode: 0644]
vendor/github.com/mitchellh/copystructure/copystructure.go [new file with mode: 0644]
vendor/github.com/mitchellh/go-homedir/LICENSE [new file with mode: 0644]
vendor/github.com/mitchellh/go-homedir/README.md [new file with mode: 0644]
vendor/github.com/mitchellh/go-homedir/homedir.go [new file with mode: 0644]
vendor/github.com/mitchellh/hashstructure/LICENSE [new file with mode: 0644]
vendor/github.com/mitchellh/hashstructure/README.md [new file with mode: 0644]
vendor/github.com/mitchellh/hashstructure/hashstructure.go [new file with mode: 0644]
vendor/github.com/mitchellh/hashstructure/include.go [new file with mode: 0644]
vendor/github.com/mitchellh/mapstructure/LICENSE [new file with mode: 0644]
vendor/github.com/mitchellh/mapstructure/README.md [new file with mode: 0644]
vendor/github.com/mitchellh/mapstructure/decode_hooks.go [new file with mode: 0644]
vendor/github.com/mitchellh/mapstructure/error.go [new file with mode: 0644]
vendor/github.com/mitchellh/mapstructure/mapstructure.go [new file with mode: 0644]
vendor/github.com/mitchellh/reflectwalk/LICENSE [new file with mode: 0644]
vendor/github.com/mitchellh/reflectwalk/README.md [new file with mode: 0644]
vendor/github.com/mitchellh/reflectwalk/location.go [new file with mode: 0644]
vendor/github.com/mitchellh/reflectwalk/location_string.go [new file with mode: 0644]
vendor/github.com/mitchellh/reflectwalk/reflectwalk.go [new file with mode: 0644]
vendor/github.com/satori/go.uuid/LICENSE [new file with mode: 0644]
vendor/github.com/satori/go.uuid/README.md [new file with mode: 0644]
vendor/github.com/satori/go.uuid/uuid.go [new file with mode: 0644]
vendor/golang.org/x/crypto/LICENSE [new file with mode: 0644]
vendor/golang.org/x/crypto/PATENTS [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/const_amd64.h [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/const_amd64.s [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/cswap_amd64.s [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/curve25519.go [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/doc.go [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/freeze_amd64.s [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/mul_amd64.s [new file with mode: 0644]
vendor/golang.org/x/crypto/curve25519/square_amd64.s [new file with mode: 0644]
vendor/golang.org/x/crypto/ed25519/ed25519.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/buffer.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/certs.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/channel.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/cipher.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/client.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/client_auth.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/common.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/connection.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/doc.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/handshake.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/kex.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/keys.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/mac.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/messages.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/mux.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/server.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/session.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/tcpip.go [new file with mode: 0644]
vendor/golang.org/x/crypto/ssh/transport.go [new file with mode: 0644]
vendor/vendor.json

diff --git a/main.go b/main.go
index 76c19ea8da649409e06e3bcc612cad8ee7ef52ae..45a1eee9d12a6380e356633ffe78f1b8cc030cc8 100644 (file)
--- a/main.go
+++ b/main.go
@@ -1,6 +1,11 @@
 package main
 
+import (
+       "github.com/hashicorp/terraform/plugin"
+       "github.com/terraform-providers/terraform-provider-statuscake/statuscake"
+)
+
 func main() {
-       /*plugin.Serve(&plugin.ServeOpts{
-               ProviderFunc: opc.Provider})*/
+       plugin.Serve(&plugin.ServeOpts{
+               ProviderFunc: statuscake.Provider})
 }
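
The change above swaps the commented-out opc scaffolding for the standard Terraform plugin entry point. For orientation, here is a minimal sketch of what a helper/schema provider constructor like statuscake.Provider typically looks like; the schema key and environment variable are illustrative assumptions, not taken from this commit (the real definitions live in the statuscake package this commit adds, which is not shown in this excerpt):

package statuscake

import (
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

// Provider returns a terraform.ResourceProvider built from a
// *schema.Provider, the pattern used by helper/schema-based plugins.
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			// Hypothetical credential field for illustration only.
			"apikey": {
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("STATUSCAKE_APIKEY", nil),
			},
		},
		ResourcesMap: map[string]*schema.Resource{},
	}
}

plugin.Serve then hosts this provider over the RPC channel provided by the vendored github.com/hashicorp/go-plugin package.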
diff --git a/vendor/github.com/apparentlymart/go-cidr/LICENSE b/vendor/github.com/apparentlymart/go-cidr/LICENSE
new file mode 100644 (file)
index 0000000..2125378
--- /dev/null
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Martin Atkins
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go
new file mode 100644 (file)
index 0000000..a31cdec
--- /dev/null
@@ -0,0 +1,112 @@
+// Package cidr is a collection of assorted utilities for computing
+// network and host addresses within network ranges.
+//
+// It expects a CIDR-type address structure where addresses are divided into
+// some number of prefix bits representing the network and then the remaining
+// suffix bits represent the host.
+//
+// For example, it can help to calculate addresses for sub-networks of a
+// parent network, or to calculate host addresses within a particular prefix.
+//
+// At present this package is prioritizing simplicity of implementation and
+// de-prioritizing speed and memory usage. Thus caution is advised before
+// using this package in performance-critical applications or hot codepaths.
+// Patches to improve the speed and memory usage may be accepted as long as
+// they do not result in a significant increase in code complexity.
+package cidr
+
+import (
+       "fmt"
+       "math/big"
+       "net"
+)
+
+// Subnet takes a parent CIDR range and creates a subnet within it
+// with the given number of additional prefix bits and the given
+// network number.
+//
+// For example, 10.3.0.0/16, extended by 8 bits, with a network number
+// of 5, becomes 10.3.5.0/24 .
+func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) {
+       ip := base.IP
+       mask := base.Mask
+
+       parentLen, addrLen := mask.Size()
+       newPrefixLen := parentLen + newBits
+
+       if newPrefixLen > addrLen {
+               return nil, fmt.Errorf("insufficient address space to extend prefix of %d by %d", parentLen, newBits)
+       }
+
+       maxNetNum := uint64(1<<uint64(newBits)) - 1
+       if uint64(num) > maxNetNum {
+               return nil, fmt.Errorf("prefix extension of %d does not accommodate a subnet numbered %d", newBits, num)
+       }
+
+       return &net.IPNet{
+               IP:   insertNumIntoIP(ip, num, newPrefixLen),
+               Mask: net.CIDRMask(newPrefixLen, addrLen),
+       }, nil
+}
+
+// Host takes a parent CIDR range and turns it into a host IP address with
+// the given host number.
+//
+// For example, 10.3.0.0/16 with a host number of 2 gives 10.3.0.2.
+func Host(base *net.IPNet, num int) (net.IP, error) {
+       ip := base.IP
+       mask := base.Mask
+
+       parentLen, addrLen := mask.Size()
+       hostLen := addrLen - parentLen
+
+       maxHostNum := uint64(1<<uint64(hostLen)) - 1
+
+       numUint64 := uint64(num)
+       if num < 0 {
+               numUint64 = uint64(-num) - 1
+               num = int(maxHostNum - numUint64)
+       }
+
+       if numUint64 > maxHostNum {
+               return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num)
+       }
+
+       return insertNumIntoIP(ip, num, addrLen), nil
+}
+
+// AddressRange returns the first and last addresses in the given CIDR range.
+func AddressRange(network *net.IPNet) (net.IP, net.IP) {
+       // the first IP is easy
+       firstIP := network.IP
+
+       // the last IP is the network address OR NOT the mask address
+       prefixLen, bits := network.Mask.Size()
+       if prefixLen == bits {
+               // Easy!
+               // But make sure that our two slices are distinct, since they
+               // would be in all other cases.
+               lastIP := make([]byte, len(firstIP))
+               copy(lastIP, firstIP)
+               return firstIP, lastIP
+       }
+
+       firstIPInt, bits := ipToInt(firstIP)
+       hostLen := uint(bits) - uint(prefixLen)
+       lastIPInt := big.NewInt(1)
+       lastIPInt.Lsh(lastIPInt, hostLen)
+       lastIPInt.Sub(lastIPInt, big.NewInt(1))
+       lastIPInt.Or(lastIPInt, firstIPInt)
+
+       return firstIP, intToIP(lastIPInt, bits)
+}
+
+// AddressCount returns the number of distinct host addresses within the given
+// CIDR range.
+//
+// Since the result is a uint64, this function returns meaningful information
+// only for IPv4 ranges and IPv6 ranges with a prefix size of at least 65.
+func AddressCount(network *net.IPNet) uint64 {
+       prefixLen, bits := network.Mask.Size()
+       return 1 << (uint64(bits) - uint64(prefixLen))
+}
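
A short usage sketch for the package above (a hypothetical snippet, not part of the commit), tying Subnet, Host, AddressRange, and AddressCount together:

package main

import (
	"fmt"
	"net"

	"github.com/apparentlymart/go-cidr/cidr"
)

func main() {
	_, base, _ := net.ParseCIDR("10.3.0.0/16")

	// Extend the /16 by 8 bits, network number 5: 10.3.5.0/24.
	sub, err := cidr.Subnet(base, 8, 5)
	if err != nil {
		panic(err)
	}

	// Host number 2 within that subnet: 10.3.5.2.
	host, err := cidr.Host(sub, 2)
	if err != nil {
		panic(err)
	}

	first, last := cidr.AddressRange(sub)
	fmt.Println(sub, host, first, last, cidr.AddressCount(sub))
	// Prints: 10.3.5.0/24 10.3.5.2 10.3.5.0 10.3.5.255 256
}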
diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go
new file mode 100644 (file)
index 0000000..861a5f6
--- /dev/null
@@ -0,0 +1,38 @@
+package cidr
+
+import (
+       "fmt"
+       "math/big"
+       "net"
+)
+
+func ipToInt(ip net.IP) (*big.Int, int) {
+       val := &big.Int{}
+       val.SetBytes([]byte(ip))
+       if len(ip) == net.IPv4len {
+               return val, 32
+       } else if len(ip) == net.IPv6len {
+               return val, 128
+       } else {
+               panic(fmt.Errorf("Unsupported address length %d", len(ip)))
+       }
+}
+
+func intToIP(ipInt *big.Int, bits int) net.IP {
+       ipBytes := ipInt.Bytes()
+       ret := make([]byte, bits/8)
+       // Pack our IP bytes into the end of the return array,
+       // since big.Int.Bytes() removes front zero padding.
+       for i := 1; i <= len(ipBytes); i++ {
+               ret[len(ret)-i] = ipBytes[len(ipBytes)-i]
+       }
+       return net.IP(ret)
+}
+
+func insertNumIntoIP(ip net.IP, num int, prefixLen int) net.IP {
+       ipInt, totalBits := ipToInt(ip)
+       bigNum := big.NewInt(int64(num))
+       bigNum.Lsh(bigNum, uint(totalBits-prefixLen))
+       ipInt.Or(ipInt, bigNum)
+       return intToIP(ipInt, totalBits)
+}
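
The three helpers above are unexported, so the round-trip sketch below is written as an in-package test (a hypothetical file, not part of the commit):

package cidr

import (
	"net"
	"testing"
)

// TestWrangling (illustrative only) checks that an IPv4 address
// survives the ipToInt/intToIP round trip and that insertNumIntoIP
// ORs a network number into the bits just below the given prefix.
func TestWrangling(t *testing.T) {
	ip := net.ParseIP("10.3.0.0").To4()

	val, bits := ipToInt(ip)
	if bits != 32 {
		t.Fatalf("expected 32 bits for IPv4, got %d", bits)
	}
	if got := intToIP(val, bits); !got.Equal(ip) {
		t.Fatalf("round trip changed address: %v", got)
	}

	// num=5 shifted left by (32-24) host bits, then ORed in: 10.3.5.0.
	if got := insertNumIntoIP(ip, 5, 24); got.String() != "10.3.5.0" {
		t.Fatalf("unexpected address: %v", got)
	}
}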
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644 (file)
index 0000000..d645695
--- /dev/null
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644 (file)
index 0000000..5f14d11
--- /dev/null
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644 (file)
index 0000000..56fdfc2
--- /dev/null
@@ -0,0 +1,145 @@
+// Package awserr defines the API error interface and accessors used by the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+//     output, err := s3manager.Upload(svc, input, opts)
+//     if err != nil {
+//         if awsErr, ok := err.(awserr.Error); ok {
+//             // Get error details
+//             log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+//             // Prints out full error message, including original error if there was one.
+//             log.Println("Error:", awsErr.Error())
+//
+//             // Get original error
+//             if origErr := awsErr.OrigErr(); origErr != nil {
+//                 // operate on original error.
+//             }
+//         } else {
+//             fmt.Println(err.Error())
+//         }
+//     }
+//
+type Error interface {
+       // Satisfy the generic error interface.
+       error
+
+       // Returns the short phrase depicting the classification of the error.
+       Code() string
+
+       // Returns the error details message.
+       Message() string
+
+       // Returns the original error if one was set.  Nil is returned if not set.
+       OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+       // Satisfy the generic error interface.
+       error
+
+       // Returns the short phrase depicting the classification of the error.
+       Code() string
+
+       // Returns the error details message.
+       Message() string
+
+       // Returns the original errors if any were set.  Nil is returned if not set.
+       OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+       // Satisfy the base Error interface.
+       Error
+
+       // Returns the original error if one was set.  Nil is returned if not set.
+       OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr is not nil, it is wrapped as the returned Error's single
+// original error.
+func New(code, message string, origErr error) Error {
+       var errs []error
+       if origErr != nil {
+               errs = append(errs, origErr)
+       }
+       return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors wrapping the given collection of
+// errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+       return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// before reaching the service, for example because of a connection error.
+//
+// Example:
+//
+//     output, err := s3manager.Upload(svc, input, opts)
+//     if err != nil {
+//         if reqerr, ok := err.(RequestFailure); ok {
+//             log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+//         } else {
+//             log.Println("Error:", err.Error())
+//         }
+//     }
+//
+// Combined with awserr.Error:
+//
+//    output, err := s3manager.Upload(svc, input, opts)
+//    if err != nil {
+//        if awsErr, ok := err.(awserr.Error); ok {
+//            // Generic AWS Error with Code, Message, and original error (if any)
+//            fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+//            if reqErr, ok := err.(awserr.RequestFailure); ok {
+//                // A service error occurred
+//                fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+//            }
+//        } else {
+//            fmt.Println(err.Error())
+//        }
+//    }
+//
+type RequestFailure interface {
+       Error
+
+       // The status code of the HTTP response.
+       StatusCode() int
+
+       // The request ID returned by the service for a request failure. This will
+       // be empty if no request ID is available such as the request failed due
+       // to a connection error.
+       RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+       return newRequestError(err, statusCode, reqID)
+}
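
A minimal sketch of constructing and unwrapping these errors with the
functions above (the code, status, and request ID values are invented for
illustration):

package main

import (
        "errors"
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
        origErr := errors.New("connection reset")
        err := awserr.New("RequestError", "send request failed", origErr)

        // Attach HTTP metadata the way service response handlers do.
        reqErr := awserr.NewRequestFailure(err, 500, "REQ-ID-EXAMPLE")

        fmt.Println(reqErr.Code(), reqErr.Message())
        fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) // 500 REQ-ID-EXAMPLE
        fmt.Println(reqErr.OrigErr())                        // connection reset
}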
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644 (file)
index 0000000..0202a00
--- /dev/null
@@ -0,0 +1,194 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns a string of the formatted error code.
+//
+// Both extra and origErr are optional.  When provided, their lines are
+// appended to the message; otherwise they are omitted.
+func SprintError(code, message, extra string, origErr error) string {
+       msg := fmt.Sprintf("%s: %s", code, message)
+       if extra != "" {
+               msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+       }
+       if origErr != nil {
+               msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+       }
+       return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error
+// interface, and for any error which does not fit a more specific wrapper type.
+type baseError struct {
+       // Classification of error
+       code string
+
+       // Detailed information about error
+       message string
+
+       // Optional original errors this error is based on. Allows building
+       // chained errors.
+       errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase classifying the error being
+// created.
+//
+// message is a free-form string containing detailed information about the
+// error.
+//
+// origErrs contains the error objects to be nested under the new error
+// returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+       b := &baseError{
+               code:    code,
+               message: message,
+               errs:    origErrs,
+       }
+
+       return b
+}
+
+// Error returns the string representation of the error.
+//
+// See SprintError for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+       size := len(b.errs)
+       if size > 0 {
+               return SprintError(b.code, b.message, "", errorList(b.errs))
+       }
+
+       return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+       return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+       return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+       return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+       switch len(b.errs) {
+       case 0:
+               return nil
+       case 1:
+               return b.errs[0]
+       default:
+               if err, ok := b.errs[0].(Error); ok {
+                       return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+               }
+               return NewBatchError("BatchedErrors",
+                       "multiple errors occurred", b.errs)
+       }
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if none were set.
+func (b baseError) OrigErrs() []error {
+       return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+       awsError
+       statusCode int
+       requestID  string
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all errors involving service requests, even those
+// where the request failed without a service response but has an HTTP status
+// code that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+       return &requestError{
+               awsError:   err,
+               statusCode: statusCode,
+               requestID:  requestID,
+       }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+       extra := fmt.Sprintf("status code: %d, request id: %s",
+               r.statusCode, r.requestID)
+       return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+       return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+       return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+       return r.requestID
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if none were set.
+func (r requestError) OrigErrs() []error {
+       if b, ok := r.awsError.(BatchedErrors); ok {
+               return b.OrigErrs()
+       }
+       return []error{r.OrigErr()}
+}
+
+// An error list that satisfies the error interface.
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+       msg := ""
+       // An empty list yields an empty string.
+       if size := len(e); size > 0 {
+               for i := 0; i < size; i++ {
+                       msg += e[i].Error()
+                       // Join the errors with newlines, without appending a
+                       // trailing newline after the final error.
+                       if i+1 < size {
+                               msg += "\n"
+                       }
+               }
+       }
+       return msg
+}
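
A short sketch of the format SprintError produces; the optional extra and
origErr lines appear only when provided:

package main

import (
        "errors"
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
        fmt.Println(awserr.SprintError("Throttling", "rate exceeded", "", nil))
        // Throttling: rate exceeded

        fmt.Println(awserr.SprintError("Throttling", "rate exceeded",
                "status code: 400, request id: example-id", errors.New("boom")))
        // Throttling: rate exceeded
        //         status code: 400, request id: example-id
        // caused by: boom
}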
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644 (file)
index 0000000..1a3d106
--- /dev/null
@@ -0,0 +1,108 @@
+package awsutil
+
+import (
+       "io"
+       "reflect"
+       "time"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different types, but will only copy fields
+// which exist in both structs and are assignable; all other fields are
+// ignored.
+func Copy(dst, src interface{}) {
+       dstval := reflect.ValueOf(dst)
+       if !dstval.IsValid() {
+               panic("Copy dst cannot be nil")
+       }
+
+       rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+       dsti := reflect.New(reflect.TypeOf(src).Elem())
+       dst = dsti.Interface()
+       rcopy(dsti, reflect.ValueOf(src), true)
+       return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+       if !src.IsValid() {
+               return
+       }
+
+       switch src.Kind() {
+       case reflect.Ptr:
+               if _, ok := src.Interface().(io.Reader); ok {
+                       if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+                               dst.Elem().Set(src)
+                       } else if dst.CanSet() {
+                               dst.Set(src)
+                       }
+               } else {
+                       e := src.Type().Elem()
+                       if dst.CanSet() && !src.IsNil() {
+                               if _, ok := src.Interface().(*time.Time); !ok {
+                                       dst.Set(reflect.New(e))
+                               } else {
+                                       tempValue := reflect.New(e)
+                                       tempValue.Elem().Set(src.Elem())
+                                       // Sets time.Time's unexported values
+                                       dst.Set(tempValue)
+                               }
+                       }
+                       if src.Elem().IsValid() {
+                               // Keep the current root state since the depth hasn't changed
+                               rcopy(dst.Elem(), src.Elem(), root)
+                       }
+               }
+       case reflect.Struct:
+               t := dst.Type()
+               for i := 0; i < t.NumField(); i++ {
+                       name := t.Field(i).Name
+                       srcVal := src.FieldByName(name)
+                       dstVal := dst.FieldByName(name)
+                       if srcVal.IsValid() && dstVal.CanSet() {
+                               rcopy(dstVal, srcVal, false)
+                       }
+               }
+       case reflect.Slice:
+               if src.IsNil() {
+                       break
+               }
+
+               s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+               dst.Set(s)
+               for i := 0; i < src.Len(); i++ {
+                       rcopy(dst.Index(i), src.Index(i), false)
+               }
+       case reflect.Map:
+               if src.IsNil() {
+                       break
+               }
+
+               s := reflect.MakeMap(src.Type())
+               dst.Set(s)
+               for _, k := range src.MapKeys() {
+                       v := src.MapIndex(k)
+                       v2 := reflect.New(v.Type()).Elem()
+                       rcopy(v2, v, false)
+                       dst.SetMapIndex(k, v2)
+               }
+       default:
+               // Assign the value if possible. If it's not assignable, the value would
+               // need to be converted and the impact of that may be unexpected, or is
+               // not compatible with the dst type.
+               if src.Type().AssignableTo(dst.Type()) {
+                       dst.Set(src)
+               }
+       }
+}
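
A minimal sketch of Copy's deep-copy behavior; the Input type is invented for
illustration:

package main

import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
)

// Input is a hypothetical structure standing in for an SDK request type.
type Input struct {
        Name *string
        Tags map[string]string
}

func main() {
        name := "original"
        src := &Input{Name: &name, Tags: map[string]string{"env": "prod"}}

        var dst Input
        awsutil.Copy(&dst, src)

        // The copy is deep: mutating src leaves dst untouched.
        *src.Name = "changed"
        src.Tags["env"] = "dev"
        fmt.Println(*dst.Name, dst.Tags["env"]) // original prod
}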
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644 (file)
index 0000000..59fa4a5
--- /dev/null
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+       "reflect"
+)
+
+// DeepEqual returns whether the two values are deeply equal, like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+       ra := reflect.Indirect(reflect.ValueOf(a))
+       rb := reflect.Indirect(reflect.ValueOf(b))
+
+       if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+               // If both elements are nil and of the same type, they are equal;
+               // if they are of different types, they are not.
+               return reflect.TypeOf(a) == reflect.TypeOf(b)
+       } else if raValid != rbValid {
+               // Both values must be valid to be equal
+               return false
+       }
+
+       return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
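
For example, the initial indirection means a pointer and a plain value can
still compare equal (a small sketch):

package main

import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
        s := "hello"
        fmt.Println(awsutil.DeepEqual(&s, "hello")) // true: &s is dereferenced first

        var a, b *string
        fmt.Println(awsutil.DeepEqual(a, b)) // true: both nil, same type
}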
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644 (file)
index 0000000..11c52c3
--- /dev/null
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+       "reflect"
+       "regexp"
+       "strconv"
+       "strings"
+
+       "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+       pathparts := strings.Split(path, "||")
+       if len(pathparts) > 1 {
+               for _, pathpart := range pathparts {
+                       vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+                       if len(vals) > 0 {
+                               return vals
+                       }
+               }
+               return nil
+       }
+
+       values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+       components := strings.Split(path, ".")
+       for len(values) > 0 && len(components) > 0 {
+               var index *int64
+               var indexStar bool
+               c := strings.TrimSpace(components[0])
+               if c == "" { // no actual component, illegal syntax
+                       return nil
+               } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+                       // TODO normalize case for user
+                       return nil // don't support unexported fields
+               }
+
+               // parse this component
+               if m := indexRe.FindStringSubmatch(c); m != nil {
+                       c = m[1]
+                       if m[2] == "" {
+                               index = nil
+                               indexStar = true
+                       } else {
+                               i, _ := strconv.ParseInt(m[2], 10, 32)
+                               index = &i
+                               indexStar = false
+                       }
+               }
+
+               nextvals := []reflect.Value{}
+               for _, value := range values {
+                       // pull component name out of struct member
+                       if value.Kind() != reflect.Struct {
+                               continue
+                       }
+
+                       if c == "*" { // pull all members
+                               for i := 0; i < value.NumField(); i++ {
+                                       if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+                                               nextvals = append(nextvals, f)
+                                       }
+                               }
+                               continue
+                       }
+
+                       value = value.FieldByNameFunc(func(name string) bool {
+                               if c == name {
+                                       return true
+                               } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+                                       return true
+                               }
+                               return false
+                       })
+
+                       if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+                               if !value.IsNil() {
+                                       value.Set(reflect.Zero(value.Type()))
+                               }
+                               return []reflect.Value{value}
+                       }
+
+                       if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+                               // TODO if the value is the terminus it should not be created
+                               // if the value to be set to its position is nil.
+                               value.Set(reflect.New(value.Type().Elem()))
+                               value = value.Elem()
+                       } else {
+                               value = reflect.Indirect(value)
+                       }
+
+                       if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+                               if !createPath && value.IsNil() {
+                                       value = reflect.ValueOf(nil)
+                               }
+                       }
+
+                       if value.IsValid() {
+                               nextvals = append(nextvals, value)
+                       }
+               }
+               values = nextvals
+
+               if indexStar || index != nil {
+                       nextvals = []reflect.Value{}
+                       for _, valItem := range values {
+                               value := reflect.Indirect(valItem)
+                               if value.Kind() != reflect.Slice {
+                                       continue
+                               }
+
+                               if indexStar { // grab all indices
+                                       for i := 0; i < value.Len(); i++ {
+                                               idx := reflect.Indirect(value.Index(i))
+                                               if idx.IsValid() {
+                                                       nextvals = append(nextvals, idx)
+                                               }
+                                       }
+                                       continue
+                               }
+
+                               // pull out index
+                               i := int(*index)
+                               if i >= value.Len() { // check out of bounds
+                                       if createPath {
+                                               // TODO resize slice
+                                       } else {
+                                               continue
+                                       }
+                               } else if i < 0 { // support negative indexing
+                                       i = value.Len() + i
+                               }
+                               value = reflect.Indirect(value.Index(i))
+
+                               if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+                                       if !createPath && value.IsNil() {
+                                               value = reflect.ValueOf(nil)
+                                       }
+                               }
+
+                               if value.IsValid() {
+                                       nextvals = append(nextvals, value)
+                               }
+                       }
+                       values = nextvals
+               }
+
+               components = components[1:]
+       }
+       return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+       result, err := jmespath.Search(path, i)
+       if err != nil {
+               return nil, err
+       }
+
+       v := reflect.ValueOf(result)
+       if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+               return nil, nil
+       }
+       if s, ok := result.([]interface{}); ok {
+               return s, err
+       }
+       if v.Kind() == reflect.Map && v.Len() == 0 {
+               return nil, nil
+       }
+       if v.Kind() == reflect.Slice {
+               out := make([]interface{}, v.Len())
+               for i := 0; i < v.Len(); i++ {
+                       out[i] = v.Index(i).Interface()
+               }
+               return out, nil
+       }
+
+       return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+       if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
+               for _, rval := range rvals {
+                       if rval.Kind() == reflect.Ptr && rval.IsNil() {
+                               continue
+                       }
+                       setValue(rval, v)
+               }
+       }
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+       if dstVal.Kind() == reflect.Ptr {
+               dstVal = reflect.Indirect(dstVal)
+       }
+       srcVal := reflect.ValueOf(src)
+
+       if !srcVal.IsValid() { // src is literal nil
+               // Zero the destination; for pointer-typed values this nils
+               // the pointer.
+               dstVal.Set(reflect.Zero(dstVal.Type()))
+
+       } else if srcVal.Kind() == reflect.Ptr {
+               if srcVal.IsNil() {
+                       srcVal = reflect.Zero(dstVal.Type())
+               } else {
+                       srcVal = reflect.ValueOf(src).Elem()
+               }
+               dstVal.Set(srcVal)
+       } else {
+               dstVal.Set(srcVal)
+       }
+}
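
A sketch of the getter/setter pair on an invented nested structure (Outer and
Inner are hypothetical names); this assumes the jmespath-backed lookup in
ValuesAtPath traverses plain structs, as the SDK relies on:

package main

import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
)

// Hypothetical types, for illustration only.
type Inner struct{ Name *string }
type Outer struct{ Inner *Inner }

func main() {
        o := &Outer{}

        // createPath allocates o.Inner and o.Inner.Name before assigning.
        awsutil.SetValueAtPath(o, "Inner.Name", "hello")
        fmt.Println(*o.Inner.Name) // hello

        if vals, err := awsutil.ValuesAtPath(o, "Inner.Name"); err == nil {
                fmt.Println(len(vals)) // 1
        }
}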
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100644 (file)
index 0000000..710eb43
--- /dev/null
@@ -0,0 +1,113 @@
+package awsutil
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "reflect"
+       "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+       var buf bytes.Buffer
+       prettify(reflect.ValueOf(i), 0, &buf)
+       return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+       for v.Kind() == reflect.Ptr {
+               v = v.Elem()
+       }
+
+       switch v.Kind() {
+       case reflect.Struct:
+               strtype := v.Type().String()
+               if strtype == "time.Time" {
+                       fmt.Fprintf(buf, "%s", v.Interface())
+                       break
+               } else if strings.HasPrefix(strtype, "io.") {
+                       buf.WriteString("<buffer>")
+                       break
+               }
+
+               buf.WriteString("{\n")
+
+               names := []string{}
+               for i := 0; i < v.Type().NumField(); i++ {
+                       name := v.Type().Field(i).Name
+                       f := v.Field(i)
+                       if name[0:1] == strings.ToLower(name[0:1]) {
+                               continue // ignore unexported fields
+                       }
+                       if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+                               continue // ignore unset fields
+                       }
+                       names = append(names, name)
+               }
+
+               for i, n := range names {
+                       val := v.FieldByName(n)
+                       buf.WriteString(strings.Repeat(" ", indent+2))
+                       buf.WriteString(n + ": ")
+                       prettify(val, indent+2, buf)
+
+                       if i < len(names)-1 {
+                               buf.WriteString(",\n")
+                       }
+               }
+
+               buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+       case reflect.Slice:
+               strtype := v.Type().String()
+               if strtype == "[]uint8" {
+                       fmt.Fprintf(buf, "<binary> len %d", v.Len())
+                       break
+               }
+
+               nl, id, id2 := "", "", ""
+               if v.Len() > 3 {
+                       nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+               }
+               buf.WriteString("[" + nl)
+               for i := 0; i < v.Len(); i++ {
+                       buf.WriteString(id2)
+                       prettify(v.Index(i), indent+2, buf)
+
+                       if i < v.Len()-1 {
+                               buf.WriteString("," + nl)
+                       }
+               }
+
+               buf.WriteString(nl + id + "]")
+       case reflect.Map:
+               buf.WriteString("{\n")
+
+               for i, k := range v.MapKeys() {
+                       buf.WriteString(strings.Repeat(" ", indent+2))
+                       buf.WriteString(k.String() + ": ")
+                       prettify(v.MapIndex(k), indent+2, buf)
+
+                       if i < v.Len()-1 {
+                               buf.WriteString(",\n")
+                       }
+               }
+
+               buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+       default:
+               if !v.IsValid() {
+                       fmt.Fprint(buf, "<invalid value>")
+                       return
+               }
+               format := "%v"
+               switch v.Interface().(type) {
+               case string:
+                       format = "%q"
+               case io.ReadSeeker, io.Reader:
+                       format = "buffer(%p)"
+               }
+               fmt.Fprintf(buf, format, v.Interface())
+       }
+}
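
A quick sketch of the output shape, using an invented struct:

package main

import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
)

// User is a hypothetical structure, for illustration only.
type User struct {
        Name *string
        Tags map[string]string
}

func main() {
        name := "jake"
        fmt.Println(awsutil.Prettify(&User{
                Name: &name,
                Tags: map[string]string{"team": "infra"},
        }))
        // {
        //   Name: "jake",
        //   Tags: {
        //     team: "infra"
        //   }
        // }
}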
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644 (file)
index 0000000..b6432f1
--- /dev/null
@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+       "bytes"
+       "fmt"
+       "reflect"
+       "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+       var buf bytes.Buffer
+       stringValue(reflect.ValueOf(i), 0, &buf)
+       return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+       for v.Kind() == reflect.Ptr {
+               v = v.Elem()
+       }
+
+       switch v.Kind() {
+       case reflect.Struct:
+               buf.WriteString("{\n")
+
+               names := []string{}
+               for i := 0; i < v.Type().NumField(); i++ {
+                       name := v.Type().Field(i).Name
+                       f := v.Field(i)
+                       if name[0:1] == strings.ToLower(name[0:1]) {
+                               continue // ignore unexported fields
+                       }
+                       if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
+                               continue // ignore unset fields
+                       }
+                       names = append(names, name)
+               }
+
+               for i, n := range names {
+                       val := v.FieldByName(n)
+                       buf.WriteString(strings.Repeat(" ", indent+2))
+                       buf.WriteString(n + ": ")
+                       stringValue(val, indent+2, buf)
+
+                       if i < len(names)-1 {
+                               buf.WriteString(",\n")
+                       }
+               }
+
+               buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+       case reflect.Slice:
+               nl, id, id2 := "", "", ""
+               if v.Len() > 3 {
+                       nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+               }
+               buf.WriteString("[" + nl)
+               for i := 0; i < v.Len(); i++ {
+                       buf.WriteString(id2)
+                       stringValue(v.Index(i), indent+2, buf)
+
+                       if i < v.Len()-1 {
+                               buf.WriteString("," + nl)
+                       }
+               }
+
+               buf.WriteString(nl + id + "]")
+       case reflect.Map:
+               buf.WriteString("{\n")
+
+               for i, k := range v.MapKeys() {
+                       buf.WriteString(strings.Repeat(" ", indent+2))
+                       buf.WriteString(k.String() + ": ")
+                       stringValue(v.MapIndex(k), indent+2, buf)
+
+                       if i < v.Len()-1 {
+                               buf.WriteString(",\n")
+                       }
+               }
+
+               buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+       default:
+               format := "%v"
+               switch v.Interface().(type) {
+               case string:
+                       format = "%q"
+               }
+               fmt.Fprintf(buf, format, v.Interface())
+       }
+}
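
StringValue mirrors Prettify but without the special cases for time.Time, io
types, and []uint8 buffers. A tiny sketch with an invented type:

package main

import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
        type pair struct{ Key, Value *string } // hypothetical type
        k, v := "color", "blue"
        fmt.Println(awsutil.StringValue(pair{Key: &k, Value: &v}))
        // {
        //   Key: "color",
        //   Value: "blue"
        // }
}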
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644 (file)
index 0000000..48b0fbd
--- /dev/null
@@ -0,0 +1,149 @@
+package client
+
+import (
+       "fmt"
+       "net/http/httputil"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/client/metadata"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+       Config        *aws.Config
+       Handlers      request.Handlers
+       Endpoint      string
+       SigningRegion string
+       SigningName   string
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+       ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// ConfigNoResolveEndpointProvider is the same as ConfigProvider, except it
+// will not resolve the endpoint automatically. The service client's endpoint
+// must be provided via the aws.Config.Endpoint field.
+type ConfigNoResolveEndpointProvider interface {
+       ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+       request.Retryer
+       metadata.ClientInfo
+
+       Config   aws.Config
+       Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+       svc := &Client{
+               Config:     cfg,
+               ClientInfo: info,
+               Handlers:   handlers.Copy(),
+       }
+
+       switch retryer, ok := cfg.Retryer.(request.Retryer); {
+       case ok:
+               svc.Retryer = retryer
+       case cfg.Retryer != nil && cfg.Logger != nil:
+               s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+               cfg.Logger.Log(s)
+               fallthrough
+       default:
+               maxRetries := aws.IntValue(cfg.MaxRetries)
+               if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+                       maxRetries = 3
+               }
+               svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+       }
+
+       svc.AddDebugHandlers()
+
+       for _, option := range options {
+               option(svc)
+       }
+
+       return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+       return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+       if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+               return
+       }
+
+       c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
+       c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
+}
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
+---[ REQUEST DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logRequest(r *request.Request) {
+       logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+       dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+       if err != nil {
+               r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
+               r.Error = awserr.New(request.ErrCodeRead, "an error occurred during request body reading", err)
+               return
+       }
+
+       if logBody {
+               // Reset the request body because DumpRequestOut re-wraps
+               // r.HTTPRequest's Body as a NoOpCloser, which would otherwise not
+               // be reset after being read by the HTTP client reader.
+               r.ResetBody()
+       }
+
+       r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *request.Request) {
+       var msg = "no response data"
+       if r.HTTPResponse != nil {
+               logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+               dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody)
+               if err != nil {
+                       r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
+                       r.Error = awserr.New(request.ErrCodeRead, "an error occurred during response body reading", err)
+                       return
+               }
+
+               msg = string(dumpedBody)
+       } else if r.Error != nil {
+               msg = r.Error.Error()
+       }
+       r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
+}
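
The debug handlers above only fire when the configured log level is at least
LogDebug. A sketch of opting in, assuming the SDK's standard session package:

package main

import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
        // LogDebugWithHTTPBody additionally dumps request and response bodies
        // through the LogRequest/LogResponse handlers installed above.
        sess := session.Must(session.NewSession(&aws.Config{
                LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
        }))
        _ = sess // pass to service client constructors
}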
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644 (file)
index 0000000..1313478
--- /dev/null
@@ -0,0 +1,96 @@
+package client
+
+import (
+       "math/rand"
+       "sync"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and overrides the specific methods. For example, to override only
+// the MaxRetries method:
+//
+//    type retryer struct {
+//        client.DefaultRetryer
+//    }
+//
+//    // This implementation always has 100 max retries
+//    func (d retryer) MaxRetries() int { return 100 }
+type DefaultRetryer struct {
+       NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will use for
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+       return d.NumMaxRetries
+}
+
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+       // Set the upper limit of delay in retrying at ~five minutes
+       minTime := 30
+       throttle := d.shouldThrottle(r)
+       if throttle {
+               minTime = 500
+       }
+
+       retryCount := r.RetryCount
+       if retryCount > 13 {
+               retryCount = 13
+       } else if throttle && retryCount > 8 {
+               retryCount = 8
+       }
+
+       delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
+       return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+       // If one of the other handlers already set the retry state
+       // we don't want to override it based on the service's state
+       if r.Retryable != nil {
+               return *r.Retryable
+       }
+
+       if r.HTTPResponse.StatusCode >= 500 {
+               return true
+       }
+       return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// shouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+       if r.HTTPResponse.StatusCode == 502 ||
+               r.HTTPResponse.StatusCode == 503 ||
+               r.HTTPResponse.StatusCode == 504 {
+               return true
+       }
+       return r.IsErrorThrottle()
+}
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+       lk  sync.Mutex
+       src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+       r.lk.Lock()
+       n = r.src.Int63()
+       r.lk.Unlock()
+       return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+       r.lk.Lock()
+       r.src.Seed(seed)
+       r.lk.Unlock()
+}
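
Following the doc comment above, a minimal sketch of composing DefaultRetryer
to override a single method and wiring it in with request.WithRetryer:

package main

import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/client"
        "github.com/aws/aws-sdk-go/aws/request"
)

// retryer keeps DefaultRetryer's backoff and throttling behavior but always
// allows up to 100 retries.
type retryer struct {
        client.DefaultRetryer
}

func (d retryer) MaxRetries() int { return 100 }

func main() {
        cfg := request.WithRetryer(aws.NewConfig(), retryer{})
        _ = cfg // pass to a session or service client constructor
}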
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644 (file)
index 0000000..4778056
--- /dev/null
@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+       ServiceName   string
+       APIVersion    string
+       Endpoint      string
+       SigningName   string
+       SigningRegion string
+       JSONVersion   string
+       TargetPrefix  string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644 (file)
index 0000000..d1f31f1
--- /dev/null
@@ -0,0 +1,470 @@
+package aws
+
+import (
+       "net/http"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/endpoints"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own
+// default number of retries. This is also the default behavior when
+// Config.MaxRetries is nil.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer
+// interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the defaults.DefaultConfig structure.
+//
+//     // Create Session with MaxRetry configuration to be shared by multiple
+//     // service clients.
+//     sess := session.Must(session.NewSession(&aws.Config{
+//         MaxRetries: aws.Int(3),
+//     }))
+//
+//     // Create S3 service client with a specific Region.
+//     svc := s3.New(sess, &aws.Config{
+//         Region: aws.String("us-west-2"),
+//     })
+type Config struct {
+       // Enables verbose error printing of all credential chain errors.
+       // Should be used when wanting to see all errors while attempting to
+       // retrieve credentials.
+       CredentialsChainVerboseErrors *bool
+
+       // The credentials object to use when signing requests. Defaults to a
+       // chain of credential providers to search for credentials in environment
+       // variables, shared credential file, and EC2 Instance Roles.
+       Credentials *credentials.Credentials
+
+       // An optional endpoint URL (hostname only or fully qualified URI)
+       // that overrides the default generated endpoint for a client. Set this
+       // to `""` to use the default generated endpoint.
+       //
+       // @note You must still provide a `Region` value when specifying an
+       //   endpoint for a client.
+       Endpoint *string
+
+       // The resolver to use for looking up endpoints for AWS service clients
+       // to use based on region.
+       EndpointResolver endpoints.Resolver
+
+       // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+       // ShouldRetry regardless of whether request.Retryable is set.
+       // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+       // is not set, then ShouldRetry will only be called if request.Retryable is nil.
+       // Proper handling of the request.Retryable field is important when setting this field.
+       EnforceShouldRetryCheck *bool
+
+       // The region to send requests to. This parameter is required and must
+       // be configured globally or on a per-client basis unless otherwise
+       // noted. A full list of regions is found in the "Regions and Endpoints"
+       // document.
+       //
+       // @see http://docs.aws.amazon.com/general/latest/gr/rande.html
+       //   AWS Regions and Endpoints
+       Region *string
+
+       // Set this to `true` to disable SSL when sending requests. Defaults
+       // to `false`.
+       DisableSSL *bool
+
+       // The HTTP client to use when sending requests. Defaults to
+       // `http.DefaultClient`.
+       HTTPClient *http.Client
+
+       // An integer value representing the logging level. The default log level
+       // is zero (LogOff), which represents no logging. To enable logging set
+       // to a LogLevel Value.
+       LogLevel *LogLevelType
+
+       // The logger writer interface to write logging messages to. Defaults to
+       // standard out.
+       Logger Logger
+
+       // The maximum number of times that a request will be retried for failures.
+       // Defaults to -1, which defers the max retry setting to the service
+       // specific configuration.
+       MaxRetries *int
+
+       // Retryer guides how HTTP requests should be retried in case of
+       // recoverable failures.
+       //
+       // When nil or the value does not implement the request.Retryer interface,
+       // the request.DefaultRetryer will be used.
+       //
+       // When both Retryer and MaxRetries are non-nil, the former is used and
+       // the latter ignored.
+       //
+       // To set the Retryer field in a type-safe manner and with chaining, use
+       // the request.WithRetryer helper function:
+       //
+       //   cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+       //
+       Retryer RequestRetryer
+
+       // Disables semantic parameter validation, which validates input for
+       // missing required fields and/or other semantic request input errors.
+       DisableParamValidation *bool
+
+       // Disables the computation of request and response checksums, e.g.,
+       // CRC32 checksums in Amazon DynamoDB.
+       DisableComputeChecksums *bool
+
+       // Set this to `true` to force the request to use path-style addressing,
+       // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
+       // will use virtual hosted bucket addressing when possible
+       // (`http://BUCKET.s3.amazonaws.com/KEY`).
+       //
+       // @note This configuration option is specific to the Amazon S3 service.
+       // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+       //   Amazon S3: Virtual Hosting of Buckets
+       S3ForcePathStyle *bool
+
+       // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+       // header to PUT requests over 2MB of content. 100-Continue instructs the
+       // HTTP client not to send the body until the service responds with a
+       // `continue` status. This is useful to prevent sending the request body
+       // until after the request is authenticated and validated.
+       //
+       // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+       //
+       // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+       // `ExpectContinueTimeout` for information on adjusting the continue wait
+       // timeout. https://golang.org/pkg/net/http/#Transport
+       //
+       // You should use this flag to disable 100-Continue if you experience issues
+       // with proxies or third party S3 compatible services.
+       S3Disable100Continue *bool
+
+       // Set this to `true` to enable the S3 Accelerate feature. All operations
+       // compatible with S3 Accelerate will use the accelerate endpoint for
+       // requests. Requests that are not compatible will fall back to normal S3
+       // requests.
+       //
+       // The bucket must have accelerate enabled before it can be used with an
+       // S3 client that has accelerate enabled. If the bucket is not enabled for
+       // accelerate an error will be returned. The bucket name must also be DNS
+       // compatible to work with accelerate.
+       S3UseAccelerate *bool
+
+       // Set this to `true` to disable the EC2Metadata client from overriding the
+       // default http.Client's Timeout. This is helpful if you do not want the
+       // EC2Metadata client to create a new http.Client. This option is only
+       // meaningful if you're not already using a custom HTTP client with the
+       // SDK. Enabled by default.
+       //
+       // Must be set and provided to the session.NewSession() in order to disable
+       // the EC2Metadata overriding the timeout for default credentials chain.
+       //
+       // Example:
+       //    sess := session.Must(session.NewSession(aws.NewConfig()
+       //       .WithEC2MetadataDisableTimeoutOverride(true)))
+       //
+       //    svc := s3.New(sess)
+       //
+       EC2MetadataDisableTimeoutOverride *bool
+
+       // Instructs the endpoint to be generated for a service client to
+       // be the dual stack endpoint. The dual stack endpoint will support
+       // both IPv4 and IPv6 addressing.
+       //
+       // Setting this for a service which does not support dual stack will fail
+       // to make requests. It is not recommended to set this value on the session
+       // as it will apply to all service clients created with the session, even
+       // services which don't support dual stack endpoints.
+       //
+       // If the Endpoint config value is also provided the UseDualStack flag
+       // will be ignored.
+       //
+       // Example:
+       //
+       //     sess := session.Must(session.NewSession())
+       //
+       //     svc := s3.New(sess, &aws.Config{
+       //         UseDualStack: aws.Bool(true),
+       //     })
+       UseDualStack *bool
+
+       // SleepDelay is an override for the func the SDK will call when sleeping
+       // during the lifecycle of a request. Specifically this will be used for
+       // request delays. This value should only be used for testing. To adjust
+       // the delay of a request see the aws/client.DefaultRetryer and
+       // aws/request.Retryer.
+       //
+       // SleepDelay will prevent any Context from being used for canceling retry
+       // delay of an API operation. It is recommended to not use SleepDelay at all
+       // and specify a Retryer instead.
+       SleepDelay func(time.Duration)
+
+       // DisableRestProtocolURICleaning will disable the cleaning of the URL path
+       // when making REST protocol requests. Defaults to false. This is only
+       // needed to preserve empty directory names in S3 request paths.
+       //
+       // Example:
+       //    sess := session.Must(session.NewSession(&aws.Config{
+       //         DisableRestProtocolURICleaning: aws.Bool(true),
+       //    }))
+       //
+       //    svc := s3.New(sess)
+       //    out, err := svc.GetObject(&s3.GetObjectInput {
+       //      Bucket: aws.String("bucketname"),
+       //      Key: aws.String("//foo//bar//moo"),
+       //    })
+       DisableRestProtocolURICleaning *bool
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder
+// methods to set multiple configuration values inline without using pointers.
+//
+//     // Create Session with MaxRetry configuration to be shared by multiple
+//     // service clients.
+//     sess := session.Must(session.NewSession(aws.NewConfig().
+//         WithMaxRetries(3),
+//     ))
+//
+//     // Create S3 service client with a specific Region.
+//     svc := s3.New(sess, aws.NewConfig().
+//         WithRegion("us-west-2"),
+//     )
+func NewConfig() *Config {
+       return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config CredentialsChainVerboseErrors
+// value returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+       c.CredentialsChainVerboseErrors = &verboseErrs
+       return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+       c.Credentials = creds
+       return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+       c.Endpoint = &endpoint
+       return c
+}
+
+// WithEndpointResolver sets a config EndpointResolver value returning a
+// Config pointer for chaining.
+func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
+       c.EndpointResolver = resolver
+       return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+       c.Region = &region
+       return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+       c.DisableSSL = &disable
+       return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+       c.HTTPClient = client
+       return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+       c.MaxRetries = &max
+       return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+       c.DisableParamValidation = &disable
+       return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+       c.DisableComputeChecksums = &disable
+       return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+       c.LogLevel = &level
+       return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+       c.Logger = logger
+       return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+       c.S3ForcePathStyle = &force
+       return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+       c.S3Disable100Continue = &disable
+       return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+       c.S3UseAccelerate = &enable
+       return c
+}
+
+// WithUseDualStack sets a config UseDualStack value returning a Config
+// pointer for chaining.
+func (c *Config) WithUseDualStack(enable bool) *Config {
+       c.UseDualStack = &enable
+       return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+       c.EC2MetadataDisableTimeoutOverride = &enable
+       return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+       c.SleepDelay = fn
+       return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+       for _, other := range cfgs {
+               mergeInConfig(c, other)
+       }
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+       if other == nil {
+               return
+       }
+
+       if other.CredentialsChainVerboseErrors != nil {
+               dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+       }
+
+       if other.Credentials != nil {
+               dst.Credentials = other.Credentials
+       }
+
+       if other.Endpoint != nil {
+               dst.Endpoint = other.Endpoint
+       }
+
+       if other.EndpointResolver != nil {
+               dst.EndpointResolver = other.EndpointResolver
+       }
+
+       if other.Region != nil {
+               dst.Region = other.Region
+       }
+
+       if other.DisableSSL != nil {
+               dst.DisableSSL = other.DisableSSL
+       }
+
+       if other.HTTPClient != nil {
+               dst.HTTPClient = other.HTTPClient
+       }
+
+       if other.LogLevel != nil {
+               dst.LogLevel = other.LogLevel
+       }
+
+       if other.Logger != nil {
+               dst.Logger = other.Logger
+       }
+
+       if other.MaxRetries != nil {
+               dst.MaxRetries = other.MaxRetries
+       }
+
+       if other.Retryer != nil {
+               dst.Retryer = other.Retryer
+       }
+
+       if other.DisableParamValidation != nil {
+               dst.DisableParamValidation = other.DisableParamValidation
+       }
+
+       if other.DisableComputeChecksums != nil {
+               dst.DisableComputeChecksums = other.DisableComputeChecksums
+       }
+
+       if other.S3ForcePathStyle != nil {
+               dst.S3ForcePathStyle = other.S3ForcePathStyle
+       }
+
+       if other.S3Disable100Continue != nil {
+               dst.S3Disable100Continue = other.S3Disable100Continue
+       }
+
+       if other.S3UseAccelerate != nil {
+               dst.S3UseAccelerate = other.S3UseAccelerate
+       }
+
+       if other.UseDualStack != nil {
+               dst.UseDualStack = other.UseDualStack
+       }
+
+       if other.EC2MetadataDisableTimeoutOverride != nil {
+               dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+       }
+
+       if other.SleepDelay != nil {
+               dst.SleepDelay = other.SleepDelay
+       }
+
+       if other.DisableRestProtocolURICleaning != nil {
+               dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
+       }
+
+       if other.EnforceShouldRetryCheck != nil {
+               dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+       }
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+       dst := &Config{}
+       dst.MergeIn(c)
+
+       for _, cfg := range cfgs {
+               dst.MergeIn(cfg)
+       }
+
+       return dst
+}
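Taken together: the With* builder methods mutate and return the same *Config,
MergeIn copies only non-nil fields from each source, and Copy produces a
shallow merged clone. A short usage sketch against the API above (the region
and retry values are arbitrary):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        // Base config shared by several clients.
        base := aws.NewConfig().
            WithRegion("us-west-2").
            WithMaxRetries(3)

        // Per-client override: only non-nil fields are merged in, so
        // Region survives while MaxRetries is replaced.
        merged := base.Copy(&aws.Config{MaxRetries: aws.Int(10)})

        fmt.Println(aws.StringValue(merged.Region))  // us-west-2
        fmt.Println(aws.IntValue(merged.MaxRetries)) // 10
    }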
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go
new file mode 100644 (file)
index 0000000..79f4268
--- /dev/null
@@ -0,0 +1,71 @@
+package aws
+
+import (
+       "time"
+)
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+       // Deadline returns the time when work done on behalf of this context
+       // should be canceled. Deadline returns ok==false when no deadline is
+       // set. Successive calls to Deadline return the same results.
+       Deadline() (deadline time.Time, ok bool)
+
+       // Done returns a channel that's closed when work done on behalf of this
+       // context should be canceled. Done may return nil if this context can
+       // never be canceled. Successive calls to Done return the same value.
+       Done() <-chan struct{}
+
+       // Err returns a non-nil error value after Done is closed. Err returns
+       // Canceled if the context was canceled or DeadlineExceeded if the
+       // context's deadline passed. No other values for Err are defined.
+       // After Done is closed, successive calls to Err return the same value.
+       Err() error
+
+       // Value returns the value associated with this context for key, or nil
+       // if no value is associated with key. Successive calls to Value with
+       // the same key returns the same result.
+       //
+       // Use context values only for request-scoped data that transits
+       // processes and API boundaries, not for passing optional parameters to
+       // functions.
+       Value(key interface{}) interface{}
+}
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+       return backgroundCtx
+}
+
+// SleepWithContext will wait for the timer duration to expire, or until the
+// context is canceled, whichever happens first. If the context is canceled,
+// the Context's error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+       t := time.NewTimer(dur)
+       defer t.Stop()
+
+       select {
+       case <-t.C:
+               break
+       case <-ctx.Done():
+               return ctx.Err()
+       }
+
+       return nil
+}
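On Go 1.7 and later the stdlib's context.Context satisfies the aws.Context
interface above, so a canceled or expired context cuts the sleep short. A
minimal sketch (the 50ms deadline and 1s sleep are arbitrary values):

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel()

        // The 1s sleep is interrupted by the 50ms context deadline.
        if err := aws.SleepWithContext(ctx, time.Second); err != nil {
            fmt.Println("sleep canceled:", err) // context deadline exceeded
        }
    }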
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
new file mode 100644 (file)
index 0000000..e8cf93d
--- /dev/null
@@ -0,0 +1,41 @@
+// +build !go1.7
+
+package aws
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This
+// is copied to provide a 1.6 and 1.5 safe version of context that is compatible
+// with Go 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+       return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+       return nil
+}
+
+func (*emptyCtx) Err() error {
+       return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+       return nil
+}
+
+func (e *emptyCtx) String() string {
+       switch e {
+       case backgroundCtx:
+               return "aws.BackgroundContext"
+       }
+       return "unknown empty Context"
+}
+
+var (
+       backgroundCtx = new(emptyCtx)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
new file mode 100644 (file)
index 0000000..064f75c
--- /dev/null
@@ -0,0 +1,9 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+var (
+       backgroundCtx = context.Background()
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644 (file)
index 0000000..3b73a7d
--- /dev/null
@@ -0,0 +1,369 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+       return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+       if v != nil {
+               return *v
+       }
+       return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+       dst := make([]*string, len(src))
+       for i := 0; i < len(src); i++ {
+               dst[i] = &(src[i])
+       }
+       return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+       dst := make([]string, len(src))
+       for i := 0; i < len(src); i++ {
+               if src[i] != nil {
+                       dst[i] = *(src[i])
+               }
+       }
+       return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+       dst := make(map[string]*string)
+       for k, val := range src {
+               v := val
+               dst[k] = &v
+       }
+       return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+       dst := make(map[string]string)
+       for k, val := range src {
+               if val != nil {
+                       dst[k] = *val
+               }
+       }
+       return dst
+}
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+       return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+       if v != nil {
+               return *v
+       }
+       return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+       dst := make([]*bool, len(src))
+       for i := 0; i < len(src); i++ {
+               dst[i] = &(src[i])
+       }
+       return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+       dst := make([]bool, len(src))
+       for i := 0; i < len(src); i++ {
+               if src[i] != nil {
+                       dst[i] = *(src[i])
+               }
+       }
+       return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+       dst := make(map[string]*bool)
+       for k, val := range src {
+               v := val
+               dst[k] = &v
+       }
+       return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+       dst := make(map[string]bool)
+       for k, val := range src {
+               if val != nil {
+                       dst[k] = *val
+               }
+       }
+       return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+       return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+       if v != nil {
+               return *v
+       }
+       return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+       dst := make([]*int, len(src))
+       for i := 0; i < len(src); i++ {
+               dst[i] = &(src[i])
+       }
+       return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+       dst := make([]int, len(src))
+       for i := 0; i < len(src); i++ {
+               if src[i] != nil {
+                       dst[i] = *(src[i])
+               }
+       }
+       return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+       dst := make(map[string]*int)
+       for k, val := range src {
+               v := val
+               dst[k] = &v
+       }
+       return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+       dst := make(map[string]int)
+       for k, val := range src {
+               if val != nil {
+                       dst[k] = *val
+               }
+       }
+       return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+       return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+       if v != nil {
+               return *v
+       }
+       return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+       dst := make([]*int64, len(src))
+       for i := 0; i < len(src); i++ {
+               dst[i] = &(src[i])
+       }
+       return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+       dst := make([]int64, len(src))
+       for i := 0; i < len(src); i++ {
+               if src[i] != nil {
+                       dst[i] = *(src[i])
+               }
+       }
+       return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+       dst := make(map[string]*int64)
+       for k, val := range src {
+               v := val
+               dst[k] = &v
+       }
+       return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+       dst := make(map[string]int64)
+       for k, val := range src {
+               if val != nil {
+                       dst[k] = *val
+               }
+       }
+       return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+       return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+       if v != nil {
+               return *v
+       }
+       return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+       dst := make([]*float64, len(src))
+       for i := 0; i < len(src); i++ {
+               dst[i] = &(src[i])
+       }
+       return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+       dst := make([]float64, len(src))
+       for i := 0; i < len(src); i++ {
+               if src[i] != nil {
+                       dst[i] = *(src[i])
+               }
+       }
+       return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+       dst := make(map[string]*float64)
+       for k, val := range src {
+               v := val
+               dst[k] = &v
+       }
+       return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+       dst := make(map[string]float64)
+       for k, val := range src {
+               if val != nil {
+                       dst[k] = *val
+               }
+       }
+       return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+       return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+       if v != nil {
+               return *v
+       }
+       return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64,
+// which includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs such as CloudWatch Logs, which require
+// their Unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+       return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+       dst := make([]*time.Time, len(src))
+       for i := 0; i < len(src); i++ {
+               dst[i] = &(src[i])
+       }
+       return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+       dst := make([]time.Time, len(src))
+       for i := 0; i < len(src); i++ {
+               if src[i] != nil {
+                       dst[i] = *(src[i])
+               }
+       }
+       return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+       dst := make(map[string]*time.Time)
+       for k, val := range src {
+               v := val
+               dst[k] = &v
+       }
+       return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+       dst := make(map[string]time.Time)
+       for k, val := range src {
+               if val != nil {
+                       dst[k] = *val
+               }
+       }
+       return dst
+}
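These helpers exist because the SDK models optional fields as pointers, where
nil means unset; the *Value accessors fold nil back into the type's zero value,
and the Slice/Map variants convert whole collections at once. A brief sketch of
typical use:

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        var unset *string
        fmt.Println(aws.StringValue(unset))             // "" for nil pointers
        fmt.Println(aws.StringValue(aws.String("set"))) // "set"

        // Slice helpers convert whole collections in one call.
        ptrs := aws.StringSlice([]string{"a", "b"})
        fmt.Println(aws.StringValueSlice(ptrs)) // [a b]

        // Millisecond timestamps for APIs such as CloudWatch Logs.
        t := time.Unix(1, int64(500*time.Millisecond))
        fmt.Println(aws.TimeUnixMilli(t)) // 1500
    }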
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644 (file)
index 0000000..25b461c
--- /dev/null
@@ -0,0 +1,226 @@
+package corehandlers
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "net/url"
+       "regexp"
+       "runtime"
+       "strconv"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// Interface for matching types which also have a Len method.
+type lener interface {
+       Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine request body length and no "Content-Length" was specified it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+       var length int64
+
+       if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+               length, _ = strconv.ParseInt(slength, 10, 64)
+       } else {
+               switch body := r.Body.(type) {
+               case nil:
+                       length = 0
+               case lener:
+                       length = int64(body.Len())
+               case io.Seeker:
+                       r.BodyStart, _ = body.Seek(0, 1)
+                       end, _ := body.Seek(0, 2)
+                       body.Seek(r.BodyStart, 0) // make sure to seek back to original location
+                       length = end - r.BodyStart
+               default:
+                       panic("Cannot get length of body, must provide `ContentLength`")
+               }
+       }
+
+       if length > 0 {
+               r.HTTPRequest.ContentLength = length
+               r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+       } else {
+               r.HTTPRequest.ContentLength = 0
+               r.HTTPRequest.Header.Del("Content-Length")
+       }
+}}
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+       Name: "core.SDKVersionUserAgentHandler",
+       Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+               runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// ValidateReqSigHandler is a request handler to ensure that the request's
+// signature doesn't expire before it is sent. This can happen when a request
+// is built and signed significantly before it is sent, or when significant
+// delays occur while retrying requests, causing the signature to expire.
+var ValidateReqSigHandler = request.NamedHandler{
+       Name: "core.ValidateReqSigHandler",
+       Fn: func(r *request.Request) {
+               // Unsigned requests are not signed
+               if r.Config.Credentials == credentials.AnonymousCredentials {
+                       return
+               }
+
+               signedTime := r.Time
+               if !r.LastSignedAt.IsZero() {
+                       signedTime = r.LastSignedAt
+               }
+
+               // 10 minutes to allow for some clock skew/delays in transmission.
+               // Would be improved with aws/aws-sdk-go#423
+               if signedTime.Add(10 * time.Minute).After(time.Now()) {
+                       return
+               }
+
+               fmt.Println("request expired, resigning")
+               r.Sign()
+       },
+}
+
+// SendHandler is a request handler to send a service request using the HTTP client.
+var SendHandler = request.NamedHandler{
+       Name: "core.SendHandler",
+       Fn: func(r *request.Request) {
+               sender := sendFollowRedirects
+               if r.DisableFollowRedirects {
+                       sender = sendWithoutFollowRedirects
+               }
+
+               var err error
+               r.HTTPResponse, err = sender(r)
+               if err != nil {
+                       handleSendError(r, err)
+               }
+       },
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+       return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+       transport := r.Config.HTTPClient.Transport
+       if transport == nil {
+               transport = http.DefaultTransport
+       }
+
+       return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+       // Prevent leaking if an HTTPResponse was returned. Clean up
+       // the body.
+       if r.HTTPResponse != nil {
+               r.HTTPResponse.Body.Close()
+       }
+       // Capture the case where url.Error is returned while processing the
+       // response, e.g. a 301 without a Location header comes back as a string
+       // error and r.HTTPResponse is nil. Other URL redirect errors will come
+       // back in a similar manner.
+       if e, ok := err.(*url.Error); ok && e.Err != nil {
+               if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+                       code, _ := strconv.ParseInt(s[1], 10, 64)
+                       r.HTTPResponse = &http.Response{
+                               StatusCode: int(code),
+                               Status:     http.StatusText(int(code)),
+                               Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+                       }
+                       return
+               }
+       }
+       if r.HTTPResponse == nil {
+               // Add a dummy request response object to ensure the HTTPResponse
+               // value is consistent.
+               r.HTTPResponse = &http.Response{
+                       StatusCode: int(0),
+                       Status:     http.StatusText(int(0)),
+                       Body:       ioutil.NopCloser(bytes.NewReader([]byte{})),
+               }
+       }
+       // Catch all other request errors.
+       r.Error = awserr.New("RequestError", "send request failed", err)
+       r.Retryable = aws.Bool(true) // network errors are retryable
+
+       // Override the error with a context canceled error, if the context was canceled.
+       ctx := r.Context()
+       select {
+       case <-ctx.Done():
+               r.Error = awserr.New(request.CanceledErrorCode,
+                       "request context canceled", ctx.Err())
+               r.Retryable = aws.Bool(false)
+       default:
+       }
+}
+
+// ValidateResponseHandler is a request handler to validate the service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+       if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+               // this may be replaced by an UnmarshalError handler
+               r.Error = awserr.New("UnknownError", "unknown error", nil)
+       }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
+       // If one of the other handlers already set the retry state
+       // we don't want to override it based on the service's state
+       if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
+               r.Retryable = aws.Bool(r.ShouldRetry(r))
+       }
+
+       if r.WillRetry() {
+               r.RetryDelay = r.RetryRules(r)
+
+               if sleepFn := r.Config.SleepDelay; sleepFn != nil {
+                       // Support SleepDelay for backwards compatibility and testing
+                       sleepFn(r.RetryDelay)
+               } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+                       r.Error = awserr.New(request.CanceledErrorCode,
+                               "request context canceled", err)
+                       r.Retryable = aws.Bool(false)
+                       return
+               }
+
+               // when the expired token exception occurs the credentials
+               // need to be expired locally so that the next request to
+               // get credentials will trigger a credentials refresh.
+               if r.IsErrorExpired() {
+                       r.Config.Credentials.Expire()
+               }
+
+               r.RetryCount++
+               r.Error = nil
+       }
+}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+       if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+               r.Error = aws.ErrMissingRegion
+       } else if r.ClientInfo.Endpoint == "" {
+               r.Error = aws.ErrMissingEndpoint
+       }
+}}
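All of the core handlers above are request.NamedHandler values pushed onto
per-phase handler lists (Build, Sign, Send, ValidateResponse, and so on), and
custom handlers slot into the same lists. A rough sketch of attaching a logging
handler to a session's Send phase (the handler name and log line are
illustrative only):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Runs before the core SendHandler for every request made
        // through this session.
        sess.Handlers.Send.PushFrontNamed(request.NamedHandler{
            Name: "example.LogRequestHandler",
            Fn: func(r *request.Request) {
                fmt.Println("sending", r.ClientInfo.ServiceName, r.Operation.Name)
            },
        })
    }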
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644 (file)
index 0000000..7d50b15
--- /dev/null
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+       if !r.ParamsFilled() {
+               return
+       }
+
+       if v, ok := r.Params.(request.Validator); ok {
+               if err := v.Validate(); err != nil {
+                       r.Error = err
+               }
+       }
+}}
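The handler only acts on parameter types that implement request.Validator, a
single Validate() error method; the SDK's generated input shapes do this. A
hypothetical input type satisfying the same contract might look as follows
(exampleInput is not an SDK type):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
    )

    // exampleInput implements the Validate() error contract that
    // ValidateParametersHandler checks for via request.Validator.
    type exampleInput struct {
        Name *string
    }

    func (i *exampleInput) Validate() error {
        if i.Name == nil {
            return awserr.New("InvalidParameter", "missing required field Name", nil)
        }
        return nil
    }

    func main() {
        in := &exampleInput{}
        fmt.Println(in.Validate()) // InvalidParameter: missing required field Name
    }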
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644 (file)
index 0000000..f298d65
--- /dev/null
@@ -0,0 +1,102 @@
+package credentials
+
+import (
+       "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+       // ErrNoValidProvidersFoundInChain is returned when there are no valid
+       // providers in the ChainProvider.
+       //
+       // This has been deprecated. For verbose error messaging set
+       // aws.Config.CredentialsChainVerboseErrors to true
+       //
+       // @readonly
+       ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+               `no valid providers in chain. Deprecated.
+       For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+               nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+//     creds := credentials.NewChainCredentials(
+//         []credentials.Provider{
+//             &credentials.EnvProvider{},
+//             &ec2rolecreds.EC2RoleProvider{
+//                 Client: ec2metadata.New(sess),
+//             },
+//         })
+//
+//     // Usage of ChainCredentials with aws.Config
+//     svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+//       Credentials: creds,
+//     })))
+//
+type ChainProvider struct {
+       Providers     []Provider
+       curr          Provider
+       VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+       return NewCredentials(&ChainProvider{
+               Providers: append([]Provider{}, providers...),
+       })
+}
+
+// Retrieve returns the credentials value, or an error if no provider returned
+// a value without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+       var errs []error
+       for _, p := range c.Providers {
+               creds, err := p.Retrieve()
+               if err == nil {
+                       c.curr = p
+                       return creds, nil
+               }
+               errs = append(errs, err)
+       }
+       c.curr = nil
+
+       var err error
+       err = ErrNoValidProvidersFoundInChain
+       if c.VerboseErrors {
+               err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+       }
+       return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+       if c.curr != nil {
+               return c.curr.IsExpired()
+       }
+
+       return true
+}
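A compact sketch of the fallback behavior described above, chaining this
package's EnvProvider with the shared credentials file provider; the first
provider to return credentials without error wins and is cached until it
expires:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        creds := credentials.NewChainCredentials([]credentials.Provider{
            &credentials.EnvProvider{},
            &credentials.SharedCredentialsProvider{},
        })

        if v, err := creds.Get(); err != nil {
            fmt.Println("no provider succeeded:", err)
        } else {
            fmt.Println("credentials from", v.ProviderName)
        }
    }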
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644 (file)
index 0000000..42416fc
--- /dev/null
@@ -0,0 +1,246 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials type is the primary method of getting access to and managing
+// credential Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when credential Values have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+//     creds := credentials.NewEnvCredentials()
+//
+//     // Retrieve the credentials value
+//     credValue, err := creds.Get()
+//     if err != nil {
+//         // handle error
+//     }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+//     creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
+//     creds.Expire()
+//     credsValue, err := creds.Get()
+//     // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer set up with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+//     type MyProvider struct{}
+//     func (m *MyProvider) Retrieve() (Value, error) {...}
+//     func (m *MyProvider) IsExpired() bool {...}
+//
+//     creds := credentials.NewCredentials(&MyProvider{})
+//     credValue, err := creds.Get()
+//
+package credentials
+
+import (
+       "sync"
+       "time"
+)
+
+// AnonymousCredentials is an empty Credential object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+//     svc := s3.New(session.Must(session.NewSession(&aws.Config{
+//       Credentials: credentials.AnonymousCredentials,
+//     })))
+//     // Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+       // AWS Access key ID
+       AccessKeyID string
+
+       // AWS Secret Access Key
+       SecretAccessKey string
+
+       // AWS Session Token
+       SessionToken string
+
+       // Provider used to get credentials
+       ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+       // Retrieve returns nil if it successfully retrieved the value.
+       // An error is returned if the value was not obtainable, or was empty.
+       Retrieve() (Value, error)
+
+       // IsExpired returns if the credentials are no longer valid, and need
+       // to be retrieved.
+       IsExpired() bool
+}
+
+// An ErrorProvider is a stub credentials provider that always returns an error.
+// It is used by the SDK when constructing a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+       // The error to be returned from Retrieve
+       Err error
+
+       // The provider name to set on the Value returned by Retrieve
+       ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+       return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+       return false
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+//     type EC2RoleProvider struct {
+//         Expiry
+//         ...
+//     }
+type Expiry struct {
+       // The date/time when to expire on
+       expiration time.Time
+
+       // If set, will be used by IsExpired to determine the current time.
+       // Defaults to time.Now if CurrentTime is not set. Available for testing
+       // to be able to mock out the current time.
+       CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+       e.expiration = expiration
+       if window > 0 {
+               e.expiration = e.expiration.Add(-window)
+       }
+}
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+       if e.CurrentTime == nil {
+               e.CurrentTime = time.Now
+       }
+       return e.expiration.Before(e.CurrentTime())
+}
+
+// A Credentials provides concurrency-safe retrieval of AWS credential Values.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage the
+// synchronization state so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+       creds        Value
+       forceRefresh bool
+       m            sync.Mutex
+
+       provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+       return &Credentials{
+               provider:     provider,
+               forceRefresh: true,
+       }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+       c.m.Lock()
+       defer c.m.Unlock()
+
+       if c.isExpired() {
+               creds, err := c.provider.Retrieve()
+               if err != nil {
+                       return Value{}, err
+               }
+               c.creds = creds
+               c.forceRefresh = false
+       }
+
+       return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+       c.m.Lock()
+       defer c.m.Unlock()
+
+       c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+       c.m.Lock()
+       defer c.m.Unlock()
+
+       return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+       return c.forceRefresh || c.provider.IsExpired()
+}
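The MyProvider outline in the package doc expands to something like the
following: a hypothetical provider that never expires, wrapped in Credentials
so callers get caching and locking for free (fixedProvider and its values are
illustrative, not part of the SDK):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    // fixedProvider satisfies the Provider interface with static values.
    type fixedProvider struct{}

    func (p *fixedProvider) Retrieve() (credentials.Value, error) {
        return credentials.Value{
            AccessKeyID:     "AKID",
            SecretAccessKey: "SECRET",
            ProviderName:    "fixedProvider",
        }, nil
    }

    // IsExpired always reports false, so Retrieve is only called once.
    func (p *fixedProvider) IsExpired() bool { return false }

    func main() {
        creds := credentials.NewCredentials(&fixedProvider{})
        v, err := creds.Get() // the first Get always calls Retrieve
        fmt.Println(v.ProviderName, err)
    }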
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644 (file)
index 0000000..c397495
--- /dev/null
@@ -0,0 +1,178 @@
+package ec2rolecreds
+
+import (
+       "bufio"
+       "encoding/json"
+       "fmt"
+       "path"
+       "strings"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/client"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/ec2metadata"
+)
+
+// ProviderName is the name of the EC2Role provider.
+const ProviderName = "EC2RoleProvider"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track
+// of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom http.Client,
+// Endpoint, or ExpiryWindow:
+//
+//     p := &ec2rolecreds.EC2RoleProvider{
+//         // Pass in a custom timeout to be used when requesting
+//         // IAM EC2 Role credentials.
+//         Client: ec2metadata.New(sess, aws.Config{
+//             HTTPClient: &http.Client{Timeout: 10 * time.Second},
+//         }),
+//
+//         // Do not use early expiry of credentials. If a non zero value is
+//         // specified the credentials will be expired early
+//         ExpiryWindow: 0,
+//     }
+type EC2RoleProvider struct {
+       credentials.Expiry
+
+       // Required EC2Metadata client to use when connecting to EC2 metadata service.
+       Client *ec2metadata.EC2Metadata
+
+       // ExpiryWindow will allow the credentials to trigger refreshing prior to
+       // the credentials actually expiring. This is beneficial so race conditions
+       // with expiring credentials do not cause requests to fail unexpectedly
+       // due to ExpiredTokenException exceptions.
+       //
+       // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+       // 10 seconds before the credentials are actually expired.
+       //
+       // If ExpiryWindow is 0 or less it will be ignored.
+       ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+       p := &EC2RoleProvider{
+               Client: ec2metadata.New(c),
+       }
+
+       for _, option := range options {
+               option(p)
+       }
+
+       return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to EC2
+// metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+       p := &EC2RoleProvider{
+               Client: client,
+       }
+
+       for _, option := range options {
+               option(p)
+       }
+
+       return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or the desired
+// credentials cannot be extracted.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+       credsList, err := requestCredList(m.Client)
+       if err != nil {
+               return credentials.Value{ProviderName: ProviderName}, err
+       }
+
+       if len(credsList) == 0 {
+               return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+       }
+       credsName := credsList[0]
+
+       roleCreds, err := requestCred(m.Client, credsName)
+       if err != nil {
+               return credentials.Value{ProviderName: ProviderName}, err
+       }
+
+       m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+       return credentials.Value{
+               AccessKeyID:     roleCreds.AccessKeyID,
+               SecretAccessKey: roleCreds.SecretAccessKey,
+               SessionToken:    roleCreds.Token,
+               ProviderName:    ProviderName,
+       }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshaling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+       // Success State
+       Expiration      time.Time
+       AccessKeyID     string
+       SecretAccessKey string
+       Token           string
+
+       // Error state
+       Code    string
+       Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service.
+// If there are no credentials, or there is an error making or receiving the
+// request, an error will be returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+       resp, err := client.GetMetadata(iamSecurityCredsPath)
+       if err != nil {
+               return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+       }
+
+       credsList := []string{}
+       s := bufio.NewScanner(strings.NewReader(resp))
+       for s.Scan() {
+               credsList = append(credsList, s.Text())
+       }
+
+       if err := s.Err(); err != nil {
+               return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+       }
+
+       return credsList, nil
+}
+
+// requestCred requests the credentials for a specific credentials name from
+// the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+       resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+       if err != nil {
+               return ec2RoleCredRespBody{},
+                       awserr.New("EC2RoleRequestError",
+                               fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+                               err)
+       }
+
+       respCreds := ec2RoleCredRespBody{}
+       if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+               return ec2RoleCredRespBody{},
+                       awserr.New("SerializationError",
+                               fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+                               err)
+       }
+
+       if respCreds.Code != "Success" {
+               // If an error code was returned something failed requesting the role.
+               return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+       }
+
+       return respCreds, nil
+}
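A sketch of wiring this provider up through a session (the ExpiryWindow value
is illustrative, and Retrieve only succeeds on an EC2 instance that has an
instance role attached):

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Refresh up to 5 minutes before the instance role credentials expire.
        creds := ec2rolecreds.NewCredentials(sess, func(p *ec2rolecreds.EC2RoleProvider) {
            p.ExpiryWindow = 5 * time.Minute
        })

        if v, err := creds.Get(); err != nil {
            fmt.Println("not on EC2, or no role attached:", err)
        } else {
            fmt.Println("credentials from:", v.ProviderName)
        }
    }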
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
new file mode 100644 (file)
index 0000000..a4cec5c
--- /dev/null
@@ -0,0 +1,191 @@
+// Package endpointcreds provides support for retrieving credentials from an
+// arbitrary HTTP endpoint.
+//
+// The credentials endpoint Provider can receive both static and refreshable
+// credentials that will expire. Credentials are static when an "Expiration"
+// value is not provided in the endpoint's response.
+//
+// Static credentials will never expire once they have been retrieved. The format
+// of the static credentials response:
+//    {
+//        "AccessKeyId" : "MUA...",
+//        "SecretAccessKey" : "/7PC5om....",
+//    }
+//
+// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
+// value in the response. The format of the refreshable credentials response:
+//    {
+//        "AccessKeyId" : "MUA...",
+//        "SecretAccessKey" : "/7PC5om....",
+//        "Token" : "AQoDY....=",
+//        "Expiration" : "2016-02-25T06:03:31Z"
+//    }
+//
+// Errors should be returned in the following format and only returned with 400
+// or 500 HTTP status codes.
+//    {
+//        "code": "ErrorCode",
+//        "message": "Helpful error message."
+//    }
+package endpointcreds
+
+import (
+       "encoding/json"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/client"
+       "github.com/aws/aws-sdk-go/aws/client/metadata"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ProviderName is the name of the credentials provider.
+const ProviderName = `CredentialsEndpointProvider`
+
+// Provider satisfies the credentials.Provider interface, and is a client to
+// retrieve credentials from an arbitrary endpoint.
+type Provider struct {
+       staticCreds bool
+       credentials.Expiry
+
+       // Requires an AWS Client to make HTTP requests to the endpoint with.
+       // The endpoint the request will be made to is provided by the aws.Config's
+       // Endpoint value.
+       Client *client.Client
+
+       // ExpiryWindow will allow the credentials to trigger refreshing prior to
+       // the credentials actually expiring. This is beneficial so race conditions
+       // with expiring credentials do not cause requests to fail unexpectedly
+       // due to ExpiredTokenException exceptions.
+       //
+       // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+       // 10 seconds before the credentials are actually expired.
+       //
+       // If ExpiryWindow is 0 or less it will be ignored.
+       ExpiryWindow time.Duration
+}
+
+// NewProviderClient returns a credentials Provider for retrieving AWS credentials
+// from an arbitrary endpoint.
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
+       p := &Provider{
+               Client: client.New(
+                       cfg,
+                       metadata.ClientInfo{
+                               ServiceName: "CredentialsEndpoint",
+                               Endpoint:    endpoint,
+                       },
+                       handlers,
+               ),
+       }
+
+       p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
+       p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
+       p.Client.Handlers.Validate.Clear()
+       p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
+
+       for _, option := range options {
+               option(p)
+       }
+
+       return p
+}
+
+// NewCredentialsClient returns a Credentials wrapper for retrieving credentials
+// from an arbitrary endpoint concurrently. The client will request the
+// credentials from the endpoint when they are needed.
+func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
+       return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
+}
+
+// IsExpired returns true if the credentials retrieved are expired, or not yet
+// retrieved.
+func (p *Provider) IsExpired() bool {
+       if p.staticCreds {
+               return false
+       }
+       return p.Expiry.IsExpired()
+}
+
+// Retrieve will attempt to request the credentials from the endpoint the Provider
+// was configured for. An error will be returned if the retrieval fails.
+func (p *Provider) Retrieve() (credentials.Value, error) {
+       resp, err := p.getCredentials()
+       if err != nil {
+               return credentials.Value{ProviderName: ProviderName},
+                       awserr.New("CredentialsEndpointError", "failed to load credentials", err)
+       }
+
+       if resp.Expiration != nil {
+               p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
+       } else {
+               p.staticCreds = true
+       }
+
+       return credentials.Value{
+               AccessKeyID:     resp.AccessKeyID,
+               SecretAccessKey: resp.SecretAccessKey,
+               SessionToken:    resp.Token,
+               ProviderName:    ProviderName,
+       }, nil
+}
+
+type getCredentialsOutput struct {
+       Expiration      *time.Time
+       AccessKeyID     string
+       SecretAccessKey string
+       Token           string
+}
+
+type errorOutput struct {
+       Code    string `json:"code"`
+       Message string `json:"message"`
+}
+
+func (p *Provider) getCredentials() (*getCredentialsOutput, error) {
+       op := &request.Operation{
+               Name:       "GetCredentials",
+               HTTPMethod: "GET",
+       }
+
+       out := &getCredentialsOutput{}
+       req := p.Client.NewRequest(op, nil, out)
+       req.HTTPRequest.Header.Set("Accept", "application/json")
+
+       return out, req.Send()
+}
+
+func validateEndpointHandler(r *request.Request) {
+       if len(r.ClientInfo.Endpoint) == 0 {
+               r.Error = aws.ErrMissingEndpoint
+       }
+}
+
+func unmarshalHandler(r *request.Request) {
+       defer r.HTTPResponse.Body.Close()
+
+       out := r.Data.(*getCredentialsOutput)
+       if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
+               r.Error = awserr.New("SerializationError",
+                       "failed to decode endpoint credentials",
+                       err,
+               )
+       }
+}
+
+func unmarshalError(r *request.Request) {
+       defer r.HTTPResponse.Body.Close()
+
+       var errOut errorOutput
+       if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil {
+               r.Error = awserr.New("SerializationError",
+                       "failed to decode endpoint credentials",
+                       err,
+               )
+               // Return early so the decode failure is not clobbered by the
+               // generic error constructed below.
+               return
+       }
+
+       // Response body format is not consistent between metadata endpoints.
+       // Grab the error message as a string and include that as the source error
+       r.Error = awserr.New(errOut.Code, errOut.Message, nil)
+}
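A sketch of constructing this provider against a hypothetical local sidecar
URL, reusing the defaults package that appears later in this commit (the
endpoint address and ExpiryWindow are placeholders):

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        def := defaults.Get()

        creds := endpointcreds.NewCredentialsClient(
            *def.Config, def.Handlers, "http://127.0.0.1:8080/creds",
            func(p *endpointcreds.Provider) {
                p.ExpiryWindow = time.Minute
            },
        )

        // Get only succeeds if something is actually serving credentials
        // in the JSON format documented above.
        if v, err := creds.Get(); err == nil {
            fmt.Println("credentials from:", v.ProviderName)
        }
    }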
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644 (file)
index 0000000..c14231a
--- /dev/null
@@ -0,0 +1,78 @@
+package credentials
+
+import (
+       "os"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName is the name of the Env provider.
+const EnvProviderName = "EnvProvider"
+
+var (
+       // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+       // found in the process's environment.
+       //
+       // @readonly
+       ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+       // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+       // can't be found in the process's environment.
+       //
+       // @readonly
+       ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID:     AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+       retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+       return NewCredentials(&EnvProvider{})
+}
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+       e.retrieved = false
+
+       id := os.Getenv("AWS_ACCESS_KEY_ID")
+       if id == "" {
+               id = os.Getenv("AWS_ACCESS_KEY")
+       }
+
+       secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+       if secret == "" {
+               secret = os.Getenv("AWS_SECRET_KEY")
+       }
+
+       if id == "" {
+               return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+       }
+
+       if secret == "" {
+               return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+       }
+
+       e.retrieved = true
+       return Value{
+               AccessKeyID:     id,
+               SecretAccessKey: secret,
+               SessionToken:    os.Getenv("AWS_SESSION_TOKEN"),
+               ProviderName:    EnvProviderName,
+       }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+       return !e.retrieved
+}
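A sketch of the environment provider (the key values are placeholders):

    package main

    import (
        "fmt"
        "os"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
        os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")

        creds := credentials.NewEnvCredentials()
        v, err := creds.Get()
        if err != nil {
            fmt.Println("missing env credentials:", err)
            return
        }
        fmt.Println(v.AccessKeyID, v.ProviderName) // AKID EnvProvider
    }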
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644 (file)
index 0000000..7fc91d9
--- /dev/null
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644 (file)
index 0000000..7fb7cbf
--- /dev/null
@@ -0,0 +1,151 @@
+package credentials
+
+import (
+       "fmt"
+       "os"
+       "path/filepath"
+
+       "github.com/go-ini/ini"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// SharedCredsProviderName is the name of the SharedCreds provider.
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+       // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
+       //
+       // @readonly
+       ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track if those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+       // Path to the shared credentials file.
+       //
+       // If empty, will look for the "AWS_SHARED_CREDENTIALS_FILE" env variable.
+       // If that env value is also empty, will default to the current user's
+       // home directory.
+       // Linux/OSX: "$HOME/.aws/credentials"
+       // Windows:   "%USERPROFILE%\.aws\credentials"
+       Filename string
+
+       // AWS Profile to extract credentials from the shared credentials file. If
+       // empty, will default to the environment variable "AWS_PROFILE", or to
+       // "default" if that environment variable is also not set.
+       Profile string
+
+       // retrieved states if the credentials have been successfully retrieved.
+       retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+       return NewCredentials(&SharedCredentialsProvider{
+               Filename: filename,
+               Profile:  profile,
+       })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+       p.retrieved = false
+
+       filename, err := p.filename()
+       if err != nil {
+               return Value{ProviderName: SharedCredsProviderName}, err
+       }
+
+       creds, err := loadProfile(filename, p.profile())
+       if err != nil {
+               return Value{ProviderName: SharedCredsProviderName}, err
+       }
+
+       p.retrieved = true
+       return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+       return !p.retrieved
+}
+
+// loadProfile loads the named profile from the shared credentials file pointed
+// to by filename. The credentials retrieved from the profile will be returned,
+// or an error if it fails to read from the file or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+       config, err := ini.Load(filename)
+       if err != nil {
+               return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+       }
+       iniProfile, err := config.GetSection(profile)
+       if err != nil {
+               return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
+       }
+
+       id, err := iniProfile.GetKey("aws_access_key_id")
+       if err != nil {
+               return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+                       fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+                       err)
+       }
+
+       secret, err := iniProfile.GetKey("aws_secret_access_key")
+       if err != nil {
+               return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+                       fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+                       err)
+       }
+
+       // Default to empty string if not found
+       token := iniProfile.Key("aws_session_token")
+
+       return Value{
+               AccessKeyID:     id.String(),
+               SecretAccessKey: secret.String(),
+               SessionToken:    token.String(),
+               ProviderName:    SharedCredsProviderName,
+       }, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+       if p.Filename == "" {
+               if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
+                       return p.Filename, nil
+               }
+
+               homeDir := os.Getenv("HOME") // *nix
+               if homeDir == "" {           // Windows
+                       homeDir = os.Getenv("USERPROFILE")
+               }
+               if homeDir == "" {
+                       return "", ErrSharedCredentialsHomeNotFound
+               }
+
+               p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+       }
+
+       return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty, it will read
+// the environment variable "AWS_PROFILE". If that is not set, profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+       if p.Profile == "" {
+               p.Profile = os.Getenv("AWS_PROFILE")
+       }
+       if p.Profile == "" {
+               p.Profile = "default"
+       }
+
+       return p.Profile
+}
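A sketch of loading a profile like the ones in example.ini above; the empty
filename falls back to AWS_SHARED_CREDENTIALS_FILE and then the home
directory, as documented on the struct:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // "" lets the provider resolve the filename; "no_token" names a
        // profile like the one in example.ini above.
        creds := credentials.NewSharedCredentials("", "no_token")

        v, err := creds.Get()
        if err != nil {
            fmt.Println("could not load shared credentials:", err)
            return
        }
        fmt.Println(v.ProviderName) // SharedCredentialsProvider
    }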
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644 (file)
index 0000000..4f5dab3
--- /dev/null
@@ -0,0 +1,57 @@
+package credentials
+
+import (
+       "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName is the name of the Static provider.
+const StaticProviderName = "StaticProvider"
+
+var (
+       // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+       //
+       // @readonly
+       ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+       Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+       return NewCredentials(&StaticProvider{Value: Value{
+               AccessKeyID:     id,
+               SecretAccessKey: secret,
+               SessionToken:    token,
+       }})
+}
+
+// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
+// wrapping the static credentials value provider. Same as NewStaticCredentials
+// but takes the creds Value instead of individual fields.
+func NewStaticCredentialsFromCreds(creds Value) *Credentials {
+       return NewCredentials(&StaticProvider{Value: creds})
+}
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+       if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+               return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+       }
+
+       if len(s.Value.ProviderName) == 0 {
+               s.Value.ProviderName = StaticProviderName
+       }
+       return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+       return false
+}
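A sketch of both constructors (the values are placeholders):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // From individual fields.
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

        // Or from an existing Value.
        creds = credentials.NewStaticCredentialsFromCreds(credentials.Value{
            AccessKeyID:     "AKID",
            SecretAccessKey: "SECRET",
        })

        v, _ := creds.Get()
        fmt.Println(v.ProviderName, creds.IsExpired()) // StaticProvider false
    }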
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
new file mode 100644 (file)
index 0000000..4108e43
--- /dev/null
@@ -0,0 +1,298 @@
+/*
+Package stscreds provides credential Providers for retrieving STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDK's stscreds package.
+
+       // Initial credentials loaded from SDK's default credential chain. Such as
+       // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+// Role. These credentials will be used to make the STS Assume Role API call.
+       sess := session.Must(session.NewSession())
+
+       // Create the credentials from AssumeRoleProvider to assume the role
+       // referenced by the "myRoleARN" ARN.
+       creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+       // Create service client value configured for credentials
+       // from assumed role.
+       svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with an MFA token you can either specify an MFA token code
+directly or provide a function to prompt the user each time the role's
+credentials need to be refreshed. Specifying the TokenCode should be used for
+short lived operations that will not need to be refreshed, and when you do not
+want direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+       // Create the credentials from AssumeRoleProvider to assume the role
+       // referenced by the "myRoleARN" ARN using the MFA token code provided.
+       creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+               p.SerialNumber = aws.String("myTokenSerialNumber")
+               p.TokenCode = aws.String("00000000")
+       })
+
+       // Create service client value configured for credentials
+       // from assumed role.
+       svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+       // Create the credentials from AssumeRoleProvider to assume the role
+       // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+       creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+               p.SerialNumber = aws.String("myTokenSerialNumber")
+               p.TokenProvider = stscreds.StdinTokenProvider
+       })
+
+       // Create service client value configured for credentials
+       // from assumed role.
+       svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
+package stscreds
+
+import (
+       "fmt"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/client"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/service/sts"
+)
+
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely.
+//
+// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) {
+       var v string
+       fmt.Printf("Assume Role MFA token code: ")
+       _, err := fmt.Scanln(&v)
+
+       return v, err
+}
+
+// ProviderName is the name of the AssumeRole provider.
+const ProviderName = "AssumeRoleProvider"
+
+// AssumeRoler represents the minimal subset of the STS client API used by this provider.
+type AssumeRoler interface {
+       AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
+}
+
+// DefaultDuration is the default amount of time that the credentials
+// will be valid for.
+var DefaultDuration = time.Duration(15) * time.Minute
+
+// AssumeRoleProvider retrieves temporary credentials from the STS service, and
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
+type AssumeRoleProvider struct {
+       credentials.Expiry
+
+       // STS client to make assume role request with.
+       Client AssumeRoler
+
+       // Role to be assumed.
+       RoleARN string
+
+       // Session name, if you wish to reuse the credentials elsewhere.
+       RoleSessionName string
+
+       // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
+       Duration time.Duration
+
+       // Optional ExternalID to pass along, defaults to nil if not set.
+       ExternalID *string
+
+       // The policy plain text must be 2048 bytes or shorter. However, an internal
+       // conversion compresses it into a packed binary format with a separate limit.
+       // The PackedPolicySize response element indicates by percentage how close to
+       // the upper size limit the policy is, with 100% equaling the maximum allowed
+       // size.
+       Policy *string
+
+       // The identification number of the MFA device that is associated with the user
+       // who is making the AssumeRole call. Specify this value if the trust policy
+       // of the role being assumed includes a condition that requires MFA authentication.
+       // The value is either the serial number for a hardware device (such as GAHT12345678)
+       // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+       SerialNumber *string
+
+       // The value provided by the MFA device, if the trust policy of the role being
+       // assumed requires MFA (that is, if the policy includes a condition that tests
+       // for MFA). If the role being assumed requires MFA and if the TokenCode value
+       // is missing or expired, the AssumeRole call returns an "access denied" error.
+       //
+       // If SerialNumber is set and neither TokenCode nor TokenProvider are also
+       // set an error will be returned.
+       TokenCode *string
+
+       // Async method of providing MFA token code for assuming an IAM role with MFA.
+       // The value returned by the function will be used as the TokenCode in the Retrieve
+       // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+       //
+       // This token provider will be called whenever the assumed role's
+       // credentials need to be refreshed when SerialNumber is also set and
+       // TokenCode is not set.
+       //
+       // If both TokenCode and TokenProvider are set, TokenProvider will be used and
+       // TokenCode is ignored.
+       TokenProvider func() (string, error)
+
+       // ExpiryWindow will allow the credentials to trigger refreshing prior to
+       // the credentials actually expiring. This is beneficial so race conditions
+       // with expiring credentials do not cause requests to fail unexpectedly
+       // due to ExpiredTokenException exceptions.
+       //
+       // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+       // 10 seconds before the credentials are actually expired.
+       //
+       // If ExpiryWindow is 0 or less it will be ignored.
+       ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role session will be named after a nanosecond timestamp of this operation.
+//
+// Takes a Config provider to create the STS client. The ConfigProvider is
+// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+       p := &AssumeRoleProvider{
+               Client:   sts.New(c),
+               RoleARN:  roleARN,
+               Duration: DefaultDuration,
+       }
+
+       for _, option := range options {
+               option(p)
+       }
+
+       return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the
+// AssumeRoleProvider. The credentials will expire every 15 minutes and the
+// role session will be named after a nanosecond timestamp of this operation.
+//
+// Takes an AssumeRoler which can be satisfied by the STS client.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
+func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
+       p := &AssumeRoleProvider{
+               Client:   svc,
+               RoleARN:  roleARN,
+               Duration: DefaultDuration,
+       }
+
+       for _, option := range options {
+               option(p)
+       }
+
+       return credentials.NewCredentials(p)
+}
+
+// Retrieve generates a new set of temporary credentials using STS.
+func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
+
+       // Apply defaults where parameters are not set.
+       if p.RoleSessionName == "" {
+               // Try to work out a role name that will hopefully end up unique.
+               p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
+       }
+       if p.Duration == 0 {
+               // Expire as often as AWS permits.
+               p.Duration = DefaultDuration
+       }
+       input := &sts.AssumeRoleInput{
+               DurationSeconds: aws.Int64(int64(p.Duration / time.Second)),
+               RoleArn:         aws.String(p.RoleARN),
+               RoleSessionName: aws.String(p.RoleSessionName),
+               ExternalId:      p.ExternalID,
+       }
+       if p.Policy != nil {
+               input.Policy = p.Policy
+       }
+       if p.SerialNumber != nil {
+               if p.TokenCode != nil {
+                       input.SerialNumber = p.SerialNumber
+                       input.TokenCode = p.TokenCode
+               } else if p.TokenProvider != nil {
+                       input.SerialNumber = p.SerialNumber
+                       code, err := p.TokenProvider()
+                       if err != nil {
+                               return credentials.Value{ProviderName: ProviderName}, err
+                       }
+                       input.TokenCode = aws.String(code)
+               } else {
+                       return credentials.Value{ProviderName: ProviderName},
+                               awserr.New("AssumeRoleTokenNotAvailable",
+                                       "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
+               }
+       }
+
+       roleOutput, err := p.Client.AssumeRole(input)
+       if err != nil {
+               return credentials.Value{ProviderName: ProviderName}, err
+       }
+
+       // We will proactively generate new credentials before they expire.
+       p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
+
+       return credentials.Value{
+               AccessKeyID:     *roleOutput.Credentials.AccessKeyId,
+               SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
+               SessionToken:    *roleOutput.Credentials.SessionToken,
+               ProviderName:    ProviderName,
+       }, nil
+}
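The package doc above covers the MFA variants; as a sketch, the same
option-function pattern also reaches fields like Duration and ExternalID
(the role ARN and external ID are placeholders):

    package main

    import (
        "fmt"
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())

        creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/demo",
            func(p *stscreds.AssumeRoleProvider) {
                p.Duration = 30 * time.Minute // session lifetime requested from STS
                p.ExternalID = aws.String("my-external-id")
                p.ExpiryWindow = time.Minute // refresh 1 minute before expiry
            },
        )

        if v, err := creds.Get(); err == nil {
            fmt.Println("assumed role via:", v.ProviderName)
        }
    }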
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644 (file)
index 0000000..07afe3b
--- /dev/null
@@ -0,0 +1,163 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly, but session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+       "fmt"
+       "net/http"
+       "net/url"
+       "os"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/corehandlers"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+       "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
+       "github.com/aws/aws-sdk-go/aws/ec2metadata"
+       "github.com/aws/aws-sdk-go/aws/endpoints"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+       Config   *aws.Config
+       Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+       cfg := Config()
+       handlers := Handlers()
+       cfg.Credentials = CredChain(cfg, handlers)
+
+       return Defaults{
+               Config:   cfg,
+               Handlers: handlers,
+       }
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+       return aws.NewConfig().
+               WithCredentials(credentials.AnonymousCredentials).
+               WithRegion(os.Getenv("AWS_REGION")).
+               WithHTTPClient(http.DefaultClient).
+               WithMaxRetries(aws.UseServiceDefaultRetries).
+               WithLogger(aws.NewDefaultLogger()).
+               WithLogLevel(aws.LogOff).
+               WithEndpointResolver(endpoints.DefaultResolver())
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+       var handlers request.Handlers
+
+       handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+       handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+       handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+       handlers.Build.AfterEachFn = request.HandlerListStopOnError
+       handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+       handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
+       handlers.Send.PushBackNamed(corehandlers.SendHandler)
+       handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+       handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+       return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+       return credentials.NewCredentials(&credentials.ChainProvider{
+               VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+               Providers: []credentials.Provider{
+                       &credentials.EnvProvider{},
+                       &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+                       RemoteCredProvider(*cfg, handlers),
+               },
+       })
+}
+
+const (
+       httpProviderEnvVar     = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+       ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
+// endpoints such as EC2 or ECS Roles.
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+       if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+               return localHTTPCredProvider(cfg, handlers, u)
+       }
+
+       if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
+               u := fmt.Sprintf("http://169.254.170.2%s", uri)
+               return httpCredProvider(cfg, handlers, u)
+       }
+
+       return ec2RoleProvider(cfg, handlers)
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+       var errMsg string
+
+       parsed, err := url.Parse(u)
+       if err != nil {
+               errMsg = fmt.Sprintf("invalid URL, %v", err)
+       } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
+               errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
+       }
+
+       if len(errMsg) > 0 {
+               if cfg.Logger != nil {
+                       cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+               }
+               return credentials.ErrorProvider{
+                       Err:          awserr.New("CredentialsEndpointError", errMsg, err),
+                       ProviderName: endpointcreds.ProviderName,
+               }
+       }
+
+       return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+       return endpointcreds.NewProviderClient(cfg, handlers, u,
+               func(p *endpointcreds.Provider) {
+                       p.ExpiryWindow = 5 * time.Minute
+               },
+       )
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+       resolver := cfg.EndpointResolver
+       if resolver == nil {
+               resolver = endpoints.DefaultResolver()
+       }
+
+       e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+       return &ec2rolecreds.EC2RoleProvider{
+               Client:       ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+               ExpiryWindow: 5 * time.Minute,
+       }
+}
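A sketch of using this package directly, mirroring the remote leg of the
default chain built by CredChain above (mostly useful outside of
session.Session, e.g. in tests):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/defaults"
    )

    func main() {
        def := defaults.Get()

        // ECS endpoint if the container env vars are set, otherwise the
        // EC2 instance role.
        provider := defaults.RemoteCredProvider(*def.Config, def.Handlers)
        creds := credentials.NewCredentials(provider)

        if v, err := creds.Get(); err == nil {
            fmt.Println("remote credentials from:", v.ProviderName)
        }
    }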
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644 (file)
index 0000000..4fcb616
--- /dev/null
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer to the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to Value form will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So to get
+// a *string from a string value, use the "String" function. This makes it easy
+// to get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//    var strPtr *string
+//
+//    // Without the SDK's conversion functions
+//    str := "my string"
+//    strPtr = &str
+//
+//    // With the SDK's conversion functions
+//    strPtr = aws.String("my string")
+//
+//    // Convert *string to string value
+//    str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// maps and slices of commonly used types in API parameters. The map and slice
+// conversion functions use a similar naming pattern to the scalar conversion
+// functions.
+//
+//    var strPtrs []*string
+//    var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//    // Convert []string to []*string
+//    strPtrs = aws.StringSlice(strs)
+//
+//    // Convert []*string to []string
+//    strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
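The String examples above generalize to every scalar; a sketch with int64
(Int64 and Int64Value live in this SDK's convert_types.go):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
    )

    func main() {
        n := aws.Int64(42)             // *int64 from a literal
        fmt.Println(aws.Int64Value(n)) // 42

        var nilPtr *int64
        fmt.Println(aws.Int64Value(nilPtr)) // 0, nil-safe dereference
    }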
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644 (file)
index 0000000..984407a
--- /dev/null
@@ -0,0 +1,162 @@
+package ec2metadata
+
+import (
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "path"
+       "strings"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or an
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+       op := &request.Operation{
+               Name:       "GetMetadata",
+               HTTPMethod: "GET",
+               HTTPPath:   path.Join("/", "meta-data", p),
+       }
+
+       output := &metadataOutput{}
+       req := c.NewRequest(op, nil, output)
+
+       return output.Content, req.Send()
+}
+
+// GetUserData returns the userdata that was configured for the EC2 instance. If
+// there is no user-data setup for the EC2 instance a "NotFoundError" error
+// code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+       op := &request.Operation{
+               Name:       "GetUserData",
+               HTTPMethod: "GET",
+               HTTPPath:   path.Join("/", "user-data"),
+       }
+
+       output := &metadataOutput{}
+       req := c.NewRequest(op, nil, output)
+       req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+               if r.HTTPResponse.StatusCode == http.StatusNotFound {
+                       r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+               }
+       })
+
+       return output.Content, req.Send()
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or an error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+       op := &request.Operation{
+               Name:       "GetDynamicData",
+               HTTPMethod: "GET",
+               HTTPPath:   path.Join("/", "dynamic", p),
+       }
+
+       output := &metadataOutput{}
+       req := c.NewRequest(op, nil, output)
+
+       return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. Error is returned if the request fails or is unable to parse
+// the response.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+       resp, err := c.GetDynamicData("instance-identity/document")
+       if err != nil {
+               return EC2InstanceIdentityDocument{},
+                       awserr.New("EC2MetadataRequestError",
+                               "failed to get EC2 instance identity document", err)
+       }
+
+       doc := EC2InstanceIdentityDocument{}
+       if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+               return EC2InstanceIdentityDocument{},
+                       awserr.New("SerializationError",
+                               "failed to decode EC2 instance identity document", err)
+       }
+
+       return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+       resp, err := c.GetMetadata("iam/info")
+       if err != nil {
+               return EC2IAMInfo{},
+                       awserr.New("EC2MetadataRequestError",
+                               "failed to get EC2 IAM info", err)
+       }
+
+       info := EC2IAMInfo{}
+       if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+               return EC2IAMInfo{},
+                       awserr.New("SerializationError",
+                               "failed to decode EC2 IAM info", err)
+       }
+
+       if info.Code != "Success" {
+               errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+               return EC2IAMInfo{},
+                       awserr.New("EC2MetadataError", errMsg, nil)
+       }
+
+       return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+       resp, err := c.GetMetadata("placement/availability-zone")
+       if err != nil {
+               return "", err
+       }
+
+       // returns region without the suffix. Eg: us-west-2a becomes us-west-2
+       return resp[:len(resp)-1], nil
+}
+
+// Available returns if the application has access to the EC2 Metadata service.
+// Can be used to determine if the application is running within an EC2 Instance
+// and the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+       if _, err := c.GetMetadata("instance-id"); err != nil {
+               return false
+       }
+
+       return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshaling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+       Code               string
+       LastUpdated        time.Time
+       InstanceProfileArn string
+       InstanceProfileID  string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshaling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+       DevpayProductCodes []string  `json:"devpayProductCodes"`
+       AvailabilityZone   string    `json:"availabilityZone"`
+       PrivateIP          string    `json:"privateIp"`
+       Version            string    `json:"version"`
+       Region             string    `json:"region"`
+       InstanceID         string    `json:"instanceId"`
+       BillingProducts    []string  `json:"billingProducts"`
+       InstanceType       string    `json:"instanceType"`
+       AccountID          string    `json:"accountId"`
+       PendingTime        time.Time `json:"pendingTime"`
+       ImageID            string    `json:"imageId"`
+       KernelID           string    `json:"kernelId"`
+       RamdiskID          string    `json:"ramdiskId"`
+       Architecture       string    `json:"architecture"`
+}
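A sketch tying the accessors above together (only returns data when run on an
EC2 instance):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        svc := ec2metadata.New(session.Must(session.NewSession()))

        if !svc.Available() {
            fmt.Println("metadata service unreachable; not on EC2?")
            return
        }

        region, _ := svc.Region()
        doc, _ := svc.GetInstanceIdentityDocument()
        fmt.Println(region, doc.InstanceID, doc.InstanceType)
    }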
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644 (file)
index 0000000..5b4379d
--- /dev/null
@@ -0,0 +1,124 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+package ec2metadata
+
+import (
+       "bytes"
+       "errors"
+       "io"
+       "net/http"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/client"
+       "github.com/aws/aws-sdk-go/aws/client/metadata"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service Client.
+type EC2Metadata struct {
+       *client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+//     // Create a EC2Metadata client from just a session.
+//     svc := ec2metadata.New(mySession)
+//
+//     // Create a EC2Metadata client with additional configuration
+//     svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+       c := p.ClientConfig(ServiceName, cfgs...)
+       return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default is provided, or no client
+// at all, the EC2Metadata client's HTTP timeout will be shortened.
+// To disable this behavior set Config.EC2MetadataDisableTimeoutOverride to true.
+// The override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+       if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+               // If the http client is unmodified and this feature is not disabled
+               // set custom timeouts for EC2Metadata requests.
+               cfg.HTTPClient = &http.Client{
+                       // use a shorter timeout than default because the metadata
+                       // service is local if it is running, and to fail faster
+                       // if not running on an ec2 instance.
+                       Timeout: 5 * time.Second,
+               }
+       }
+
+       svc := &EC2Metadata{
+               Client: client.New(
+                       cfg,
+                       metadata.ClientInfo{
+                               ServiceName: ServiceName,
+                               Endpoint:    endpoint,
+                               APIVersion:  "latest",
+                       },
+                       handlers,
+               ),
+       }
+
+       svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+       svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+       svc.Handlers.Validate.Clear()
+       svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+       // Add additional options to the service config
+       for _, option := range opts {
+               option(svc.Client)
+       }
+
+       return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+       return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+       Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+       defer r.HTTPResponse.Body.Close()
+       b := &bytes.Buffer{}
+       if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+               r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+               return
+       }
+
+       if data, ok := r.Data.(*metadataOutput); ok {
+               data.Content = b.String()
+       }
+}
+
+func unmarshalError(r *request.Request) {
+       defer r.HTTPResponse.Body.Close()
+       b := &bytes.Buffer{}
+       if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+               r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+               return
+       }
+
+       // Response body format is not consistent between metadata endpoints.
+       // Grab the error message as a string and include that as the source error
+       r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+       if r.ClientInfo.Endpoint == "" {
+               r.Error = aws.ErrMissingEndpoint
+       }
+}
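A sketch of the session-less constructor, mirroring how defaults.go resolves
the endpoint for its EC2 role provider; the literal endpoint matches the
aws-global ec2metadata customization in decode.go below, and the signing
region is left empty for illustration:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/defaults"
        "github.com/aws/aws-sdk-go/aws/ec2metadata"
    )

    func main() {
        def := defaults.Get()

        svc := ec2metadata.NewClient(*def.Config, def.Handlers,
            "http://169.254.169.254/latest", "")

        fmt.Println("metadata service available:", svc.Available())
    }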
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644 (file)
index 0000000..74f72de
--- /dev/null
@@ -0,0 +1,133 @@
+package endpoints
+
+import (
+       "encoding/json"
+       "fmt"
+       "io"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// DecodeModelOptions are the options controlling how the endpoints model
+// definition is decoded.
+type DecodeModelOptions struct {
+       SkipCustomizations bool
+}
+
+// Set applies all of the option functions to d.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+       for _, fn := range optFns {
+               fn(d)
+       }
+}
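+
+// A hedged usage sketch: option functions are passed variadically to
+// DecodeModel and mutate a DecodeModelOptions value before decoding begins
+// (r is any io.Reader yielding the JSON model):
+//
+//    resolver, err := endpoints.DecodeModel(r,
+//        func(o *endpoints.DecodeModelOptions) {
+//            o.SkipCustomizations = true
+//        },
+//    )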
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error
+// occurs while unmarshaling the model, an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions allows you to
+// get the list of partitions in the order the endpoints will be resolved in.
+//
+//    resolver, err := endpoints.DecodeModel(reader)
+//
+//    partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//    for _, p := range partitions {
+//        // ... inspect partitions
+//    }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+       var opts DecodeModelOptions
+       opts.Set(optFns...)
+
+       // Get the version of the partition file to determine what
+       // unmarshaling model to use.
+       modelDef := modelDefinition{}
+       if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
+               return nil, newDecodeModelError("failed to decode endpoints model", err)
+       }
+
+       var version string
+       if b, ok := modelDef["version"]; ok {
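+               // b is a json.RawMessage, so a numeric version field such as
+               // 3 stringifies to "3" for the comparison below.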
+               version = string(b)
+       } else {
+               return nil, newDecodeModelError("endpoints version not found in model", nil)
+       }
+
+       if version == "3" {
+               return decodeV3Endpoints(modelDef, opts)
+       }
+
+       return nil, newDecodeModelError(
+               fmt.Sprintf("endpoints version %s is not supported", version), nil)
+}
+
+func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
+       b, ok := modelDef["partitions"]
+       if !ok {
+               return nil, newDecodeModelError("endpoints model missing partitions", nil)
+       }
+
+       ps := partitions{}
+       if err := json.Unmarshal(b, &ps); err != nil {
+               return nil, newDecodeModelError("failed to decode endpoints model", err)
+       }
+
+       if opts.SkipCustomizations {
+               return ps, nil
+       }
+
+       // Customization
+       for i := 0; i < len(ps); i++ {
+               p := &ps[i]
+               custAddEC2Metadata(p)
+               custAddS3DualStack(p)
+               custRmIotDataService(p)
+       }
+
+       return ps, nil
+}
+
+func custAddS3DualStack(p *partition) {
+       if p.ID != "aws" {
+               return
+       }
+
+       s, ok := p.Services["s3"]
+       if !ok {
+               return
+       }
+
+       s.Defaults.HasDualStack = boxedTrue
+       s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}"
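+       // With the template expanded this yields, for example,
+       // "s3.dualstack.us-west-2.amazonaws.com" in the us-west-2 region.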
+
+       p.Services["s3"] = s
+}
+
+func custAddEC2Metadata(p *partition) {
+       p.Services["ec2metadata"] = service{
+               IsRegionalized:    boxedFalse,
+               PartitionEndpoint: "aws-global",
+               Endpoints: endpoints{
+                       "aws-global": endpoint{
+                               Hostname:  "169.254.169.254/latest",
+                               Protocols: []string{"http"},
+                       },
+               },
+       }
+}
+
+func custRmIotDataService(p *partition) {
+       delete(p.Services, "data.iot")
+}
+
+type decodeModelError struct {
+       awsError
+}
+
+func newDecodeModelError(msg string, err error) decodeModelError {
+       return decodeModelError{
+               awsError: awserr.New("DecodeEndpointsModelError", msg, err),
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
new file mode 100644 (file)
index 0000000..e6d7ede
--- /dev/null
@@ -0,0 +1,2174 @@
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+       "regexp"
+)
+
+// Partition identifiers
+const (
+       AwsPartitionID      = "aws"        // AWS Standard partition.
+       AwsCnPartitionID    = "aws-cn"     // AWS China partition.
+       AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
+)
+
+// AWS Standard partition's regions.
+const (
+       ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
+       ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
+       ApSouth1RegionID     = "ap-south-1"     // Asia Pacific (Mumbai).
+       ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
+       ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+       CaCentral1RegionID   = "ca-central-1"   // Canada (Central).
+       EuCentral1RegionID   = "eu-central-1"   // EU (Frankfurt).
+       EuWest1RegionID      = "eu-west-1"      // EU (Ireland).
+       EuWest2RegionID      = "eu-west-2"      // EU (London).
+       SaEast1RegionID      = "sa-east-1"      // South America (Sao Paulo).
+       UsEast1RegionID      = "us-east-1"      // US East (N. Virginia).
+       UsEast2RegionID      = "us-east-2"      // US East (Ohio).
+       UsWest1RegionID      = "us-west-1"      // US West (N. California).
+       UsWest2RegionID      = "us-west-2"      // US West (Oregon).
+)
+
+// AWS China partition's regions.
+const (
+       CnNorth1RegionID = "cn-north-1" // China (Beijing).
+)
+
+// AWS GovCloud (US) partition's regions.
+const (
+       UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US).
+)
+
+// Service identifiers
+const (
+       AcmServiceID                          = "acm"                          // Acm.
+       ApigatewayServiceID                   = "apigateway"                   // Apigateway.
+       ApplicationAutoscalingServiceID       = "application-autoscaling"      // ApplicationAutoscaling.
+       Appstream2ServiceID                   = "appstream2"                   // Appstream2.
+       AutoscalingServiceID                  = "autoscaling"                  // Autoscaling.
+       BatchServiceID                        = "batch"                        // Batch.
+       BudgetsServiceID                      = "budgets"                      // Budgets.
+       ClouddirectoryServiceID               = "clouddirectory"               // Clouddirectory.
+       CloudformationServiceID               = "cloudformation"               // Cloudformation.
+       CloudfrontServiceID                   = "cloudfront"                   // Cloudfront.
+       CloudhsmServiceID                     = "cloudhsm"                     // Cloudhsm.
+       CloudsearchServiceID                  = "cloudsearch"                  // Cloudsearch.
+       CloudtrailServiceID                   = "cloudtrail"                   // Cloudtrail.
+       CodebuildServiceID                    = "codebuild"                    // Codebuild.
+       CodecommitServiceID                   = "codecommit"                   // Codecommit.
+       CodedeployServiceID                   = "codedeploy"                   // Codedeploy.
+       CodepipelineServiceID                 = "codepipeline"                 // Codepipeline.
+       CodestarServiceID                     = "codestar"                     // Codestar.
+       CognitoIdentityServiceID              = "cognito-identity"             // CognitoIdentity.
+       CognitoIdpServiceID                   = "cognito-idp"                  // CognitoIdp.
+       CognitoSyncServiceID                  = "cognito-sync"                 // CognitoSync.
+       ConfigServiceID                       = "config"                       // Config.
+       CurServiceID                          = "cur"                          // Cur.
+       DatapipelineServiceID                 = "datapipeline"                 // Datapipeline.
+       DevicefarmServiceID                   = "devicefarm"                   // Devicefarm.
+       DirectconnectServiceID                = "directconnect"                // Directconnect.
+       DiscoveryServiceID                    = "discovery"                    // Discovery.
+       DmsServiceID                          = "dms"                          // Dms.
+       DsServiceID                           = "ds"                           // Ds.
+       DynamodbServiceID                     = "dynamodb"                     // Dynamodb.
+       Ec2ServiceID                          = "ec2"                          // Ec2.
+       Ec2metadataServiceID                  = "ec2metadata"                  // Ec2metadata.
+       EcrServiceID                          = "ecr"                          // Ecr.
+       EcsServiceID                          = "ecs"                          // Ecs.
+       ElasticacheServiceID                  = "elasticache"                  // Elasticache.
+       ElasticbeanstalkServiceID             = "elasticbeanstalk"             // Elasticbeanstalk.
+       ElasticfilesystemServiceID            = "elasticfilesystem"            // Elasticfilesystem.
+       ElasticloadbalancingServiceID         = "elasticloadbalancing"         // Elasticloadbalancing.
+       ElasticmapreduceServiceID             = "elasticmapreduce"             // Elasticmapreduce.
+       ElastictranscoderServiceID            = "elastictranscoder"            // Elastictranscoder.
+       EmailServiceID                        = "email"                        // Email.
+       EntitlementMarketplaceServiceID       = "entitlement.marketplace"      // EntitlementMarketplace.
+       EsServiceID                           = "es"                           // Es.
+       EventsServiceID                       = "events"                       // Events.
+       FirehoseServiceID                     = "firehose"                     // Firehose.
+       GameliftServiceID                     = "gamelift"                     // Gamelift.
+       GlacierServiceID                      = "glacier"                      // Glacier.
+       HealthServiceID                       = "health"                       // Health.
+       IamServiceID                          = "iam"                          // Iam.
+       ImportexportServiceID                 = "importexport"                 // Importexport.
+       InspectorServiceID                    = "inspector"                    // Inspector.
+       IotServiceID                          = "iot"                          // Iot.
+       KinesisServiceID                      = "kinesis"                      // Kinesis.
+       KinesisanalyticsServiceID             = "kinesisanalytics"             // Kinesisanalytics.
+       KmsServiceID                          = "kms"                          // Kms.
+       LambdaServiceID                       = "lambda"                       // Lambda.
+       LightsailServiceID                    = "lightsail"                    // Lightsail.
+       LogsServiceID                         = "logs"                         // Logs.
+       MachinelearningServiceID              = "machinelearning"              // Machinelearning.
+       MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
+       MeteringMarketplaceServiceID          = "metering.marketplace"         // MeteringMarketplace.
+       MobileanalyticsServiceID              = "mobileanalytics"              // Mobileanalytics.
+       ModelsLexServiceID                    = "models.lex"                   // ModelsLex.
+       MonitoringServiceID                   = "monitoring"                   // Monitoring.
+       MturkRequesterServiceID               = "mturk-requester"              // MturkRequester.
+       OpsworksServiceID                     = "opsworks"                     // Opsworks.
+       OpsworksCmServiceID                   = "opsworks-cm"                  // OpsworksCm.
+       OrganizationsServiceID                = "organizations"                // Organizations.
+       PinpointServiceID                     = "pinpoint"                     // Pinpoint.
+       PollyServiceID                        = "polly"                        // Polly.
+       RdsServiceID                          = "rds"                          // Rds.
+       RedshiftServiceID                     = "redshift"                     // Redshift.
+       RekognitionServiceID                  = "rekognition"                  // Rekognition.
+       Route53ServiceID                      = "route53"                      // Route53.
+       Route53domainsServiceID               = "route53domains"               // Route53domains.
+       RuntimeLexServiceID                   = "runtime.lex"                  // RuntimeLex.
+       S3ServiceID                           = "s3"                           // S3.
+       SdbServiceID                          = "sdb"                          // Sdb.
+       ServicecatalogServiceID               = "servicecatalog"               // Servicecatalog.
+       ShieldServiceID                       = "shield"                       // Shield.
+       SmsServiceID                          = "sms"                          // Sms.
+       SnowballServiceID                     = "snowball"                     // Snowball.
+       SnsServiceID                          = "sns"                          // Sns.
+       SqsServiceID                          = "sqs"                          // Sqs.
+       SsmServiceID                          = "ssm"                          // Ssm.
+       StatesServiceID                       = "states"                       // States.
+       StoragegatewayServiceID               = "storagegateway"               // Storagegateway.
+       StreamsDynamodbServiceID              = "streams.dynamodb"             // StreamsDynamodb.
+       StsServiceID                          = "sts"                          // Sts.
+       SupportServiceID                      = "support"                      // Support.
+       SwfServiceID                          = "swf"                          // Swf.
+       TaggingServiceID                      = "tagging"                      // Tagging.
+       WafServiceID                          = "waf"                          // Waf.
+       WafRegionalServiceID                  = "waf-regional"                 // WafRegional.
+       WorkdocsServiceID                     = "workdocs"                     // Workdocs.
+       WorkspacesServiceID                   = "workspaces"                   // Workspaces.
+       XrayServiceID                         = "xray"                         // Xray.
+)
+
+// DefaultResolver returns an Endpoint resolver that will be able
+// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+// Use DefaultPartitions() to get the list of the default partitions.
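+//
+// A hedged resolution sketch ("s3" and "us-west-2" are example inputs to the
+// Resolver interface's EndpointFor method):
+//
+//    resolver := endpoints.DefaultResolver()
+//    ep, err := resolver.EndpointFor("s3", "us-west-2")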
+func DefaultResolver() Resolver {
+       return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
+//
+//    partitions := endpoints.DefaultPartitions()
+//    for _, p := range partitions {
+//        // ... inspect partitions
+//    }
+func DefaultPartitions() []Partition {
+       return defaultPartitions.Partitions()
+}
+
+var defaultPartitions = partitions{
+       awsPartition,
+       awscnPartition,
+       awsusgovPartition,
+}
+
+// AwsPartition returns the Partition for AWS Standard.
+func AwsPartition() Partition {
+       return awsPartition.Partition()
+}
+
+var awsPartition = partition{
+       ID:        "aws",
+       Name:      "AWS Standard",
+       DNSSuffix: "amazonaws.com",
+       RegionRegex: regionRegex{
+               Regexp: func() *regexp.Regexp {
+                       reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$")
+                       return reg
+               }(),
+       },
+       Defaults: endpoint{
+               Hostname:          "{service}.{region}.{dnsSuffix}",
+               Protocols:         []string{"https"},
+               SignatureVersions: []string{"v4"},
+       },
+       Regions: regions{
+               "ap-northeast-1": region{
+                       Description: "Asia Pacific (Tokyo)",
+               },
+               "ap-northeast-2": region{
+                       Description: "Asia Pacific (Seoul)",
+               },
+               "ap-south-1": region{
+                       Description: "Asia Pacific (Mumbai)",
+               },
+               "ap-southeast-1": region{
+                       Description: "Asia Pacific (Singapore)",
+               },
+               "ap-southeast-2": region{
+                       Description: "Asia Pacific (Sydney)",
+               },
+               "ca-central-1": region{
+                       Description: "Canada (Central)",
+               },
+               "eu-central-1": region{
+                       Description: "EU (Frankfurt)",
+               },
+               "eu-west-1": region{
+                       Description: "EU (Ireland)",
+               },
+               "eu-west-2": region{
+                       Description: "EU (London)",
+               },
+               "sa-east-1": region{
+                       Description: "South America (Sao Paulo)",
+               },
+               "us-east-1": region{
+                       Description: "US East (N. Virginia)",
+               },
+               "us-east-2": region{
+                       Description: "US East (Ohio)",
+               },
+               "us-west-1": region{
+                       Description: "US West (N. California)",
+               },
+               "us-west-2": region{
+                       Description: "US West (Oregon)",
+               },
+       },
+       Services: services{
+               "acm": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "apigateway": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "application-autoscaling": service{
+                       Defaults: endpoint{
+                               Hostname:  "autoscaling.{region}.amazonaws.com",
+                               Protocols: []string{"http", "https"},
+                               CredentialScope: credentialScope{
+                                       Service: "application-autoscaling",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "appstream2": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"https"},
+                               CredentialScope: credentialScope{
+                                       Service: "appstream",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "autoscaling": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "batch": service{
+
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "budgets": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname: "budgets.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                       },
+               },
+               "clouddirectory": service{
+
+                       Endpoints: endpoints{
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "cloudformation": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "cloudfront": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname:  "cloudfront.amazonaws.com",
+                                       Protocols: []string{"http", "https"},
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                       },
+               },
+               "cloudhsm": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "cloudsearch": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "cloudtrail": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "codebuild": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "codecommit": service{
+
+                       Endpoints: endpoints{
+                               "eu-west-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-east-2": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "codedeploy": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "codepipeline": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "codestar": service{
+
+                       Endpoints: endpoints{
+                               "eu-west-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-east-2": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "cognito-identity": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "cognito-idp": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "cognito-sync": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "config": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "cur": service{
+
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "datapipeline": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "devicefarm": service{
+
+                       Endpoints: endpoints{
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "directconnect": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "discovery": service{
+
+                       Endpoints: endpoints{
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "dms": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "ds": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "dynamodb": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "local": endpoint{
+                                       Hostname:  "localhost:8000",
+                                       Protocols: []string{"http"},
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                               "sa-east-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-east-2": endpoint{},
+                               "us-west-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "ec2": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "ec2metadata": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname:  "169.254.169.254/latest",
+                                       Protocols: []string{"http"},
+                               },
+                       },
+               },
+               "ecr": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "ecs": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "elasticache": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "elasticbeanstalk": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "elasticfilesystem": service{
+
+                       Endpoints: endpoints{
+                               "ap-southeast-2": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "elasticloadbalancing": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "elasticmapreduce": service{
+                       Defaults: endpoint{
+                               SSLCommonName: "{region}.{service}.{dnsSuffix}",
+                               Protocols:     []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1": endpoint{
+                                       SSLCommonName: "{service}.{region}.{dnsSuffix}",
+                               },
+                               "eu-west-1": endpoint{},
+                               "eu-west-2": endpoint{},
+                               "sa-east-1": endpoint{},
+                               "us-east-1": endpoint{
+                                       SSLCommonName: "{service}.{region}.{dnsSuffix}",
+                               },
+                               "us-east-2": endpoint{},
+                               "us-west-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "elastictranscoder": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "email": service{
+
+                       Endpoints: endpoints{
+                               "eu-west-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "entitlement.marketplace": service{
+                       Defaults: endpoint{
+                               CredentialScope: credentialScope{
+                                       Service: "aws-marketplace",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "es": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "events": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "firehose": service{
+
+                       Endpoints: endpoints{
+                               "eu-west-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "gamelift": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "glacier": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "health": service{
+
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "iam": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname: "iam.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                       },
+               },
+               "importexport": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname:          "importexport.amazonaws.com",
+                                       SignatureVersions: []string{"v2", "v4"},
+                                       CredentialScope: credentialScope{
+                                               Region:  "us-east-1",
+                                               Service: "IngestionService",
+                                       },
+                               },
+                       },
+               },
+               "inspector": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "iot": service{
+                       Defaults: endpoint{
+                               CredentialScope: credentialScope{
+                                       Service: "execute-api",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "kinesis": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "kinesisanalytics": service{
+
+                       Endpoints: endpoints{
+                               "eu-west-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "kms": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "lambda": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "lightsail": service{
+
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "logs": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "machinelearning": service{
+
+                       Endpoints: endpoints{
+                               "eu-west-1": endpoint{},
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "marketplacecommerceanalytics": service{
+
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "metering.marketplace": service{
+                       Defaults: endpoint{
+                               CredentialScope: credentialScope{
+                                       Service: "aws-marketplace",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "mobileanalytics": service{
+
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "models.lex": service{
+                       Defaults: endpoint{
+                               CredentialScope: credentialScope{
+                                       Service: "lex",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "monitoring": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "mturk-requester": service{
+                       IsRegionalized: boxedFalse,
+
+                       Endpoints: endpoints{
+                               "sandbox": endpoint{
+                                       Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+                               },
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "opsworks": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "opsworks-cm": service{
+
+                       Endpoints: endpoints{
+                               "eu-west-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "organizations": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname: "organizations.us-east-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                       },
+               },
+               "pinpoint": service{
+                       Defaults: endpoint{
+                               CredentialScope: credentialScope{
+                                       Service: "mobiletargeting",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "polly": service{
+
+                       Endpoints: endpoints{
+                               "eu-west-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-east-2": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "rds": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1": endpoint{
+                                       SSLCommonName: "{service}.{dnsSuffix}",
+                               },
+                               "us-east-2": endpoint{},
+                               "us-west-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "redshift": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "rekognition": service{
+
+                       Endpoints: endpoints{
+                               "eu-west-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "route53": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname: "route53.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                       },
+               },
+               "route53domains": service{
+
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "runtime.lex": service{
+                       Defaults: endpoint{
+                               CredentialScope: credentialScope{
+                                       Service: "lex",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "s3": service{
+                       PartitionEndpoint: "us-east-1",
+                       IsRegionalized:    boxedTrue,
+                       Defaults: endpoint{
+                               Protocols:         []string{"http", "https"},
+                               SignatureVersions: []string{"s3v4"},
+
+                               HasDualStack:      boxedTrue,
+                               DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}",
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{
+                                       Hostname:          "s3-ap-northeast-1.amazonaws.com",
+                                       SignatureVersions: []string{"s3", "s3v4"},
+                               },
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{
+                                       Hostname:          "s3-ap-southeast-1.amazonaws.com",
+                                       SignatureVersions: []string{"s3", "s3v4"},
+                               },
+                               "ap-southeast-2": endpoint{
+                                       Hostname:          "s3-ap-southeast-2.amazonaws.com",
+                                       SignatureVersions: []string{"s3", "s3v4"},
+                               },
+                               "ca-central-1": endpoint{},
+                               "eu-central-1": endpoint{},
+                               "eu-west-1": endpoint{
+                                       Hostname:          "s3-eu-west-1.amazonaws.com",
+                                       SignatureVersions: []string{"s3", "s3v4"},
+                               },
+                               "eu-west-2": endpoint{},
+                               "s3-external-1": endpoint{
+                                       Hostname:          "s3-external-1.amazonaws.com",
+                                       SignatureVersions: []string{"s3", "s3v4"},
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                               "sa-east-1": endpoint{
+                                       Hostname:          "s3-sa-east-1.amazonaws.com",
+                                       SignatureVersions: []string{"s3", "s3v4"},
+                               },
+                               "us-east-1": endpoint{
+                                       Hostname:          "s3.amazonaws.com",
+                                       SignatureVersions: []string{"s3", "s3v4"},
+                               },
+                               "us-east-2": endpoint{},
+                               "us-west-1": endpoint{
+                                       Hostname:          "s3-us-west-1.amazonaws.com",
+                                       SignatureVersions: []string{"s3", "s3v4"},
+                               },
+                               "us-west-2": endpoint{
+                                       Hostname:          "s3-us-west-2.amazonaws.com",
+                                       SignatureVersions: []string{"s3", "s3v4"},
+                               },
+                       },
+               },
+               "sdb": service{
+                       Defaults: endpoint{
+                               Protocols:         []string{"http", "https"},
+                               SignatureVersions: []string{"v2"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1": endpoint{
+                                       Hostname: "sdb.amazonaws.com",
+                               },
+                               "us-west-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "servicecatalog": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "shield": service{
+                       IsRegionalized: boxedFalse,
+                       Defaults: endpoint{
+                               SSLCommonName: "Shield.us-east-1.amazonaws.com",
+                               Protocols:     []string{"https"},
+                       },
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "sms": service{
+
+                       Endpoints: endpoints{
+                               "ap-southeast-2": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                       },
+               },
+               "snowball": service{
+
+                       Endpoints: endpoints{
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "sns": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "sqs": service{
+                       Defaults: endpoint{
+                               SSLCommonName: "{region}.queue.{dnsSuffix}",
+                               Protocols:     []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1": endpoint{
+                                       SSLCommonName: "queue.{dnsSuffix}",
+                               },
+                               "us-east-2": endpoint{},
+                               "us-west-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "ssm": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "states": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "storagegateway": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "streams.dynamodb": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "http", "https", "https"},
+                               CredentialScope: credentialScope{
+                                       Service: "dynamodb",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "local": endpoint{
+                                       Hostname:  "localhost:8000",
+                                       Protocols: []string{"http"},
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                               "sa-east-1": endpoint{},
+                               "us-east-1": endpoint{},
+                               "us-east-2": endpoint{},
+                               "us-west-1": endpoint{},
+                               "us-west-2": endpoint{},
+                       },
+               },
+               "sts": service{
+                       PartitionEndpoint: "aws-global",
+                       Defaults: endpoint{
+                               Hostname: "sts.amazonaws.com",
+                               CredentialScope: credentialScope{
+                                       Region: "us-east-1",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{
+                                       Hostname: "sts.ap-northeast-2.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "ap-northeast-2",
+                                       },
+                               },
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "aws-global":     endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "support": service{
+
+                       Endpoints: endpoints{
+                               "us-east-1": endpoint{},
+                       },
+               },
+               "swf": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "tagging": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "ca-central-1":   endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "eu-west-2":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "waf": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname: "waf.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-east-1",
+                                       },
+                               },
+                       },
+               },
+               "waf-regional": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "workdocs": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "workspaces": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+               "xray": service{
+
+                       Endpoints: endpoints{
+                               "ap-northeast-1": endpoint{},
+                               "ap-northeast-2": endpoint{},
+                               "ap-south-1":     endpoint{},
+                               "ap-southeast-1": endpoint{},
+                               "ap-southeast-2": endpoint{},
+                               "eu-central-1":   endpoint{},
+                               "eu-west-1":      endpoint{},
+                               "sa-east-1":      endpoint{},
+                               "us-east-1":      endpoint{},
+                               "us-east-2":      endpoint{},
+                               "us-west-1":      endpoint{},
+                               "us-west-2":      endpoint{},
+                       },
+               },
+       },
+}
+
+// AwsCnPartition returns the Resolver for AWS China.
+func AwsCnPartition() Partition {
+       return awscnPartition.Partition()
+}
+
+var awscnPartition = partition{
+       ID:        "aws-cn",
+       Name:      "AWS China",
+       DNSSuffix: "amazonaws.com.cn",
+       RegionRegex: regionRegex{
+               Regexp: func() *regexp.Regexp {
+                       reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
+                       return reg
+               }(),
+       },
+       Defaults: endpoint{
+               Hostname:          "{service}.{region}.{dnsSuffix}",
+               Protocols:         []string{"https"},
+               SignatureVersions: []string{"v4"},
+       },
+       Regions: regions{
+               "cn-north-1": region{
+                       Description: "China (Beijing)",
+               },
+       },
+       Services: services{
+               "autoscaling": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "cloudformation": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "cloudtrail": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "codedeploy": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "config": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "directconnect": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "dynamodb": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "ec2": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "ec2metadata": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname:  "169.254.169.254/latest",
+                                       Protocols: []string{"http"},
+                               },
+                       },
+               },
+               "elasticache": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "elasticbeanstalk": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "elasticloadbalancing": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "elasticmapreduce": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "events": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "glacier": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "iam": service{
+                       PartitionEndpoint: "aws-cn-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-cn-global": endpoint{
+                                       Hostname: "iam.cn-north-1.amazonaws.com.cn",
+                                       CredentialScope: credentialScope{
+                                               Region: "cn-north-1",
+                                       },
+                               },
+                       },
+               },
+               "kinesis": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "logs": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "monitoring": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "rds": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "redshift": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "s3": service{
+                       Defaults: endpoint{
+                               Protocols:         []string{"http", "https"},
+                               SignatureVersions: []string{"s3v4"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "sns": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "sqs": service{
+                       Defaults: endpoint{
+                               SSLCommonName: "{region}.queue.{dnsSuffix}",
+                               Protocols:     []string{"http", "https"},
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "storagegateway": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "streams.dynamodb": service{
+                       Defaults: endpoint{
+                               Protocols: []string{"http", "http", "https", "https"},
+                               CredentialScope: credentialScope{
+                                       Service: "dynamodb",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "sts": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "swf": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+               "tagging": service{
+
+                       Endpoints: endpoints{
+                               "cn-north-1": endpoint{},
+                       },
+               },
+       },
+}
+
+// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
+func AwsUsGovPartition() Partition {
+       return awsusgovPartition.Partition()
+}
+
+var awsusgovPartition = partition{
+       ID:        "aws-us-gov",
+       Name:      "AWS GovCloud (US)",
+       DNSSuffix: "amazonaws.com",
+       RegionRegex: regionRegex{
+               Regexp: func() *regexp.Regexp {
+                       reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
+                       return reg
+               }(),
+       },
+       Defaults: endpoint{
+               Hostname:          "{service}.{region}.{dnsSuffix}",
+               Protocols:         []string{"https"},
+               SignatureVersions: []string{"v4"},
+       },
+       Regions: regions{
+               "us-gov-west-1": region{
+                       Description: "AWS GovCloud (US)",
+               },
+       },
+       Services: services{
+               "autoscaling": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{
+                                       Protocols: []string{"http", "https"},
+                               },
+                       },
+               },
+               "cloudformation": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "cloudhsm": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "cloudtrail": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "config": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "directconnect": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "dynamodb": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "ec2": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "ec2metadata": service{
+                       PartitionEndpoint: "aws-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-global": endpoint{
+                                       Hostname:  "169.254.169.254/latest",
+                                       Protocols: []string{"http"},
+                               },
+                       },
+               },
+               "elasticache": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "elasticloadbalancing": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{
+                                       Protocols: []string{"http", "https"},
+                               },
+                       },
+               },
+               "elasticmapreduce": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{
+                                       Protocols: []string{"http", "https"},
+                               },
+                       },
+               },
+               "glacier": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{
+                                       Protocols: []string{"http", "https"},
+                               },
+                       },
+               },
+               "iam": service{
+                       PartitionEndpoint: "aws-us-gov-global",
+                       IsRegionalized:    boxedFalse,
+
+                       Endpoints: endpoints{
+                               "aws-us-gov-global": endpoint{
+                                       Hostname: "iam.us-gov.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-gov-west-1",
+                                       },
+                               },
+                       },
+               },
+               "kinesis": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "kms": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "logs": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "monitoring": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "rds": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "redshift": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "s3": service{
+                       Defaults: endpoint{
+                               SignatureVersions: []string{"s3", "s3v4"},
+                       },
+                       Endpoints: endpoints{
+                               "fips-us-gov-west-1": endpoint{
+                                       Hostname: "s3-fips-us-gov-west-1.amazonaws.com",
+                                       CredentialScope: credentialScope{
+                                               Region: "us-gov-west-1",
+                                       },
+                               },
+                               "us-gov-west-1": endpoint{
+                                       Hostname:  "s3-us-gov-west-1.amazonaws.com",
+                                       Protocols: []string{"http", "https"},
+                               },
+                       },
+               },
+               "snowball": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "sns": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{
+                                       Protocols: []string{"http", "https"},
+                               },
+                       },
+               },
+               "sqs": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{
+                                       SSLCommonName: "{region}.queue.{dnsSuffix}",
+                                       Protocols:     []string{"http", "https"},
+                               },
+                       },
+               },
+               "streams.dynamodb": service{
+                       Defaults: endpoint{
+                               CredentialScope: credentialScope{
+                                       Service: "dynamodb",
+                               },
+                       },
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "sts": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+               "swf": service{
+
+                       Endpoints: endpoints{
+                               "us-gov-west-1": endpoint{},
+                       },
+               },
+       },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
new file mode 100644 (file)
index 0000000..a0e9bc4
--- /dev/null
@@ -0,0 +1,66 @@
+// Package endpoints provides the types and functionality for defining regions
+// and endpoints, as well as querying those definitions.
+//
+// The SDK's Regions and Endpoints metadata is code generated into the endpoints
+// package, and is accessible via the DefaultResolver function. This function
+// returns an endpoint Resolver that will search the metadata and build an
+// associated endpoint if one is found. The default resolver will search all
+// partitions known by the SDK, e.g. AWS Standard (aws), AWS China (aws-cn),
+// and AWS GovCloud (US) (aws-us-gov).
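+//
+// For example, an endpoint can be resolved directly for a service and region
+// (a usage sketch; the service and region values here are illustrative):
+//
+//     ep, err := endpoints.DefaultResolver().EndpointFor("s3", "us-west-2")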
+//
+// Enumerating Regions and Endpoint Metadata
+//
+// Casting the Resolver returned by DefaultResolver to an EnumPartitions interface
+// will allow you to get access to the list of underlying Partitions with the
+// Partitions method. This is helpful if you want to limit the SDK's endpoint
+// resolving to a single partition, or enumerate regions, services, and endpoints
+// in the partition.
+//
+//     resolver := endpoints.DefaultResolver()
+//     partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//
+//     for _, p := range partitions {
+//         fmt.Println("Regions for", p.Name)
+//         for id := range p.Regions() {
+//             fmt.Println("*", id)
+//         }
+//
+//         fmt.Println("Services for", p.Name)
+//         for id := range p.Services() {
+//             fmt.Println("*", id)
+//         }
+//     }
+//
+// Using Custom Endpoints
+//
+// The endpoints package also gives you the ability to use your own logic for how
+// endpoints are resolved. This is a great way to define a custom endpoint
+// for select services, without passing that logic down through your code.
+//
+// If a type implements the Resolver interface it can be used to resolve
+// endpoints. To use this with the SDK's Session and Config, set the value
+// of the type to the EndpointResolver field of aws.Config when initializing
+// the session or service client.
+//
+// In addition, ResolverFunc is a wrapper for a func matching the signature
+// of Resolver.EndpointFor, converting it to a type that satisfies the
+// Resolver interface.
+//
+//     myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+//         if service == endpoints.S3ServiceID {
+//             return endpoints.ResolvedEndpoint{
+//                 URL:           "s3.custom.endpoint.com",
+//                 SigningRegion: "custom-signing-region",
+//             }, nil
+//         }
+//
+//         return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+//     }
+//
+//     sess := session.Must(session.NewSession(&aws.Config{
+//         Region:           aws.String("us-west-2"),
+//         EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+//     }))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644 (file)
index 0000000..9c3eedb
--- /dev/null
@@ -0,0 +1,439 @@
+package endpoints
+
+import (
+       "fmt"
+       "regexp"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+       // DisableSSL forces the endpoint to be resolved as HTTP
+       // instead of HTTPS if the service supports it.
+       DisableSSL bool
+
+       // UseDualStack sets the resolver to resolve the endpoint as a dualstack endpoint
+       // for the service. If dualstack support for a service is not known and
+       // StrictMatching is not enabled a dualstack endpoint for the service will
+       // be returned. This endpoint may not be valid. If StrictMatching is
+       // enabled only services that are known to support dualstack will return
+       // dualstack endpoints.
+       UseDualStack bool
+
+       // StrictMatching enables strict matching of the service and region when
+       // resolving endpoints. If the partition doesn't enumerate the exact
+       // service and region, an error will be returned. This option prevents
+       // returning endpoints that look valid but may not resolve to any real
+       // endpoint.
+       StrictMatching bool
+
+       // ResolveUnknownService enables resolving a service endpoint based on the
+       // region provided if the service does not exist. The service endpoint ID
+       // will be used as the service domain name prefix. By default, the endpoint
+       // resolver requires the service to be known when resolving endpoints.
+       //
+       // If resolving an endpoint on the partition list, the provided region will
+       // be used to determine which partition's domain name pattern to combine the
+       // service endpoint ID with. If both the service and region are unknown and
+       // the endpoint is being resolved on the partition list, an
+       // UnknownEndpointError error will be returned.
+       //
+       // If resolving an endpoint on a partition-specific resolver, that partition's
+       // domain name pattern will be used with the service endpoint ID. If both
+       // region and service do not exist when resolving an endpoint on a specific
+       // partition, the partition's domain pattern will be used to combine the
+       // endpoint and region together.
+       //
+       // This option is ignored if StrictMatching is enabled.
+       ResolveUnknownService bool
+}
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+       for _, fn := range optFns {
+               fn(o)
+       }
+}
+
+// DisableSSLOption sets the DisableSSL options. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+       o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+       o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+       o.StrictMatching = true
+}
+
+// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
+// as a functional option when resolving endpoints.
+func ResolveUnknownServiceOption(o *Options) {
+       o.ResolveUnknownService = true
+}
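+
+// As a hedged sketch (the service and region values below are illustrative),
+// the functional options above are passed variadically to EndpointFor:
+//
+//     resolved, err := endpoints.DefaultResolver().EndpointFor(
+//         "s3", "us-west-2",
+//         endpoints.DisableSSLOption,
+//         endpoints.ResolveUnknownServiceOption,
+//     )
+//     if err == nil {
+//         fmt.Println(resolved.URL) // e.g. "http://s3.us-west-2.amazonaws.com"
+//     }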
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built-in Partition and the DefaultResolver's return value satisfy this interface.
+type Resolver interface {
+       EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+       return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+       if !schemeRE.MatchString(endpoint) {
+               scheme := "https"
+               if disableSSL {
+                       scheme = "http"
+               }
+               endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+       }
+
+       return endpoint
+}
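+
+// A brief sketch of AddScheme's behavior (the hostnames are illustrative):
+//
+//     endpoints.AddScheme("example.com", false)        // "https://example.com"
+//     endpoints.AddScheme("example.com", true)         // "http://example.com"
+//     endpoints.AddScheme("http://example.com", false) // unchanged, scheme already present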
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+       Partitions() []Partition
+}
+
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false will be returned
+// as the second return value.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+//    rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+//    rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+       for _, p := range ps {
+               if p.ID() != partitionID {
+                       continue
+               }
+               if _, ok := p.p.Services[serviceID]; !ok {
+                       break
+               }
+
+               s := Service{
+                       id: serviceID,
+                       p:  p.p,
+               }
+               return s.Regions(), true
+       }
+
+       return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+       for _, p := range ps {
+               if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+                       return p, true
+               }
+       }
+
+       return Partition{}, false
+}
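+
+// A minimal sketch of PartitionForRegion (the region ID is illustrative):
+//
+//     p, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), "us-west-2")
+//     if ok {
+//         fmt.Println(p.ID()) // e.g. "aws"
+//     }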
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+       id string
+       p  *partition
+}
+
+// ID returns the identifier of the partition.
+func (p Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata, an UnknownServiceError
+// will be returned. This validation occurs regardless of whether
+// StrictMatching is enabled. To enable resolving unknown services, set the
+// ResolveUnknownService option to true. When StrictMatching is disabled
+// this option allows the partition resolver to resolve an endpoint based on
+// the service endpoint ID provided.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled, the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new region and service expansions.
+//
+// Errors that can be returned.
+//   * UnknownServiceError
+//   * UnknownEndpointError
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+       return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p Partition) Regions() map[string]Region {
+       rs := map[string]Region{}
+       for id := range p.p.Regions {
+               rs[id] = Region{
+                       id: id,
+                       p:  p.p,
+               }
+       }
+
+       return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p Partition) Services() map[string]Service {
+       ss := map[string]Service{}
+       for id := range p.p.Services {
+               ss[id] = Service{
+                       id: id,
+                       p:  p.p,
+               }
+       }
+
+       return ss
+}
+
+// A Region provides information about a region, and ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+       id, desc string
+       p        *partition
+}
+
+// ID returns the region's identifier.
+func (r Region) ID() string { return r.id }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+       return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r Region) Services() map[string]Service {
+       ss := map[string]Service{}
+       for id, s := range r.p.Services {
+               if _, ok := s.Endpoints[r.id]; ok {
+                       ss[id] = Service{
+                               id: id,
+                               p:  r.p,
+                       }
+               }
+       }
+
+       return ss
+}
+
+// A Service provides information about a service, and ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+       id string
+       p  *partition
+}
+
+// ID returns the identifier for the service.
+func (s Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+       return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Regions returns a map of Regions that the service is present in.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Regions() map[string]Region {
+       rs := map[string]Region{}
+       for id := range s.p.Services[s.id].Endpoints {
+               if _, ok := s.p.Regions[id]; ok {
+                       rs[id] = Region{
+                               id: id,
+                               p:  s.p,
+                       }
+               }
+       }
+
+       return rs
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+//
+// A region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of a service.
+func (s Service) Endpoints() map[string]Endpoint {
+       es := map[string]Endpoint{}
+       for id := range s.p.Services[s.id].Endpoints {
+               es[id] = Endpoint{
+                       id:        id,
+                       serviceID: s.id,
+                       p:         s.p,
+               }
+       }
+
+       return es
+}
+
+// An Endpoint provides information about an endpoint, and the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+       id        string
+       serviceID string
+       p         *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier of the service the endpoint belongs to.
+func (e Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+       return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
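+
+// A hedged sketch of enumerating a service's endpoints and resolving each one
+// (the service ID is illustrative):
+//
+//     s := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID]
+//     for id, e := range s.Endpoints() {
+//         if resolved, err := e.ResolveEndpoint(); err == nil {
+//             fmt.Println(id, resolved.URL)
+//         }
+//     }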
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a
+// partition, service, and region.
+type ResolvedEndpoint struct {
+       // The endpoint URL
+       URL string
+
+       // The region that should be used for signing requests.
+       SigningRegion string
+
+       // The service name that should be used for signing requests.
+       SigningName string
+
+       // The signing method that should be used for signing requests.
+       SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the error structs below and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when, in StrictMatching mode, the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+       awsError
+       Partition string
+       Service   string
+       Region    string
+}
+
+// An UnknownServiceError is returned when a partition does not support the
+// service, and the service therefore does not resolve to an endpoint. It
+// includes a list of all known services for the partition.
+type UnknownServiceError struct {
+       awsError
+       Partition string
+       Service   string
+       Known     []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+       return UnknownServiceError{
+               awsError: awserr.New("UnknownServiceError",
+                       "could not resolve endpoint for unknown service", nil),
+               Partition: p,
+               Service:   s,
+               Known:     known,
+       }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+       extra := fmt.Sprintf("partition: %q, service: %q",
+               e.Partition, e.Service)
+       if len(e.Known) > 0 {
+               extra += fmt.Sprintf(", known: %v", e.Known)
+       }
+       return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+       return e.Error()
+}
+
+// An UnknownEndpointError is returned when, in StrictMatching mode, the
+// service is valid but the region does not resolve to an endpoint. It
+// includes a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+       awsError
+       Partition string
+       Service   string
+       Region    string
+       Known     []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+       return UnknownEndpointError{
+               awsError: awserr.New("UnknownEndpointError",
+                       "could not resolve endpoint", nil),
+               Partition: p,
+               Service:   s,
+               Region:    r,
+               Known:     known,
+       }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+       extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+               e.Partition, e.Service, e.Region)
+       if len(e.Known) > 0 {
+               extra += fmt.Sprintf(", known: %v", e.Known)
+       }
+       return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+       return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644 (file)
index 0000000..13d968a
--- /dev/null
@@ -0,0 +1,303 @@
+package endpoints
+
+import (
+       "fmt"
+       "regexp"
+       "strconv"
+       "strings"
+)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+       var opt Options
+       opt.Set(opts...)
+
+       for i := 0; i < len(ps); i++ {
+               if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+                       continue
+               }
+
+               return ps[i].EndpointFor(service, region, opts...)
+       }
+
+       // If loose matching, fall back to the first partition's format when
+       // resolving the endpoint.
+       if !opt.StrictMatching && len(ps) > 0 {
+               return ps[0].EndpointFor(service, region, opts...)
+       }
+
+       return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions, one for each partition in the SDK's endpoints model.
+func (ps partitions) Partitions() []Partition {
+       parts := make([]Partition, 0, len(ps))
+       for i := 0; i < len(ps); i++ {
+               parts = append(parts, ps[i].Partition())
+       }
+
+       return parts
+}
+
+type partition struct {
+       ID          string      `json:"partition"`
+       Name        string      `json:"partitionName"`
+       DNSSuffix   string      `json:"dnsSuffix"`
+       RegionRegex regionRegex `json:"regionRegex"`
+       Defaults    endpoint    `json:"defaults"`
+       Regions     regions     `json:"regions"`
+       Services    services    `json:"services"`
+}
+
+func (p partition) Partition() Partition {
+       return Partition{
+               id: p.ID,
+               p:  &p,
+       }
+}
+
+func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool {
+       s, hasService := p.Services[service]
+       _, hasEndpoint := s.Endpoints[region]
+
+       if hasEndpoint && hasService {
+               return true
+       }
+
+       if strictMatch {
+               return false
+       }
+
+       return p.RegionRegex.MatchString(region)
+}
+
+func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
+       var opt Options
+       opt.Set(opts...)
+
+       s, hasService := p.Services[service]
+       if !(hasService || opt.ResolveUnknownService) {
+               // Only return error if the resolver will not fallback to creating
+               // endpoint based on service endpoint ID passed in.
+               return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
+       }
+
+       e, hasEndpoint := s.endpointForRegion(region)
+       if !hasEndpoint && opt.StrictMatching {
+               return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints))
+       }
+
+       defs := []endpoint{p.Defaults, s.Defaults}
+       return e.resolve(service, region, p.DNSSuffix, defs, opt), nil
+}
+
+func serviceList(ss services) []string {
+       list := make([]string, 0, len(ss))
+       for k := range ss {
+               list = append(list, k)
+       }
+       return list
+}
+func endpointList(es endpoints) []string {
+       list := make([]string, 0, len(es))
+       for k := range es {
+               list = append(list, k)
+       }
+       return list
+}
+
+type regionRegex struct {
+       *regexp.Regexp
+}
+
+func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
+       // Strip leading and trailing quotes
+       regex, err := strconv.Unquote(string(b))
+       if err != nil {
+               return fmt.Errorf("unable to strip quotes from regex, %v", err)
+       }
+
+       rr.Regexp, err = regexp.Compile(regex)
+       if err != nil {
+               return fmt.Errorf("unable to unmarshal region regex, %v", err)
+       }
+       return nil
+}
+
+type regions map[string]region
+
+type region struct {
+       Description string `json:"description"`
+}
+
+type services map[string]service
+
+type service struct {
+       PartitionEndpoint string    `json:"partitionEndpoint"`
+       IsRegionalized    boxedBool `json:"isRegionalized,omitempty"`
+       Defaults          endpoint  `json:"defaults"`
+       Endpoints         endpoints `json:"endpoints"`
+}
+
+func (s *service) endpointForRegion(region string) (endpoint, bool) {
+       if s.IsRegionalized == boxedFalse {
+               return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint
+       }
+
+       if e, ok := s.Endpoints[region]; ok {
+               return e, true
+       }
+
+       // Unable to find any matching endpoint; return a blank
+       // endpoint that will be used for generic endpoint creation.
+       return endpoint{}, false
+}
+
+type endpoints map[string]endpoint
+
+type endpoint struct {
+       Hostname        string          `json:"hostname"`
+       Protocols       []string        `json:"protocols"`
+       CredentialScope credentialScope `json:"credentialScope"`
+
+       // Custom fields not modeled
+       HasDualStack      boxedBool `json:"-"`
+       DualStackHostname string    `json:"-"`
+
+       // Signature Version not used
+       SignatureVersions []string `json:"signatureVersions"`
+
+       // SSLCommonName not used.
+       SSLCommonName string `json:"sslCommonName"`
+}
+
+const (
+       defaultProtocol = "https"
+       defaultSigner   = "v4"
+)
+
+var (
+       protocolPriority = []string{"https", "http"}
+       signerPriority   = []string{"v4", "v2"}
+)
+
+func getByPriority(s []string, p []string, def string) string {
+       if len(s) == 0 {
+               return def
+       }
+
+       for i := 0; i < len(p); i++ {
+               for j := 0; j < len(s); j++ {
+                       if s[j] == p[i] {
+                               return s[j]
+                       }
+               }
+       }
+
+       return s[0]
+}
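+
+// For example, getByPriority([]string{"http", "https"}, protocolPriority,
+// defaultProtocol) returns "https", since it is the highest-priority entry
+// present in the input slice.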
+
+func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint {
+       var merged endpoint
+       for _, def := range defs {
+               merged.mergeIn(def)
+       }
+       merged.mergeIn(e)
+       e = merged
+
+       hostname := e.Hostname
+
+       // Use the dualstack hostname if dualstack is enabled and supported.
+       if opts.UseDualStack && e.HasDualStack == boxedTrue {
+               hostname = e.DualStackHostname
+       }
+
+       u := strings.Replace(hostname, "{service}", service, 1)
+       u = strings.Replace(u, "{region}", region, 1)
+       u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1)
+
+       scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
+       u = fmt.Sprintf("%s://%s", scheme, u)
+
+       signingRegion := e.CredentialScope.Region
+       if len(signingRegion) == 0 {
+               signingRegion = region
+       }
+       signingName := e.CredentialScope.Service
+       if len(signingName) == 0 {
+               signingName = service
+       }
+
+       return ResolvedEndpoint{
+               URL:           u,
+               SigningRegion: signingRegion,
+               SigningName:   signingName,
+               SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+       }
+}
+
+func getEndpointScheme(protocols []string, disableSSL bool) string {
+       if disableSSL {
+               return "http"
+       }
+
+       return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func (e *endpoint) mergeIn(other endpoint) {
+       if len(other.Hostname) > 0 {
+               e.Hostname = other.Hostname
+       }
+       if len(other.Protocols) > 0 {
+               e.Protocols = other.Protocols
+       }
+       if len(other.SignatureVersions) > 0 {
+               e.SignatureVersions = other.SignatureVersions
+       }
+       if len(other.CredentialScope.Region) > 0 {
+               e.CredentialScope.Region = other.CredentialScope.Region
+       }
+       if len(other.CredentialScope.Service) > 0 {
+               e.CredentialScope.Service = other.CredentialScope.Service
+       }
+       if len(other.SSLCommonName) > 0 {
+               e.SSLCommonName = other.SSLCommonName
+       }
+       if other.HasDualStack != boxedBoolUnset {
+               e.HasDualStack = other.HasDualStack
+       }
+       if len(other.DualStackHostname) > 0 {
+               e.DualStackHostname = other.DualStackHostname
+       }
+}
+
+type credentialScope struct {
+       Region  string `json:"region"`
+       Service string `json:"service"`
+}
+
+type boxedBool int
+
+func (b *boxedBool) UnmarshalJSON(buf []byte) error {
+       v, err := strconv.ParseBool(string(buf))
+       if err != nil {
+               return err
+       }
+
+       if v {
+               *b = boxedTrue
+       } else {
+               *b = boxedFalse
+       }
+
+       return nil
+}
+
+const (
+       boxedBoolUnset boxedBool = iota
+       boxedFalse
+       boxedTrue
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
new file mode 100644 (file)
index 0000000..05e92df
--- /dev/null
@@ -0,0 +1,337 @@
+// +build codegen
+
+package endpoints
+
+import (
+       "fmt"
+       "io"
+       "reflect"
+       "strings"
+       "text/template"
+       "unicode"
+)
+
+// CodeGenOptions are the options for generating the endpoints as Go code
+// from the endpoints model definition.
+type CodeGenOptions struct {
+       // Options for how the model will be decoded.
+       DecodeModelOptions DecodeModelOptions
+}
+
+// Set combines all of the option functions together.
+func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
+       for _, fn := range optFns {
+               fn(d)
+       }
+}
+
+// CodeGenModel, given an endpoints model file, will decode it and attempt to
+// generate Go code from the model definition. An error will be returned if
+// the code cannot be generated, or the model cannot be decoded.
+func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
+       var opts CodeGenOptions
+       opts.Set(optFns...)
+
+       resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
+               *d = opts.DecodeModelOptions
+       })
+       if err != nil {
+               return err
+       }
+
+       tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
+       if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil {
+               return fmt.Errorf("failed to execute template, %v", err)
+       }
+
+       return nil
+}
+
+func toSymbol(v string) string {
+       out := []rune{}
+       for _, c := range strings.Title(v) {
+               if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
+                       continue
+               }
+
+               out = append(out, c)
+       }
+
+       return string(out)
+}
+
+func quoteString(v string) string {
+       return fmt.Sprintf("%q", v)
+}
+
+func regionConstName(p, r string) string {
+       return toSymbol(p) + toSymbol(r)
+}
+
+func partitionGetter(id string) string {
+       return fmt.Sprintf("%sPartition", toSymbol(id))
+}
+
+func partitionVarName(id string) string {
+       return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
+}
+
+func listPartitionNames(ps partitions) string {
+       names := []string{}
+       switch len(ps) {
+       case 1:
+               return ps[0].Name
+       case 2:
+               return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
+       default:
+               for i, p := range ps {
+                       if i == len(ps)-1 {
+                               names = append(names, "and "+p.Name)
+                       } else {
+                               names = append(names, p.Name)
+                       }
+               }
+               return strings.Join(names, ", ")
+       }
+}
+
+func boxedBoolIfSet(msg string, v boxedBool) string {
+       switch v {
+       case boxedTrue:
+               return fmt.Sprintf(msg, "boxedTrue")
+       case boxedFalse:
+               return fmt.Sprintf(msg, "boxedFalse")
+       default:
+               return ""
+       }
+}
+
+func stringIfSet(msg, v string) string {
+       if len(v) == 0 {
+               return ""
+       }
+
+       return fmt.Sprintf(msg, v)
+}
+
+func stringSliceIfSet(msg string, vs []string) string {
+       if len(vs) == 0 {
+               return ""
+       }
+
+       names := []string{}
+       for _, v := range vs {
+               names = append(names, `"`+v+`"`)
+       }
+
+       return fmt.Sprintf(msg, strings.Join(names, ","))
+}
+
+func endpointIsSet(v endpoint) bool {
+       return !reflect.DeepEqual(v, endpoint{})
+}
+
+func serviceSet(ps partitions) map[string]struct{} {
+       set := map[string]struct{}{}
+       for _, p := range ps {
+               for id := range p.Services {
+                       set[id] = struct{}{}
+               }
+       }
+
+       return set
+}
+
+var funcMap = template.FuncMap{
+       "ToSymbol":           toSymbol,
+       "QuoteString":        quoteString,
+       "RegionConst":        regionConstName,
+       "PartitionGetter":    partitionGetter,
+       "PartitionVarName":   partitionVarName,
+       "ListPartitionNames": listPartitionNames,
+       "BoxedBoolIfSet":     boxedBoolIfSet,
+       "StringIfSet":        stringIfSet,
+       "StringSliceIfSet":   stringSliceIfSet,
+       "EndpointIsSet":      endpointIsSet,
+       "ServicesSet":        serviceSet,
+}
+
+const v3Tmpl = `
+{{ define "defaults" -}}
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
+
+package endpoints
+
+import (
+       "regexp"
+)
+
+       {{ template "partition consts" . }}
+
+       {{ range $_, $partition := . }}
+               {{ template "partition region consts" $partition }}
+       {{ end }}
+
+       {{ template "service consts" . }}
+       
+       {{ template "endpoint resolvers" . }}
+{{- end }}
+
+{{ define "partition consts" }}
+       // Partition identifiers
+       const (
+               {{ range $_, $p := . -}}
+                       {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
+               {{ end -}}
+       )
+{{- end }}
+
+{{ define "partition region consts" }}
+       // {{ .Name }} partition's regions.
+       const (
+               {{ range $id, $region := .Regions -}}
+                       {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
+               {{ end -}}
+       )
+{{- end }}
+
+{{ define "service consts" }}
+       // Service identifiers
+       const (
+               {{ $serviceSet := ServicesSet . -}}
+               {{ range $id, $_ := $serviceSet -}}
+                       {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
+               {{ end -}}
+       )
+{{- end }}
+
+{{ define "endpoint resolvers" }}
+       // DefaultResolver returns an Endpoint resolver that will be able
+       // to resolve endpoints for: {{ ListPartitionNames . }}.
+       //
+       // Use DefaultPartitions() to get the list of the default partitions.
+       func DefaultResolver() Resolver {
+               return defaultPartitions
+       }
+
+       // DefaultPartitions returns a list of the partitions the SDK is bundled
+       // with. The available partitions are: {{ ListPartitionNames . }}.
+       //
+       //    partitions := endpoints.DefaultPartitions()
+       //    for _, p := range partitions {
+       //        // ... inspect partitions
+       //    }
+       func DefaultPartitions() []Partition {
+               return defaultPartitions.Partitions()
+       }
+
+       var defaultPartitions = partitions{
+               {{ range $_, $partition := . -}}
+                       {{ PartitionVarName $partition.ID }},
+               {{ end }}
+       }
+       
+       {{ range $_, $partition := . -}}
+               {{ $name := PartitionGetter $partition.ID -}}
+               // {{ $name }} returns the Resolver for {{ $partition.Name }}.
+               func {{ $name }}() Partition {
+                       return  {{ PartitionVarName $partition.ID }}.Partition()
+               }
+               var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
+       {{ end }}
+{{ end }}
+
+{{ define "default partitions" }}
+       func DefaultPartitions() []Partition {
+               return []partition{
+                       {{ range $_, $partition := . -}}
+                       // {{ ToSymbol $partition.ID}}Partition(),
+                       {{ end }}
+               }
+       }
+{{ end }}
+
+{{ define "gocode Partition" -}}
+partition{
+       {{ StringIfSet "ID: %q,\n" .ID -}}
+       {{ StringIfSet "Name: %q,\n" .Name -}}
+       {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
+       RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
+       {{ if EndpointIsSet .Defaults -}}
+               Defaults: {{ template "gocode Endpoint" .Defaults }},
+       {{- end }}
+       Regions:  {{ template "gocode Regions" .Regions }},
+       Services: {{ template "gocode Services" .Services }},
+}
+{{- end }}
+
+{{ define "gocode RegionRegex" -}}
+regionRegex{
+       Regexp: func() *regexp.Regexp{
+               reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
+               return reg
+       }(),
+}
+{{- end }}
+
+{{ define "gocode Regions" -}}
+regions{
+       {{ range $id, $region := . -}}
+               "{{ $id }}": {{ template "gocode Region" $region }},
+       {{ end -}}
+}
+{{- end }}
+
+{{ define "gocode Region" -}}
+region{
+       {{ StringIfSet "Description: %q,\n" .Description -}}
+}
+{{- end }}
+
+{{ define "gocode Services" -}}
+services{
+       {{ range $id, $service := . -}}
+       "{{ $id }}": {{ template "gocode Service" $service }},
+       {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Service" -}}
+service{
+       {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
+       {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
+       {{ if EndpointIsSet .Defaults -}}
+               Defaults: {{ template "gocode Endpoint" .Defaults -}},
+       {{- end }}
+       {{ if .Endpoints -}}
+               Endpoints: {{ template "gocode Endpoints" .Endpoints }},
+       {{- end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoints" -}}
+endpoints{
+       {{ range $id, $endpoint := . -}}
+       "{{ $id }}": {{ template "gocode Endpoint" $endpoint }},
+       {{ end }}
+}
+{{- end }}
+
+{{ define "gocode Endpoint" -}}
+endpoint{
+       {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
+       {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
+       {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
+       {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
+       {{ if or .CredentialScope.Region .CredentialScope.Service -}}
+       CredentialScope: credentialScope{
+               {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
+               {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
+       },
+       {{- end }}
+       {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}}
+       {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}}
+
+}
+{{- end }}
+`
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644 (file)
index 0000000..5766361
--- /dev/null
@@ -0,0 +1,17 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+       // ErrMissingRegion is an error that is returned if region configuration is
+       // not found.
+       //
+       // @readonly
+       ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+       // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+       // resolved for a service.
+       //
+       // @readonly
+       ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644 (file)
index 0000000..91a6f27
--- /dev/null
@@ -0,0 +1,12 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+//     Example:
+//
+//     values := aws.JSONValue{
+//             "Foo": "Bar",
+//     }
+//     values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644 (file)
index 0000000..db87188
--- /dev/null
@@ -0,0 +1,112 @@
+package aws
+
+import (
+       "log"
+       "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns a pointer to a LogLevelType. Should be used to work around
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+       return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+       if l != nil {
+               return *l
+       }
+       return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+       c := l.Value()
+       return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+       c := l.Value()
+       return c >= v
+}
+
+const (
+       // LogOff states that no logging should be performed by the SDK. This is the
+       // default state of the SDK, and should be used to disable all logging.
+       LogOff LogLevelType = iota * 0x1000
+
+       // LogDebug states that debug output should be logged by the SDK. This should
+       // be used to inspect requests made and responses received.
+       LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+       // LogDebugWithSigning states that the SDK should log request signing and
+       // presigning events. This should be used to log the signing details of
+       // requests for debugging. Will also enable LogDebug.
+       LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+       // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+       // bodies in addition to the headers and path. This should be used to
+       // see the body content of requests and responses made while using the SDK.
+       // Will also enable LogDebug.
+       LogDebugWithHTTPBody
+
+       // LogDebugWithRequestRetries states the SDK should log when service requests
+       // will be retried. This should be used when you want to log service requests
+       // being retried. Will also enable LogDebug.
+       LogDebugWithRequestRetries
+
+       // LogDebugWithRequestErrors states the SDK should log when service requests fail
+       // to build, send, validate, or unmarshal.
+       LogDebugWithRequestErrors
+)
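+
+// A brief sketch of how the debug sub levels compose as a bitmask (the
+// results are shown inline):
+//
+//     l := aws.LogDebugWithHTTPBody
+//     l.Matches(aws.LogDebug)            // true, every sub level includes LogDebug
+//     l.Matches(aws.LogDebugWithSigning) // false, a different sub level bit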
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+       Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+//     s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+//         fmt.Fprintln(os.Stdout, args...)
+//     })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+       f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout,
+// and uses the same formatting as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+       return &defaultLogger{
+               logger: log.New(os.Stdout, "", log.LstdFlags),
+       }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+       logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+       l.logger.Println(args...)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644 (file)
index 0000000..10fc8cb
--- /dev/null
@@ -0,0 +1,19 @@
+// +build !appengine
+
+package request
+
+import (
+       "net"
+       "os"
+       "syscall"
+)
+
+func isErrConnectionReset(err error) bool {
+       if opErr, ok := err.(*net.OpError); ok {
+               if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
+                       return sysErr.Err == syscall.ECONNRESET
+               }
+       }
+
+       return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_appengine.go
new file mode 100644 (file)
index 0000000..996196e
--- /dev/null
@@ -0,0 +1,11 @@
+// +build appengine
+
+package request
+
+import (
+       "strings"
+)
+
+func isErrConnectionReset(err error) bool {
+       return strings.Contains(err.Error(), "connection reset")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644 (file)
index 0000000..6c14336
--- /dev/null
@@ -0,0 +1,225 @@
+package request
+
+import (
+       "fmt"
+       "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+       Validate         HandlerList
+       Build            HandlerList
+       Sign             HandlerList
+       Send             HandlerList
+       ValidateResponse HandlerList
+       Unmarshal        HandlerList
+       UnmarshalMeta    HandlerList
+       UnmarshalError   HandlerList
+       Retry            HandlerList
+       AfterRetry       HandlerList
+       Complete         HandlerList
+}
+
+// Copy returns a copy of this Handlers' lists.
+func (h *Handlers) Copy() Handlers {
+       return Handlers{
+               Validate:         h.Validate.copy(),
+               Build:            h.Build.copy(),
+               Sign:             h.Sign.copy(),
+               Send:             h.Send.copy(),
+               ValidateResponse: h.ValidateResponse.copy(),
+               Unmarshal:        h.Unmarshal.copy(),
+               UnmarshalError:   h.UnmarshalError.copy(),
+               UnmarshalMeta:    h.UnmarshalMeta.copy(),
+               Retry:            h.Retry.copy(),
+               AfterRetry:       h.AfterRetry.copy(),
+               Complete:         h.Complete.copy(),
+       }
+}
+
+// Clear removes callback functions for all handlers.
+func (h *Handlers) Clear() {
+       h.Validate.Clear()
+       h.Build.Clear()
+       h.Send.Clear()
+       h.Sign.Clear()
+       h.Unmarshal.Clear()
+       h.UnmarshalMeta.Clear()
+       h.UnmarshalError.Clear()
+       h.ValidateResponse.Clear()
+       h.Retry.Clear()
+       h.AfterRetry.Clear()
+       h.Complete.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+       Index   int
+       Handler NamedHandler
+       Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+       list []NamedHandler
+
+       // AfterEachFn is called after each request handler in the list. If set,
+       // and the func returns true, the HandlerList will continue to iterate
+       // over the request handlers. If false is returned the HandlerList
+       // will stop iterating.
+       //
+       // Should be used if extra logic needs to be performed between each handler
+       // in the list. This can be used to terminate a list's iteration
+       // based on a condition, such as an error (see HandlerListStopOnError),
+       // or for logging (see HandlerListLogItem).
+       AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+       Name string
+       Fn   func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+       n := HandlerList{
+               AfterEachFn: l.AfterEachFn,
+       }
+       if len(l.list) == 0 {
+               return n
+       }
+
+       n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
+       return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+       l.list = l.list[0:0]
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+       return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+       l.PushBackNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushBackNamed pushes named handler f to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+       if cap(l.list) == 0 {
+               l.list = make([]NamedHandler, 0, 5)
+       }
+       l.list = append(l.list, n)
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+       l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
+// PushFrontNamed pushes named handler f to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+       if cap(l.list) == len(l.list) {
+               // Allocating new list required
+               l.list = append([]NamedHandler{n}, l.list...)
+       } else {
+               // Enough room to prepend into list.
+               l.list = append(l.list, NamedHandler{})
+               copy(l.list[1:], l.list)
+               l.list[0] = n
+       }
+}
+
+// Remove removes a NamedHandler n
+func (l *HandlerList) Remove(n NamedHandler) {
+       l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+       for i := 0; i < len(l.list); i++ {
+               m := l.list[i]
+               if m.Name == name {
+                       // Shift the array in place to avoid allocating a new one.
+                       copy(l.list[i:], l.list[i+1:])
+                       l.list[len(l.list)-1] = NamedHandler{}
+                       l.list = l.list[:len(l.list)-1]
+
+                       // Decrement the index so the next length check is correct.
+                       i--
+               }
+       }
+}
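+
+// A minimal sketch of registering and later removing a named handler (the
+// handler name is illustrative):
+//
+//     var l request.HandlerList
+//     l.PushBackNamed(request.NamedHandler{
+//         Name: "example.handler",
+//         Fn:   func(r *request.Request) { /* inspect or mutate r */ },
+//     })
+//     l.RemoveByName("example.handler")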
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+       for i, h := range l.list {
+               h.Fn(r)
+               item := HandlerListRunItem{
+                       Index: i, Handler: h, Request: r,
+               }
+               if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+                       return
+               }
+       }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+       if item.Request.Config.Logger == nil {
+               return true
+       }
+       item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+               item.Index, item.Handler.Name, item.Request.Error)
+
+       return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. Otherwise returns
+// true to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+       return item.Request.Error == nil
+}
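+
+// A short sketch of wiring one of these predicates into a HandlerList so
+// iteration stops at the first handler error:
+//
+//     var l request.HandlerList
+//     l.AfterEachFn = request.HandlerListStopOnError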
+
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+       return func(r *Request) {
+               r.Handlers.Build.PushBack(func(r2 *Request) {
+                       AddToUserAgent(r, s)
+               })
+       }
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair, resulting in the following format:
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+       ua := fmt.Sprintf("%s/%s", name, version)
+       if len(extra) > 0 {
+               ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+       }
+       return func(r *Request) {
+               AddToUserAgent(r, ua)
+       }
+}
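+
+// For example (the name, version, and extra values are illustrative):
+//
+//     h := request.MakeAddToUserAgentHandler("my-app", "1.0", "linux")
+//     // appends "my-app/1.0 (linux)" to the request's User-Agent header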
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+       return func(r *Request) {
+               AddToUserAgent(r, s)
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644 (file)
index 0000000..79f7960
--- /dev/null
@@ -0,0 +1,24 @@
+package request
+
+import (
+       "io"
+       "net/http"
+       "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+       req := new(http.Request)
+       *req = *r
+       req.URL = &url.URL{}
+       *req.URL = *r.URL
+       req.Body = body
+
+       req.Header = http.Header{}
+       for k, v := range r.Header {
+               for _, vv := range v {
+                       req.Header.Add(k, vv)
+               }
+       }
+
+       return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644 (file)
index 0000000..02f07f4
--- /dev/null
@@ -0,0 +1,58 @@
+package request
+
+import (
+       "io"
+       "sync"
+)
+
+// offsetReader is a thread-safe io.ReadCloser to prevent racing
+// with retrying requests.
+type offsetReader struct {
+       buf    io.ReadSeeker
+       lock   sync.Mutex
+       closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+       reader := &offsetReader{}
+       buf.Seek(offset, 0)
+
+       reader.buf = buf
+       return reader
+}
+
+// Close will close the instance of the offset reader's access to
+// the underlying io.ReadSeeker.
+func (o *offsetReader) Close() error {
+       o.lock.Lock()
+       defer o.lock.Unlock()
+       o.closed = true
+       return nil
+}
+
+// Read is a thread-safe read of the underlying io.ReadSeeker
+func (o *offsetReader) Read(p []byte) (int, error) {
+       o.lock.Lock()
+       defer o.lock.Unlock()
+
+       if o.closed {
+               return 0, io.EOF
+       }
+
+       return o.buf.Read(p)
+}
+
+// Seek is a thread-safe seeking operation.
+func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
+       o.lock.Lock()
+       defer o.lock.Unlock()
+
+       return o.buf.Seek(offset, whence)
+}
+
+// CloseAndCopy will return a new offsetReader with a copy of the old buffer
+// and close the old buffer.
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
+       o.Close()
+       return newOffsetReader(o.buf, offset)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644 (file)
index 0000000..4f4f112
--- /dev/null
@@ -0,0 +1,575 @@
+package request
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "net"
+       "net/http"
+       "net/url"
+       "reflect"
+       "strings"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/client/metadata"
+)
+
+const (
+       // ErrCodeSerialization is the serialization error code that is received
+       // during protocol unmarshaling.
+       ErrCodeSerialization = "SerializationError"
+
+       // ErrCodeRead is an error that is returned during HTTP reads.
+       ErrCodeRead = "ReadError"
+
+       // ErrCodeResponseTimeout is the connection timeout error that is received
+       // during body reads.
+       ErrCodeResponseTimeout = "ResponseTimeout"
+
+       // CanceledErrorCode is the error code that will be returned by an
+       // API request that was canceled. Requests given an aws.Context may
+       // return this error when canceled.
+       CanceledErrorCode = "RequestCanceled"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+       Config     aws.Config
+       ClientInfo metadata.ClientInfo
+       Handlers   Handlers
+
+       Retryer
+       Time                   time.Time
+       ExpireTime             time.Duration
+       Operation              *Operation
+       HTTPRequest            *http.Request
+       HTTPResponse           *http.Response
+       Body                   io.ReadSeeker
+       BodyStart              int64 // offset from beginning of Body that the request body starts
+       Params                 interface{}
+       Error                  error
+       Data                   interface{}
+       RequestID              string
+       RetryCount             int
+       Retryable              *bool
+       RetryDelay             time.Duration
+       NotHoist               bool
+       SignedHeaderVals       http.Header
+       LastSignedAt           time.Time
+       DisableFollowRedirects bool
+
+       context aws.Context
+
+       built bool
+
+       // Need to persist an intermediate body between the input Body and HTTP
+       // request body because the HTTP Client's transport can maintain a reference
+       // to the HTTP request's body after the client has returned. This value is
+       // safe to use concurrently and wraps the input Body for each HTTP request.
+       safeBody *offsetReader
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+       Name       string
+       HTTPMethod string
+       HTTPPath   string
+       *Paginator
+
+       BeforePresignFn func(r *Request) error
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+       retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+       method := operation.HTTPMethod
+       if method == "" {
+               method = "POST"
+       }
+
+       httpReq, _ := http.NewRequest(method, "", nil)
+
+       var err error
+       httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath)
+       if err != nil {
+               httpReq.URL = &url.URL{}
+               err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+       }
+
+       r := &Request{
+               Config:     cfg,
+               ClientInfo: clientInfo,
+               Handlers:   handlers.Copy(),
+
+               Retryer:     retryer,
+               Time:        time.Now(),
+               ExpireTime:  0,
+               Operation:   operation,
+               HTTPRequest: httpReq,
+               Body:        nil,
+               Params:      params,
+               Error:       err,
+               Data:        data,
+       }
+       r.SetBufferBody([]byte{})
+
+       return r
+}
+
+// An Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+//    var id2, versionID string
+//    svc.PutObjectWithContext(ctx, params,
+//        request.WithGetResponseHeader("x-amz-id-2", &id2),
+//        request.WithGetResponseHeader("x-amz-version-id", &versionID),
+//    )
+func WithGetResponseHeader(key string, val *string) Option {
+       return func(r *Request) {
+               r.Handlers.Complete.PushBack(func(req *Request) {
+                       *val = req.HTTPResponse.Header.Get(key)
+               })
+       }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+//    var headers http.Header
+//    svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+       return func(r *Request) {
+               r.Handlers.Complete.PushBack(func(req *Request) {
+                       *headers = req.HTTPResponse.Header
+               })
+       }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//     svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+       return func(r *Request) {
+               r.Config.LogLevel = aws.LogLevel(l)
+       }
+}
+
+// ApplyOptions will apply each option to the request, calling them in the
+// order they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+       for _, opt := range opts {
+               opt(r)
+       }
+}
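+
+// Example (illustrative sketch): applying several options to a request
+// before sending it. The svc and params values are assumptions for
+// illustration, not SDK-provided names.
+//
+//    req, _ := svc.PutObjectRequest(params)
+//    req.ApplyOptions(
+//        request.WithLogLevel(aws.LogDebugWithHTTPBody),
+//        request.WithResponseReadTimeout(30*time.Second),
+//    )
+//    err := req.Send()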
+
+// Context will always return a non-nil context. If the Request does not have
+// a context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+       if r.context != nil {
+               return r.context
+       }
+       return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+       if ctx == nil {
+               panic("context cannot be nil")
+       }
+       setRequestContext(r, ctx)
+}
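+
+// Example (illustrative sketch): canceling an in-flight request with a
+// context (Go 1.7+). The svc and params values are assumptions for
+// illustration.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//    defer cancel()
+//
+//    req, _ := svc.GetObjectRequest(params)
+//    req.SetContext(ctx)
+//    if err := req.Send(); err != nil {
+//        // err may carry CanceledErrorCode if ctx was canceled first
+//    }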
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+       return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns whether the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or the parameters are invalid.
+func (r *Request) ParamsFilled() bool {
+       return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for the response
+// deserialization target has been set and is valid. False is returned if
+// data is not set, or is invalid.
+func (r *Request) DataFilled() bool {
+       return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+       r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+       r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+       r.Body = reader
+       r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+       r.ExpireTime = expireTime
+       r.NotHoist = false
+
+       if r.Operation.BeforePresignFn != nil {
+               r = r.copy()
+               err := r.Operation.BeforePresignFn(r)
+               if err != nil {
+                       return "", err
+               }
+       }
+
+       r.Sign()
+       if r.Error != nil {
+               return "", r.Error
+       }
+       return r.HTTPRequest.URL.String(), nil
+}
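+
+// Example (illustrative sketch): presigning an S3 GetObject request for
+// 15 minutes. The svc and params values are assumptions for illustration.
+//
+//    req, _ := svc.GetObjectRequest(params)
+//    urlStr, err := req.Presign(15 * time.Minute)
+//    if err != nil {
+//        return err
+//    }
+//    fmt.Println("presigned URL:", urlStr)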
+
+// PresignRequest behaves just like Presign, but hoists all headers and signs
+// them. It also returns the signed headers back to the user.
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+       r.ExpireTime = expireTime
+       r.NotHoist = true
+       r.Sign()
+       if r.Error != nil {
+               return "", nil, r.Error
+       }
+       return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+       if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+               return
+       }
+
+       retryStr := "not retrying"
+       if retrying {
+               retryStr = "will retry"
+       }
+
+       r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+               stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to Build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+       if !r.built {
+               r.Handlers.Validate.Run(r)
+               if r.Error != nil {
+                       debugLogReqError(r, "Validate Request", false, r.Error)
+                       return r.Error
+               }
+               r.Handlers.Build.Run(r)
+               if r.Error != nil {
+                       debugLogReqError(r, "Build Request", false, r.Error)
+                       return r.Error
+               }
+               r.built = true
+       }
+
+       return r.Error
+}
+
+// Sign will sign the request returning error if errors are encountered.
+//
+// Sign will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+       r.Build()
+       if r.Error != nil {
+               debugLogReqError(r, "Build Request", false, r.Error)
+               return r.Error
+       }
+
+       r.Handlers.Sign.Run(r)
+       return r.Error
+}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+func (r *Request) ResetBody() {
+       if r.safeBody != nil {
+               r.safeBody.Close()
+       }
+
+       r.safeBody = newOffsetReader(r.Body, r.BodyStart)
+
+       // Go 1.8 tightened and clarified the rules code needs to use when building
+       // requests with the http package. Go 1.8 removed the automatic detection
+       // of if the Request.Body was empty, or actually had bytes in it. The SDK
+       // always sets the Request.Body even if it is empty and should not actually
+       // be sent. This is incorrect.
+       //
+       // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
+       // client that the request really should be sent without a body. The
+       // Request.Body cannot be set to nil, which is preferable, because the
+       // field is exported and could introduce nil pointer dereferences for users
+       // of the SDK if they used that field.
+       //
+       // Related golang/go#18257
+       l, err := computeBodyLength(r.Body)
+       if err != nil {
+               r.Error = awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
+               return
+       }
+
+       if l == 0 {
+               r.HTTPRequest.Body = noBodyReader
+       } else if l > 0 {
+               r.HTTPRequest.Body = r.safeBody
+       } else {
+               // Hack to prevent sending bodies for methods where the body
+               // should be ignored by the server. Sending bodies on these
+               // methods without an associated ContentLength will cause the
+               // request to socket timeout because the server does not handle
+               // Transfer-Encoding: chunked bodies for these methods.
+               //
+               // This would only happen if an aws.ReaderSeekerCloser was used with
+               // an io.Reader that was not also an io.Seeker.
+               switch r.Operation.HTTPMethod {
+               case "GET", "HEAD", "DELETE":
+                       r.HTTPRequest.Body = noBodyReader
+               default:
+                       r.HTTPRequest.Body = r.safeBody
+               }
+       }
+}
+
+// Attempts to compute the length of the body of the reader using the
+// io.Seeker interface. If the value is not seekable because it is a
+// ReaderSeekerCloser without an underlying Seeker, -1 will be returned.
+// If no error occurs the length of the body will be returned.
+func computeBodyLength(r io.ReadSeeker) (int64, error) {
+       seekable := true
+       // Determine if the seeker is actually seekable. ReaderSeekerCloser
+       // hides the fact that an io.Reader might not actually be seekable.
+       switch v := r.(type) {
+       case aws.ReaderSeekerCloser:
+               seekable = v.IsSeeker()
+       case *aws.ReaderSeekerCloser:
+               seekable = v.IsSeeker()
+       }
+       if !seekable {
+               return -1, nil
+       }
+
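+       // Raw whence values (1 = current, 2 = end, 0 = start) are used below
+       // because the io.SeekCurrent/SeekEnd/SeekStart constants were only
+       // added in Go 1.7, and this package also builds on earlier releases.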
+       curOffset, err := r.Seek(0, 1)
+       if err != nil {
+               return 0, err
+       }
+
+       endOffset, err := r.Seek(0, 2)
+       if err != nil {
+               return 0, err
+       }
+
+       _, err = r.Seek(curOffset, 0)
+       if err != nil {
+               return 0, err
+       }
+
+       return endOffset - curOffset, nil
+}
+
+// GetBody will return an io.ReadSeeker of the Request's underlying
+// input body with a concurrency safe wrapper.
+func (r *Request) GetBody() io.ReadSeeker {
+       return r.safeBody
+}
+
+// Send will send the request returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, randomly, one of the state channels during
+// reads or getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+//
+// Send will not close the request.Request's body.
+func (r *Request) Send() error {
+       defer func() {
+               // Regardless of success or failure of the request trigger the Complete
+               // request handlers.
+               r.Handlers.Complete.Run(r)
+       }()
+
+       for {
+               if aws.BoolValue(r.Retryable) {
+                       if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+                               r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+                                       r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+                       }
+
+                       // The previous http.Request will have a reference to the r.Body
+                       // and the HTTP Client's Transport may still be reading from
+                       // the request's body even though the Client's Do returned.
+                       r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
+                       r.ResetBody()
+
+                       // Closing response body to ensure that no response body is leaked
+                       // between retry attempts.
+                       if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+                               r.HTTPResponse.Body.Close()
+                       }
+               }
+
+               r.Sign()
+               if r.Error != nil {
+                       return r.Error
+               }
+
+               r.Retryable = nil
+
+               r.Handlers.Send.Run(r)
+               if r.Error != nil {
+                       if !shouldRetryCancel(r) {
+                               return r.Error
+                       }
+
+                       err := r.Error
+                       r.Handlers.Retry.Run(r)
+                       r.Handlers.AfterRetry.Run(r)
+                       if r.Error != nil {
+                               debugLogReqError(r, "Send Request", false, r.Error)
+                               return r.Error
+                       }
+                       debugLogReqError(r, "Send Request", true, err)
+                       continue
+               }
+               r.Handlers.UnmarshalMeta.Run(r)
+               r.Handlers.ValidateResponse.Run(r)
+               if r.Error != nil {
+                       err := r.Error
+                       r.Handlers.UnmarshalError.Run(r)
+                       r.Handlers.Retry.Run(r)
+                       r.Handlers.AfterRetry.Run(r)
+                       if r.Error != nil {
+                               debugLogReqError(r, "Validate Response", false, r.Error)
+                               return r.Error
+                       }
+                       debugLogReqError(r, "Validate Response", true, err)
+                       continue
+               }
+
+               r.Handlers.Unmarshal.Run(r)
+               if r.Error != nil {
+                       err := r.Error
+                       r.Handlers.Retry.Run(r)
+                       r.Handlers.AfterRetry.Run(r)
+                       if r.Error != nil {
+                               debugLogReqError(r, "Unmarshal Response", false, r.Error)
+                               return r.Error
+                       }
+                       debugLogReqError(r, "Unmarshal Response", true, err)
+                       continue
+               }
+
+               break
+       }
+
+       return nil
+}
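+
+// Example (illustrative sketch): building and sending a request directly.
+// The svc and params values are assumptions; the generated API operation
+// methods normally call Send for you.
+//
+//    req, out := svc.GetObjectRequest(params)
+//    if err := req.Send(); err != nil {
+//        return err
+//    }
+//    // out is populated once Send returns nil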
+
+// copy will copy a request which will allow for local manipulation of the
+// request.
+func (r *Request) copy() *Request {
+       req := &Request{}
+       *req = *r
+       req.Handlers = r.Handlers.Copy()
+       op := *r.Operation
+       req.Operation = &op
+       return req
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+       curUA := r.HTTPRequest.Header.Get("User-Agent")
+       if len(curUA) > 0 {
+               s = curUA + " " + s
+       }
+       r.HTTPRequest.Header.Set("User-Agent", s)
+}
+
+func shouldRetryCancel(r *Request) bool {
+       awsErr, ok := r.Error.(awserr.Error)
+       timeoutErr := false
+       errStr := r.Error.Error()
+       if ok {
+               if awsErr.Code() == CanceledErrorCode {
+                       return false
+               }
+               err := awsErr.OrigErr()
+               netErr, netOK := err.(net.Error)
+               timeoutErr = netOK && netErr.Temporary()
+               if urlErr, ok := err.(*url.Error); !timeoutErr && ok {
+                       errStr = urlErr.Err.Error()
+               }
+       }
+
+       // There can be two types of canceled errors here: one is a net.Error
+       // and the other is a generic error. If the request was timed out, we
+       // want to continue the retry process. Otherwise, return the canceled
+       // error.
+       return timeoutErr ||
+               (errStr != "net/http: request canceled" &&
+                       errStr != "net/http: request canceled while waiting for connection")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
new file mode 100644 (file)
index 0000000..1323af9
--- /dev/null
@@ -0,0 +1,21 @@
+// +build !go1.8
+
+package request
+
+import "io"
+
+// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
+// and Close always returns nil. It can be used in an outgoing client
+// request to explicitly signal that a request has zero bytes.
+// An alternative, however, is to simply set Request.Body to nil.
+//
+// Copy of Go 1.8 NoBody type from net/http/http.go
+type noBody struct{}
+
+func (noBody) Read([]byte) (int, error)         { return 0, io.EOF }
+func (noBody) Close() error                     { return nil }
+func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
+
+// noBodyReader is an empty reader that will trigger the Go HTTP client to not
+// include any body in the HTTP request.
+var noBodyReader = noBody{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644 (file)
index 0000000..8b963f4
--- /dev/null
@@ -0,0 +1,9 @@
+// +build go1.8
+
+package request
+
+import "net/http"
+
+// noBodyReader is the http.NoBody reader, instructing the Go HTTP client to
+// not include any body in the HTTP request.
+var noBodyReader = http.NoBody
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644 (file)
index 0000000..a7365cd
--- /dev/null
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+       r.context = ctx
+       r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644 (file)
index 0000000..307fa07
--- /dev/null
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+       r.context = ctx
+       r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644 (file)
index 0000000..59de673
--- /dev/null
@@ -0,0 +1,236 @@
+package request
+
+import (
+       "reflect"
+       "sync/atomic"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// A Pagination provides pagination of SDK API operations which are
+// paginatable. Generally you should not use this type directly, but instead
+// use the "Pages" API operation methods, such as "S3.ListObjectsPages" and
+// "S3.ListObjectsPagesWithContext", to automatically perform pagination
+// for you.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+//     cont := true
+//     for p.Next() && cont {
+//         data := p.Page().(*s3.ListObjectsOutput)
+//         // process the page's data
+//     }
+//     return p.Err()
+//
+// See service client API operation Pages methods for examples of how the SDK
+// will use the Pagination type.
+type Pagination struct {
+       // Function to return a Request value for each pagination request.
+       // Any configuration or handlers that need to be applied to the request
+       // prior to getting the next page should be done here before the request
+       // is returned.
+       //
+       // NewRequest should always be built from the same API operations. It is
+       // undefined if different API operations are returned on subsequent calls.
+       NewRequest func() (*Request, error)
+
+       started    bool
+       nextTokens []interface{}
+
+       err     error
+       curPage interface{}
+}
+
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+       return !(p.started && len(p.nextTokens) == 0)
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+       return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+       return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a
+// page is retrieved, true will be returned. If the page cannot be retrieved,
+// or there are no more pages, false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred when Next returns false.
+func (p *Pagination) Next() bool {
+       if !p.HasNextPage() {
+               return false
+       }
+
+       req, err := p.NewRequest()
+       if err != nil {
+               p.err = err
+               return false
+       }
+
+       if p.started {
+               for i, intok := range req.Operation.InputTokens {
+                       awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+               }
+       }
+       p.started = true
+
+       err = req.Send()
+       if err != nil {
+               p.err = err
+               return false
+       }
+
+       p.nextTokens = req.nextPageTokens()
+       p.curPage = req.Data
+
+       return true
+}
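+
+// Example (illustrative sketch): driving Pagination by hand. The s3 types
+// and bucket name are assumptions for illustration; the generated "Pages"
+// methods normally construct this for you.
+//
+//    p := request.Pagination{
+//        NewRequest: func() (*request.Request, error) {
+//            req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
+//                Bucket: aws.String("mybucket"),
+//            })
+//            return req, nil
+//        },
+//    }
+//    for p.Next() {
+//        page := p.Page().(*s3.ListObjectsOutput)
+//        fmt.Println("objects on page:", len(page.Contents))
+//    }
+//    return p.Err()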
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API.
+// The Paginator type is only used to store the token metadata the SDK should
+// use for performing pagination.
+type Paginator struct {
+       InputTokens     []string
+       OutputTokens    []string
+       LimitToken      string
+       TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
+func (r *Request) nextPageTokens() []interface{} {
+       if r.Operation.Paginator == nil {
+               return nil
+       }
+       if r.Operation.TruncationToken != "" {
+               tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+               if len(tr) == 0 {
+                       return nil
+               }
+
+               switch v := tr[0].(type) {
+               case *bool:
+                       if !aws.BoolValue(v) {
+                               return nil
+                       }
+               case bool:
+                       if !v {
+                               return nil
+                       }
+               }
+       }
+
+       tokens := []interface{}{}
+       tokenAdded := false
+       for _, outToken := range r.Operation.OutputTokens {
+               v, _ := awsutil.ValuesAtPath(r.Data, outToken)
+               if len(v) > 0 {
+                       tokens = append(tokens, v[0])
+                       tokenAdded = true
+               } else {
+                       tokens = append(tokens, nil)
+               }
+       }
+       if !tokenAdded {
+               return nil
+       }
+
+       return tokens
+}
+
+// Ensure a deprecated item is only logged once instead of each time it's used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+       if logger == nil {
+               return
+       }
+       if atomic.CompareAndSwapInt32(flag, 0, 1) {
+               logger.Log(msg)
+       }
+}
+
+var (
+       logDeprecatedHasNextPage int32
+       logDeprecatedNextPage    int32
+       logDeprecatedEachPage    int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) HasNextPage() bool {
+       logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+               "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+       return len(r.nextPageTokens()) > 0
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) NextPage() *Request {
+       logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+               "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+       tokens := r.nextPageTokens()
+       if len(tokens) == 0 {
+               return nil
+       }
+
+       data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+       nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+       for i, intok := range nr.Operation.InputTokens {
+               awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+       }
+       return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+//   func(page *T, lastPage bool) bool {
+//       return true // return false to stop iterating
+//   }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. Return true from the function to keep
+// iterating, or false to stop.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+       logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+               "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+       for page := r; page != nil; page = page.NextPage() {
+               if err := page.Send(); err != nil {
+                       return err
+               }
+               if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+                       return page.Error
+               }
+       }
+
+       return nil
+}
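+
+// Example (illustrative sketch): iterating pages with EachPage. The svc,
+// params, and s3 output type are assumptions for illustration.
+//
+//    req, _ := svc.ListObjectsRequest(params)
+//    err := req.EachPage(func(data interface{}, lastPage bool) bool {
+//        page := data.(*s3.ListObjectsOutput)
+//        fmt.Println(len(page.Contents), "objects; last page:", lastPage)
+//        return true // keep iterating
+//    })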
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644 (file)
index 0000000..7af81de
--- /dev/null
@@ -0,0 +1,154 @@
+package request
+
+import (
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the service.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+       RetryRules(*Request) time.Duration
+       ShouldRetry(*Request) bool
+       MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+       cfg.Retryer = retryer
+       return cfg
+}
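+
+// Example (illustrative sketch): raising the retry limit for a service
+// client via WithRetryer. The sess value and the client.DefaultRetryer
+// type with its NumMaxRetries field are assumed from the SDK's session
+// and client packages.
+//
+//    cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{
+//        NumMaxRetries: 10,
+//    })
+//    svc := s3.New(sess, cfg)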
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+       "RequestError":            {},
+       "RequestTimeout":          {},
+       ErrCodeResponseTimeout:    {},
+       "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
+}
+
+var throttleCodes = map[string]struct{}{
+       "ProvisionedThroughputExceededException": {},
+       "Throttling":                             {},
+       "ThrottlingException":                    {},
+       "RequestLimitExceeded":                   {},
+       "RequestThrottled":                       {},
+       "LimitExceededException":                 {}, // Deleting 10+ DynamoDb tables at once
+       "TooManyRequestsException":               {}, // Lambda functions
+       "PriorRequestNotComplete":                {}, // Route53
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+       "ExpiredToken":          {},
+       "ExpiredTokenException": {},
+       "RequestExpired":        {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+       _, ok := throttleCodes[code]
+       return ok
+}
+
+func isCodeRetryable(code string) bool {
+       if _, ok := retryableCodes[code]; ok {
+               return true
+       }
+
+       return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+       _, ok := credsExpiredCodes[code]
+       return ok
+}
+
+var validParentCodes = map[string]struct{}{
+       ErrCodeSerialization: {},
+       ErrCodeRead:          {},
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+       if parentErr == nil {
+               return false
+       }
+
+       if _, ok := validParentCodes[parentErr.Code()]; !ok {
+               return false
+       }
+
+       err := parentErr.OrigErr()
+       if err == nil {
+               return false
+       }
+
+       if aerr, ok := err.(awserr.Error); ok {
+               return isCodeRetryable(aerr.Code())
+       }
+
+       return isErrConnectionReset(err)
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if error is nil.
+func IsErrorRetryable(err error) bool {
+       if err != nil {
+               if aerr, ok := err.(awserr.Error); ok {
+                       return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
+               }
+       }
+       return false
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+       if err != nil {
+               if aerr, ok := err.(awserr.Error); ok {
+                       return isCodeThrottle(aerr.Code())
+               }
+       }
+       return false
+}
+
+// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
+// Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+       if err != nil {
+               if aerr, ok := err.(awserr.Error); ok {
+                       return isCodeExpiredCreds(aerr.Code())
+               }
+       }
+       return false
+}
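+
+// Example (illustrative sketch): classifying an operation error with the
+// helpers above. The handling branches are assumptions for illustration.
+//
+//    if err := req.Send(); err != nil {
+//        switch {
+//        case request.IsErrorThrottle(err):
+//            // back off before manually retrying
+//        case request.IsErrorExpiredCreds(err):
+//            // refresh credentials and retry
+//        }
+//    }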
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+       return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+       return IsErrorThrottle(r.Error)
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+       return IsErrorExpiredCreds(r.Error)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644 (file)
index 0000000..09a44eb
--- /dev/null
@@ -0,0 +1,94 @@
+package request
+
+import (
+       "io"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+       ErrCodeResponseTimeout,
+       "read on body has reached the timeout limit",
+       nil,
+)
+
+type readResult struct {
+       n   int
+       err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// We will return an ErrCodeResponseTimeout error if a timeout occurs.
+type timeoutReadCloser struct {
+       reader   io.ReadCloser
+       duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. We will
+// select on the timer's channel or the read's channel. Whoever completes first
+// will be returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+       timer := time.NewTimer(r.duration)
+       c := make(chan readResult, 1)
+
+       go func() {
+               n, err := r.reader.Read(b)
+               timer.Stop()
+               c <- readResult{n: n, err: err}
+       }()
+
+       select {
+       case data := <-c:
+               return data.n, data.err
+       case <-timer.C:
+               return 0, timeoutErr
+       }
+}
+
+func (r *timeoutReadCloser) Close() error {
+       return r.reader.Close()
+}
+
+const (
+       // HandlerResponseTimeout is the name of the response timeout handler.
+       HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that replaces the top level error
+// with its nested error when the nested error's code is ErrCodeResponseTimeout.
+func adaptToResponseTimeoutError(req *Request) {
+       if err, ok := req.Error.(awserr.Error); ok {
+               aerr, ok := err.OrigErr().(awserr.Error)
+               if ok && aerr.Code() == ErrCodeResponseTimeout {
+                       req.Error = aerr
+               }
+       }
+}
+
+// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
+// This will allow for per-read timeouts. If a timeout occurs, the
+// ErrCodeResponseTimeout error will be returned.
+//
+//     svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30*time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+       return func(r *Request) {
+
+               var timeoutHandler = NamedHandler{
+                       HandlerResponseTimeout,
+                       func(req *Request) {
+                               req.HTTPResponse.Body = &timeoutReadCloser{
+                                       reader:   req.HTTPResponse.Body,
+                                       duration: duration,
+                               }
+                       }}
+
+               // Remove any existing response timeout handler so a stale duration is not kept.
+               r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+               r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+               r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+               r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644 (file)
index 0000000..2520286
--- /dev/null
@@ -0,0 +1,234 @@
+package request
+
+import (
+       "bytes"
+       "fmt"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+       // InvalidParameterErrCode is the error code for invalid parameters errors
+       InvalidParameterErrCode = "InvalidParameter"
+       // ParamRequiredErrCode is the error code for required parameter errors
+       ParamRequiredErrCode = "ParamRequiredError"
+       // ParamMinValueErrCode is the error code for fields with too low of a
+       // number value.
+       ParamMinValueErrCode = "ParamMinValueError"
+       // ParamMinLenErrCode is the error code for fields without enough elements.
+       ParamMinLenErrCode = "ParamMinLenError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+       Validate() error
+}
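+
+// Example (illustrative sketch): how a generated input type might implement
+// Validator using the helpers in this file. The MyInput type and its fields
+// are hypothetical.
+//
+//    func (s *MyInput) Validate() error {
+//        invalidParams := request.ErrInvalidParams{Context: "MyInput"}
+//        if s.Name == nil {
+//            invalidParams.Add(request.NewErrParamRequired("Name"))
+//        }
+//        if s.Name != nil && len(*s.Name) < 3 {
+//            invalidParams.Add(request.NewErrParamMinLen("Name", 3))
+//        }
+//        if invalidParams.Len() > 0 {
+//            return invalidParams
+//        }
+//        return nil
+//    }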
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+       // Context is the base context of the invalid parameter group.
+       Context string
+       errs    []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+       err.SetContext(e.Context)
+       e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested and
+// base contexts updated to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+       for _, err := range nested.errs {
+               err.SetContext(e.Context)
+               err.AddNestedContext(nestedCtx)
+               e.errs = append(e.errs, err)
+       }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+       return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+       return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+       return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+       w := &bytes.Buffer{}
+       fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+       for _, err := range e.errs {
+               fmt.Fprintf(w, "- %s\n", err.Message())
+       }
+
+       return w.String()
+}
+
+// OrigErr returns the invalid parameters as an awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+       return awserr.NewBatchError(
+               InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+       errs := make([]error, len(e.errs))
+       for i := 0; i < len(errs); i++ {
+               errs[i] = e.errs[i]
+       }
+
+       return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+       awserr.Error
+
+       // Field name the error occurred on.
+       Field() string
+
+       // SetContext updates the context of the error.
+       SetContext(string)
+
+       // AddNestedContext updates the error's context to include a nested level.
+       AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+       context       string
+       nestedContext string
+       field         string
+       code          string
+       msg           string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+       return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+       return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+       return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+       return nil
+}
+
+// Field returns the field and context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+       field := e.context
+       if len(field) > 0 {
+               field += "."
+       }
+       if len(e.nestedContext) > 0 {
+               field += fmt.Sprintf("%s.", e.nestedContext)
+       }
+       field += e.field
+
+       return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+       e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+       if len(e.nestedContext) == 0 {
+               e.nestedContext = ctx
+       } else {
+               e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+       }
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+       errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+       return &ErrParamRequired{
+               errInvalidParam{
+                       code:  ParamRequiredErrCode,
+                       field: field,
+                       msg:   fmt.Sprintf("missing required field"),
+               },
+       }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+       errInvalidParam
+       min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+       return &ErrParamMinValue{
+               errInvalidParam: errInvalidParam{
+                       code:  ParamMinValueErrCode,
+                       field: field,
+                       msg:   fmt.Sprintf("minimum field value of %v", min),
+               },
+               min: min,
+       }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+       return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+       errInvalidParam
+       min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+       return &ErrParamMinLen{
+               errInvalidParam: errInvalidParam{
+                       code:  ParamMinLenErrCode,
+                       field: field,
+                       msg:   fmt.Sprintf("minimum field size of %v", min),
+               },
+               min: min,
+       }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+       return e.min
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644 (file)
index 0000000..22d2f80
--- /dev/null
@@ -0,0 +1,287 @@
+package request
+
+import (
+       "fmt"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a WaiterOption that sets the maximum number
+// of times the waiter should attempt to check the resource for the target
+// state.
+       return func(w *Waiter) {
+               w.MaxAttempts = max
+       }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+       return func(attempt int) time.Duration {
+               return delay
+       }
+}
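+
+// Example (illustrative sketch): a linear backoff WaiterDelay as an
+// alternative to ConstantWaiterDelay. The step size is an assumption.
+//
+//    linear := request.WaiterDelay(func(attempt int) time.Duration {
+//        return time.Duration(attempt) * 500 * time.Millisecond
+//    })
+//    w.ApplyOptions(request.WithWaiterDelay(linear))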
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+       return func(w *Waiter) {
+               w.Delay = delayer
+       }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+       return func(w *Waiter) {
+               w.Logger = logger
+       }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to the waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+       return func(w *Waiter) {
+               w.RequestOptions = append(w.RequestOptions, opts...)
+       }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+       Name      string
+       Acceptors []WaiterAcceptor
+       Logger    aws.Logger
+
+       MaxAttempts int
+       Delay       WaiterDelay
+
+       RequestOptions []Option
+       NewRequest     func([]Option) (*Request, error)
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+       for _, fn := range opts {
+               fn(w)
+       }
+}
+
+// WaiterState are states the waiter uses based on WaiterAcceptor definitions
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+       switch s {
+       case SuccessWaiterState:
+               return "success"
+       case FailureWaiterState:
+               return "failure"
+       case RetryWaiterState:
+               return "retry"
+       default:
+               return "unknown waiter state"
+       }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+       SuccessWaiterState WaiterState = iota // waiter successful
+       FailureWaiterState                    // waiter failed
+       RetryWaiterState                      // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API response to identify target
+// resource states.
+const (
+       PathAllWaiterMatch  WaiterMatchMode = iota // match on all paths
+       PathWaiterMatch                            // match on specific path
+       PathAnyWaiterMatch                         // match on any path
+       PathListWaiterMatch                        // match on list of paths
+       StatusWaiterMatch                          // match on status code
+       ErrorWaiterMatch                           // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+       switch m {
+       case PathAllWaiterMatch:
+               return "pathAll"
+       case PathWaiterMatch:
+               return "path"
+       case PathAnyWaiterMatch:
+               return "pathAny"
+       case PathListWaiterMatch:
+               return "pathList"
+       case StatusWaiterMatch:
+               return "status"
+       case ErrorWaiterMatch:
+               return "error"
+       default:
+               return "unknown waiter match mode"
+       }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors,
+// or the max attempts expires.
+//
+// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
+// retryer ShouldRetry returns false. This normally will happen when the max
+// wait attempts expires.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+
+       for attempt := 1; ; attempt++ {
+               req, err := w.NewRequest(w.RequestOptions)
+               if err != nil {
+                       waiterLogf(w.Logger, "unable to create request %v", err)
+                       return err
+               }
+               req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+               err = req.Send()
+
+               // See if any of the acceptors match the request's response, or error
+               for _, a := range w.Acceptors {
+                       if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+                               return matchErr
+                       }
+               }
+
+               // The Waiter should only check the resource state MaxAttempts times.
+               // This is here instead of in the for loop above to prevent delaying
+               // unnecessarily when the waiter will not retry.
+               if attempt == w.MaxAttempts {
+                       break
+               }
+
+               // Delay to wait before inspecting the resource again
+               delay := w.Delay(attempt)
+               if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+                       // Support SleepDelay for backwards compatibility and testing
+                       sleepFn(delay)
+               } else if err := aws.SleepWithContext(ctx, delay); err != nil {
+                       return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+               }
+       }
+
+       return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
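+
+// Example (illustrative sketch): the generated "WaitUntil" methods drive
+// WaitWithContext. The svc value, input, and option values shown are
+// assumptions for illustration.
+//
+//    ctx := aws.BackgroundContext()
+//    err := svc.WaitUntilBucketExistsWithContext(ctx,
+//        &s3.HeadBucketInput{Bucket: aws.String("mybucket")},
+//        request.WithWaiterMaxAttempts(5),
+//        request.WithWaiterDelay(request.ConstantWaiterDelay(2*time.Second)),
+//    )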
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+       State    WaiterState
+       Matcher  WaiterMatchMode
+       Argument string
+       Expected interface{}
+}
+
+// match returns whether the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, and an error is
+// returned if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+       result := false
+       var vals []interface{}
+
+       switch a.Matcher {
+       case PathAllWaiterMatch, PathWaiterMatch:
+               // Require all matches to be equal for result to match
+               vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+               if len(vals) == 0 {
+                       break
+               }
+               result = true
+               for _, val := range vals {
+                       if !awsutil.DeepEqual(val, a.Expected) {
+                               result = false
+                               break
+                       }
+               }
+       case PathAnyWaiterMatch:
+               // Only a single match needs to equal for the result to match
+               vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+               for _, val := range vals {
+                       if awsutil.DeepEqual(val, a.Expected) {
+                               result = true
+                               break
+                       }
+               }
+       case PathListWaiterMatch:
+               // ignored matcher
+       case StatusWaiterMatch:
+               s := a.Expected.(int)
+               result = s == req.HTTPResponse.StatusCode
+       case ErrorWaiterMatch:
+               if aerr, ok := err.(awserr.Error); ok {
+                       result = aerr.Code() == a.Expected.(string)
+               }
+       default:
+               waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+                       name, a.Matcher)
+       }
+
+       if !result {
+               // If there was no matching result found there is nothing more to do
+               // for this response, retry the request.
+               return false, nil
+       }
+
+       switch a.State {
+       case SuccessWaiterState:
+               // waiter completed
+               return true, nil
+       case FailureWaiterState:
+               // Waiter failure state triggered
+               return true, awserr.New(WaiterResourceNotReadyErrorCode,
+                       "failed waiting for successful resource state", err)
+       case RetryWaiterState:
+               // clear the error and retry the operation
+               return false, nil
+       default:
+               waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+                       name, a.State)
+               return false, nil
+       }
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+       if logger != nil {
+               logger.Log(fmt.Sprintf(msg, args...))
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
new file mode 100644 (file)
index 0000000..ea7b886
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+Package session provides configuration for the SDK's service clients.
+
+Sessions can be shared across all service clients that share the same base
+configuration.  The Session is built from the SDK's default configuration and
+request handlers.
+
+Sessions should be cached when possible, because creating a new Session will
+load all configuration values from the environment and config files each time
+the Session is created. Sharing the Session value across all of your service
+clients will ensure the configuration is loaded the fewest number of times possible.
+
+Concurrency
+
+Sessions are safe to use concurrently as long as the Session is not being
+modified. The SDK will not modify the Session once the Session has been created.
+Creating service clients concurrently from a shared Session is safe.
+
+Sessions from Shared Config
+
+Sessions can be created using the method above that will only load the
+additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
+Alternatively you can explicitly create a Session with shared config enabled.
+To do this you can use NewSessionWithOptions to configure how the Session will
+be created. Using the NewSessionWithOptions with SharedConfigState set to
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
+environment variable was set.
+
+Creating Sessions
+
+When creating Sessions optional aws.Config values can be passed in that will
+override the default, or loaded config values the Session is being created
+with. This allows you to provide additional, or case based, configuration
+as needed.
+
+By default NewSession will only load credentials from the shared credentials
+file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
+set to a truthy value the Session will be created from the configuration
+values from the shared config (~/.aws/config) and shared credentials
+(~/.aws/credentials) files. See the section Sessions from Shared Config for
+more information.
+
+Create a Session with the default config and request handlers, with
+credentials, region, and profile loaded from the environment and shared
+config automatically. Requires AWS_PROFILE to be set, or "default" is used.
+
+       // Create Session
+       sess := session.Must(session.NewSession())
+
+       // Create a Session with a custom region
+       sess := session.Must(session.NewSession(&aws.Config{
+               Region: aws.String("us-east-1"),
+       }))
+
+       // Create an S3 client instance from a session
+       sess := session.Must(session.NewSession())
+
+       svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+       // Equivalent to session.NewSession()
+       sess := session.Must(session.NewSessionWithOptions(session.Options{
+               // Options
+       }))
+
+       // Specify profile to load for the session's config
+       sess := session.Must(session.NewSessionWithOptions(session.Options{
+                Profile: "profile_name",
+       }))
+
+       // Specify profile for config and region for requests
+       sess := session.Must(session.NewSessionWithOptions(session.Options{
+                Config: aws.Config{Region: aws.String("us-east-1")},
+                Profile: "profile_name",
+       }))
+
+       // Force enable Shared Config support
+       sess := session.Must(session.NewSessionWithOptions(session.Options{
+               SharedConfigState: session.SharedConfigEnable,
+       }))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+       // Create a session, and add additional handlers for all service
+       // clients created with the Session to inherit. Adds logging handler.
+       sess := session.Must(session.NewSession())
+
+       sess.Handlers.Send.PushFront(func(r *request.Request) {
+               // Log every request made and its payload
+               logger.Printf("Request: %s/%s, Payload: %s",
+                       r.ClientInfo.ServiceName, r.Operation, r.Params)
+       })
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
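+
+For example, a minimal sketch of retrieving the error from NewSession:
+
+       sess, err := session.NewSession()
+       if err != nil {
+               // Handle the error, e.g. log it and fail fast.
+       }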
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read, with values from the shared credentials file (~/.aws/credentials) taking
+precedence over those in the shared config file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. When loaded from a configuration file, both aws_access_key_id and
+aws_secret_access_key must be provided together in the same file to be
+considered valid. The values will be ignored if not a complete group.
+aws_session_token is an optional field that can be provided if both of the
+other two fields are also provided.
+
+       aws_access_key_id = AKID
+       aws_secret_access_key = SECRET
+       aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+       role_arn = arn:aws:iam::<account_number>:role/<role_name>
+       source_profile = profile_with_creds
+       external_id = 1234
+       mfa_serial = <serial or mfa arn>
+       role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+       region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA, set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA, the SharedConfigState
+session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
+environment variable must be set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the session.
+
+    sess := session.Must(session.NewSessionWithOptions(session.Options{
+        AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+    }))
+
+    // Create service client value configured for credentials
+    // from assumed role.
+    svc := s3.New(sess)
+
+To set up assume role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. Session Token can optionally also be provided, but is
+not required.
+
+       # Access Key ID
+       AWS_ACCESS_KEY_ID=AKID
+       AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+       # Secret Access Key
+       AWS_SECRET_ACCESS_KEY=SECRET
+       AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+       # Session Token
+       AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment the region must be provided before a service
+client request is made.
+
+       AWS_REGION=us-east-1
+
+       # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+       # and AWS_REGION is not also set.
+       AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided "default" will be used as the profile name.
+
+       AWS_PROFILE=my_profile
+
+       # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+       # and AWS_PROFILE is not also set.
+       AWS_DEFAULT_PROFILE=my_profile
+
+SDK load config instructs the SDK to load the shared config in addition to
+shared credentials. This also expands the configuration loaded from the shared
+credentials to have parity with the shared config file. This also enables
+Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+env values as well.
+
+       AWS_SDK_LOAD_CONFIG=1
+
+Shared credentials file path can be set to instruct the SDK to use an alternative
+file for the shared credentials. If not set the file will be loaded from
+$HOME/.aws/credentials on Linux/Unix based systems, and
+%USERPROFILE%\.aws\credentials on Windows.
+
+       AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+
+Shared config file path can be set to instruct the SDK to use an alternative
+file for the shared config. If not set the file will be loaded from
+$HOME/.aws/config on Linux/Unix based systems, and
+%USERPROFILE%\.aws\config on Windows.
+
+       AWS_CONFIG_FILE=$HOME/my_shared_config
+
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the default system's root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+       AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the Transport into the SDK's HTTP
+client. If the client's Transport is not a http.Transport an error will be
+returned. If the Transport's TLS config is set this option will cause the SDK
+to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
+contains multiple certificates all of them will be loaded.
+
+The Session option CustomCABundle is also available when creating sessions
+to enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option and a custom HTTP client, the HTTP client needs to be provided
+when creating the session, not the service client.
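+
+For example, a minimal sketch using the CustomCABundle session option
+(bundleReader is an assumed io.Reader over a PEM encoded CA bundle):
+
+       sess := session.Must(session.NewSessionWithOptions(session.Options{
+               CustomCABundle: bundleReader,
+       }))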
+*/
+package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
new file mode 100644 (file)
index 0000000..e6278a7
--- /dev/null
@@ -0,0 +1,208 @@
+package session
+
+import (
+       "os"
+       "path/filepath"
+       "strconv"
+
+       "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// envConfig is a collection of environment values the SDK will read
+// setup config from. All environment values are optional, but some values
+// such as credentials require multiple values to be complete or the values
+// will be ignored.
+type envConfig struct {
+       // Environment configuration values. If set, both Access Key ID and Secret Access
+       // Key must be provided. Session Token can optionally also be provided, but is
+       // not required.
+       //
+       //      # Access Key ID
+       //      AWS_ACCESS_KEY_ID=AKID
+       //      AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+       //
+       //      # Secret Access Key
+       //      AWS_SECRET_ACCESS_KEY=SECRET
+       //      AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+       //
+       //      # Session Token
+       //      AWS_SESSION_TOKEN=TOKEN
+       Creds credentials.Value
+
+       // Region value will instruct the SDK where to make service API requests to. If it
+       // is not provided in the environment the region must be provided before a service
+       // client request is made.
+       //
+       //      AWS_REGION=us-east-1
+       //
+       //      # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+       //      # and AWS_REGION is not also set.
+       //      AWS_DEFAULT_REGION=us-east-1
+       Region string
+
+       // Profile name the SDK should use when loading shared configuration from the
+       // shared configuration files. If not provided "default" will be used as the
+       // profile name.
+       //
+       //      AWS_PROFILE=my_profile
+       //
+       //      # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
+       //      # and AWS_PROFILE is not also set.
+       //      AWS_DEFAULT_PROFILE=my_profile
+       Profile string
+
+       // SDK load config instructs the SDK to load the shared config in addition to
+       // shared credentials. This also expands the configuration loaded from the shared
+       // credentials to have parity with the shared config file. This also enables
+       // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
+       // env values as well.
+       //
+       //      AWS_SDK_LOAD_CONFIG=1
+       EnableSharedConfig bool
+
+       // Shared credentials file path can be set to instruct the SDK to use an alternate
+       // file for the shared credentials. If not set the file will be loaded from
+       // $HOME/.aws/credentials on Linux/Unix based systems, and
+       // %USERPROFILE%\.aws\credentials on Windows.
+       //
+       //      AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
+       SharedCredentialsFile string
+
+       // Shared config file path can be set to instruct the SDK to use an alternate
+       // file for the shared config. If not set the file will be loaded from
+       // $HOME/.aws/config on Linux/Unix based systems, and
+       // %USERPROFILE%\.aws\config on Windows.
+       //
+       //      AWS_CONFIG_FILE=$HOME/my_shared_config
+       SharedConfigFile string
+
+       // Sets the path to a custom Certificate Authority (CA) bundle PEM file
+       // that the SDK will use instead of the system's root CA bundle.
+       // Only use this if you want to configure the SDK to use a custom set
+       // of CAs.
+       //
+       // Enabling this option will attempt to merge the Transport
+       // into the SDK's HTTP client. If the client's Transport is
+       // not a http.Transport an error will be returned. If the
+       // Transport's TLS config is set this option will cause the
+       // SDK to overwrite the Transport's TLS config's RootCAs value.
+       //
+       // Setting a custom HTTPClient in the aws.Config options will override this setting.
+       // To use this option and a custom HTTP client, the HTTP client needs to be
+       // provided when creating the session, not the service client.
+       //
+       //  AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+       CustomCABundle string
+}
+
+var (
+       credAccessEnvKey = []string{
+               "AWS_ACCESS_KEY_ID",
+               "AWS_ACCESS_KEY",
+       }
+       credSecretEnvKey = []string{
+               "AWS_SECRET_ACCESS_KEY",
+               "AWS_SECRET_KEY",
+       }
+       credSessionEnvKey = []string{
+               "AWS_SESSION_TOKEN",
+       }
+
+       regionEnvKeys = []string{
+               "AWS_REGION",
+               "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
+       }
+       profileEnvKeys = []string{
+               "AWS_PROFILE",
+               "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
+       }
+)
+
+// loadEnvConfig retrieves the SDK's environment configuration.
+// See `envConfig` for the values that will be retrieved.
+//
+// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
+// the shared SDK config will be loaded in addition to the SDK's specific
+// configuration values.
+func loadEnvConfig() envConfig {
+       enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
+       return envConfigLoad(enableSharedConfig)
+}
+
+// loadSharedEnvConfig retrieves the SDK's environment configuration, and the
+// SDK shared config. See `envConfig` for the values that will be retrieved.
+//
+// Loads the shared configuration in addition to the SDK's specific configuration.
+// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
+// environment variable is set.
+func loadSharedEnvConfig() envConfig {
+       return envConfigLoad(true)
+}
+
+func envConfigLoad(enableSharedConfig bool) envConfig {
+       cfg := envConfig{}
+
+       cfg.EnableSharedConfig = enableSharedConfig
+
+       setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey)
+       setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey)
+       setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey)
+
+       // Require logical grouping of credentials
+       if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
+               cfg.Creds = credentials.Value{}
+       } else {
+               cfg.Creds.ProviderName = "EnvConfigCredentials"
+       }
+
+       regionKeys := regionEnvKeys
+       profileKeys := profileEnvKeys
+       if !cfg.EnableSharedConfig {
+               regionKeys = regionKeys[:1]
+               profileKeys = profileKeys[:1]
+       }
+
+       setFromEnvVal(&cfg.Region, regionKeys)
+       setFromEnvVal(&cfg.Profile, profileKeys)
+
+       cfg.SharedCredentialsFile = sharedCredentialsFilename()
+       cfg.SharedConfigFile = sharedConfigFilename()
+
+       cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+
+       return cfg
+}
+
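+// setFromEnvVal sets dst to the value of the first environment variable
+// in keys that is set to a non-empty value.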
+func setFromEnvVal(dst *string, keys []string) {
+       for _, k := range keys {
+               if v := os.Getenv(k); len(v) > 0 {
+                       *dst = v
+                       break
+               }
+       }
+}
+
+func sharedCredentialsFilename() string {
+       if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 {
+               return name
+       }
+
+       return filepath.Join(userHomeDir(), ".aws", "credentials")
+}
+
+func sharedConfigFilename() string {
+       if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 {
+               return name
+       }
+
+       return filepath.Join(userHomeDir(), ".aws", "config")
+}
+
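+// userHomeDir returns the current user's home directory, preferring the
+// HOME environment variable and falling back to USERPROFILE on Windows.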
+func userHomeDir() string {
+       homeDir := os.Getenv("HOME") // *nix
+       if len(homeDir) == 0 {       // windows
+               homeDir = os.Getenv("USERPROFILE")
+       }
+
+       return homeDir
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644 (file)
index 0000000..4792d3a
--- /dev/null
@@ -0,0 +1,590 @@
+package session
+
+import (
+       "crypto/tls"
+       "crypto/x509"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "os"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/client"
+       "github.com/aws/aws-sdk-go/aws/corehandlers"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
+       "github.com/aws/aws-sdk-go/aws/defaults"
+       "github.com/aws/aws-sdk-go/aws/endpoints"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// Sessions are safe to create service clients concurrently, but it is not safe
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ClientConfigProvider.
+type Session struct {
+       Config   *aws.Config
+       Handlers request.Handlers
+}
+
+// New creates a new instance of the Session, merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value,
+// the New method could now encounter an error when loading the configuration.
+// When the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+// credentials file.
+//
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+       // load initial config from environment
+       envCfg := loadEnvConfig()
+
+       if envCfg.EnableSharedConfig {
+               s, err := newSession(Options{}, envCfg, cfgs...)
+               if err != nil {
+                       // Old session.New expected all errors to be discovered when
+                       // a request is made, and would report the errors then. This
+                       // needs to be replicated if an error occurs while creating
+                       // the session.
+                       msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
+                               "Use session.NewSession to handle errors occurring during session creation."
+
+                       // Session creation failed, need to report the error and prevent
+                       // any requests from succeeding.
+                       s = &Session{Config: defaults.Config()}
+                       s.Config.MergeIn(cfgs...)
+                       s.Config.Logger.Log("ERROR:", msg, "Error:", err)
+                       s.Handlers.Validate.PushBack(func(r *request.Request) {
+                               r.Error = err
+                       })
+               }
+               return s
+       }
+
+       return deprecatedNewSession(cfgs...)
+}
+
+// NewSession returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. Once the Session is created
+// it can be mutated to modify the Config or Handlers. The Session is safe to
+// be read concurrently, but it should not be written to concurrently.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+credentials file. Enabling the Shared Config will also allow the Session to
+retrieve credentials using an AssumeRole set in the config.
+//
+// See the NewSessionWithOptions func for information on how to override or
+// control through code how the Session will be created. Such as specifying the
+// config profile, and controlling if shared config is enabled or not.
+func NewSession(cfgs ...*aws.Config) (*Session, error) {
+       opts := Options{}
+       opts.Config.MergeIn(cfgs...)
+
+       return NewSessionWithOptions(opts)
+}
+
+// SharedConfigState provides the ability to optionally override the state
+// of the session's creation based on the shared config being enabled or
+// disabled.
+type SharedConfigState int
+
+const (
+       // SharedConfigStateFromEnv does not override any state of the
+       // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
+       // SharedConfigState type.
+       SharedConfigStateFromEnv SharedConfigState = iota
+
+       // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
+       // and disables the shared config functionality.
+       SharedConfigDisable
+
+       // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
+       // and enables the shared config functionality.
+       SharedConfigEnable
+)
+
+// Options provides the means to control how a Session is created and what
+// configuration values will be loaded.
+type Options struct {
+       // Provides config values for the SDK to use when creating service clients
+       // and making API requests to services. Any value set in this field
+       // will override the associated value provided by the SDK defaults,
+       // environment or config files where relevant.
+       //
+       // If not set, configuration values from SDK defaults, environment, and
+       // config files will be used.
+       Config aws.Config
+
+       // Overrides the config profile the Session should be created from. If not
+       // set the value of the environment variable will be loaded (AWS_PROFILE,
+       // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
+       //
+       // If not set and environment variables are not set the "default"
+       // (DefaultSharedConfigProfile) will be used as the profile to load the
+       // session config from.
+       Profile string
+
+       // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
+       // environment variable. By default a Session will be created using the
+       // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
+       //
+       // Setting this value to SharedConfigEnable or SharedConfigDisable
+       // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
+       // and enable or disable the shared config functionality.
+       SharedConfigState SharedConfigState
+
+       // When the SDK's shared config is configured to assume a role with MFA
+       // this option is required in order to provide the mechanism that will
+       // retrieve the MFA token. There is no default value for this field. If
+       // it is not set an error will be returned when creating the session.
+       //
+       // This token provider will be called whenever the assumed role's
+       // credentials need to be refreshed. Within the context of service clients
+       // all sharing the same session the SDK will ensure calls to the token
+       // provider are atomic. When sharing a token provider across multiple
+       // sessions additional synchronization logic is needed to ensure the
+       // token providers do not introduce race conditions. It is recommended to
+       // share the session where possible.
+       //
+       // stscreds.StdinTokenProvider is a basic implementation that will prompt
+       // from stdin for the MFA token code.
+       //
+       // This field is only used if the shared configuration is enabled, and
+       // the config enables assume role with MFA via the mfa_serial field.
+       AssumeRoleTokenProvider func() (string, error)
+
+       // Reader for a custom Certificate Authority (CA) bundle in PEM format that
+       // the SDK will use instead of the default system's root CA bundle. Use this
+       // only if you want to replace the CA bundle the SDK uses for TLS requests.
+       //
+       // Enabling this option will attempt to merge the Transport into the SDK's HTTP
+       // client. If the client's Transport is not a http.Transport an error will be
+       // returned. If the Transport's TLS config is set this option will cause the SDK
+       // to overwrite the Transport's TLS config's RootCAs value. If the CA
+       // bundle reader contains multiple certificates all of them will be loaded.
+       //
+       // The Session option CustomCABundle is also available when creating sessions
+       // to also enable this feature. CustomCABundle session option field has priority
+       // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+       CustomCABundle io.Reader
+}
+
+// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
+// environment, and user provided config files. This func uses the Options
+// values to configure how the Session is created.
+//
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config, and shared credentials will be taken from the shared
+credentials file. Enabling the Shared Config will also allow the Session to
+retrieve credentials using an AssumeRole set in the config.
+//
+//     // Equivalent to session.New
+//     sess := session.Must(session.NewSessionWithOptions(session.Options{}))
+//
+//     // Specify profile to load for the session's config
+//     sess := session.Must(session.NewSessionWithOptions(session.Options{
+//          Profile: "profile_name",
+//     }))
+//
+//     // Specify profile for config and region for requests
+//     sess := session.Must(session.NewSessionWithOptions(session.Options{
+//          Config: aws.Config{Region: aws.String("us-east-1")},
+//          Profile: "profile_name",
+//     }))
+//
+//     // Force enable Shared Config support
+//     sess := session.Must(session.NewSessionWithOptions(session.Options{
+//         SharedConfigState: session.SharedConfigEnable,
+//     }))
+func NewSessionWithOptions(opts Options) (*Session, error) {
+       var envCfg envConfig
+       if opts.SharedConfigState == SharedConfigEnable {
+               envCfg = loadSharedEnvConfig()
+       } else {
+               envCfg = loadEnvConfig()
+       }
+
+       if len(opts.Profile) > 0 {
+               envCfg.Profile = opts.Profile
+       }
+
+       switch opts.SharedConfigState {
+       case SharedConfigDisable:
+               envCfg.EnableSharedConfig = false
+       case SharedConfigEnable:
+               envCfg.EnableSharedConfig = true
+       }
+
+       // Only use AWS_CA_BUNDLE if session option is not provided.
+       if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
+               f, err := os.Open(envCfg.CustomCABundle)
+               if err != nil {
+                       return nil, awserr.New("LoadCustomCABundleError",
+                               "failed to open custom CA bundle PEM file", err)
+               }
+               defer f.Close()
+               opts.CustomCABundle = f
+       }
+
+       return newSession(opts, envCfg, &opts.Config)
+}
+
+// Must is a helper function to ensure the Session is valid and there was no
+// error when calling a NewSession function.
+//
+// This helper is intended to be used in variable initialization to load the
+// Session and configuration at startup. Such as:
+//
+//     var sess = session.Must(session.NewSession())
+func Must(sess *Session, err error) *Session {
+       if err != nil {
+               panic(err)
+       }
+
+       return sess
+}
+
+func deprecatedNewSession(cfgs ...*aws.Config) *Session {
+       cfg := defaults.Config()
+       handlers := defaults.Handlers()
+
+       // Apply the passed in configs so the configuration can be applied to the
+       // default credential chain
+       cfg.MergeIn(cfgs...)
+       if cfg.EndpointResolver == nil {
+               // An endpoint resolver is required for a session to be able to provide
+               // endpoints for service client configurations.
+               cfg.EndpointResolver = endpoints.DefaultResolver()
+       }
+       cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+       // Reapply any passed in configs to override credentials if set
+       cfg.MergeIn(cfgs...)
+
+       s := &Session{
+               Config:   cfg,
+               Handlers: handlers,
+       }
+
+       initHandlers(s)
+
+       return s
+}
+
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+       cfg := defaults.Config()
+       handlers := defaults.Handlers()
+
+       // Get a merged version of the user provided config to determine if
+       // credentials were provided.
+       userCfg := &aws.Config{}
+       userCfg.MergeIn(cfgs...)
+
+       // Order the config files will be loaded in, with later files overwriting
+       // values from earlier config files.
+       cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+       if !envCfg.EnableSharedConfig {
+               // The shared config file (~/.aws/config) is only loaded if instructed
+               // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+               cfgFiles = cfgFiles[1:]
+       }
+
+       // Load additional config from file(s)
+       sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles)
+       if err != nil {
+               return nil, err
+       }
+
+       if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+               return nil, err
+       }
+
+       s := &Session{
+               Config:   cfg,
+               Handlers: handlers,
+       }
+
+       initHandlers(s)
+
+       // Setup HTTP client with custom cert bundle if enabled
+       if opts.CustomCABundle != nil {
+               if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
+                       return nil, err
+               }
+       }
+
+       return s, nil
+}
+
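+// loadCustomCABundle loads the PEM encoded CA bundle from the reader and
+// replaces the RootCAs of the session's HTTP client transport with it.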
+func loadCustomCABundle(s *Session, bundle io.Reader) error {
+       var t *http.Transport
+       switch v := s.Config.HTTPClient.Transport.(type) {
+       case *http.Transport:
+               t = v
+       default:
+               if s.Config.HTTPClient.Transport != nil {
+                       return awserr.New("LoadCustomCABundleError",
+                               "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+               }
+       }
+       if t == nil {
+               t = &http.Transport{}
+       }
+
+       p, err := loadCertPool(bundle)
+       if err != nil {
+               return err
+       }
+       if t.TLSClientConfig == nil {
+               t.TLSClientConfig = &tls.Config{}
+       }
+       t.TLSClientConfig.RootCAs = p
+
+       s.Config.HTTPClient.Transport = t
+
+       return nil
+}
+
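+// loadCertPool reads PEM encoded certificates from r and returns a
+// x509.CertPool containing them.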
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+       b, err := ioutil.ReadAll(r)
+       if err != nil {
+               return nil, awserr.New("LoadCustomCABundleError",
+                       "failed to read custom CA bundle PEM file", err)
+       }
+
+       p := x509.NewCertPool()
+       if !p.AppendCertsFromPEM(b) {
+               return nil, awserr.New("LoadCustomCABundleError",
+                       "failed to load custom CA bundle PEM file", err)
+       }
+
+       return p, nil
+}
+
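+// mergeConfigSrcs merges the user provided, environment, and shared config
+// values into cfg, resolving the region and credentials the session will use.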
+func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
+       // Merge in user provided configuration
+       cfg.MergeIn(userCfg)
+
+       // Region if not already set by user
+       if len(aws.StringValue(cfg.Region)) == 0 {
+               if len(envCfg.Region) > 0 {
+                       cfg.WithRegion(envCfg.Region)
+               } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
+                       cfg.WithRegion(sharedCfg.Region)
+               }
+       }
+
+       // Configure credentials if not already set
+       if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
+               if len(envCfg.Creds.AccessKeyID) > 0 {
+                       cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+                               envCfg.Creds,
+                       )
+               } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil {
+                       cfgCp := *cfg
+                       cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
+                               sharedCfg.AssumeRoleSource.Creds,
+                       )
+                       if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+                               // AssumeRole Token provider is required if doing Assume Role
+                               // with MFA.
+                               return AssumeRoleTokenProviderNotSetError{}
+                       }
+                       cfg.Credentials = stscreds.NewCredentials(
+                               &Session{
+                                       Config:   &cfgCp,
+                                       Handlers: handlers.Copy(),
+                               },
+                               sharedCfg.AssumeRole.RoleARN,
+                               func(opt *stscreds.AssumeRoleProvider) {
+                                       opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+
+                                       // Assume role with external ID
+                                       if len(sharedCfg.AssumeRole.ExternalID) > 0 {
+                                               opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
+                                       }
+
+                                       // Assume role with MFA
+                                       if len(sharedCfg.AssumeRole.MFASerial) > 0 {
+                                               opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
+                                               opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+                                       }
+                               },
+                       )
+               } else if len(sharedCfg.Creds.AccessKeyID) > 0 {
+                       cfg.Credentials = credentials.NewStaticCredentialsFromCreds(
+                               sharedCfg.Creds,
+                       )
+               } else {
+                       // Fallback to default credentials provider, include mock errors
+                       // for the credential chain so user can identify why credentials
+                       // failed to be retrieved.
+                       cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{
+                               VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+                               Providers: []credentials.Provider{
+                                       &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)},
+                                       &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)},
+                                       defaults.RemoteCredProvider(*cfg, handlers),
+                               },
+                       })
+               }
+       }
+
+       return nil
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session and the AssumeRoleTokenProvider option is not set while the shared
+// config is configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+       return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+       return "assume role with MFA enabled, but AssumeRoleTokenProvider session option not set."
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+       return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+       return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
+
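+// credProviderError is a stub credentials provider that always fails to
+// retrieve, returning the wrapped error. It is used to surface descriptive
+// errors in the default credential chain.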
+type credProviderError struct {
+       Err error
+}
+
+var emptyCreds = credentials.Value{}
+
+func (c credProviderError) Retrieve() (credentials.Value, error) {
+       return credentials.Value{}, c.Err
+}
+func (c credProviderError) IsExpired() bool {
+       return true
+}
+
+func initHandlers(s *Session) {
+       // Add the Validate parameter handler if it is not disabled.
+       s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+       if !aws.BoolValue(s.Config.DisableParamValidation) {
+               s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+       }
+}
+
+// Copy creates and returns a copy of the current Session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the Session's copied config.
+//
+//     // Create a copy of the current Session, configured for the us-west-2 region.
+//     sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+       newSession := &Session{
+               Config:   s.Config.Copy(cfgs...),
+               Handlers: s.Handlers.Copy(),
+       }
+
+       initHandlers(newSession)
+
+       return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+       // Backwards compatibility, the error will be eaten if user calls ClientConfig
+       // directly. All SDK services will use clientConfigWithErr.
+       cfg, _ := s.clientConfigWithErr(serviceName, cfgs...)
+
+       return cfg
+}
+
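+// clientConfigWithErr resolves the endpoint for serviceName and returns the
+// client.Config, along with any error from endpoint resolution.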
+func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) {
+       s = s.Copy(cfgs...)
+
+       var resolved endpoints.ResolvedEndpoint
+       var err error
+
+       region := aws.StringValue(s.Config.Region)
+
+       if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 {
+               resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL))
+               resolved.SigningRegion = region
+       } else {
+               resolved, err = s.Config.EndpointResolver.EndpointFor(
+                       serviceName, region,
+                       func(opt *endpoints.Options) {
+                               opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL)
+                               opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack)
+
+                               // Support the condition where the service is modeled but its
+                               // endpoint metadata is not available.
+                               opt.ResolveUnknownService = true
+                       },
+               )
+       }
+
+       return client.Config{
+               Config:        s.Config,
+               Handlers:      s.Handlers,
+               Endpoint:      resolved.URL,
+               SigningRegion: resolved.SigningRegion,
+               SigningName:   resolved.SigningName,
+       }, err
+}
+
+// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
+// that the EndpointResolver will not be used to resolve the endpoint. The only
+// endpoint set must come from the aws.Config.Endpoint field.
+func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
+       s = s.Copy(cfgs...)
+
+       var resolved endpoints.ResolvedEndpoint
+
+       region := aws.StringValue(s.Config.Region)
+
+       if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
+               resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
+               resolved.SigningRegion = region
+       }
+
+       return client.Config{
+               Config:        s.Config,
+               Handlers:      s.Handlers,
+               Endpoint:      resolved.URL,
+               SigningRegion: resolved.SigningRegion,
+               SigningName:   resolved.SigningName,
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
new file mode 100644 (file)
index 0000000..b58076f
--- /dev/null
@@ -0,0 +1,295 @@
+package session
+
+import (
+       "fmt"
+       "io/ioutil"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/go-ini/ini"
+)
+
+const (
+       // Static Credentials group
+       accessKeyIDKey  = `aws_access_key_id`     // group required
+       secretAccessKey = `aws_secret_access_key` // group required
+       sessionTokenKey = `aws_session_token`     // optional
+
+       // Assume Role Credentials group
+       roleArnKey         = `role_arn`          // group required
+       sourceProfileKey   = `source_profile`    // group required
+       externalIDKey      = `external_id`       // optional
+       mfaSerialKey       = `mfa_serial`        // optional
+       roleSessionNameKey = `role_session_name` // optional
+
+       // Additional Config fields
+       regionKey = `region`
+
+       // DefaultSharedConfigProfile is the default profile to be used when
+       // loading configuration from the config files if another profile name
+       // is not provided.
+       DefaultSharedConfigProfile = `default`
+)
+
+type assumeRoleConfig struct {
+       RoleARN         string
+       SourceProfile   string
+       ExternalID      string
+       MFASerial       string
+       RoleSessionName string
+}
+
+// sharedConfig represents the configuration fields of the SDK config files.
+type sharedConfig struct {
+       // Credentials values from the config file. Both aws_access_key_id
+       // and aws_secret_access_key must be provided together in the same file
+       // to be considered valid. The values will be ignored if not a complete group.
+       // aws_session_token is an optional field that can be provided if both of the
+       // other two fields are also provided.
+       //
+       //      aws_access_key_id
+       //      aws_secret_access_key
+       //      aws_session_token
+       Creds credentials.Value
+
+       AssumeRole       assumeRoleConfig
+       AssumeRoleSource *sharedConfig
+
+       // Region is the region the SDK should use for looking up AWS service endpoints
+       // and signing requests.
+       //
+       //      region
+       Region string
+}
+
+type sharedConfigFile struct {
+       Filename string
+       IniData  *ini.File
+}
+
+// loadSharedConfig retrieves the configuration from the list of files
+// using the profile provided. The order the files are listed will determine
+// precedence. Values in subsequent files will overwrite values defined in
+// earlier files.
+//
+// For example, given two files A and B, both defining credentials: if the order
+// of the files is A then B, B's credential values will be used instead of A's.
+//
+// See sharedConfig.setFromIniFile for information on how the config files
+// will be loaded.
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) {
+       if len(profile) == 0 {
+               profile = DefaultSharedConfigProfile
+       }
+
+       files, err := loadSharedConfigIniFiles(filenames)
+       if err != nil {
+               return sharedConfig{}, err
+       }
+
+       cfg := sharedConfig{}
+       if err = cfg.setFromIniFiles(profile, files); err != nil {
+               return sharedConfig{}, err
+       }
+
+       if len(cfg.AssumeRole.SourceProfile) > 0 {
+               if err := cfg.setAssumeRoleSource(profile, files); err != nil {
+                       return sharedConfig{}, err
+               }
+       }
+
+       return cfg, nil
+}
+
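+// loadSharedConfigIniFiles loads the INI data from the given filenames,
+// skipping any file that cannot be read.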
+func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
+       files := make([]sharedConfigFile, 0, len(filenames))
+
+       for _, filename := range filenames {
+               b, err := ioutil.ReadFile(filename)
+               if err != nil {
+                       // Skip files which can't be opened and read for whatever reason
+                       continue
+               }
+
+               f, err := ini.Load(b)
+               if err != nil {
+                       return nil, SharedConfigLoadError{Filename: filename}
+               }
+
+               files = append(files, sharedConfigFile{
+                       Filename: filename, IniData: f,
+               })
+       }
+
+       return files, nil
+}
+
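+// setAssumeRoleSource resolves the credentials for the assume role's
+// source_profile from the loaded config files.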
+func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error {
+       var assumeRoleSrc sharedConfig
+
+       // Multiple level assume role chains are not supported
+       if cfg.AssumeRole.SourceProfile == origProfile {
+               assumeRoleSrc = *cfg
+               assumeRoleSrc.AssumeRole = assumeRoleConfig{}
+       } else {
+               err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files)
+               if err != nil {
+                       return err
+               }
+       }
+
+       if len(assumeRoleSrc.Creds.AccessKeyID) == 0 {
+               return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN}
+       }
+
+       cfg.AssumeRoleSource = &assumeRoleSrc
+
+       return nil
+}
+
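+// setFromIniFiles applies the profile's values from each file in order,
+// ignoring files in which the profile does not exist.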
+func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error {
+       // Apply the profile from each file, skipping files missing the profile.
+       for _, f := range files {
+               if err := cfg.setFromIniFile(profile, f); err != nil {
+                       if _, ok := err.(SharedConfigProfileNotExistsError); ok {
+                               // Ignore missing profiles
+                               continue
+                       }
+                       return err
+               }
+       }
+
+       return nil
+}
+
+// setFromIniFile loads the configuration from the file using
+// the profile provided. A sharedConfig pointer type value is used so that
+// multiple config file loadings can be chained.
+//
+// Only loads complete logically grouped values, and will not set fields in cfg
+// for incomplete grouped values in the config, such as credentials. For example,
+// if a config file only includes aws_access_key_id but no aws_secret_access_key
+// the aws_access_key_id will be ignored.
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error {
+       section, err := file.IniData.GetSection(profile)
+       if err != nil {
+               // Fall back to the alternate profile name: profile <name>
+               section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
+               if err != nil {
+                       return SharedConfigProfileNotExistsError{Profile: profile, Err: err}
+               }
+       }
+
+       // Shared Credentials
+       akid := section.Key(accessKeyIDKey).String()
+       secret := section.Key(secretAccessKey).String()
+       if len(akid) > 0 && len(secret) > 0 {
+               cfg.Creds = credentials.Value{
+                       AccessKeyID:     akid,
+                       SecretAccessKey: secret,
+                       SessionToken:    section.Key(sessionTokenKey).String(),
+                       ProviderName:    fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
+               }
+       }
+
+       // Assume Role
+       roleArn := section.Key(roleArnKey).String()
+       srcProfile := section.Key(sourceProfileKey).String()
+       if len(roleArn) > 0 && len(srcProfile) > 0 {
+               cfg.AssumeRole = assumeRoleConfig{
+                       RoleARN:         roleArn,
+                       SourceProfile:   srcProfile,
+                       ExternalID:      section.Key(externalIDKey).String(),
+                       MFASerial:       section.Key(mfaSerialKey).String(),
+                       RoleSessionName: section.Key(roleSessionNameKey).String(),
+               }
+       }
+
+       // Region
+       if v := section.Key(regionKey).String(); len(v) > 0 {
+               cfg.Region = v
+       }
+
+       return nil
+}
+
+// SharedConfigLoadError is an error for when the shared config file fails to load.
+type SharedConfigLoadError struct {
+       Filename string
+       Err      error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigLoadError) Code() string {
+       return "SharedConfigLoadError"
+}
+
+// Message is the description of the error
+func (e SharedConfigLoadError) Message() string {
+       return fmt.Sprintf("failed to load config file, %s", e.Filename)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigLoadError) OrigErr() error {
+       return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigLoadError) Error() string {
+       return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigProfileNotExistsError is an error for the shared config when
+// the profile was not found in the config file.
+type SharedConfigProfileNotExistsError struct {
+       Profile string
+       Err     error
+}
+
+// Code is the short id of the error.
+func (e SharedConfigProfileNotExistsError) Code() string {
+       return "SharedConfigProfileNotExistsError"
+}
+
+// Message is the description of the error
+func (e SharedConfigProfileNotExistsError) Message() string {
+       return fmt.Sprintf("failed to get profile, %s", e.Profile)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigProfileNotExistsError) OrigErr() error {
+       return e.Err
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigProfileNotExistsError) Error() string {
+       return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
+}
+
+// SharedConfigAssumeRoleError is an error for the shared config when the
+// profile contains assume role information, but that information is invalid
+// or not complete.
+type SharedConfigAssumeRoleError struct {
+       RoleARN string
+}
+
+// Code is the short id of the error.
+func (e SharedConfigAssumeRoleError) Code() string {
+       return "SharedConfigAssumeRoleError"
+}
+
+// Message is the description of the error
+func (e SharedConfigAssumeRoleError) Message() string {
+       return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials",
+               e.RoleARN)
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e SharedConfigAssumeRoleError) OrigErr() error {
+       return nil
+}
+
+// Error satisfies the error interface.
+func (e SharedConfigAssumeRoleError) Error() string {
+       return awserr.SprintError(e.Code(), e.Message(), "", nil)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
new file mode 100644 (file)
index 0000000..244c86d
--- /dev/null
@@ -0,0 +1,82 @@
+package v4
+
+import (
+       "net/http"
+       "strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// rule interface allows for more flexible rules and simply
+// checks whether or not a value adheres to that rule
+type rule interface {
+       IsValid(value string) bool
+}
+
+// IsValid iterates through all rules and reports whether any rule
+// applies to the value; nested rules are supported
+func (r rules) IsValid(value string) bool {
+       for _, rule := range r {
+               if rule.IsValid(value) {
+                       return true
+               }
+       }
+       return false
+}
+
+// mapRule generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule checks whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+       _, ok := m[value]
+       return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+       rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+       return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+       rule
+}
+
+// IsValid for blacklist checks that the value is not within the blacklist
+func (b blacklist) IsValid(value string) bool {
+       return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns whether a match
+// has been found
+func (p patterns) IsValid(value string) bool {
+       for _, pattern := range p {
+               if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+                       return true
+               }
+       }
+       return false
+}
+
+// inclusiveRules allows rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+       for _, rule := range r {
+               if !rule.IsValid(value) {
+                       return false
+               }
+       }
+       return true
+}
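+
+// For example (an illustrative sketch), a rule set that permits any header
+// except Authorization could be composed as:
+//
+//     r := rules{blacklist{mapRule{"Authorization": struct{}{}}}}
+//     r.IsValid("Content-Type")  // true
+//     r.IsValid("Authorization") // false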
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644 (file)
index 0000000..6aa2ed2
--- /dev/null
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload will enable and set the UnsignedPayload field of
+// the signer to true.
+func WithUnsignedPayload(v4 *Signer) {
+       v4.UnsignedPayload = true
+}
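+
+// An illustrative application of this option (the credentials value below is
+// a placeholder; NewSigner is defined in v4.go of this package):
+//
+//     signer := NewSigner(creds, WithUnsignedPayload)
+//     // signer.UnsignedPayload == true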
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
new file mode 100644 (file)
index 0000000..bd082e9
--- /dev/null
@@ -0,0 +1,24 @@
+// +build go1.5
+
+package v4
+
+import (
+       "net/url"
+       "strings"
+)
+
+func getURIPath(u *url.URL) string {
+       var uri string
+
+       if len(u.Opaque) > 0 {
+               uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
+       } else {
+               uri = u.EscapedPath()
+       }
+
+       if len(uri) == 0 {
+               uri = "/"
+       }
+
+       return uri
+}
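+
+// For example (illustrative values): with u.Opaque set to
+// "//example.com/a/b%20c" getURIPath returns "/a/b%20c"; with Opaque empty it
+// falls back to u.EscapedPath(), and finally to "/" when both are empty.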
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
new file mode 100644 (file)
index 0000000..434ac87
--- /dev/null
@@ -0,0 +1,761 @@
+// Package v4 implements signing for the AWS V4 signer.
+//
+// Provides request signing for requests that need to be signed with
+// AWS V4 Signatures.
+//
+// Standalone Signer
+//
+// Generally using the signer outside of the SDK should not require any additional
+// logic when using Go v1.5 or higher. The signer does this by taking advantage
+// of the URL.EscapedPath method. If your request URI requires additional escaping
+// you may need to use URL.Opaque to define what the raw URI should be sent
+// to the service as.
+//
+// The signer will first check the URL.Opaque field, and use its value if set.
+// The signer does require the URL.Opaque field to be set in the form of:
+//
+//     "//<hostname>/<path>"
+//
+//     // e.g.
+//     "//example.com/some/path"
+//
+// The leading "//" and hostname are required or the URL.Opaque escaping will
+// not work correctly.
+//
+// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
+// method and use the returned value. If you're using Go v1.4 you must set
+// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
+// Go v1.5 the signer will fall back to URL.Path.
+//
+// AWS v4 signature validation requires that the canonical string's URI path
+// element must be the URI escaped form of the HTTP request's path.
+// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+//
+// The Go HTTP client will perform escaping automatically on the request. Some
+// of this escaping may cause signature validation errors because the HTTP
+// request differs from the URI path or query for which the signature was generated.
+// https://golang.org/pkg/net/url/#URL.EscapedPath
+//
+// Because of this, when using the signer outside of the SDK it is recommended
+// to explicitly escape the request prior to signing, which will help prevent
+// signature validation errors. This can be done by setting
+// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
+// call URL.EscapedPath() if Opaque is not set.
+//
+// If signing a request intended for an HTTP2 server, and you're using Go 1.6.2
+// through 1.7.4, you should use URL.RawPath as the pre-escaped form of the
+// request URL. https://github.com/golang/go/issues/16847 points to a bug in
+// Go pre 1.8 that fails to make HTTP2 requests using an absolute URL in the
+// HTTP message. URL.Opaque generally will force Go to make requests with
+// absolute URLs. URL.RawPath does not do this, but RawPath must be a valid
+// escaping of Path or url.EscapedPath will ignore the RawPath escaping.
+//
+// Test `TestStandaloneSign` provides a complete example of using the signer
+// outside of the SDK and pre-escaping the URI path.
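+//
+// As a hedged sketch (the credentials, URL, service, and region below are
+// illustrative placeholders, not values defined by this package):
+//
+//     signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
+//     req, _ := http.NewRequest("GET", "https://service.region.amazonaws.com/path", nil)
+//     _, err := signer.Sign(req, nil, "service", "region", time.Now())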
+package v4
+
+import (
+       "bytes"
+       "crypto/hmac"
+       "crypto/sha256"
+       "encoding/hex"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "net/url"
+       "sort"
+       "strconv"
+       "strings"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+       authHeaderPrefix = "AWS4-HMAC-SHA256"
+       timeFormat       = "20060102T150405Z"
+       shortTimeFormat  = "20060102"
+
+       // emptyStringSHA256 is a SHA256 of an empty string
+       emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
+)
+
+var ignoredHeaders = rules{
+       blacklist{
+               mapRule{
+                       "Authorization":   struct{}{},
+                       "User-Agent":      struct{}{},
+                       "X-Amzn-Trace-Id": struct{}{},
+               },
+       },
+}
+
+// requiredSignedHeaders is a whitelist for building canonical headers.
+var requiredSignedHeaders = rules{
+       whitelist{
+               mapRule{
+                       "Cache-Control":                                               struct{}{},
+                       "Content-Disposition":                                         struct{}{},
+                       "Content-Encoding":                                            struct{}{},
+                       "Content-Language":                                            struct{}{},
+                       "Content-Md5":                                                 struct{}{},
+                       "Content-Type":                                                struct{}{},
+                       "Expires":                                                     struct{}{},
+                       "If-Match":                                                    struct{}{},
+                       "If-Modified-Since":                                           struct{}{},
+                       "If-None-Match":                                               struct{}{},
+                       "If-Unmodified-Since":                                         struct{}{},
+                       "Range":                                                       struct{}{},
+                       "X-Amz-Acl":                                                   struct{}{},
+                       "X-Amz-Copy-Source":                                           struct{}{},
+                       "X-Amz-Copy-Source-If-Match":                                  struct{}{},
+                       "X-Amz-Copy-Source-If-Modified-Since":                         struct{}{},
+                       "X-Amz-Copy-Source-If-None-Match":                             struct{}{},
+                       "X-Amz-Copy-Source-If-Unmodified-Since":                       struct{}{},
+                       "X-Amz-Copy-Source-Range":                                     struct{}{},
+                       "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+                       "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key":       struct{}{},
+                       "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5":   struct{}{},
+                       "X-Amz-Grant-Full-control":                                    struct{}{},
+                       "X-Amz-Grant-Read":                                            struct{}{},
+                       "X-Amz-Grant-Read-Acp":                                        struct{}{},
+                       "X-Amz-Grant-Write":                                           struct{}{},
+                       "X-Amz-Grant-Write-Acp":                                       struct{}{},
+                       "X-Amz-Metadata-Directive":                                    struct{}{},
+                       "X-Amz-Mfa":                                                   struct{}{},
+                       "X-Amz-Request-Payer":                                         struct{}{},
+                       "X-Amz-Server-Side-Encryption":                                struct{}{},
+                       "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id":                 struct{}{},
+                       "X-Amz-Server-Side-Encryption-Customer-Algorithm":             struct{}{},
+                       "X-Amz-Server-Side-Encryption-Customer-Key":                   struct{}{},
+                       "X-Amz-Server-Side-Encryption-Customer-Key-Md5":               struct{}{},
+                       "X-Amz-Storage-Class":                                         struct{}{},
+                       "X-Amz-Website-Redirect-Location":                             struct{}{},
+               },
+       },
+       patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a rule set for building query headers: headers
+// matching the "X-Amz-" pattern, other than required signed headers, may be
+// hoisted into the query string.
+var allowedQueryHoisting = inclusiveRules{
+       blacklist{requiredSignedHeaders},
+       patterns{"X-Amz-"},
+}
+
+// Signer applies AWS v4 signing to a given request. Use this to sign requests
+// that need to be signed with AWS V4 Signatures.
+type Signer struct {
+       // The authentication credentials the request will be signed against.
+       // This value must be set to sign requests.
+       Credentials *credentials.Credentials
+
+       // Sets the log level the signer should use when reporting information to
+       // the logger. If the logger is nil nothing will be logged. See
+       // aws.LogLevelType for more information on available logging levels
+       //
+       // By default nothing will be logged.
+       Debug aws.LogLevelType
+
+       // The logger logging information will be written to. If the logger
+       // is nil, nothing will be logged.
+       Logger aws.Logger
+
+       // Disables the Signer's moving HTTP header key/value pairs from the HTTP
+       // request header to the request's query string. This is most commonly used
+       // with pre-signed requests preventing headers from being added to the
+       // request's query string.
+       DisableHeaderHoisting bool
+
+       // Disables the automatic escaping of the URI path of the request for the
+       // signature's canonical string's path. For services that do not need
+       // additional escaping, use this to disable the signer escaping the path.
+       //
+       // S3 is an example of a service that does not need additional escaping.
+       //
+       // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+       DisableURIPathEscaping bool
+
+       // Disables the automatic setting of the HTTP request's Body field with the
+       // io.ReadSeeker passed in to the signer. This is useful if you're using a
+       // custom wrapper around the body for the io.ReadSeeker and want to preserve
+       // the Body value on the Request.Body.
+       //
+       // This does run the risk of signing a request with a body that will not be
+       // sent in the request. You need to ensure that the underlying data of the
+       // Body values are the same.
+       DisableRequestBodyOverwrite bool
+
+       // currentTimeFn returns the time value which represents the current time.
+       // This value should only be used for testing. If it is nil the default
+       // time.Now will be used.
+       currentTimeFn func() time.Time
+
+       // UnsignedPayload will prevent signing of the payload. This will only
+       // work for services that have support for this.
+       UnsignedPayload bool
+}
+
+// NewSigner returns a Signer pointer configured with the credentials and optional
+// option values provided. If no options are provided the Signer will use its
+// default configuration.
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
+       v4 := &Signer{
+               Credentials: credentials,
+       }
+
+       for _, option := range options {
+               option(v4)
+       }
+
+       return v4
+}
+
+type signingCtx struct {
+       ServiceName      string
+       Region           string
+       Request          *http.Request
+       Body             io.ReadSeeker
+       Query            url.Values
+       Time             time.Time
+       ExpireTime       time.Duration
+       SignedHeaderVals http.Header
+
+       DisableURIPathEscaping bool
+
+       credValues         credentials.Value
+       isPresign          bool
+       formattedTime      string
+       formattedShortTime string
+       unsignedPayload    bool
+
+       bodyDigest       string
+       signedHeaders    string
+       canonicalHeaders string
+       canonicalString  string
+       credentialString string
+       stringToSign     string
+       signature        string
+       authorization    string
+}
+
+// Sign signs AWS v4 requests with the provided body, service name, region the
+// request is made to, and time the request is signed at. The signTime allows
+// you to specify that a request is signed for the future, and cannot be
+// used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. Generally for signed requests this value
+// is not needed as the full request context will be captured by the http.Request
+// value. It is included for reference though.
+//
+// Sign will set the request's Body to be the `body` parameter passed in. If
+// the body is not already an io.ReadCloser, it will be wrapped within one. If
+// a `nil` body parameter is passed to Sign, the request's Body field will
+// also be set to nil. It's important to note that this functionality will not
+// change the request's ContentLength.
+//
+// Sign differs from Presign in that it will sign the request using HTTP
+// header values. This type of signing is intended for http.Request values that
+// will not be shared, or are shared in a way where the header values on the
+// request will not be lost.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
+       return v4.signWithBody(r, body, service, region, 0, signTime)
+}
+
+// Presign signs AWS v4 requests with the provided body, service name, region
+// the request is made to, and time the request is signed at. The signTime
+// allows you to specify that a request is signed for the future, and cannot
+// be used until then.
+//
+// Returns a list of HTTP headers that were included in the signature or an
+// error if signing the request failed. For presigned requests these headers
+// and their values must be included on the HTTP request when it is made. This
+// is helpful to know what header values need to be shared with the party the
+// presigned request will be distributed to.
+//
+// Presign differs from Sign in that it will sign the request using query string
+// instead of header values. This allows you to share the Presigned Request's
+// URL with third parties, or distribute it throughout your system with minimal
+// dependencies.
+//
+// Presign also takes an exp value which is the duration the
+// signed request will be valid after the signing time. This allows you to
+// set when the request will expire.
+//
+// The request's body is an io.ReadSeeker so the SHA256 of the body can be
+// generated. To bypass the signer computing the hash you can set the
+// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
+// only compute the hash if the request header value is empty.
+//
+// Presigning an S3 request will not compute the body's SHA256 hash by default.
+// This is because the general use case for S3 presigned URLs is to share
+// PUT/GET capabilities. If you would like to include the body's SHA256 in the
+// presigned request's signature you can set the "X-Amz-Content-Sha256"
+// HTTP header and that will be included in the request's signature.
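+//
+// A minimal presign sketch (bucket, key, region, and expiry are illustrative):
+//
+//     req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
+//     _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now())
+//     // On success req.URL carries the X-Amz-* query parameters, including
+//     // X-Amz-Signature, and can be shared as-is.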
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+       return v4.signWithBody(r, body, service, region, exp, signTime)
+}
+
+func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
+       currentTimeFn := v4.currentTimeFn
+       if currentTimeFn == nil {
+               currentTimeFn = time.Now
+       }
+
+       ctx := &signingCtx{
+               Request:                r,
+               Body:                   body,
+               Query:                  r.URL.Query(),
+               Time:                   signTime,
+               ExpireTime:             exp,
+               isPresign:              exp != 0,
+               ServiceName:            service,
+               Region:                 region,
+               DisableURIPathEscaping: v4.DisableURIPathEscaping,
+               unsignedPayload:        v4.UnsignedPayload,
+       }
+
+       for key := range ctx.Query {
+               sort.Strings(ctx.Query[key])
+       }
+
+       if ctx.isRequestSigned() {
+               ctx.Time = currentTimeFn()
+               ctx.handlePresignRemoval()
+       }
+
+       var err error
+       ctx.credValues, err = v4.Credentials.Get()
+       if err != nil {
+               return http.Header{}, err
+       }
+
+       ctx.assignAmzQueryValues()
+       ctx.build(v4.DisableHeaderHoisting)
+
+       // If the request is not presigned the body should be attached to it.
+       // This prevents the confusion of sending a signed request without the
+       // body it was signed for.
+       if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
+               var reader io.ReadCloser
+               if body != nil {
+                       var ok bool
+                       if reader, ok = body.(io.ReadCloser); !ok {
+                               reader = ioutil.NopCloser(body)
+                       }
+               }
+               r.Body = reader
+       }
+
+       if v4.Debug.Matches(aws.LogDebugWithSigning) {
+               v4.logSigningInfo(ctx)
+       }
+
+       return ctx.SignedHeaderVals, nil
+}
+
+func (ctx *signingCtx) handlePresignRemoval() {
+       if !ctx.isPresign {
+               return
+       }
+
+       // The request has already been signed; the existing presign values are
+       // stale and must be removed before the request can be re-signed.
+       ctx.removePresign()
+
+       // Update the request's query string to ensure the values stay in
+       // sync in the case retrieving the new credentials fails.
+       ctx.Request.URL.RawQuery = ctx.Query.Encode()
+}
+
+func (ctx *signingCtx) assignAmzQueryValues() {
+       if ctx.isPresign {
+               ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+               if ctx.credValues.SessionToken != "" {
+                       ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+               } else {
+                       ctx.Query.Del("X-Amz-Security-Token")
+               }
+
+               return
+       }
+
+       if ctx.credValues.SessionToken != "" {
+               ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
+       }
+}
+
+// SignRequestHandler is a named request handler the SDK will use to sign
+// service client requests using the V4 signature.
+var SignRequestHandler = request.NamedHandler{
+       Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
+}
+
+// SignSDKRequest signs an AWS request with the V4 signature. This
+// request handler is best used only with the SDK's built-in service clients'
+// API operation requests.
+//
+// This function should not be used on its own, but in conjunction with
+// an AWS service client's API operation call. To sign a standalone request
+// not created by a service client's API operation method use the "Sign" or
+// "Presign" functions of the "Signer" type.
+//
+// If the credentials of the request's config are set to
+// credentials.AnonymousCredentials the request will not be signed.
+func SignSDKRequest(req *request.Request) {
+       signSDKRequestWithCurrTime(req, time.Now)
+}
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+       return request.NamedHandler{
+               Name: name,
+               Fn: func(req *request.Request) {
+                       signSDKRequestWithCurrTime(req, time.Now, opts...)
+               },
+       }
+}
+
+func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
+       // If the AnonymousCredentials object is used the request does not
+       // need to be signed, so skip signing.
+       if req.Config.Credentials == credentials.AnonymousCredentials {
+               return
+       }
+
+       region := req.ClientInfo.SigningRegion
+       if region == "" {
+               region = aws.StringValue(req.Config.Region)
+       }
+
+       name := req.ClientInfo.SigningName
+       if name == "" {
+               name = req.ClientInfo.ServiceName
+       }
+
+       v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
+               v4.Debug = req.Config.LogLevel.Value()
+               v4.Logger = req.Config.Logger
+               v4.DisableHeaderHoisting = req.NotHoist
+               v4.currentTimeFn = curTimeFn
+               if name == "s3" {
+                       // S3 service should not have any escaping applied
+                       v4.DisableURIPathEscaping = true
+               }
+               // Prevents setting the HTTPRequest's Body, since the Body could be
+               // wrapped in a custom io.Closer that we do not want to be stomped
+               // on by the signer.
+               v4.DisableRequestBodyOverwrite = true
+       })
+
+       for _, opt := range opts {
+               opt(v4)
+       }
+
+       signingTime := req.Time
+       if !req.LastSignedAt.IsZero() {
+               signingTime = req.LastSignedAt
+       }
+
+       signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
+               name, region, req.ExpireTime, signingTime,
+       )
+       if err != nil {
+               req.Error = err
+               req.SignedHeaderVals = nil
+               return
+       }
+
+       req.SignedHeaderVals = signedHeaders
+       req.LastSignedAt = curTimeFn()
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING  ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
+       signedURLMsg := ""
+       if ctx.isPresign {
+               signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
+       }
+       msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
+       v4.Logger.Log(msg)
+}
+
+func (ctx *signingCtx) build(disableHeaderHoisting bool) {
+       ctx.buildTime()             // no depends
+       ctx.buildCredentialString() // no depends
+
+       unsignedHeaders := ctx.Request.Header
+       if ctx.isPresign {
+               if !disableHeaderHoisting {
+                       urlValues := url.Values{}
+                       urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+                       for k := range urlValues {
+                               ctx.Query[k] = urlValues[k]
+                       }
+               }
+       }
+
+       ctx.buildBodyDigest()
+       ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+       ctx.buildCanonicalString() // depends on canon headers / signed headers
+       ctx.buildStringToSign()    // depends on canon string
+       ctx.buildSignature()       // depends on string to sign
+
+       if ctx.isPresign {
+               ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature
+       } else {
+               parts := []string{
+                       authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
+                       "SignedHeaders=" + ctx.signedHeaders,
+                       "Signature=" + ctx.signature,
+               }
+               ctx.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+       }
+}
+
+func (ctx *signingCtx) buildTime() {
+       ctx.formattedTime = ctx.Time.UTC().Format(timeFormat)
+       ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat)
+
+       if ctx.isPresign {
+               duration := int64(ctx.ExpireTime / time.Second)
+               ctx.Query.Set("X-Amz-Date", ctx.formattedTime)
+               ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+       } else {
+               ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime)
+       }
+}
+
+func (ctx *signingCtx) buildCredentialString() {
+       ctx.credentialString = strings.Join([]string{
+               ctx.formattedShortTime,
+               ctx.Region,
+               ctx.ServiceName,
+               "aws4_request",
+       }, "/")
+
+       if ctx.isPresign {
+               ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
+       }
+}
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+       query := url.Values{}
+       unsignedHeaders := http.Header{}
+       for k, h := range header {
+               if r.IsValid(k) {
+                       query[k] = h
+               } else {
+                       unsignedHeaders[k] = h
+               }
+       }
+
+       return query, unsignedHeaders
+}
+
+func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
+       var headers []string
+       headers = append(headers, "host")
+       for k, v := range header {
+               canonicalKey := http.CanonicalHeaderKey(k)
+               if !r.IsValid(canonicalKey) {
+                       continue // ignored header
+               }
+               if ctx.SignedHeaderVals == nil {
+                       ctx.SignedHeaderVals = make(http.Header)
+               }
+
+               lowerCaseKey := strings.ToLower(k)
+               if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
+                       // include additional values
+                       ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
+                       continue
+               }
+
+               headers = append(headers, lowerCaseKey)
+               ctx.SignedHeaderVals[lowerCaseKey] = v
+       }
+       sort.Strings(headers)
+
+       ctx.signedHeaders = strings.Join(headers, ";")
+
+       if ctx.isPresign {
+               ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
+       }
+
+       headerValues := make([]string, len(headers))
+       for i, k := range headers {
+               if k == "host" {
+                       headerValues[i] = "host:" + ctx.Request.URL.Host
+               } else {
+                       headerValues[i] = k + ":" +
+                               strings.Join(ctx.SignedHeaderVals[k], ",")
+               }
+       }
+
+       ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
+}
+
+func (ctx *signingCtx) buildCanonicalString() {
+       ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
+
+       uri := getURIPath(ctx.Request.URL)
+
+       if !ctx.DisableURIPathEscaping {
+               uri = rest.EscapePath(uri, false)
+       }
+
+       ctx.canonicalString = strings.Join([]string{
+               ctx.Request.Method,
+               uri,
+               ctx.Request.URL.RawQuery,
+               ctx.canonicalHeaders + "\n",
+               ctx.signedHeaders,
+               ctx.bodyDigest,
+       }, "\n")
+}
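+
+// The joined value above follows the documented SigV4 canonical request
+// layout:
+//
+//     HTTPRequestMethod    '\n'
+//     CanonicalURI         '\n'
+//     CanonicalQueryString '\n'
+//     CanonicalHeaders     '\n'
+//     SignedHeaders        '\n'
+//     HashedPayload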
+
+func (ctx *signingCtx) buildStringToSign() {
+       ctx.stringToSign = strings.Join([]string{
+               authHeaderPrefix,
+               ctx.formattedTime,
+               ctx.credentialString,
+               hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))),
+       }, "\n")
+}
+
+func (ctx *signingCtx) buildSignature() {
+       secret := ctx.credValues.SecretAccessKey
+       date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime))
+       region := makeHmac(date, []byte(ctx.Region))
+       service := makeHmac(region, []byte(ctx.ServiceName))
+       credentials := makeHmac(service, []byte("aws4_request"))
+       signature := makeHmac(credentials, []byte(ctx.stringToSign))
+       ctx.signature = hex.EncodeToString(signature)
+}
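+
+// For reference, the chain above implements the SigV4 signing key derivation:
+//
+//     kDate     = HMAC("AWS4" + secret, yyyymmdd)
+//     kRegion   = HMAC(kDate, region)
+//     kService  = HMAC(kRegion, service)
+//     kSigning  = HMAC(kService, "aws4_request")
+//     signature = hex(HMAC(kSigning, stringToSign))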
+
+func (ctx *signingCtx) buildBodyDigest() {
+       hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
+       if hash == "" {
+               if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
+                       hash = "UNSIGNED-PAYLOAD"
+               } else if ctx.Body == nil {
+                       hash = emptyStringSHA256
+               } else {
+                       hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
+               }
+               if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
+                       ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
+               }
+       }
+       ctx.bodyDigest = hash
+}
+
+// isRequestSigned returns whether the request is currently signed or presigned
+func (ctx *signingCtx) isRequestSigned() bool {
+       if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
+               return true
+       }
+       if ctx.Request.Header.Get("Authorization") != "" {
+               return true
+       }
+
+       return false
+}
+
+// removePresign removes signing flags for both signed and presigned requests.
+func (ctx *signingCtx) removePresign() {
+       ctx.Query.Del("X-Amz-Algorithm")
+       ctx.Query.Del("X-Amz-Signature")
+       ctx.Query.Del("X-Amz-Security-Token")
+       ctx.Query.Del("X-Amz-Date")
+       ctx.Query.Del("X-Amz-Expires")
+       ctx.Query.Del("X-Amz-Credential")
+       ctx.Query.Del("X-Amz-SignedHeaders")
+}
+
+func makeHmac(key []byte, data []byte) []byte {
+       hash := hmac.New(sha256.New, key)
+       hash.Write(data)
+       return hash.Sum(nil)
+}
+
+func makeSha256(data []byte) []byte {
+       hash := sha256.New()
+       hash.Write(data)
+       return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) []byte {
+       hash := sha256.New()
+       start, _ := reader.Seek(0, 1)
+       defer reader.Seek(start, 0)
+
+       io.Copy(hash, reader)
+       return hash.Sum(nil)
+}
+
+const doubleSpaces = "  "
+
+var doubleSpaceBytes = []byte(doubleSpaces)
+
+func stripExcessSpaces(headerVals []string) []string {
+       vals := make([]string, len(headerVals))
+       for i, str := range headerVals {
+               // Trim leading and trailing spaces
+               trimmed := strings.TrimSpace(str)
+
+               idx := strings.Index(trimmed, doubleSpaces)
+               var buf []byte
+               for idx > -1 {
+                       // Multiple adjacent spaces found
+                       if buf == nil {
+                               // first time create the buffer
+                               buf = []byte(trimmed)
+                       }
+
+                       stripToIdx := -1
+                       for j := idx + 1; j < len(buf); j++ {
+                               if buf[j] != ' ' {
+                                       buf = append(buf[:idx+1], buf[j:]...)
+                                       stripToIdx = j
+                                       break
+                               }
+                       }
+
+                       if stripToIdx >= 0 {
+                               idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
+                               if idx >= 0 {
+                                       idx += stripToIdx
+                               }
+                       } else {
+                               idx = -1
+                       }
+               }
+
+               if buf != nil {
+                       vals[i] = string(buf)
+               } else {
+                       vals[i] = trimmed
+               }
+       }
+       return vals
+}
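+
+// Behavioral sketch: stripExcessSpaces([]string{"  a  b  "}) returns
+// []string{"a b"}; leading and trailing spaces are trimmed and interior runs
+// of spaces collapse to a single space.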
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644 (file)
index 0000000..0e2d864
--- /dev/null
@@ -0,0 +1,118 @@
+package aws
+
+import (
+       "io"
+       "sync"
+)
+
+// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. It should
+// only be used with an io.Reader that is also an io.Seeker. Wrapping a reader
+// that is not a seeker may cause request signature errors, or the request
+// body not being sent for GET, HEAD and DELETE HTTP methods.
+//
+// Deprecated: Should only be used with io.ReadSeeker. If using for
+// S3 PutObject to stream content use s3manager.Uploader instead.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+       return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+       r io.Reader
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and any error that occurred, will be returned.
+//
+// If the underlying reader is not an io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+       switch t := r.r.(type) {
+       case io.Reader:
+               return t.Read(p)
+       }
+       return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the underlying reader is not an io.Seeker, nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+       switch t := r.r.(type) {
+       case io.Seeker:
+               return t.Seek(offset, whence)
+       }
+       return int64(0), nil
+}
+
+// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool {
+       _, ok := r.r.(io.Seeker)
+       return ok
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the underlying reader is not an io.Closer, nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+       switch t := r.r.(type) {
+       case io.Closer:
+               return t.Close()
+       }
+       return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
+// interface. Can be used with the s3manager.Downloader to download content to
+// a buffer in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+       buf []byte
+       m   sync.Mutex
+
+       // GrowthCoeff defines the growth rate of the internal buffer. By
+       // default, the growth rate is 1, where expanding the internal
+       // buffer will allocate only enough capacity to fit the new expected
+       // length.
+       GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+       return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position
+// provided. The number of bytes written, or an error, will be returned.
+// Previously written slices can be overwritten if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+       pLen := len(p)
+       expLen := pos + int64(pLen)
+       b.m.Lock()
+       defer b.m.Unlock()
+       if int64(len(b.buf)) < expLen {
+               if int64(cap(b.buf)) < expLen {
+                       if b.GrowthCoeff < 1 {
+                               b.GrowthCoeff = 1
+                       }
+                       newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+                       copy(newBuf, b.buf)
+                       b.buf = newBuf
+               }
+               b.buf = b.buf[:expLen]
+       }
+       copy(b.buf[pos:], p)
+       return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+       b.m.Lock()
+       defer b.m.Unlock()
+       return b.buf
+}
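+
+// Usage sketch (offsets illustrative): writes may arrive out of order, and
+// the buffer grows to cover the furthest offset written.
+//
+//     b := NewWriteAtBuffer(nil)
+//     b.WriteAt([]byte("world"), 6)
+//     b.WriteAt([]byte("hello "), 0)
+//     // string(b.Bytes()) == "hello world"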
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644 (file)
index 0000000..6192b24
--- /dev/null
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+       return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644 (file)
index 0000000..0210d27
--- /dev/null
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+       "net/url"
+       "strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+       return stripPort(url.Host)
+}
+
+// stripPort is a copy of the Go 1.8 url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+       colon := strings.IndexByte(hostport, ':')
+       if colon == -1 {
+               return hostport
+       }
+       if i := strings.IndexByte(hostport, ']'); i != -1 {
+               return strings.TrimPrefix(hostport[:i], "[")
+       }
+       return hostport[:colon]
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644 (file)
index 0000000..129dad9
--- /dev/null
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.8.21"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644 (file)
index 0000000..53831df
--- /dev/null
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+       "crypto/rand"
+       "fmt"
+       "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. It is exported for testing, and should not be used otherwise.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an idempotency token.
+//
+// Only *string and string type fields tagged with idempotencyToken
+// that are not already set can be auto-filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+       switch u := v.Interface().(type) {
+       // To auto fill an Idempotency token the field must be a string,
+       // tagged for auto fill, and have a zero value.
+       case *string:
+               return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+       case string:
+               return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+       }
+
+       return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+       b := make([]byte, 16)
+       RandReader.Read(b)
+
+       return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the value provided with an idempotency token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+       if v.Kind() == reflect.Ptr {
+               if v.IsNil() && v.CanSet() {
+                       v.Set(reflect.New(v.Type().Elem()))
+               }
+               v = v.Elem()
+       }
+       v = reflect.Indirect(v)
+
+       if !v.CanSet() {
+               panic(fmt.Sprintf("unable to set idempotency token %v", v))
+       }
+
+       b := make([]byte, 16)
+       _, err := rand.Read(b)
+       if err != nil {
+               // TODO handle error
+               return
+       }
+
+       v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+       // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+       // 13th character is "4"
+       u[6] = (u[6] | 0x40) & 0x4F
+       // 17th character is "8", "9", "a", or "b"
+       u[8] = (u[8] | 0x80) & 0xBF
+
+       return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644 (file)
index 0000000..18169f0
--- /dev/null
@@ -0,0 +1,36 @@
+// Package query provides serialization of AWS query requests and responses.
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+       "net/url"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
+func Build(r *request.Request) {
+       body := url.Values{
+               "Action":  {r.Operation.Name},
+               "Version": {r.ClientInfo.APIVersion},
+       }
+       if err := queryutil.Parse(body, r.Params, false); err != nil {
+               r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
+               return
+       }
+
+       if r.ExpireTime == 0 {
+               r.HTTPRequest.Method = "POST"
+               r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+               r.SetBufferBody([]byte(body.Encode()))
+       } else { // This is a pre-signed request
+               r.HTTPRequest.Method = "GET"
+               r.HTTPRequest.URL.RawQuery = body.Encode()
+       }
+}
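+
+// For reference, an illustrative POST body produced by this handler (the
+// operation name, version, and parameter are placeholders):
+//
+//     Action=SomeOperation&Version=2012-01-01&ParamName=value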
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
new file mode 100644 (file)
index 0000000..524ca95
--- /dev/null
@@ -0,0 +1,237 @@
+package queryutil
+
+import (
+       "encoding/base64"
+       "fmt"
+       "net/url"
+       "reflect"
+       "sort"
+       "strconv"
+       "strings"
+       "time"
+
+       "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Parse parses an object i and fills a url.Values object. The isEC2 flag
+// indicates if this is the EC2 Query sub-protocol.
+func Parse(body url.Values, i interface{}, isEC2 bool) error {
+       q := queryParser{isEC2: isEC2}
+       return q.parseValue(body, reflect.ValueOf(i), "", "")
+}
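+
+// Illustrative input (the struct and tags below are assumptions for the
+// example, not types defined by this package):
+//
+//     type input struct {
+//             Names []*string `locationName:"Names" type:"list"`
+//     }
+//
+//     body := url.Values{}
+//     _ = Parse(body, &input{...}, false)
+//     // body encodes as Names.member.1=...&Names.member.2=...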
+
+func elemOf(value reflect.Value) reflect.Value {
+       for value.Kind() == reflect.Ptr {
+               value = value.Elem()
+       }
+       return value
+}
+
+type queryParser struct {
+       isEC2 bool
+}
+
+func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+       value = elemOf(value)
+
+       // no need to handle zero values
+       if !value.IsValid() {
+               return nil
+       }
+
+       t := tag.Get("type")
+       if t == "" {
+               switch value.Kind() {
+               case reflect.Struct:
+                       t = "structure"
+               case reflect.Slice:
+                       t = "list"
+               case reflect.Map:
+                       t = "map"
+               }
+       }
+
+       switch t {
+       case "structure":
+               return q.parseStruct(v, value, prefix)
+       case "list":
+               return q.parseList(v, value, prefix, tag)
+       case "map":
+               return q.parseMap(v, value, prefix, tag)
+       default:
+               return q.parseScalar(v, value, prefix, tag)
+       }
+}
+
+func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
+       if !value.IsValid() {
+               return nil
+       }
+
+       t := value.Type()
+       for i := 0; i < value.NumField(); i++ {
+               elemValue := elemOf(value.Field(i))
+               field := t.Field(i)
+
+               if field.PkgPath != "" {
+                       continue // ignore unexported fields
+               }
+               if field.Tag.Get("ignore") != "" {
+                       continue
+               }
+
+               if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+                       token := protocol.GetIdempotencyToken()
+                       elemValue = reflect.ValueOf(token)
+               }
+
+               var name string
+               if q.isEC2 {
+                       name = field.Tag.Get("queryName")
+               }
+               if name == "" {
+                       if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+                               name = field.Tag.Get("locationNameList")
+                       } else if locName := field.Tag.Get("locationName"); locName != "" {
+                               name = locName
+                       }
+                       if name != "" && q.isEC2 {
+                               name = strings.ToUpper(name[0:1]) + name[1:]
+                       }
+               }
+               if name == "" {
+                       name = field.Name
+               }
+
+               if prefix != "" {
+                       name = prefix + "." + name
+               }
+
+               if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+       // If it's empty, generate an empty value
+       if !value.IsNil() && value.Len() == 0 {
+               v.Set(prefix, "")
+               return nil
+       }
+
+       // check for unflattened list member
+       if !q.isEC2 && tag.Get("flattened") == "" {
+               if listName := tag.Get("locationNameList"); listName == "" {
+                       prefix += ".member"
+               } else {
+                       prefix += "." + listName
+               }
+       }
+
+       for i := 0; i < value.Len(); i++ {
+               slicePrefix := prefix
+               if slicePrefix == "" {
+                       slicePrefix = strconv.Itoa(i + 1)
+               } else {
+                       slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+               }
+               if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+       // If it's empty, generate an empty value
+       if !value.IsNil() && value.Len() == 0 {
+               v.Set(prefix, "")
+               return nil
+       }
+
+       // check for unflattened list member
+       if !q.isEC2 && tag.Get("flattened") == "" {
+               prefix += ".entry"
+       }
+
+       // sort keys for improved serialization consistency.
+       // this is not strictly necessary for protocol support.
+       mapKeyValues := value.MapKeys()
+       mapKeys := map[string]reflect.Value{}
+       mapKeyNames := make([]string, len(mapKeyValues))
+       for i, mapKey := range mapKeyValues {
+               name := mapKey.String()
+               mapKeys[name] = mapKey
+               mapKeyNames[i] = name
+       }
+       sort.Strings(mapKeyNames)
+
+       for i, mapKeyName := range mapKeyNames {
+               mapKey := mapKeys[mapKeyName]
+               mapValue := value.MapIndex(mapKey)
+
+               kname := tag.Get("locationNameKey")
+               if kname == "" {
+                       kname = "key"
+               }
+               vname := tag.Get("locationNameValue")
+               if vname == "" {
+                       vname = "value"
+               }
+
+               // serialize key
+               var keyName string
+               if prefix == "" {
+                       keyName = strconv.Itoa(i+1) + "." + kname
+               } else {
+                       keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+               }
+
+               if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
+                       return err
+               }
+
+               // serialize value
+               var valueName string
+               if prefix == "" {
+                       valueName = strconv.Itoa(i+1) + "." + vname
+               } else {
+                       valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
+               }
+
+               if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+       switch value := r.Interface().(type) {
+       case string:
+               v.Set(name, value)
+       case []byte:
+               if !r.IsNil() {
+                       v.Set(name, base64.StdEncoding.EncodeToString(value))
+               }
+       case bool:
+               v.Set(name, strconv.FormatBool(value))
+       case int64:
+               v.Set(name, strconv.FormatInt(value, 10))
+       case int:
+               v.Set(name, strconv.Itoa(value))
+       case float64:
+               v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+       case float32:
+               v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+       case time.Time:
+               const ISO8601UTC = "2006-01-02T15:04:05Z"
+               v.Set(name, value.UTC().Format(ISO8601UTC))
+       default:
+               return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+       }
+       return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
new file mode 100644 (file)
index 0000000..e0f4d5a
--- /dev/null
@@ -0,0 +1,35 @@
+package query
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+
+import (
+       "encoding/xml"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling query protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+       defer r.HTTPResponse.Body.Close()
+       if r.DataFilled() {
+               decoder := xml.NewDecoder(r.HTTPResponse.Body)
+               err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+               if err != nil {
+                       r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
+                       return
+               }
+       }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+       r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644 (file)
index 0000000..f214296
--- /dev/null
@@ -0,0 +1,66 @@
+package query
+
+import (
+       "encoding/xml"
+       "io/ioutil"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+       XMLName   xml.Name `xml:"ErrorResponse"`
+       Code      string   `xml:"Error>Code"`
+       Message   string   `xml:"Error>Message"`
+       RequestID string   `xml:"RequestId"`
+}
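+
+// An illustrative ErrorResponse body of the shape decoded above:
+//
+//     <ErrorResponse>
+//         <Error>
+//             <Code>Throttling</Code>
+//             <Message>Rate exceeded</Message>
+//         </Error>
+//         <RequestId>abc-123</RequestId>
+//     </ErrorResponse>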
+
+type xmlServiceUnavailableResponse struct {
+       XMLName xml.Name `xml:"ServiceUnavailableException"`
+}
+
+// UnmarshalErrorHandler is a named request handler to unmarshal request errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+       defer r.HTTPResponse.Body.Close()
+
+       bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+       if err != nil {
+               r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
+               return
+       }
+
+       // First check for specific error
+       resp := xmlErrorResponse{}
+       decodeErr := xml.Unmarshal(bodyBytes, &resp)
+       if decodeErr == nil {
+               reqID := resp.RequestID
+               if reqID == "" {
+                       reqID = r.RequestID
+               }
+               r.Error = awserr.NewRequestFailure(
+                       awserr.New(resp.Code, resp.Message, nil),
+                       r.HTTPResponse.StatusCode,
+                       reqID,
+               )
+               return
+       }
+
+       // Next, check for a ServiceUnavailableException response
+       servUnavailResp := xmlServiceUnavailableResponse{}
+       unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
+       if unavailErr == nil {
+               r.Error = awserr.NewRequestFailure(
+                       awserr.New("ServiceUnavailableException", "service is unavailable", nil),
+                       r.HTTPResponse.StatusCode,
+                       r.RequestID,
+               )
+               return
+       }
+
+       // Failed to retrieve any error message from the response body
+       r.Error = awserr.New("SerializationError",
+               "failed to decode query XML error response", decodeErr)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644 (file)
index 0000000..7161835
--- /dev/null
@@ -0,0 +1,290 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+       "bytes"
+       "encoding/base64"
+       "encoding/json"
+       "fmt"
+       "io"
+       "net/http"
+       "net/url"
+       "path"
+       "reflect"
+       "strconv"
+       "strings"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RFC822 is an RFC822 formatted timestamp layout for AWS protocols
+const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+func init() {
+       for i := 0; i < len(noEscape); i++ {
+               // AWS expects every character except these to be escaped
+               noEscape[i] = (i >= 'A' && i <= 'Z') ||
+                       (i >= 'a' && i <= 'z') ||
+                       (i >= '0' && i <= '9') ||
+                       i == '-' ||
+                       i == '.' ||
+                       i == '_' ||
+                       i == '~'
+       }
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+       if r.ParamsFilled() {
+               v := reflect.ValueOf(r.Params).Elem()
+               buildLocationElements(r, v, false)
+               buildBody(r, v)
+       }
+}
+
+// BuildAsGET builds the REST component of a service request with the ability to hoist
+// data from the body.
+func BuildAsGET(r *request.Request) {
+       if r.ParamsFilled() {
+               v := reflect.ValueOf(r.Params).Elem()
+               buildLocationElements(r, v, true)
+               buildBody(r, v)
+       }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
+       query := r.HTTPRequest.URL.Query()
+
+       // Set up the raw path to match the base path pattern. This is needed
+       // so that when the path is mutated a custom escaped version can be
+       // stored in RawPath that will be used by the Go client.
+       r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
+
+       for i := 0; i < v.NumField(); i++ {
+               m := v.Field(i)
+               if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+                       continue
+               }
+
+               if m.IsValid() {
+                       field := v.Type().Field(i)
+                       name := field.Tag.Get("locationName")
+                       if name == "" {
+                               name = field.Name
+                       }
+                       if kind := m.Kind(); kind == reflect.Ptr {
+                               m = m.Elem()
+                       } else if kind == reflect.Interface {
+                               if !m.Elem().IsValid() {
+                                       continue
+                               }
+                       }
+                       if !m.IsValid() {
+                               continue
+                       }
+                       if field.Tag.Get("ignore") != "" {
+                               continue
+                       }
+
+                       var err error
+                       switch field.Tag.Get("location") {
+                       case "headers": // header maps
+                               err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
+                       case "header":
+                               err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
+                       case "uri":
+                               err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
+                       case "querystring":
+                               err = buildQueryString(query, m, name, field.Tag)
+                       default:
+                               if buildGETQuery {
+                                       err = buildQueryString(query, m, name, field.Tag)
+                               }
+                       }
+                       r.Error = err
+               }
+               if r.Error != nil {
+                       return
+               }
+       }
+
+       r.HTTPRequest.URL.RawQuery = query.Encode()
+       if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
+               cleanPath(r.HTTPRequest.URL)
+       }
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+       if field, ok := v.Type().FieldByName("_"); ok {
+               if payloadName := field.Tag.Get("payload"); payloadName != "" {
+                       pfield, _ := v.Type().FieldByName(payloadName)
+                       if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+                               payload := reflect.Indirect(v.FieldByName(payloadName))
+                               if payload.IsValid() && payload.Interface() != nil {
+                                       switch reader := payload.Interface().(type) {
+                                       case io.ReadSeeker:
+                                               r.SetReaderBody(reader)
+                                       case []byte:
+                                               r.SetBufferBody(reader)
+                                       case string:
+                                               r.SetStringBody(reader)
+                                       default:
+                                               r.Error = awserr.New("SerializationError",
+                                                       "failed to encode REST request",
+                                                       fmt.Errorf("unknown payload type %s", payload.Type()))
+                                       }
+                               }
+                       }
+               }
+       }
+}
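+
+// Editor's illustrative note (not part of the upstream SDK source): for a
+// shape whose "_" field carries a `payload:"Body"` tag, a Body of type
+// io.ReadSeeker is streamed, []byte is buffered, and string is sent verbatim;
+// any other non-structure payload type produces a SerializationError.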
+
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+       str, err := convertType(v, tag)
+       if err == errValueNotSet {
+               return nil
+       } else if err != nil {
+               return awserr.New("SerializationError", "failed to encode REST request", err)
+       }
+
+       header.Add(name, str)
+
+       return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+       prefix := tag.Get("locationName")
+       for _, key := range v.MapKeys() {
+               str, err := convertType(v.MapIndex(key), tag)
+               if err == errValueNotSet {
+                       continue
+               } else if err != nil {
+                       return awserr.New("SerializationError", "failed to encode REST request", err)
+
+               }
+
+               header.Add(prefix+key.String(), str)
+       }
+       return nil
+}
+
+func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
+       value, err := convertType(v, tag)
+       if err == errValueNotSet {
+               return nil
+       } else if err != nil {
+               return awserr.New("SerializationError", "failed to encode REST request", err)
+       }
+
+       u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
+       u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
+
+       u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
+       u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
+
+       return nil
+}
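+
+// Editor's illustrative note (not part of the upstream SDK source): for a
+// path pattern such as "/{Bucket}/{Key+}", buildURI replaces "{Bucket}" with
+// the fully escaped value and "{Key+}" with the value escaped except for "/",
+// so a key like "photos/cat.jpg" keeps its path separators in the raw path.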
+
+func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
+       switch value := v.Interface().(type) {
+       case []*string:
+               for _, item := range value {
+                       query.Add(name, *item)
+               }
+       case map[string]*string:
+               for key, item := range value {
+                       query.Add(key, *item)
+               }
+       case map[string][]*string:
+               for key, items := range value {
+                       for _, item := range items {
+                               query.Add(key, *item)
+                       }
+               }
+       default:
+               str, err := convertType(v, tag)
+               if err == errValueNotSet {
+                       return nil
+               } else if err != nil {
+                       return awserr.New("SerializationError", "failed to encode REST request", err)
+               }
+               query.Set(name, str)
+       }
+
+       return nil
+}
+
+func cleanPath(u *url.URL) {
+       hasSlash := strings.HasSuffix(u.Path, "/")
+
+       // clean up path, removing duplicate `/`
+       u.Path = path.Clean(u.Path)
+       u.RawPath = path.Clean(u.RawPath)
+
+       if hasSlash && !strings.HasSuffix(u.Path, "/") {
+               u.Path += "/"
+               u.RawPath += "/"
+       }
+}
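+
+// Editor's illustrative note (not part of the upstream SDK source): given a
+// URL whose path is "/bucket//key/", cleanPath rewrites it to "/bucket/key/",
+// collapsing the duplicate separator while keeping the trailing slash.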
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+       var buf bytes.Buffer
+       for i := 0; i < len(path); i++ {
+               c := path[i]
+               if noEscape[c] || (c == '/' && !encodeSep) {
+                       buf.WriteByte(c)
+               } else {
+                       fmt.Fprintf(&buf, "%%%02X", c)
+               }
+       }
+       return buf.String()
+}
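+
+// Editor's illustrative usage (not part of the upstream SDK source):
+//
+//    EscapePath("a b/c", true)  // "a%20b%2Fc" - the separator is escaped
+//    EscapePath("a b/c", false) // "a%20b/c"   - the separator is preserved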
+
+func convertType(v reflect.Value, tag reflect.StructTag) (string, error) {
+       v = reflect.Indirect(v)
+       if !v.IsValid() {
+               return "", errValueNotSet
+       }
+
+       var str string
+       switch value := v.Interface().(type) {
+       case string:
+               str = value
+       case []byte:
+               str = base64.StdEncoding.EncodeToString(value)
+       case bool:
+               str = strconv.FormatBool(value)
+       case int64:
+               str = strconv.FormatInt(value, 10)
+       case float64:
+               str = strconv.FormatFloat(value, 'f', -1, 64)
+       case time.Time:
+               str = value.UTC().Format(RFC822)
+       case aws.JSONValue:
+               b, err := json.Marshal(value)
+               if err != nil {
+                       return "", err
+               }
+               if tag.Get("location") == "header" {
+                       str = base64.StdEncoding.EncodeToString(b)
+               } else {
+                       str = string(b)
+               }
+       default:
+               err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+               return "", err
+       }
+       return str, nil
+}
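+
+// Editor's illustrative note (not part of the upstream SDK source):
+// convertType renders scalars with the strconv and encoding packages, e.g. a
+// *bool becomes "true", an *int64 becomes "42", a []byte is base64 encoded,
+// and a time.Time is formatted with the RFC822 layout above.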
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
new file mode 100644 (file)
index 0000000..4366de2
--- /dev/null
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+func PayloadMember(i interface{}) interface{} {
+       if i == nil {
+               return nil
+       }
+
+       v := reflect.ValueOf(i).Elem()
+       if !v.IsValid() {
+               return nil
+       }
+       if field, ok := v.Type().FieldByName("_"); ok {
+               if payloadName := field.Tag.Get("payload"); payloadName != "" {
+                       field, _ := v.Type().FieldByName(payloadName)
+                       if field.Tag.Get("type") != "structure" {
+                               return nil
+                       }
+
+                       payload := v.FieldByName(payloadName)
+                       if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+                               return payload.Interface()
+                       }
+               }
+       }
+       return nil
+}
+
+// PayloadType returns the type of a payload field member of i if there is one, or "".
+func PayloadType(i interface{}) string {
+       v := reflect.Indirect(reflect.ValueOf(i))
+       if !v.IsValid() {
+               return ""
+       }
+       if field, ok := v.Type().FieldByName("_"); ok {
+               if payloadName := field.Tag.Get("payload"); payloadName != "" {
+                       if member, ok := v.Type().FieldByName(payloadName); ok {
+                               return member.Tag.Get("type")
+                       }
+               }
+       }
+       return ""
+}
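+
+// Editor's illustrative note (the struct below is hypothetical, not a
+// generated SDK shape): API shapes mark their payload through a tag on the
+// unexported "_" field, which PayloadMember and PayloadType inspect:
+//
+//    type ExampleOutput struct {
+//        _    struct{}      `type:"structure" payload:"Body"`
+//        Body io.ReadCloser `type:"blob"`
+//    }
+//
+// Here PayloadType reports "blob", and PayloadMember returns nil because the
+// payload member is not itself a structure.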
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
new file mode 100644 (file)
index 0000000..7a779ee
--- /dev/null
@@ -0,0 +1,227 @@
+package rest
+
+import (
+       "bytes"
+       "encoding/base64"
+       "encoding/json"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "reflect"
+       "strconv"
+       "strings"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals the REST component of a response in a REST service.
+func Unmarshal(r *request.Request) {
+       if r.DataFilled() {
+               v := reflect.Indirect(reflect.ValueOf(r.Data))
+               unmarshalBody(r, v)
+       }
+}
+
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
+func UnmarshalMeta(r *request.Request) {
+       r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+       if r.RequestID == "" {
+               // Alternative version of request id in the header
+               r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+       }
+       if r.DataFilled() {
+               v := reflect.Indirect(reflect.ValueOf(r.Data))
+               unmarshalLocationElements(r, v)
+       }
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) {
+       if field, ok := v.Type().FieldByName("_"); ok {
+               if payloadName := field.Tag.Get("payload"); payloadName != "" {
+                       pfield, _ := v.Type().FieldByName(payloadName)
+                       if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+                               payload := v.FieldByName(payloadName)
+                               if payload.IsValid() {
+                                       switch payload.Interface().(type) {
+                                       case []byte:
+                                               defer r.HTTPResponse.Body.Close()
+                                               b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+                                               if err != nil {
+                                                       r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+                                               } else {
+                                                       payload.Set(reflect.ValueOf(b))
+                                               }
+                                       case *string:
+                                               defer r.HTTPResponse.Body.Close()
+                                               b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+                                               if err != nil {
+                                                       r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+                                               } else {
+                                                       str := string(b)
+                                                       payload.Set(reflect.ValueOf(&str))
+                                               }
+                                       default:
+                                               switch payload.Type().String() {
+                                               case "io.ReadCloser":
+                                                       payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+                                               case "io.ReadSeeker":
+                                                       b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+                                                       if err != nil {
+                                                               r.Error = awserr.New("SerializationError",
+                                                                       "failed to read response body", err)
+                                                               return
+                                                       }
+                                                       payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
+                                               default:
+                                                       io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+                                                       defer r.HTTPResponse.Body.Close()
+                                                       r.Error = awserr.New("SerializationError",
+                                                               "failed to decode REST response",
+                                                               fmt.Errorf("unknown payload type %s", payload.Type()))
+                                               }
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
+func unmarshalLocationElements(r *request.Request, v reflect.Value) {
+       for i := 0; i < v.NumField(); i++ {
+               m, field := v.Field(i), v.Type().Field(i)
+               if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+                       continue
+               }
+
+               if m.IsValid() {
+                       name := field.Tag.Get("locationName")
+                       if name == "" {
+                               name = field.Name
+                       }
+
+                       switch field.Tag.Get("location") {
+                       case "statusCode":
+                               unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+                       case "header":
+                               err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
+                               if err != nil {
+                                       r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+                                       break
+                               }
+                       case "headers":
+                               prefix := field.Tag.Get("locationName")
+                               err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+                               if err != nil {
+                                       r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+                                       break
+                               }
+                       }
+               }
+               if r.Error != nil {
+                       return
+               }
+       }
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+       if !v.IsValid() {
+               return
+       }
+
+       switch v.Interface().(type) {
+       case *int64:
+               s := int64(statusCode)
+               v.Set(reflect.ValueOf(&s))
+       }
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+       switch r.Interface().(type) {
+       case map[string]*string: // we only support string map value types
+               out := map[string]*string{}
+               for k, v := range headers {
+                       k = http.CanonicalHeaderKey(k)
+                       if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+                               out[k[len(prefix):]] = &v[0]
+                       }
+               }
+               r.Set(reflect.ValueOf(out))
+       }
+       return nil
+}
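+
+// Editor's illustrative note (not part of the upstream SDK source): with a
+// prefix of "X-Amz-Meta-", a response header "X-Amz-Meta-Color: blue" is
+// stored in the destination map as out["Color"] = "blue"; the prefix is
+// stripped and the key keeps its canonical header casing.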
+
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+       isJSONValue := tag.Get("type") == "jsonvalue"
+       if isJSONValue {
+               if len(header) == 0 {
+                       return nil
+               }
+       } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+               return nil
+       }
+
+       switch v.Interface().(type) {
+       case *string:
+               v.Set(reflect.ValueOf(&header))
+       case []byte:
+               b, err := base64.StdEncoding.DecodeString(header)
+               if err != nil {
+                       return err
+               }
+               v.Set(reflect.ValueOf(&b))
+       case *bool:
+               b, err := strconv.ParseBool(header)
+               if err != nil {
+                       return err
+               }
+               v.Set(reflect.ValueOf(&b))
+       case *int64:
+               i, err := strconv.ParseInt(header, 10, 64)
+               if err != nil {
+                       return err
+               }
+               v.Set(reflect.ValueOf(&i))
+       case *float64:
+               f, err := strconv.ParseFloat(header, 64)
+               if err != nil {
+                       return err
+               }
+               v.Set(reflect.ValueOf(&f))
+       case *time.Time:
+               t, err := time.Parse(RFC822, header)
+               if err != nil {
+                       return err
+               }
+               v.Set(reflect.ValueOf(&t))
+       case aws.JSONValue:
+               b := []byte(header)
+               var err error
+               if tag.Get("location") == "header" {
+                       b, err = base64.StdEncoding.DecodeString(header)
+                       if err != nil {
+                               return err
+                       }
+               }
+
+               m := aws.JSONValue{}
+               err = json.Unmarshal(b, &m)
+               if err != nil {
+                       return err
+               }
+               v.Set(reflect.ValueOf(m))
+       default:
+               err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+               return err
+       }
+       return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
new file mode 100644 (file)
index 0000000..7bdf4c8
--- /dev/null
@@ -0,0 +1,69 @@
+// Package restxml provides RESTful XML serialization of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+
+import (
+       "bytes"
+       "encoding/xml"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/private/protocol/query"
+       "github.com/aws/aws-sdk-go/private/protocol/rest"
+       "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// BuildHandler is a named request handler for building restxml protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling restxml protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol response errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *request.Request) {
+       rest.Build(r)
+
+       if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+               var buf bytes.Buffer
+               err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+               if err != nil {
+                       r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err)
+                       return
+               }
+               r.SetBufferBody(buf.Bytes())
+       }
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+       if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+               defer r.HTTPResponse.Body.Close()
+               decoder := xml.NewDecoder(r.HTTPResponse.Body)
+               err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+               if err != nil {
+                       r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err)
+                       return
+               }
+       } else {
+               rest.Unmarshal(r)
+       }
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+       rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+       query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644 (file)
index 0000000..da1a681
--- /dev/null
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+       "io"
+       "io/ioutil"
+
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+       if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+               return
+       }
+
+       io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+       r.HTTPResponse.Body.Close()
+}
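+
+// Editor's note: draining the body with io.Copy before closing it allows
+// net/http to reuse the underlying keep-alive connection for subsequent
+// requests, which is why the handler does not simply close the body.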
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644 (file)
index 0000000..7091b45
--- /dev/null
@@ -0,0 +1,296 @@
+// Package xmlutil provides XML serialization of AWS requests and responses.
+package xmlutil
+
+import (
+       "encoding/base64"
+       "encoding/xml"
+       "fmt"
+       "reflect"
+       "sort"
+       "strconv"
+       "time"
+
+       "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder.
+// Error will be returned if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+       b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+       root := NewXMLElement(xml.Name{})
+       if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+               return err
+       }
+       for _, c := range root.Children {
+               for _, v := range c {
+                       return StructToXML(e, v, false)
+               }
+       }
+       return nil
+}
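+
+// Editor's illustrative usage (the Tag struct and its tags are hypothetical,
+// not generated SDK shapes):
+//
+//    type Tag struct {
+//        _     struct{} `type:"structure" locationName:"Tag"`
+//        Key   *string  `locationName:"Key" type:"string"`
+//        Value *string  `locationName:"Value" type:"string"`
+//    }
+//
+//    var buf bytes.Buffer
+//    t := Tag{Key: aws.String("env"), Value: aws.String("prod")}
+//    if err := BuildXML(&t, xml.NewEncoder(&buf)); err == nil {
+//        // buf now holds roughly <Tag><Key>env</Key><Value>prod</Value></Tag>;
+//        // sibling order follows map iteration and is not guaranteed.
+//    }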
+
+// elemOf returns the element a pointer value points to, unwrapping nested pointers.
+func elemOf(value reflect.Value) reflect.Value {
+       for value.Kind() == reflect.Ptr {
+               value = value.Elem()
+       }
+       return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+       encoder    *xml.Encoder
+       namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value
+// according to its specific kind: struct, list, map, or scalar.
+//
+// It also takes a "type" tag value to set what type the value should be
+// converted to an XMLNode as. If no type is provided, reflection is used to
+// determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+       value = elemOf(value)
+       if !value.IsValid() { // no need to handle zero values
+               return nil
+       } else if tag.Get("location") != "" { // don't handle non-body location values
+               return nil
+       }
+
+       t := tag.Get("type")
+       if t == "" {
+               switch value.Kind() {
+               case reflect.Struct:
+                       t = "structure"
+               case reflect.Slice:
+                       t = "list"
+               case reflect.Map:
+                       t = "map"
+               }
+       }
+
+       switch t {
+       case "structure":
+               if field, ok := value.Type().FieldByName("_"); ok {
+                       tag = tag + reflect.StructTag(" ") + field.Tag
+               }
+               return b.buildStruct(value, current, tag)
+       case "list":
+               return b.buildList(value, current, tag)
+       case "map":
+               return b.buildMap(value, current, tag)
+       default:
+               return b.buildScalar(value, current, tag)
+       }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are also converted to XMLNodes.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+       if !value.IsValid() {
+               return nil
+       }
+
+       fieldAdded := false
+
+       // unwrap payloads
+       if payload := tag.Get("payload"); payload != "" {
+               field, _ := value.Type().FieldByName(payload)
+               tag = field.Tag
+               value = elemOf(value.FieldByName(payload))
+
+               if !value.IsValid() {
+                       return nil
+               }
+       }
+
+       child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+       // there is an xmlNamespace associated with this struct
+       if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+               ns := xml.Attr{
+                       Name:  xml.Name{Local: "xmlns"},
+                       Value: uri,
+               }
+               if prefix != "" {
+                       b.namespaces[prefix] = uri // register the namespace
+                       ns.Name.Local = "xmlns:" + prefix
+               }
+
+               child.Attr = append(child.Attr, ns)
+       }
+
+       t := value.Type()
+       for i := 0; i < value.NumField(); i++ {
+               member := elemOf(value.Field(i))
+               field := t.Field(i)
+
+               if field.PkgPath != "" {
+                       continue // ignore unexported fields
+               }
+               if field.Tag.Get("ignore") != "" {
+                       continue
+               }
+
+               mTag := field.Tag
+               if mTag.Get("location") != "" { // skip non-body members
+                       continue
+               }
+
+               if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+                       token := protocol.GetIdempotencyToken()
+                       member = reflect.ValueOf(token)
+               }
+
+               memberName := mTag.Get("locationName")
+               if memberName == "" {
+                       memberName = field.Name
+                       mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+               }
+               if err := b.buildValue(member, child, mTag); err != nil {
+                       return err
+               }
+
+               fieldAdded = true
+       }
+
+       if fieldAdded { // only append this child if we have one or more valid members
+               current.AddChild(child)
+       }
+
+       return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as child nodes. All
+// nested values in the list are also converted to XMLNodes.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+       if value.IsNil() { // don't build omitted lists
+               return nil
+       }
+
+       // check whether the list members should be flattened
+       flattened := tag.Get("flattened") != ""
+
+       xname := xml.Name{Local: tag.Get("locationName")}
+       if flattened {
+               for i := 0; i < value.Len(); i++ {
+                       child := NewXMLElement(xname)
+                       current.AddChild(child)
+                       if err := b.buildValue(value.Index(i), child, ""); err != nil {
+                               return err
+                       }
+               }
+       } else {
+               list := NewXMLElement(xname)
+               current.AddChild(list)
+
+               for i := 0; i < value.Len(); i++ {
+                       iname := tag.Get("locationNameList")
+                       if iname == "" {
+                               iname = "member"
+                       }
+
+                       child := NewXMLElement(xml.Name{Local: iname})
+                       list.AddChild(child)
+                       if err := b.buildValue(value.Index(i), child, ""); err != nil {
+                               return err
+                       }
+               }
+       }
+
+       return nil
+}
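+
+// Editor's illustrative note (not part of the upstream SDK source; element
+// names are hypothetical): a list tagged locationName:"Items" conceptually
+// serializes as
+//
+//    <Items><member>a</member><member>b</member></Items>
+//
+// while the same list with a flattened tag repeats the element per item:
+//
+//    <Items>a</Items><Items>b</Items>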
+
+// buildMap adds the value's key/value pairs to the current XMLNode as child nodes. All
+// nested values in the map are also converted to XMLNodes.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+       if value.IsNil() { // don't build omitted maps
+               return nil
+       }
+
+       maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+       current.AddChild(maproot)
+       current = maproot
+
+       kname, vname := "key", "value"
+       if n := tag.Get("locationNameKey"); n != "" {
+               kname = n
+       }
+       if n := tag.Get("locationNameValue"); n != "" {
+               vname = n
+       }
+
+       // sorting is not required for compliance, but it makes testing easier
+       keys := make([]string, value.Len())
+       for i, k := range value.MapKeys() {
+               keys[i] = k.String()
+       }
+       sort.Strings(keys)
+
+       for _, k := range keys {
+               v := value.MapIndex(reflect.ValueOf(k))
+
+               mapcur := current
+               if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+                       child := NewXMLElement(xml.Name{Local: "entry"})
+                       mapcur.AddChild(child)
+                       mapcur = child
+               }
+
+               kchild := NewXMLElement(xml.Name{Local: kname})
+               kchild.Text = k
+               vchild := NewXMLElement(xml.Name{Local: vname})
+               mapcur.AddChild(kchild)
+               mapcur.AddChild(vchild)
+
+               if err := b.buildValue(v, vchild, ""); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
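+
+// Editor's illustrative note (not part of the upstream SDK source; names are
+// hypothetical): a non-flattened map tagged locationName:"Tags" holding the
+// pair "env" -> "prod" conceptually serializes as
+//
+//    <Tags><entry><key>env</key><value>prod</value></entry></Tags>
+//
+// whereas a flattened map omits the <entry> wrapper and places the <key> and
+// <value> children directly under the map element.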
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if the tag contains an "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+       var str string
+       switch converted := value.Interface().(type) {
+       case string:
+               str = converted
+       case []byte:
+               if !value.IsNil() {
+                       str = base64.StdEncoding.EncodeToString(converted)
+               }
+       case bool:
+               str = strconv.FormatBool(converted)
+       case int64:
+               str = strconv.FormatInt(converted, 10)
+       case int:
+               str = strconv.Itoa(converted)
+       case float64:
+               str = strconv.FormatFloat(converted, 'f', -1, 64)
+       case float32:
+               str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+       case time.Time:
+               const ISO8601UTC = "2006-01-02T15:04:05Z"
+               str = converted.UTC().Format(ISO8601UTC)
+       default:
+               return fmt.Errorf("unsupported value for param %s: %v (%s)",
+                       tag.Get("locationName"), value.Interface(), value.Type().Name())
+       }
+
+       xname := xml.Name{Local: tag.Get("locationName")}
+       if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+               attr := xml.Attr{Name: xname, Value: str}
+               current.Attr = append(current.Attr, attr)
+       } else { // regular text node
+               current.AddChild(&XMLNode{Name: xname, Text: str})
+       }
+       return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644 (file)
index 0000000..8758462
--- /dev/null
@@ -0,0 +1,260 @@
+package xmlutil
+
+import (
+       "encoding/base64"
+       "encoding/xml"
+       "fmt"
+       "io"
+       "reflect"
+       "strconv"
+       "strings"
+       "time"
+)
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. The shape
+// of v needs to match the XML expected to be decoded.
+// If the shape doesn't match, unmarshaling will fail.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+       n, err := XMLToStruct(d, nil)
+       if err != nil {
+               return err
+       }
+       if n.Children != nil {
+               for _, root := range n.Children {
+                       for _, c := range root {
+                               if wrappedChild, ok := c.Children[wrapper]; ok {
+                                       c = wrappedChild[0] // pull out wrapped element
+                               }
+
+                               err = parse(reflect.ValueOf(v), c, "")
+                               if err != nil {
+                                       if err == io.EOF {
+                                               return nil
+                                       }
+                                       return err
+                               }
+                       }
+               }
+               return nil
+       }
+       return nil
+}
+
+// parse deserializes any value from the XMLNode. The type tag is used to infer the type; otherwise
+// reflection is used to determine the type of r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+       rtype := r.Type()
+       if rtype.Kind() == reflect.Ptr {
+               rtype = rtype.Elem() // check kind of actual element type
+       }
+
+       t := tag.Get("type")
+       if t == "" {
+               switch rtype.Kind() {
+               case reflect.Struct:
+                       t = "structure"
+               case reflect.Slice:
+                       t = "list"
+               case reflect.Map:
+                       t = "map"
+               }
+       }
+
+       switch t {
+       case "structure":
+               if field, ok := rtype.FieldByName("_"); ok {
+                       tag = field.Tag
+               }
+               return parseStruct(r, node, tag)
+       case "list":
+               return parseList(r, node, tag)
+       case "map":
+               return parseMap(r, node, tag)
+       default:
+               return parseScalar(r, node, tag)
+       }
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+       t := r.Type()
+       if r.Kind() == reflect.Ptr {
+               if r.IsNil() { // create the structure if it's nil
+                       s := reflect.New(r.Type().Elem())
+                       r.Set(s)
+                       r = s
+               }
+
+               r = r.Elem()
+               t = t.Elem()
+       }
+
+       // unwrap any payloads
+       if payload := tag.Get("payload"); payload != "" {
+               field, _ := t.FieldByName(payload)
+               return parseStruct(r.FieldByName(payload), node, field.Tag)
+       }
+
+       for i := 0; i < t.NumField(); i++ {
+               field := t.Field(i)
+               if c := field.Name[0:1]; strings.ToLower(c) == c {
+                       continue // ignore unexported fields
+               }
+
+               // figure out what this field is called
+               name := field.Name
+               if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+                       name = field.Tag.Get("locationNameList")
+               } else if locName := field.Tag.Get("locationName"); locName != "" {
+                       name = locName
+               }
+
+               // try to find the field by name in elements
+               elems := node.Children[name]
+
+               if elems == nil { // try to find the field in attributes
+                       if val, ok := node.findElem(name); ok {
+                               elems = []*XMLNode{{Text: val}}
+                       }
+               }
+
+               member := r.FieldByName(field.Name)
+               for _, elem := range elems {
+                       err := parse(member, elem, field.Tag)
+                       if err != nil {
+                               return err
+                       }
+               }
+       }
+       return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+       t := r.Type()
+
+       if tag.Get("flattened") == "" { // look at all item entries
+               mname := "member"
+               if name := tag.Get("locationNameList"); name != "" {
+                       mname = name
+               }
+
+               if Children, ok := node.Children[mname]; ok {
+                       if r.IsNil() {
+                               r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
+                       }
+
+                       for i, c := range Children {
+                               err := parse(r.Index(i), c, "")
+                               if err != nil {
+                                       return err
+                               }
+                       }
+               }
+       } else { // flattened list means this is a single element
+               if r.IsNil() {
+                       r.Set(reflect.MakeSlice(t, 0, 0))
+               }
+
+               childR := reflect.Zero(t.Elem())
+               r.Set(reflect.Append(r, childR))
+               err := parse(r.Index(r.Len()-1), node, "")
+               if err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+       if r.IsNil() {
+               r.Set(reflect.MakeMap(r.Type()))
+       }
+
+       if tag.Get("flattened") == "" { // look at all child entries
+               for _, entry := range node.Children["entry"] {
+                       parseMapEntry(r, entry, tag)
+               }
+       } else { // this element is itself an entry
+               parseMapEntry(r, node, tag)
+       }
+
+       return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+       kname, vname := "key", "value"
+       if n := tag.Get("locationNameKey"); n != "" {
+               kname = n
+       }
+       if n := tag.Get("locationNameValue"); n != "" {
+               vname = n
+       }
+
+       keys, ok := node.Children[kname]
+       values := node.Children[vname]
+       if ok {
+               for i, key := range keys {
+                       keyR := reflect.ValueOf(key.Text)
+                       value := values[i]
+                       valueR := reflect.New(r.Type().Elem()).Elem()
+
+                       parse(valueR, value, "")
+                       r.SetMapIndex(keyR, valueR)
+               }
+       }
+       return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+       switch r.Interface().(type) {
+       case *string:
+               r.Set(reflect.ValueOf(&node.Text))
+               return nil
+       case []byte:
+               b, err := base64.StdEncoding.DecodeString(node.Text)
+               if err != nil {
+                       return err
+               }
+               r.Set(reflect.ValueOf(b))
+       case *bool:
+               v, err := strconv.ParseBool(node.Text)
+               if err != nil {
+                       return err
+               }
+               r.Set(reflect.ValueOf(&v))
+       case *int64:
+               v, err := strconv.ParseInt(node.Text, 10, 64)
+               if err != nil {
+                       return err
+               }
+               r.Set(reflect.ValueOf(&v))
+       case *float64:
+               v, err := strconv.ParseFloat(node.Text, 64)
+               if err != nil {
+                       return err
+               }
+               r.Set(reflect.ValueOf(&v))
+       case *time.Time:
+               const ISO8601UTC = "2006-01-02T15:04:05Z"
+               t, err := time.Parse(ISO8601UTC, node.Text)
+               if err != nil {
+                       return err
+               }
+               r.Set(reflect.ValueOf(&t))
+       default:
+               return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+       }
+       return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644 (file)
index 0000000..3e970b6
--- /dev/null
@@ -0,0 +1,147 @@
+package xmlutil
+
+import (
+       "encoding/xml"
+       "fmt"
+       "io"
+       "sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+       Name     xml.Name              `json:",omitempty"`
+       Children map[string][]*XMLNode `json:",omitempty"`
+       Text     string                `json:",omitempty"`
+       Attr     []xml.Attr            `json:",omitempty"`
+
+       namespaces map[string]string
+       parent     *XMLNode
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+       return &XMLNode{
+               Name:     name,
+               Children: map[string][]*XMLNode{},
+               Attr:     []xml.Attr{},
+       }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+       if _, ok := n.Children[child.Name.Local]; !ok {
+               n.Children[child.Name.Local] = []*XMLNode{}
+       }
+       n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream to an XMLNode with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+       out := &XMLNode{}
+       for {
+               tok, err := d.Token()
+               if err != nil {
+                       if err == io.EOF {
+                               break
+                       } else {
+                               return out, err
+                       }
+               }
+
+               if tok == nil {
+                       break
+               }
+
+               switch typed := tok.(type) {
+               case xml.CharData:
+                       out.Text = string(typed.Copy())
+               case xml.StartElement:
+                       el := typed.Copy()
+                       out.Attr = el.Attr
+                       if out.Children == nil {
+                               out.Children = map[string][]*XMLNode{}
+                       }
+
+                       name := typed.Name.Local
+                       slice := out.Children[name]
+                       if slice == nil {
+                               slice = []*XMLNode{}
+                       }
+                       node, e := XMLToStruct(d, &el)
+                       out.findNamespaces()
+                       if e != nil {
+                               return out, e
+                       }
+                       node.Name = typed.Name
+                       node.findNamespaces()
+                       // Save into a temp variable, simply because out gets squashed during
+                       // loop iterations
+                       tempOut := *out
+                       node.parent = &tempOut
+                       slice = append(slice, node)
+                       out.Children[name] = slice
+               case xml.EndElement:
+                       if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+                               return out, nil
+                       }
+                       out = &XMLNode{}
+               }
+       }
+       return out, nil
+}
+
+func (n *XMLNode) findNamespaces() {
+       ns := map[string]string{}
+       for _, a := range n.Attr {
+               if a.Name.Space == "xmlns" {
+                       ns[a.Value] = a.Name.Local
+               }
+       }
+
+       n.namespaces = ns
+}
+
+func (n *XMLNode) findElem(name string) (string, bool) {
+       for node := n; node != nil; node = node.parent {
+               for _, a := range node.Attr {
+                       namespace := a.Name.Space
+                       if v, ok := node.namespaces[namespace]; ok {
+                               namespace = v
+                       }
+                       if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
+                               return a.Value, true
+                       }
+               }
+       }
+       return "", false
+}
+
+// StructToXML writes an XMLNode to an xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+       e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+       if node.Text != "" {
+               e.EncodeToken(xml.CharData([]byte(node.Text)))
+       } else if sorted {
+               sortedNames := []string{}
+               for k := range node.Children {
+                       sortedNames = append(sortedNames, k)
+               }
+               sort.Strings(sortedNames)
+
+               for _, k := range sortedNames {
+                       for _, v := range node.Children[k] {
+                               StructToXML(e, v, sorted)
+                       }
+               }
+       } else {
+               for _, c := range node.Children {
+                       for _, v := range c {
+                               StructToXML(e, v, sorted)
+                       }
+               }
+       }
+
+       e.EncodeToken(xml.EndElement{Name: node.Name})
+       return e.Flush()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
new file mode 100644 (file)
index 0000000..52ac02c
--- /dev/null
@@ -0,0 +1,19245 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+       "fmt"
+       "io"
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awsutil"
+       "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/private/protocol"
+       "github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+const opAbortMultipartUpload = "AbortMultipartUpload"
+
+// AbortMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the AbortMultipartUpload operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See AbortMultipartUpload for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AbortMultipartUpload method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the AbortMultipartUploadRequest method.
+//    req, resp := client.AbortMultipartUploadRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) {
+       op := &request.Operation{
+               Name:       opAbortMultipartUpload,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}/{Key+}",
+       }
+
+       if input == nil {
+               input = &AbortMultipartUploadInput{}
+       }
+
+       output = &AbortMultipartUploadOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// AbortMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Aborts a multipart upload.
+//
+// To verify that all parts have been removed, so you don't get charged for
+// the part storage, you should call the List Parts operation and ensure the
+// parts list is empty.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation AbortMultipartUpload for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchUpload "NoSuchUpload"
+//   The specified multipart upload does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
+func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
+       req, out := c.AbortMultipartUploadRequest(input)
+       return out, req.Send()
+}
+
+// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AbortMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) {
+       req, out := c.AbortMultipartUploadRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opCompleteMultipartUpload = "CompleteMultipartUpload"
+
+// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the CompleteMultipartUpload operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See CompleteMultipartUpload for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CompleteMultipartUpload method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CompleteMultipartUploadRequest method.
+//    req, resp := client.CompleteMultipartUploadRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) {
+       op := &request.Operation{
+               Name:       opCompleteMultipartUpload,
+               HTTPMethod: "POST",
+               HTTPPath:   "/{Bucket}/{Key+}",
+       }
+
+       if input == nil {
+               input = &CompleteMultipartUploadInput{}
+       }
+
+       output = &CompleteMultipartUploadOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// CompleteMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Completes a multipart upload by assembling previously uploaded parts.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CompleteMultipartUpload for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
+func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
+       req, out := c.CompleteMultipartUploadRequest(input)
+       return out, req.Send()
+}
+
+// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CompleteMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) {
+       req, out := c.CompleteMultipartUploadRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
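+// A minimal usage sketch for CompleteMultipartUpload, assuming an existing
+// *s3.S3 client svc and parts already uploaded with UploadPart; the ETag and
+// identifier values below are placeholders:
+//
+//    out, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("example-key"),
+//        UploadId: aws.String("example-upload-id"),
+//        MultipartUpload: &s3.CompletedMultipartUpload{
+//            Parts: []*s3.CompletedPart{
+//                {ETag: aws.String(`"etag-1"`), PartNumber: aws.Int64(1)},
+//            },
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Location))
+//    }
+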
+const opCopyObject = "CopyObject"
+
+// CopyObjectRequest generates a "aws/request.Request" representing the
+// client's request for the CopyObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See CopyObject for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the CopyObject method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CopyObjectRequest method.
+//    req, resp := client.CopyObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
+func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) {
+       op := &request.Operation{
+               Name:       opCopyObject,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}/{Key+}",
+       }
+
+       if input == nil {
+               input = &CopyObjectInput{}
+       }
+
+       output = &CopyObjectOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// CopyObject API operation for Amazon Simple Storage Service.
+//
+// Creates a copy of an object that is already stored in Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CopyObject for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError"
+//   The source object of the COPY operation is not in the active tier and is
+//   only stored in Amazon Glacier.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
+func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
+       req, out := c.CopyObjectRequest(input)
+       return out, req.Send()
+}
+
+// CopyObjectWithContext is the same as CopyObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CopyObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) {
+       req, out := c.CopyObjectRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
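+// A minimal usage sketch for CopyObject, assuming an existing *s3.S3 client
+// svc; CopySource names the source as "source-bucket/source-key" (URL
+// encoded), and all names below are placeholders:
+//
+//    _, err := svc.CopyObject(&s3.CopyObjectInput{
+//        Bucket:     aws.String("dest-bucket"),
+//        Key:        aws.String("dest-key"),
+//        CopySource: aws.String("source-bucket/source-key"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }
+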
+const opCreateBucket = "CreateBucket"
+
+// CreateBucketRequest generates a "aws/request.Request" representing the
+// client's request for the CreateBucket operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See CreateBucket for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the CreateBucket method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateBucketRequest method.
+//    req, resp := client.CreateBucketRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
+       op := &request.Operation{
+               Name:       opCreateBucket,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}",
+       }
+
+       if input == nil {
+               input = &CreateBucketInput{}
+       }
+
+       output = &CreateBucketOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// CreateBucket API operation for Amazon Simple Storage Service.
+//
+// Creates a new bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateBucket for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeBucketAlreadyExists "BucketAlreadyExists"
+//   The requested bucket name is not available. The bucket namespace is shared
+//   by all users of the system. Please select a different name and try again.
+//
+//   * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou"
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
+func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+       req, out := c.CreateBucketRequest(input)
+       return out, req.Send()
+}
+
+// CreateBucketWithContext is the same as CreateBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) {
+       req, out := c.CreateBucketRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
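+// A minimal usage sketch for CreateBucket, assuming an existing *s3.S3
+// client svc. A location constraint places the bucket outside us-east-1;
+// the bucket name is a placeholder and must be globally unique:
+//
+//    _, err := svc.CreateBucket(&s3.CreateBucketInput{
+//        Bucket: aws.String("example-bucket"),
+//        CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+//            LocationConstraint: aws.String("eu-west-1"),
+//        },
+//    })
+//    if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeBucketAlreadyOwnedByYou {
+//        fmt.Println("bucket already exists in this account")
+//    }
+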
+const opCreateMultipartUpload = "CreateMultipartUpload"
+
+// CreateMultipartUploadRequest generates a "aws/request.Request" representing the
+// client's request for the CreateMultipartUpload operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See CreateMultipartUpload for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the CreateMultipartUpload method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the CreateMultipartUploadRequest method.
+//    req, resp := client.CreateMultipartUploadRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
+       op := &request.Operation{
+               Name:       opCreateMultipartUpload,
+               HTTPMethod: "POST",
+               HTTPPath:   "/{Bucket}/{Key+}?uploads",
+       }
+
+       if input == nil {
+               input = &CreateMultipartUploadInput{}
+       }
+
+       output = &CreateMultipartUploadOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// CreateMultipartUpload API operation for Amazon Simple Storage Service.
+//
+// Initiates a multipart upload and returns an upload ID.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the upload in order to stop being charged
+// for storage of the uploaded parts. Only after you complete or abort the
+// upload does Amazon S3 free the parts storage and stop charging you for it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation CreateMultipartUpload for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
+func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
+       req, out := c.CreateMultipartUploadRequest(input)
+       return out, req.Send()
+}
+
+// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) {
+       req, out := c.CreateMultipartUploadRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
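+// A minimal usage sketch for CreateMultipartUpload, assuming an existing
+// *s3.S3 client svc (bucket and key are placeholders); the returned upload
+// ID is what UploadPart, CompleteMultipartUpload, and AbortMultipartUpload
+// key on:
+//
+//    out, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-key"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.UploadId))
+//    }
+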
+const opDeleteBucket = "DeleteBucket"
+
+// DeleteBucketRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucket operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucket for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucket method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketRequest method.
+//    req, resp := client.DeleteBucketRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucket,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}",
+       }
+
+       if input == nil {
+               input = &DeleteBucketInput{}
+       }
+
+       output = &DeleteBucketOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucket API operation for Amazon Simple Storage Service.
+//
+// Deletes the bucket. All objects (including all object versions and Delete
+// Markers) in the bucket must be deleted before the bucket itself can be deleted.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucket for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
+func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
+       req, out := c.DeleteBucketRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketWithContext is the same as DeleteBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) {
+       req, out := c.DeleteBucketRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
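+// A minimal usage sketch for DeleteBucketWithContext, assuming an existing
+// *s3.S3 client svc; a standard context.Context satisfies aws.Context, so a
+// timeout bounds the call (the bucket, a placeholder, must already be empty):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    _, err := svc.DeleteBucketWithContext(ctx, &s3.DeleteBucketInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }
+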
+const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration"
+
+// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketAnalyticsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucketAnalyticsConfiguration method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method.
+//    req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucketAnalyticsConfiguration,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}?analytics",
+       }
+
+       if input == nil {
+               input = &DeleteBucketAnalyticsConfigurationInput{}
+       }
+
+       output = &DeleteBucketAnalyticsConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketAnalyticsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
+func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+       req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+       req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
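+// A minimal usage sketch for DeleteBucketAnalyticsConfiguration, assuming an
+// existing *s3.S3 client svc; Id must match the ID the configuration was
+// created with (both values are placeholders). The other
+// DeleteBucket*Configuration operations in this file take the same shape:
+//
+//    _, err := svc.DeleteBucketAnalyticsConfiguration(&s3.DeleteBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//        Id:     aws.String("example-analytics-id"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }
+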
+const opDeleteBucketCors = "DeleteBucketCors"
+
+// DeleteBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketCors operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketCors for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucketCors method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketCorsRequest method.
+//    req, resp := client.DeleteBucketCorsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucketCors,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}?cors",
+       }
+
+       if input == nil {
+               input = &DeleteBucketCorsInput{}
+       }
+
+       output = &DeleteBucketCorsOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucketCors API operation for Amazon Simple Storage Service.
+//
+// Deletes the CORS configuration information set for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketCors for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
+func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
+       req, out := c.DeleteBucketCorsRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) {
+       req, out := c.DeleteBucketCorsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration"
+
+// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketInventoryConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucketInventoryConfiguration method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketInventoryConfigurationRequest method.
+//    req, resp := client.DeleteBucketInventoryConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucketInventoryConfiguration,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}?inventory",
+       }
+
+       if input == nil {
+               input = &DeleteBucketInventoryConfigurationInput{}
+       }
+
+       output = &DeleteBucketInventoryConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes an inventory configuration (identified by the inventory ID) from
+// the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketInventoryConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
+func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) {
+       req, out := c.DeleteBucketInventoryConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) {
+       req, out := c.DeleteBucketInventoryConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
+
+// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketLifecycle operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketLifecycle for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucketLifecycle method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketLifecycleRequest method.
+//    req, resp := client.DeleteBucketLifecycleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucketLifecycle,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}?lifecycle",
+       }
+
+       if input == nil {
+               input = &DeleteBucketLifecycleInput{}
+       }
+
+       output = &DeleteBucketLifecycleOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// Deletes the lifecycle configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketLifecycle for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
+func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
+       req, out := c.DeleteBucketLifecycleRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) {
+       req, out := c.DeleteBucketLifecycleRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
+
+// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketMetricsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucketMetricsConfiguration method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketMetricsConfigurationRequest method.
+//    req, resp := client.DeleteBucketMetricsConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucketMetricsConfiguration,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}?metrics",
+       }
+
+       if input == nil {
+               input = &DeleteBucketMetricsConfigurationInput{}
+       }
+
+       output = &DeleteBucketMetricsConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Deletes a metrics configuration (specified by the metrics configuration ID)
+// from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketMetricsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
+func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) {
+       req, out := c.DeleteBucketMetricsConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) {
+       req, out := c.DeleteBucketMetricsConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opDeleteBucketPolicy = "DeleteBucketPolicy"
+
+// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketPolicy for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucketPolicy method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketPolicyRequest method.
+//    req, resp := client.DeleteBucketPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucketPolicy,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}?policy",
+       }
+
+       if input == nil {
+               input = &DeleteBucketPolicyInput{}
+       }
+
+       output = &DeleteBucketPolicyOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// Deletes the policy from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketPolicy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
+       req, out := c.DeleteBucketPolicyRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) {
+       req, out := c.DeleteBucketPolicyRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opDeleteBucketReplication = "DeleteBucketReplication"
+
+// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketReplication operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketReplication for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucketReplication method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketReplicationRequest method.
+//    req, resp := client.DeleteBucketReplicationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucketReplication,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}?replication",
+       }
+
+       if input == nil {
+               input = &DeleteBucketReplicationInput{}
+       }
+
+       output = &DeleteBucketReplicationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Deletes the replication configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketReplication for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
+func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
+       req, out := c.DeleteBucketReplicationRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) {
+       req, out := c.DeleteBucketReplicationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opDeleteBucketTagging = "DeleteBucketTagging"
+
+// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketTagging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucketTagging method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketTaggingRequest method.
+//    req, resp := client.DeleteBucketTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
+func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucketTagging,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}?tagging",
+       }
+
+       if input == nil {
+               input = &DeleteBucketTaggingInput{}
+       }
+
+       output = &DeleteBucketTaggingOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucketTagging API operation for Amazon Simple Storage Service.
+//
+// Deletes the tags from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
+func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
+       req, out := c.DeleteBucketTaggingRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) {
+       req, out := c.DeleteBucketTaggingRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opDeleteBucketWebsite = "DeleteBucketWebsite"
+
+// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteBucketWebsite operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteBucketWebsite for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteBucketWebsite method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteBucketWebsiteRequest method.
+//    req, resp := client.DeleteBucketWebsiteRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
+func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) {
+       op := &request.Operation{
+               Name:       opDeleteBucketWebsite,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}?website",
+       }
+
+       if input == nil {
+               input = &DeleteBucketWebsiteInput{}
+       }
+
+       output = &DeleteBucketWebsiteOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// DeleteBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// This operation removes the website configuration from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteBucketWebsite for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
+func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
+       req, out := c.DeleteBucketWebsiteRequest(input)
+       return out, req.Send()
+}
+
+// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) {
+       req, out := c.DeleteBucketWebsiteRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opDeleteObject = "DeleteObject"
+
+// DeleteObjectRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteObject for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteObject method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteObjectRequest method.
+//    req, resp := client.DeleteObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) {
+       op := &request.Operation{
+               Name:       opDeleteObject,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}/{Key+}",
+       }
+
+       if input == nil {
+               input = &DeleteObjectInput{}
+       }
+
+       output = &DeleteObjectOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// DeleteObject API operation for Amazon Simple Storage Service.
+//
+// Removes the null version (if there is one) of an object and inserts a delete
+// marker, which becomes the latest version of the object. If there isn't a
+// null version, Amazon S3 does not remove any objects.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObject for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
+func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
+       req, out := c.DeleteObjectRequest(input)
+       return out, req.Send()
+}
+
+// DeleteObjectWithContext is the same as DeleteObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
+       req, out := c.DeleteObjectRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
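+// A minimal usage sketch for DeleteObject on a versioned bucket, assuming an
+// existing *s3.S3 client svc (bucket and key are placeholders). With no
+// VersionId the call inserts a delete marker, as described above, and the
+// output reports whether it did:
+//
+//    out, err := svc.DeleteObject(&s3.DeleteObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-key"),
+//    })
+//    if err == nil && aws.BoolValue(out.DeleteMarker) {
+//        fmt.Println("a delete marker is now the latest version")
+//    }
+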
+const opDeleteObjectTagging = "DeleteObjectTagging"
+
+// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteObjectTagging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteObjectTagging method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteObjectTaggingRequest method.
+//    req, resp := client.DeleteObjectTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) {
+       op := &request.Operation{
+               Name:       opDeleteObjectTagging,
+               HTTPMethod: "DELETE",
+               HTTPPath:   "/{Bucket}/{Key+}?tagging",
+       }
+
+       if input == nil {
+               input = &DeleteObjectTaggingInput{}
+       }
+
+       output = &DeleteObjectTaggingOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// DeleteObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Removes the tag-set from an existing object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
+func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
+       req, out := c.DeleteObjectTaggingRequest(input)
+       return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
+       req, out := c.DeleteObjectTaggingRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
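+// A minimal usage sketch for DeleteObjectTagging, assuming an existing
+// *s3.S3 client svc; the optional VersionId targets the tag-set of a
+// specific object version (all values are placeholders):
+//
+//    _, err := svc.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
+//        Bucket:    aws.String("example-bucket"),
+//        Key:       aws.String("example-key"),
+//        VersionId: aws.String("example-version-id"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }
+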
+const opDeleteObjects = "DeleteObjects"
+
+// DeleteObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the DeleteObjects operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DeleteObjects for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle with a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the DeleteObjects method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DeleteObjectsRequest method.
+//    req, resp := client.DeleteObjectsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
+       op := &request.Operation{
+               Name:       opDeleteObjects,
+               HTTPMethod: "POST",
+               HTTPPath:   "/{Bucket}?delete",
+       }
+
+       if input == nil {
+               input = &DeleteObjectsInput{}
+       }
+
+       output = &DeleteObjectsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// DeleteObjects API operation for Amazon Simple Storage Service.
+//
+// This operation enables you to delete multiple objects from a bucket using
+// a single HTTP request. You may specify up to 1000 keys.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation DeleteObjects for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
+func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
+       req, out := c.DeleteObjectsRequest(input)
+       return out, req.Send()
+}
+
+// DeleteObjectsWithContext is the same as DeleteObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) {
+       req, out := c.DeleteObjectsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
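+
+// A minimal sketch of a batch delete, assuming svc is an existing *S3 client
+// and the aws and fmt packages are imported; the bucket and key names are
+// placeholders:
+//
+//    out, err := svc.DeleteObjects(&DeleteObjectsInput{
+//        Bucket: aws.String("example-bucket"),
+//        Delete: &Delete{
+//            Objects: []*ObjectIdentifier{
+//                {Key: aws.String("logs/2017-06-06.txt")},
+//                {Key: aws.String("logs/2017-06-07.txt")},
+//            },
+//            Quiet: aws.Bool(true),
+//        },
+//    })
+//    if err == nil {
+//        fmt.Println(len(out.Deleted), "objects deleted")
+//    }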
+
+const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
+
+// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAccelerateConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketAccelerateConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketAccelerateConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketAccelerateConfigurationRequest method.
+//    req, resp := client.GetBucketAccelerateConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
+func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketAccelerateConfiguration,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?accelerate",
+       }
+
+       if input == nil {
+               input = &GetBucketAccelerateConfigurationInput{}
+       }
+
+       output = &GetBucketAccelerateConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the accelerate configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAccelerateConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
+func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
+       req, out := c.GetBucketAccelerateConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAccelerateConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) {
+       req, out := c.GetBucketAccelerateConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
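+
+// A minimal sketch of reading the acceleration status, assuming svc is an
+// existing *S3 client and the aws and fmt packages are imported; the bucket
+// name is a placeholder:
+//
+//    out, err := svc.GetBucketAccelerateConfiguration(&GetBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Status)) // e.g. "Enabled" or "Suspended"
+//    }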
+
+const opGetBucketAcl = "GetBucketAcl"
+
+// GetBucketAclRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketAcl for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketAcl method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketAclRequest method.
+//    req, resp := client.GetBucketAclRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketAcl,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?acl",
+       }
+
+       if input == nil {
+               input = &GetBucketAclInput{}
+       }
+
+       output = &GetBucketAclOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketAcl API operation for Amazon Simple Storage Service.
+//
+// Gets the access control policy for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAcl for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
+func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
+       req, out := c.GetBucketAclRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketAclWithContext is the same as GetBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) {
+       req, out := c.GetBucketAclRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
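+
+// A minimal sketch of iterating the returned grants, assuming svc is an
+// existing *S3 client and the aws and fmt packages are imported; the bucket
+// name is a placeholder:
+//
+//    out, err := svc.GetBucketAcl(&GetBucketAclInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        for _, g := range out.Grants {
+//            fmt.Println(aws.StringValue(g.Permission))
+//        }
+//    }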
+
+const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
+
+// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketAnalyticsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketAnalyticsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketAnalyticsConfigurationRequest method.
+//    req, resp := client.GetBucketAnalyticsConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
+func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketAnalyticsConfiguration,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?analytics",
+       }
+
+       if input == nil {
+               input = &GetBucketAnalyticsConfigurationInput{}
+       }
+
+       output = &GetBucketAnalyticsConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Gets an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketAnalyticsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
+func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) {
+       req, out := c.GetBucketAnalyticsConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) {
+       req, out := c.GetBucketAnalyticsConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketCors = "GetBucketCors"
+
+// GetBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketCors operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketCors for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketCors method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketCorsRequest method.
+//    req, resp := client.GetBucketCorsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
+func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketCors,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?cors",
+       }
+
+       if input == nil {
+               input = &GetBucketCorsInput{}
+       }
+
+       output = &GetBucketCorsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketCors API operation for Amazon Simple Storage Service.
+//
+// Returns the CORS configuration for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketCors for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
+func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
+       req, out := c.GetBucketCorsRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketCorsWithContext is the same as GetBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) {
+       req, out := c.GetBucketCorsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
+
+// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketInventoryConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketInventoryConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketInventoryConfigurationRequest method.
+//    req, resp := client.GetBucketInventoryConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
+func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketInventoryConfiguration,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?inventory",
+       }
+
+       if input == nil {
+               input = &GetBucketInventoryConfigurationInput{}
+       }
+
+       output = &GetBucketInventoryConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns an inventory configuration (identified by the inventory ID) from
+// the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketInventoryConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
+func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) {
+       req, out := c.GetBucketInventoryConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) {
+       req, out := c.GetBucketInventoryConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketLifecycle = "GetBucketLifecycle"
+
+// GetBucketLifecycleRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLifecycle operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketLifecycle for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketLifecycle method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketLifecycleRequest method.
+//    req, resp := client.GetBucketLifecycleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
+func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) {
+       if c.Client.Config.Logger != nil {
+               c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated")
+       }
+       op := &request.Operation{
+               Name:       opGetBucketLifecycle,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?lifecycle",
+       }
+
+       if input == nil {
+               input = &GetBucketLifecycleInput{}
+       }
+
+       output = &GetBucketLifecycleOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// Deprecated; see the GetBucketLifecycleConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycle for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
+func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
+       req, out := c.GetBucketLifecycleRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) {
+       req, out := c.GetBucketLifecycleRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
+
+// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLifecycleConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketLifecycleConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketLifecycleConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketLifecycleConfigurationRequest method.
+//    req, resp := client.GetBucketLifecycleConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketLifecycleConfiguration,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?lifecycle",
+       }
+
+       if input == nil {
+               input = &GetBucketLifecycleConfigurationInput{}
+       }
+
+       output = &GetBucketLifecycleConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the lifecycle configuration information set on the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLifecycleConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
+func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
+       req, out := c.GetBucketLifecycleConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) {
+       req, out := c.GetBucketLifecycleConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketLocation = "GetBucketLocation"
+
+// GetBucketLocationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLocation operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketLocation for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketLocation method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketLocationRequest method.
+//    req, resp := client.GetBucketLocationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
+func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketLocation,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?location",
+       }
+
+       if input == nil {
+               input = &GetBucketLocationInput{}
+       }
+
+       output = &GetBucketLocationOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketLocation API operation for Amazon Simple Storage Service.
+//
+// Returns the region the bucket resides in.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLocation for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
+func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
+       req, out := c.GetBucketLocationRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLocation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) {
+       req, out := c.GetBucketLocationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
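+
+// A minimal sketch of resolving the bucket region, assuming svc is an
+// existing *S3 client and the aws package is imported; the bucket name is a
+// placeholder. Buckets in US East (N. Virginia) report an empty
+// LocationConstraint:
+//
+//    out, err := svc.GetBucketLocation(&GetBucketLocationInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        region := aws.StringValue(out.LocationConstraint)
+//        if region == "" {
+//            region = "us-east-1"
+//        }
+//    }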
+
+const opGetBucketLogging = "GetBucketLogging"
+
+// GetBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketLogging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketLogging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketLogging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketLoggingRequest method.
+//    req, resp := client.GetBucketLoggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
+func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketLogging,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?logging",
+       }
+
+       if input == nil {
+               input = &GetBucketLoggingInput{}
+       }
+
+       output = &GetBucketLoggingOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Returns the logging status of a bucket and the permissions users have to
+// view and modify that status. To use GET, you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketLogging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
+func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
+       req, out := c.GetBucketLoggingRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) {
+       req, out := c.GetBucketLoggingRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
+
+// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketMetricsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketMetricsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketMetricsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketMetricsConfigurationRequest method.
+//    req, resp := client.GetBucketMetricsConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
+func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketMetricsConfiguration,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?metrics",
+       }
+
+       if input == nil {
+               input = &GetBucketMetricsConfigurationInput{}
+       }
+
+       output = &GetBucketMetricsConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Gets a metrics configuration (specified by the metrics configuration ID)
+// from the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketMetricsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
+func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) {
+       req, out := c.GetBucketMetricsConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) {
+       req, out := c.GetBucketMetricsConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketNotification = "GetBucketNotification"
+
+// GetBucketNotificationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketNotification operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketNotification for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketNotification method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketNotificationRequest method.
+//    req, resp := client.GetBucketNotificationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
+func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) {
+       if c.Client.Config.Logger != nil {
+               c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated")
+       }
+       op := &request.Operation{
+               Name:       opGetBucketNotification,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?notification",
+       }
+
+       if input == nil {
+               input = &GetBucketNotificationConfigurationRequest{}
+       }
+
+       output = &NotificationConfigurationDeprecated{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketNotification API operation for Amazon Simple Storage Service.
+//
+// Deprecated; see the GetBucketNotificationConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketNotification for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
+func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
+       req, out := c.GetBucketNotificationRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) {
+       req, out := c.GetBucketNotificationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration"
+
+// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketNotificationConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketNotificationConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketNotificationConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketNotificationConfigurationRequest method.
+//    req, resp := client.GetBucketNotificationConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
+func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) {
+       op := &request.Operation{
+               Name:       opGetBucketNotificationConfiguration,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?notification",
+       }
+
+       if input == nil {
+               input = &GetBucketNotificationConfigurationRequest{}
+       }
+
+       output = &NotificationConfiguration{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
+// Returns the notification configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketNotificationConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
+func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
+       req, out := c.GetBucketNotificationConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketNotificationConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) {
+       req, out := c.GetBucketNotificationConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketPolicy = "GetBucketPolicy"
+
+// GetBucketPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketPolicy for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketPolicyRequest method.
+//    req, resp := client.GetBucketPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
+func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketPolicy,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?policy",
+       }
+
+       if input == nil {
+               input = &GetBucketPolicyInput{}
+       }
+
+       output = &GetBucketPolicyOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// Returns the policy of a specified bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketPolicy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
+func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
+       req, out := c.GetBucketPolicyRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) {
+       req, out := c.GetBucketPolicyRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
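+
+// A minimal sketch of reading the policy document, assuming svc is an
+// existing *S3 client and the aws and fmt packages are imported; the bucket
+// name is a placeholder. The returned policy is a JSON string:
+//
+//    out, err := svc.GetBucketPolicy(&GetBucketPolicyInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        fmt.Println(aws.StringValue(out.Policy))
+//    }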
+
+const opGetBucketReplication = "GetBucketReplication"
+
+// GetBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketReplication operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketReplication for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketReplication method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketReplicationRequest method.
+//    req, resp := client.GetBucketReplicationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketReplication,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?replication",
+       }
+
+       if input == nil {
+               input = &GetBucketReplicationInput{}
+       }
+
+       output = &GetBucketReplicationOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Returns the replication configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketReplication for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+       req, out := c.GetBucketReplicationRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) {
+       req, out := c.GetBucketReplicationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketRequestPayment = "GetBucketRequestPayment"
+
+// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketRequestPayment operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketRequestPayment for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketRequestPayment method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketRequestPaymentRequest method.
+//    req, resp := client.GetBucketRequestPaymentRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
+func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketRequestPayment,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?requestPayment",
+       }
+
+       if input == nil {
+               input = &GetBucketRequestPaymentInput{}
+       }
+
+       output = &GetBucketRequestPaymentOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketRequestPayment API operation for Amazon Simple Storage Service.
+//
+// Returns the request payment configuration of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketRequestPayment for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
+func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
+       req, out := c.GetBucketRequestPaymentRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketRequestPayment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) {
+       req, out := c.GetBucketRequestPaymentRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetBucketTagging = "GetBucketTagging"
+
+// GetBucketTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketTagging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketTagging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketTaggingRequest method.
+//    req, resp := client.GetBucketTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
+func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketTagging,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?tagging",
+       }
+
+       if input == nil {
+               input = &GetBucketTaggingInput{}
+       }
+
+       output = &GetBucketTaggingOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag set associated with the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
+func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
+       req, out := c.GetBucketTaggingRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) {
+       req, out := c.GetBucketTaggingRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
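+
+// A minimal sketch of iterating the returned tag set, assuming svc is an
+// existing *S3 client and the aws and fmt packages are imported; the bucket
+// name is a placeholder:
+//
+//    out, err := svc.GetBucketTagging(&GetBucketTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//    })
+//    if err == nil {
+//        for _, tag := range out.TagSet {
+//            fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//        }
+//    }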
+
+const opGetBucketVersioning = "GetBucketVersioning"
+
+// GetBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketVersioning operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketVersioning for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle via a custom handler, or when you need to access properties on
+// the request object before or after sending the request. If you just want
+// the service response, call the GetBucketVersioning method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object to
+// execute the request.
+//
+//    // Example sending a request using the GetBucketVersioningRequest method.
+//    req, resp := client.GetBucketVersioningRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketVersioning,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?versioning",
+       }
+
+       if input == nil {
+               input = &GetBucketVersioningInput{}
+       }
+
+       output = &GetBucketVersioningOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketVersioning API operation for Amazon Simple Storage Service.
+//
+// Returns the versioning state of a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketVersioning for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
+func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
+       req, out := c.GetBucketVersioningRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
+       req, out := c.GetBucketVersioningRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
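+// Example (an illustrative sketch): checking whether versioning is enabled on
+// a bucket. Status is unset when versioning has never been configured; the
+// bucket name is a placeholder.
+//
+//    out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    if err == nil {
+//        enabled := aws.StringValue(out.Status) == s3.BucketVersioningStatusEnabled
+//        fmt.Println("versioning enabled:", enabled)
+//    }
+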
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the GetBucketWebsite operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetBucketWebsite for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the GetBucketWebsite
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetBucketWebsiteRequest method.
+//    req, resp := client.GetBucketWebsiteRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
+       op := &request.Operation{
+               Name:       opGetBucketWebsite,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?website",
+       }
+
+       if input == nil {
+               input = &GetBucketWebsiteInput{}
+       }
+
+       output = &GetBucketWebsiteOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Returns the website configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetBucketWebsite for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
+func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
+       req, out := c.GetBucketWebsiteRequest(input)
+       return out, req.Send()
+}
+
+// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) {
+       req, out := c.GetBucketWebsiteRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetObject = "GetObject"
+
+// GetObjectRequest generates a "aws/request.Request" representing the
+// client's request for the GetObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetObject for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the GetObject method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetObjectRequest method.
+//    req, resp := client.GetObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) {
+       op := &request.Operation{
+               Name:       opGetObject,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}/{Key+}",
+       }
+
+       if input == nil {
+               input = &GetObjectInput{}
+       }
+
+       output = &GetObjectOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetObject API operation for Amazon Simple Storage Service.
+//
+// Retrieves objects from Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObject for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchKey "NoSuchKey"
+//   The specified key does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
+func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
+       req, out := c.GetObjectRequest(input)
+       return out, req.Send()
+}
+
+// GetObjectWithContext is the same as GetObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) {
+       req, out := c.GetObjectRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
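+// Example (an illustrative sketch): fetching an object and distinguishing a
+// missing key from other failures with an awserr.Error type assertion, per
+// the error-handling guidance above. Bucket and key names are placeholders.
+//
+//    out, err := svc.GetObject(&s3.GetObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("path/to/object"),
+//    })
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
+//            fmt.Println("object does not exist:", aerr.Message())
+//            return
+//        }
+//        log.Fatal(err)
+//    }
+//    defer out.Body.Close()
+//    if body, err := ioutil.ReadAll(out.Body); err == nil { // Body is an io.ReadCloser
+//        fmt.Println("read", len(body), "bytes")
+//    }
+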
+const opGetObjectAcl = "GetObjectAcl"
+
+// GetObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetObjectAcl for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the GetObjectAcl
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetObjectAclRequest method.
+//    req, resp := client.GetObjectAclRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) {
+       op := &request.Operation{
+               Name:       opGetObjectAcl,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}/{Key+}?acl",
+       }
+
+       if input == nil {
+               input = &GetObjectAclInput{}
+       }
+
+       output = &GetObjectAclOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Returns the access control list (ACL) of an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchKey "NoSuchKey"
+//   The specified key does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
+func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
+       req, out := c.GetObjectAclRequest(input)
+       return out, req.Send()
+}
+
+// GetObjectAclWithContext is the same as GetObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) {
+       req, out := c.GetObjectAclRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetObjectTagging = "GetObjectTagging"
+
+// GetObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetObjectTagging for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the GetObjectTagging
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetObjectTaggingRequest method.
+//    req, resp := client.GetObjectTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) {
+       op := &request.Operation{
+               Name:       opGetObjectTagging,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}/{Key+}?tagging",
+       }
+
+       if input == nil {
+               input = &GetObjectTaggingInput{}
+       }
+
+       output = &GetObjectTaggingOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Returns the tag-set of an object.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
+func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
+       req, out := c.GetObjectTaggingRequest(input)
+       return out, req.Send()
+}
+
+// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) {
+       req, out := c.GetObjectTaggingRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetObjectTorrent = "GetObjectTorrent"
+
+// GetObjectTorrentRequest generates a "aws/request.Request" representing the
+// client's request for the GetObjectTorrent operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetObjectTorrent for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the GetObjectTorrent
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetObjectTorrentRequest method.
+//    req, resp := client.GetObjectTorrentRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
+func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) {
+       op := &request.Operation{
+               Name:       opGetObjectTorrent,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}/{Key+}?torrent",
+       }
+
+       if input == nil {
+               input = &GetObjectTorrentInput{}
+       }
+
+       output = &GetObjectTorrentOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetObjectTorrent API operation for Amazon Simple Storage Service.
+//
+// Returns torrent files from a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation GetObjectTorrent for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
+func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
+       req, out := c.GetObjectTorrentRequest(input)
+       return out, req.Send()
+}
+
+// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTorrent for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) {
+       req, out := c.GetObjectTorrentRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opHeadBucket = "HeadBucket"
+
+// HeadBucketRequest generates a "aws/request.Request" representing the
+// client's request for the HeadBucket operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See HeadBucket for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the HeadBucket method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the HeadBucketRequest method.
+//    req, resp := client.HeadBucketRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
+func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) {
+       op := &request.Operation{
+               Name:       opHeadBucket,
+               HTTPMethod: "HEAD",
+               HTTPPath:   "/{Bucket}",
+       }
+
+       if input == nil {
+               input = &HeadBucketInput{}
+       }
+
+       output = &HeadBucketOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// HeadBucket API operation for Amazon Simple Storage Service.
+//
+// Use this operation to determine whether a bucket exists and whether you
+// have permission to access it.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadBucket for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchBucket "NoSuchBucket"
+//   The specified bucket does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
+func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
+       req, out := c.HeadBucketRequest(input)
+       return out, req.Send()
+}
+
+// HeadBucketWithContext is the same as HeadBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) {
+       req, out := c.HeadBucketRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
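+// Example (an illustrative sketch): using HeadBucket as an existence probe,
+// mapping the NoSuchBucket error code listed above to a boolean. The bucket
+// name is a placeholder.
+//
+//    _, err := svc.HeadBucket(&s3.HeadBucketInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    exists := true
+//    if err != nil {
+//        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchBucket {
+//            exists = false
+//        } else {
+//            log.Fatal(err) // some other failure, e.g. access denied or network
+//        }
+//    }
+//    fmt.Println("bucket exists:", exists)
+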
+const opHeadObject = "HeadObject"
+
+// HeadObjectRequest generates a "aws/request.Request" representing the
+// client's request for the HeadObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See HeadObject for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the HeadObject method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the HeadObjectRequest method.
+//    req, resp := client.HeadObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) {
+       op := &request.Operation{
+               Name:       opHeadObject,
+               HTTPMethod: "HEAD",
+               HTTPPath:   "/{Bucket}/{Key+}",
+       }
+
+       if input == nil {
+               input = &HeadObjectInput{}
+       }
+
+       output = &HeadObjectOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// HeadObject API operation for Amazon Simple Storage Service.
+//
+// The HEAD operation retrieves metadata from an object without returning the
+// object itself. This operation is useful if you're only interested in an object's
+// metadata. To use HEAD, you must have READ access to the object.
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
+// for more information on returned errors.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation HeadObject for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
+func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
+       req, out := c.HeadObjectRequest(input)
+       return out, req.Send()
+}
+
+// HeadObjectWithContext is the same as HeadObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) {
+       req, out := c.HeadObjectRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
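+// Example (an illustrative sketch): bounding a HeadObject call with a
+// deadline by passing a context; a standard context.Context satisfies
+// aws.Context on Go 1.7+. Bucket and key names are placeholders.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//
+//    out, err := svc.HeadObjectWithContext(ctx, &s3.HeadObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("path/to/object"),
+//    })
+//    if err == nil {
+//        fmt.Println("content length:", aws.Int64Value(out.ContentLength))
+//    }
+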
+const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
+
+// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBucketAnalyticsConfigurations for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the
+// ListBucketAnalyticsConfigurations method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method.
+//    req, resp := client.ListBucketAnalyticsConfigurationsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
+func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) {
+       op := &request.Operation{
+               Name:       opListBucketAnalyticsConfigurations,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?analytics",
+       }
+
+       if input == nil {
+               input = &ListBucketAnalyticsConfigurationsInput{}
+       }
+
+       output = &ListBucketAnalyticsConfigurationsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service.
+//
+// Lists the analytics configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketAnalyticsConfigurations for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
+func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) {
+       req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
+       return out, req.Send()
+}
+
+// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketAnalyticsConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) {
+       req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
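+// Example (an illustrative sketch): paging through analytics configurations
+// by hand, since no Pages helper is generated for this operation. The
+// ContinuationToken/IsTruncated field names are assumed from the conventions
+// used elsewhere in this API; the bucket name is a placeholder.
+//
+//    in := &s3.ListBucketAnalyticsConfigurationsInput{Bucket: aws.String("my-bucket")}
+//    for {
+//        out, err := svc.ListBucketAnalyticsConfigurations(in)
+//        if err != nil {
+//            log.Fatal(err)
+//        }
+//        fmt.Println(out.AnalyticsConfigurationList)
+//        if !aws.BoolValue(out.IsTruncated) {
+//            break
+//        }
+//        in.ContinuationToken = out.NextContinuationToken
+//    }
+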
+const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
+
+// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketInventoryConfigurations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBucketInventoryConfigurations for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the
+// ListBucketInventoryConfigurations method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
+//    req, resp := client.ListBucketInventoryConfigurationsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) {
+       op := &request.Operation{
+               Name:       opListBucketInventoryConfigurations,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?inventory",
+       }
+
+       if input == nil {
+               input = &ListBucketInventoryConfigurationsInput{}
+       }
+
+       output = &ListBucketInventoryConfigurationsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service.
+//
+// Returns a list of inventory configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketInventoryConfigurations for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
+func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) {
+       req, out := c.ListBucketInventoryConfigurationsRequest(input)
+       return out, req.Send()
+}
+
+// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketInventoryConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) {
+       req, out := c.ListBucketInventoryConfigurationsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
+
+// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBucketMetricsConfigurations operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBucketMetricsConfigurations for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the
+// ListBucketMetricsConfigurations method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListBucketMetricsConfigurationsRequest method.
+//    req, resp := client.ListBucketMetricsConfigurationsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) {
+       op := &request.Operation{
+               Name:       opListBucketMetricsConfigurations,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?metrics",
+       }
+
+       if input == nil {
+               input = &ListBucketMetricsConfigurationsInput{}
+       }
+
+       output = &ListBucketMetricsConfigurationsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service.
+//
+// Lists the metrics configurations for the bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBucketMetricsConfigurations for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
+func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) {
+       req, out := c.ListBucketMetricsConfigurationsRequest(input)
+       return out, req.Send()
+}
+
+// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketMetricsConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) {
+       req, out := c.ListBucketMetricsConfigurationsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opListBuckets = "ListBuckets"
+
+// ListBucketsRequest generates a "aws/request.Request" representing the
+// client's request for the ListBuckets operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListBuckets for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the ListBuckets method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListBucketsRequest method.
+//    req, resp := client.ListBucketsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) {
+       op := &request.Operation{
+               Name:       opListBuckets,
+               HTTPMethod: "GET",
+               HTTPPath:   "/",
+       }
+
+       if input == nil {
+               input = &ListBucketsInput{}
+       }
+
+       output = &ListBucketsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// ListBuckets API operation for Amazon Simple Storage Service.
+//
+// Returns a list of all buckets owned by the authenticated sender of the request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListBuckets for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
+func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
+       req, out := c.ListBucketsRequest(input)
+       return out, req.Send()
+}
+
+// ListBucketsWithContext is the same as ListBuckets with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBuckets for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) {
+       req, out := c.ListBucketsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
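+// Example (an illustrative sketch): listing the caller's buckets and printing
+// their names. ListBucketsInput has no required fields.
+//
+//    out, err := svc.ListBuckets(&s3.ListBucketsInput{})
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    for _, b := range out.Buckets {
+//        fmt.Println(aws.StringValue(b.Name))
+//    }
+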
+const opListMultipartUploads = "ListMultipartUploads"
+
+// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
+// client's request for the ListMultipartUploads operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListMultipartUploads for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the
+// ListMultipartUploads method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListMultipartUploadsRequest method.
+//    req, resp := client.ListMultipartUploadsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+       op := &request.Operation{
+               Name:       opListMultipartUploads,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?uploads",
+               Paginator: &request.Paginator{
+                       InputTokens:     []string{"KeyMarker", "UploadIdMarker"},
+                       OutputTokens:    []string{"NextKeyMarker", "NextUploadIdMarker"},
+                       LimitToken:      "MaxUploads",
+                       TruncationToken: "IsTruncated",
+               },
+       }
+
+       if input == nil {
+               input = &ListMultipartUploadsInput{}
+       }
+
+       output = &ListMultipartUploadsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// ListMultipartUploads API operation for Amazon Simple Storage Service.
+//
+// This operation lists in-progress multipart uploads.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListMultipartUploads for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
+       req, out := c.ListMultipartUploadsRequest(input)
+       return out, req.Send()
+}
+
+// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListMultipartUploads for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) {
+       req, out := c.ListMultipartUploadsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListMultipartUploads method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListMultipartUploads operation.
+//    pageNum := 0
+//    err := client.ListMultipartUploadsPages(params,
+//        func(page *ListMultipartUploadsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error {
+       return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListMultipartUploadsPagesWithContext is the same as ListMultipartUploadsPages
+// except that it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error {
+       p := request.Pagination{
+               NewRequest: func() (*request.Request, error) {
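+                       // Copy the caller's input so pagination never mutates
+                       // the original struct between page requests.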
+                       var inCpy *ListMultipartUploadsInput
+                       if input != nil {
+                               tmp := *input
+                               inCpy = &tmp
+                       }
+                       req, _ := c.ListMultipartUploadsRequest(inCpy)
+                       req.SetContext(ctx)
+                       req.ApplyOptions(opts...)
+                       return req, nil
+               },
+       }
+
+       cont := true
+       for p.Next() && cont {
+               cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage())
+       }
+       return p.Err()
+}
+
+const opListObjectVersions = "ListObjectVersions"
+
+// ListObjectVersionsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjectVersions operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListObjectVersions for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the ListObjectVersions
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListObjectVersionsRequest method.
+//    req, resp := client.ListObjectVersionsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
+       op := &request.Operation{
+               Name:       opListObjectVersions,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?versions",
+               Paginator: &request.Paginator{
+                       InputTokens:     []string{"KeyMarker", "VersionIdMarker"},
+                       OutputTokens:    []string{"NextKeyMarker", "NextVersionIdMarker"},
+                       LimitToken:      "MaxKeys",
+                       TruncationToken: "IsTruncated",
+               },
+       }
+
+       if input == nil {
+               input = &ListObjectVersionsInput{}
+       }
+
+       output = &ListObjectVersionsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// ListObjectVersions API operation for Amazon Simple Storage Service.
+//
+// Returns metadata about all of the versions of objects in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectVersions for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
+       req, out := c.ListObjectVersionsRequest(input)
+       return out, req.Send()
+}
+
+// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectVersions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) {
+       req, out := c.ListObjectVersionsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjectVersions method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListObjectVersions operation.
+//    pageNum := 0
+//    err := client.ListObjectVersionsPages(params,
+//        func(page *ListObjectVersionsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error {
+       return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectVersionsPagesWithContext is the same as ListObjectVersionsPages
+// except that it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error {
+       p := request.Pagination{
+               NewRequest: func() (*request.Request, error) {
+                       var inCpy *ListObjectVersionsInput
+                       if input != nil {
+                               tmp := *input
+                               inCpy = &tmp
+                       }
+                       req, _ := c.ListObjectVersionsRequest(inCpy)
+                       req.SetContext(ctx)
+                       req.ApplyOptions(opts...)
+                       return req, nil
+               },
+       }
+
+       cont := true
+       for p.Next() && cont {
+               cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage())
+       }
+       return p.Err()
+}
+
+const opListObjects = "ListObjects"
+
+// ListObjectsRequest generates a "aws/request.Request" representing the
+// client's request for the ListObjects operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListObjects for usage and error information.
+//
+// Use this method to create a request object when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the ListObjects method
+// directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListObjectsRequest method.
+//    req, resp := client.ListObjectsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
+       op := &request.Operation{
+               Name:       opListObjects,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}",
+               Paginator: &request.Paginator{
+                       InputTokens:     []string{"Marker"},
+                       OutputTokens:    []string{"NextMarker || Contents[-1].Key"},
+                       LimitToken:      "MaxKeys",
+                       TruncationToken: "IsTruncated",
+               },
+       }
+
+       if input == nil {
+               input = &ListObjectsInput{}
+       }
+
+       output = &ListObjectsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// ListObjects API operation for Amazon Simple Storage Service.
+//
+// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjects for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchBucket "NoSuchBucket"
+//   The specified bucket does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
+       req, out := c.ListObjectsRequest(input)
+       return out, req.Send()
+}
+
+// ListObjectsWithContext is the same as ListObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) {
+       req, out := c.ListObjectsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+// ListObjectsPages iterates over the pages of a ListObjects operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjects method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListObjects operation.
+//    pageNum := 0
+//    err := client.ListObjectsPages(params,
+//        func(page *ListObjectsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum <= 3
+//        })
+//
+func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error {
+       return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsPagesWithContext is the same as ListObjectsPages except that
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error {
+       p := request.Pagination{
+               NewRequest: func() (*request.Request, error) {
+                       var inCpy *ListObjectsInput
+                       if input != nil {
+                               tmp := *input
+                               inCpy = &tmp
+                       }
+                       req, _ := c.ListObjectsRequest(inCpy)
+                       req.SetContext(ctx)
+                       req.ApplyOptions(opts...)
+                       return req, nil
+               },
+       }
+
+       cont := true
+       for p.Next() && cont {
+               cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage())
+       }
+       return p.Err()
+}
+
+const opListObjectsV2 = "ListObjectsV2"
+
+// ListObjectsV2Request generates a "aws/request.Request" representing the
+// client's request for the ListObjectsV2 operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListObjectsV2 for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the ListObjectsV2 method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListObjectsV2Request method.
+//    req, resp := client.ListObjectsV2Request(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
+       op := &request.Operation{
+               Name:       opListObjectsV2,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}?list-type=2",
+               Paginator: &request.Paginator{
+                       InputTokens:     []string{"ContinuationToken"},
+                       OutputTokens:    []string{"NextContinuationToken"},
+                       LimitToken:      "MaxKeys",
+                       TruncationToken: "",
+               },
+       }
+
+       if input == nil {
+               input = &ListObjectsV2Input{}
+       }
+
+       output = &ListObjectsV2Output{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// ListObjectsV2 API operation for Amazon Simple Storage Service.
+//
+// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket. Note: ListObjectsV2 is the revised List Objects API; we recommend
+// using it for new application development.
+//
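+// A minimal illustrative sketch; bucket and prefix are assumed placeholders:
+//
+//    resp, err := client.ListObjectsV2(&s3.ListObjectsV2Input{
+//        Bucket: aws.String("my-bucket"),  // assumed
+//        Prefix: aws.String("logs/2017/"), // assumed
+//    })
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//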
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListObjectsV2 for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchBucket "NoSuchBucket"
+//   The specified bucket does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
+func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
+       req, out := c.ListObjectsV2Request(input)
+       return out, req.Send()
+}
+
+// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectsV2 for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) {
+       req, out := c.ListObjectsV2Request(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListObjectsV2 method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListObjectsV2 operation.
+//    pageNum := 0
+//    err := client.ListObjectsV2Pages(params,
+//        func(page *ListObjectsV2Output, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum < 3
+//        })
+//
+func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error {
+       return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsV2PagesWithContext is the same as ListObjectsV2Pages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error {
+       p := request.Pagination{
+               NewRequest: func() (*request.Request, error) {
+                       var inCpy *ListObjectsV2Input
+                       if input != nil {
+                               tmp := *input
+                               inCpy = &tmp
+                       }
+                       req, _ := c.ListObjectsV2Request(inCpy)
+                       req.SetContext(ctx)
+                       req.ApplyOptions(opts...)
+                       return req, nil
+               },
+       }
+
+       cont := true
+       for p.Next() && cont {
+               cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage())
+       }
+       return p.Err()
+}
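+
+// Illustrative sketch (not generated documentation): collecting all keys with
+// ListObjectsV2Pages, letting the SDK advance the ContinuationToken. The
+// bucket name is an assumed placeholder.
+//
+//    var keys []string
+//    err := client.ListObjectsV2Pages(&s3.ListObjectsV2Input{
+//        Bucket: aws.String("my-bucket"), // assumed bucket name
+//    }, func(page *s3.ListObjectsV2Output, lastPage bool) bool {
+//        for _, obj := range page.Contents {
+//            keys = append(keys, *obj.Key)
+//        }
+//        return true // keep paging until the last page
+//    })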
+
+const opListParts = "ListParts"
+
+// ListPartsRequest generates a "aws/request.Request" representing the
+// client's request for the ListParts operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See ListParts for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the ListParts method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the ListPartsRequest method.
+//    req, resp := client.ListPartsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
+func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
+       op := &request.Operation{
+               Name:       opListParts,
+               HTTPMethod: "GET",
+               HTTPPath:   "/{Bucket}/{Key+}",
+               Paginator: &request.Paginator{
+                       InputTokens:     []string{"PartNumberMarker"},
+                       OutputTokens:    []string{"NextPartNumberMarker"},
+                       LimitToken:      "MaxParts",
+                       TruncationToken: "IsTruncated",
+               },
+       }
+
+       if input == nil {
+               input = &ListPartsInput{}
+       }
+
+       output = &ListPartsOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// ListParts API operation for Amazon Simple Storage Service.
+//
+// Lists the parts that have been uploaded for a specific multipart upload.
+//
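+// A minimal illustrative sketch; bucket, key, and upload ID are assumed
+// placeholders (the upload ID comes from CreateMultipartUpload):
+//
+//    resp, err := client.ListParts(&s3.ListPartsInput{
+//        Bucket:   aws.String("my-bucket"),         // assumed
+//        Key:      aws.String("large-object.bin"),  // assumed
+//        UploadId: aws.String("example-upload-id"), // assumed
+//    })
+//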
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation ListParts for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
+func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
+       req, out := c.ListPartsRequest(input)
+       return out, req.Send()
+}
+
+// ListPartsWithContext is the same as ListParts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListParts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) {
+       req, out := c.ListPartsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+// ListPartsPages iterates over the pages of a ListParts operation,
+// calling the "fn" function with the response data for each page. To stop
+// iterating, return false from the fn function.
+//
+// See ListParts method for more information on how to use this operation.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over at most 3 pages of a ListParts operation.
+//    pageNum := 0
+//    err := client.ListPartsPages(params,
+//        func(page *ListPartsOutput, lastPage bool) bool {
+//            pageNum++
+//            fmt.Println(page)
+//            return pageNum < 3
+//        })
+//
+func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error {
+       return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListPartsPagesWithContext is the same as ListPartsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error {
+       p := request.Pagination{
+               NewRequest: func() (*request.Request, error) {
+                       var inCpy *ListPartsInput
+                       if input != nil {
+                               tmp := *input
+                               inCpy = &tmp
+                       }
+                       req, _ := c.ListPartsRequest(inCpy)
+                       req.SetContext(ctx)
+                       req.ApplyOptions(opts...)
+                       return req, nil
+               },
+       }
+
+       cont := true
+       for p.Next() && cont {
+               cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage())
+       }
+       return p.Err()
+}
+
+const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
+
+// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAccelerateConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketAccelerateConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketAccelerateConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketAccelerateConfigurationRequest method.
+//    req, resp := client.PutBucketAccelerateConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
+func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketAccelerateConfiguration,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?accelerate",
+       }
+
+       if input == nil {
+               input = &PutBucketAccelerateConfigurationInput{}
+       }
+
+       output = &PutBucketAccelerateConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets the accelerate configuration of an existing bucket.
+//
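+// A minimal illustrative sketch; the bucket name is an assumed placeholder
+// and Status accepts "Enabled" or "Suspended":
+//
+//    _, err := client.PutBucketAccelerateConfiguration(&s3.PutBucketAccelerateConfigurationInput{
+//        Bucket: aws.String("my-bucket"), // assumed bucket name
+//        AccelerateConfiguration: &s3.AccelerateConfiguration{
+//            Status: aws.String("Enabled"),
+//        },
+//    })
+//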
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAccelerateConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
+func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
+       req, out := c.PutBucketAccelerateConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAccelerateConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) {
+       req, out := c.PutBucketAccelerateConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketAcl = "PutBucketAcl"
+
+// PutBucketAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketAcl for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketAcl method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketAclRequest method.
+//    req, resp := client.PutBucketAclRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketAcl,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?acl",
+       }
+
+       if input == nil {
+               input = &PutBucketAclInput{}
+       }
+
+       output = &PutBucketAclOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketAcl API operation for Amazon Simple Storage Service.
+//
+// Sets the permissions on a bucket using access control lists (ACL).
+//
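+// A minimal illustrative sketch using a canned ACL; the bucket name and ACL
+// value are assumed placeholders:
+//
+//    _, err := client.PutBucketAcl(&s3.PutBucketAclInput{
+//        Bucket: aws.String("my-bucket"), // assumed bucket name
+//        ACL:    aws.String("private"),   // canned ACL
+//    })
+//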
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAcl for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
+func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
+       req, out := c.PutBucketAclRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) {
+       req, out := c.PutBucketAclRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
+
+// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketAnalyticsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketAnalyticsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketAnalyticsConfigurationRequest method.
+//    req, resp := client.PutBucketAnalyticsConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
+func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketAnalyticsConfiguration,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?analytics",
+       }
+
+       if input == nil {
+               input = &PutBucketAnalyticsConfigurationInput{}
+       }
+
+       output = &PutBucketAnalyticsConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets an analytics configuration for the bucket (specified by the analytics
+// configuration ID).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketAnalyticsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
+func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) {
+       req, out := c.PutBucketAnalyticsConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) {
+       req, out := c.PutBucketAnalyticsConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketCors = "PutBucketCors"
+
+// PutBucketCorsRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketCors operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketCors for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketCors method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketCorsRequest method.
+//    req, resp := client.PutBucketCorsRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
+func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketCors,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?cors",
+       }
+
+       if input == nil {
+               input = &PutBucketCorsInput{}
+       }
+
+       output = &PutBucketCorsOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketCors API operation for Amazon Simple Storage Service.
+//
+// Sets the cors configuration for a bucket.
+//
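+// A minimal illustrative sketch with a single rule; all values are assumed
+// placeholders:
+//
+//    _, err := client.PutBucketCors(&s3.PutBucketCorsInput{
+//        Bucket: aws.String("my-bucket"), // assumed bucket name
+//        CORSConfiguration: &s3.CORSConfiguration{
+//            CORSRules: []*s3.CORSRule{{
+//                AllowedMethods: []*string{aws.String("GET")},
+//                AllowedOrigins: []*string{aws.String("*")},
+//            }},
+//        },
+//    })
+//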
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketCors for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
+func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
+       req, out := c.PutBucketCorsRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketCorsWithContext is the same as PutBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) {
+       req, out := c.PutBucketCorsRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
+
+// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketInventoryConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketInventoryConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketInventoryConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketInventoryConfigurationRequest method.
+//    req, resp := client.PutBucketInventoryConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
+func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketInventoryConfiguration,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?inventory",
+       }
+
+       if input == nil {
+               input = &PutBucketInventoryConfigurationInput{}
+       }
+
+       output = &PutBucketInventoryConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service.
+//
+// Adds an inventory configuration (identified by the inventory ID) to the
+// bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketInventoryConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
+func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) {
+       req, out := c.PutBucketInventoryConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) {
+       req, out := c.PutBucketInventoryConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketLifecycle = "PutBucketLifecycle"
+
+// PutBucketLifecycleRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLifecycle operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketLifecycle for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketLifecycle method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketLifecycleRequest method.
+//    req, resp := client.PutBucketLifecycleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
+       if c.Client.Config.Logger != nil {
+               c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
+       }
+       op := &request.Operation{
+               Name:       opPutBucketLifecycle,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?lifecycle",
+       }
+
+       if input == nil {
+               input = &PutBucketLifecycleInput{}
+       }
+
+       output = &PutBucketLifecycleOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketLifecycle API operation for Amazon Simple Storage Service.
+//
+// Deprecated; see the PutBucketLifecycleConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycle for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+       req, out := c.PutBucketLifecycleRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) {
+       req, out := c.PutBucketLifecycleRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
+
+// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketLifecycleConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketLifecycleConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
+//    req, resp := client.PutBucketLifecycleConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketLifecycleConfiguration,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?lifecycle",
+       }
+
+       if input == nil {
+               input = &PutBucketLifecycleConfigurationInput{}
+       }
+
+       output = &PutBucketLifecycleConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets the lifecycle configuration for your bucket. If a lifecycle configuration
+// already exists, this operation replaces it.
+//
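+// A minimal illustrative sketch with one expiration rule; the bucket name,
+// rule ID, prefix, and 30-day window are assumed placeholders:
+//
+//    _, err := client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("my-bucket"), // assumed bucket name
+//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//            Rules: []*s3.LifecycleRule{{
+//                ID:         aws.String("expire-old-logs"),
+//                Prefix:     aws.String("logs/"),
+//                Status:     aws.String("Enabled"),
+//                Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+//            }},
+//        },
+//    })
+//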
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLifecycleConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
+       req, out := c.PutBucketLifecycleConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
+       req, out := c.PutBucketLifecycleConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketLogging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketLogging for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketLogging method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketLoggingRequest method.
+//    req, resp := client.PutBucketLoggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketLogging,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?logging",
+       }
+
+       if input == nil {
+               input = &PutBucketLoggingInput{}
+       }
+
+       output = &PutBucketLoggingOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketLogging API operation for Amazon Simple Storage Service.
+//
+// Sets the logging parameters for a bucket and specifies permissions for who
+// can view and modify the logging parameters. To set the logging status of
+// a bucket, you must be the bucket owner.
+//
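+// A minimal illustrative sketch; both bucket names and the prefix are assumed
+// placeholders:
+//
+//    _, err := client.PutBucketLogging(&s3.PutBucketLoggingInput{
+//        Bucket: aws.String("my-bucket"), // assumed source bucket
+//        BucketLoggingStatus: &s3.BucketLoggingStatus{
+//            LoggingEnabled: &s3.LoggingEnabled{
+//                TargetBucket: aws.String("my-log-bucket"), // assumed
+//                TargetPrefix: aws.String("access-logs/"),  // assumed
+//            },
+//        },
+//    })
+//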
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketLogging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+       req, out := c.PutBucketLoggingRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) {
+       req, out := c.PutBucketLoggingRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
+
+// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketMetricsConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketMetricsConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketMetricsConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketMetricsConfigurationRequest method.
+//    req, resp := client.PutBucketMetricsConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketMetricsConfiguration,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?metrics",
+       }
+
+       if input == nil {
+               input = &PutBucketMetricsConfigurationInput{}
+       }
+
+       output = &PutBucketMetricsConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service.
+//
+// Sets a metrics configuration (specified by the metrics configuration ID)
+// for the bucket.
+//
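+// A minimal illustrative sketch; the bucket name and configuration ID are
+// assumed placeholders (the top-level Id must match the configuration's Id):
+//
+//    _, err := client.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//        Bucket: aws.String("my-bucket"),     // assumed bucket name
+//        Id:     aws.String("entire-bucket"), // assumed configuration ID
+//        MetricsConfiguration: &s3.MetricsConfiguration{
+//            Id: aws.String("entire-bucket"),
+//        },
+//    })
+//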
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketMetricsConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
+func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
+       req, out := c.PutBucketMetricsConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) {
+       req, out := c.PutBucketMetricsConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketNotification = "PutBucketNotification"
+
+// PutBucketNotificationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketNotification operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketNotification for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketNotification method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketNotificationRequest method.
+//    req, resp := client.PutBucketNotificationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
+       if c.Client.Config.Logger != nil {
+               c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
+       }
+       op := &request.Operation{
+               Name:       opPutBucketNotification,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?notification",
+       }
+
+       if input == nil {
+               input = &PutBucketNotificationInput{}
+       }
+
+       output = &PutBucketNotificationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketNotification API operation for Amazon Simple Storage Service.
+//
+// Deprecated; see the PutBucketNotificationConfiguration operation.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotification for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
+func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
+       req, out := c.PutBucketNotificationRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) {
+       req, out := c.PutBucketNotificationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"
+
+// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketNotificationConfiguration operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketNotificationConfiguration for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketNotificationConfiguration method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketNotificationConfigurationRequest method.
+//    req, resp := client.PutBucketNotificationConfigurationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketNotificationConfiguration,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?notification",
+       }
+
+       if input == nil {
+               input = &PutBucketNotificationConfigurationInput{}
+       }
+
+       output = &PutBucketNotificationConfigurationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service.
+//
+// Enables notifications of specified events for a bucket.
+//
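+// A minimal illustrative sketch routing object-created events to an SNS
+// topic; the bucket name and topic ARN are assumed placeholders:
+//
+//    _, err := client.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//        Bucket: aws.String("my-bucket"), // assumed bucket name
+//        NotificationConfiguration: &s3.NotificationConfiguration{
+//            TopicConfigurations: []*s3.TopicConfiguration{{
+//                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//                TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:my-topic"), // assumed
+//            }},
+//        },
+//    })
+//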
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketNotificationConfiguration for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
+func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
+       req, out := c.PutBucketNotificationConfigurationRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotificationConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) {
+       req, out := c.PutBucketNotificationConfigurationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketPolicy = "PutBucketPolicy"
+
+// PutBucketPolicyRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketPolicy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketPolicy for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketPolicy method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketPolicyRequest method.
+//    req, resp := client.PutBucketPolicyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
+func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketPolicy,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?policy",
+       }
+
+       if input == nil {
+               input = &PutBucketPolicyInput{}
+       }
+
+       output = &PutBucketPolicyOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketPolicy API operation for Amazon Simple Storage Service.
+//
+// Replaces a policy on a bucket. If the bucket already has a policy, the one
+// in this request completely replaces it.
+//
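+// A minimal illustrative sketch; the bucket name and policy document are
+// assumed placeholders (the policy must be valid JSON):
+//
+//    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow",
+//        "Principal":"*","Action":"s3:GetObject",
+//        "Resource":"arn:aws:s3:::my-bucket/*"}]}` // assumed policy
+//    _, err := client.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//        Bucket: aws.String("my-bucket"), // assumed bucket name
+//        Policy: aws.String(policy),
+//    })
+//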
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketPolicy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
+func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
+       req, out := c.PutBucketPolicyRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) {
+       req, out := c.PutBucketPolicyRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketReplication = "PutBucketReplication"
+
+// PutBucketReplicationRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketReplication operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketReplication for usage and error information.
+//
+// Use this method when you want to inject custom logic into the request's
+// lifecycle using a custom handler, or when you want to access properties on
+// the request object before or after sending the request. If
+// you just want the service response, call the PutBucketReplication method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketReplicationRequest method.
+//    req, resp := client.PutBucketReplicationRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
+func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketReplication,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?replication",
+       }
+
+       if input == nil {
+               input = &PutBucketReplicationInput{}
+       }
+
+       output = &PutBucketReplicationOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketReplication API operation for Amazon Simple Storage Service.
+//
+// Creates a new replication configuration (or replaces an existing one, if
+// present).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketReplication for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
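+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; the role ARN
+// and bucket names are placeholders):
+//
+//    _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//        Bucket: aws.String("example-source-bucket"),
+//        ReplicationConfiguration: &s3.ReplicationConfiguration{
+//            Role: aws.String("arn:aws:iam::123456789012:role/example-role"),
+//            Rules: []*s3.ReplicationRule{{
+//                Prefix: aws.String(""),
+//                Status: aws.String("Enabled"),
+//                Destination: &s3.Destination{
+//                    Bucket: aws.String("arn:aws:s3:::example-destination-bucket"),
+//                },
+//            }},
+//        },
+//    })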
+func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
+       req, out := c.PutBucketReplicationRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) {
+       req, out := c.PutBucketReplicationRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketRequestPayment = "PutBucketRequestPayment"
+
+// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketRequestPayment operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketRequestPayment for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the PutBucketRequestPayment
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketRequestPaymentRequest method.
+//    req, resp := client.PutBucketRequestPaymentRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
+func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketRequestPayment,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?requestPayment",
+       }
+
+       if input == nil {
+               input = &PutBucketRequestPaymentInput{}
+       }
+
+       output = &PutBucketRequestPaymentOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketRequestPayment API operation for Amazon Simple Storage Service.
+//
+// Sets the request payment configuration for a bucket. By default, the bucket
+// owner pays for downloads from the bucket. This configuration parameter allows
+// the bucket owner (and only the bucket owner) to specify that the person
+// requesting the download will be charged for it. Documentation on Requester
+// Pays buckets can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketRequestPayment for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
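+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; the bucket
+// name is a placeholder):
+//
+//    _, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+//        Bucket: aws.String("example-bucket"),
+//        RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//            Payer: aws.String("Requester"),
+//        },
+//    })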
+func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
+       req, out := c.PutBucketRequestPaymentRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketRequestPayment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) {
+       req, out := c.PutBucketRequestPaymentRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketTagging = "PutBucketTagging"
+
+// PutBucketTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketTagging for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the PutBucketTagging
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketTaggingRequest method.
+//    req, resp := client.PutBucketTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
+func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketTagging,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?tagging",
+       }
+
+       if input == nil {
+               input = &PutBucketTaggingInput{}
+       }
+
+       output = &PutBucketTaggingOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketTagging API operation for Amazon Simple Storage Service.
+//
+// Sets the tags for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
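+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; the bucket
+// name and tag values are placeholders):
+//
+//    _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//        },
+//    })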
+func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
+       req, out := c.PutBucketTaggingRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) {
+       req, out := c.PutBucketTaggingRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketVersioning = "PutBucketVersioning"
+
+// PutBucketVersioningRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketVersioning operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketVersioning for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the PutBucketVersioning
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketVersioningRequest method.
+//    req, resp := client.PutBucketVersioningRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketVersioning,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?versioning",
+       }
+
+       if input == nil {
+               input = &PutBucketVersioningInput{}
+       }
+
+       output = &PutBucketVersioningOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketVersioning API operation for Amazon Simple Storage Service.
+//
+// Sets the versioning state of an existing bucket. To set the versioning state,
+// you must be the bucket owner.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketVersioning for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
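+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; the bucket
+// name is a placeholder):
+//
+//    _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//        Bucket: aws.String("example-bucket"),
+//        VersioningConfiguration: &s3.VersioningConfiguration{
+//            Status: aws.String("Enabled"),
+//        },
+//    })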
+func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
+       req, out := c.PutBucketVersioningRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) {
+       req, out := c.PutBucketVersioningRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutBucketWebsite = "PutBucketWebsite"
+
+// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
+// client's request for the PutBucketWebsite operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutBucketWebsite for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the PutBucketWebsite
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutBucketWebsiteRequest method.
+//    req, resp := client.PutBucketWebsiteRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
+func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
+       op := &request.Operation{
+               Name:       opPutBucketWebsite,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}?website",
+       }
+
+       if input == nil {
+               input = &PutBucketWebsiteInput{}
+       }
+
+       output = &PutBucketWebsiteOutput{}
+       req = c.newRequest(op, input, output)
+       req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+       req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+       return
+}
+
+// PutBucketWebsite API operation for Amazon Simple Storage Service.
+//
+// Sets the website configuration for a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutBucketWebsite for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
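+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; the bucket
+// and document names are placeholders):
+//
+//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//        Bucket: aws.String("example-bucket"),
+//        WebsiteConfiguration: &s3.WebsiteConfiguration{
+//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+//        },
+//    })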
+func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
+       req, out := c.PutBucketWebsiteRequest(input)
+       return out, req.Send()
+}
+
+// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) {
+       req, out := c.PutBucketWebsiteRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutObject = "PutObject"
+
+// PutObjectRequest generates a "aws/request.Request" representing the
+// client's request for the PutObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutObject for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the PutObject
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutObjectRequest method.
+//    req, resp := client.PutObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
+       op := &request.Operation{
+               Name:       opPutObject,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}/{Key+}",
+       }
+
+       if input == nil {
+               input = &PutObjectInput{}
+       }
+
+       output = &PutObjectOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// PutObject API operation for Amazon Simple Storage Service.
+//
+// Adds an object to a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObject for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
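+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; the bucket
+// and key names are placeholders, and the bytes package is assumed imported):
+//
+//    _, err := svc.PutObject(&s3.PutObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-key"),
+//        Body:   bytes.NewReader([]byte("hello world")),
+//    })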
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+       req, out := c.PutObjectRequest(input)
+       return out, req.Send()
+}
+
+// PutObjectWithContext is the same as PutObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
+       req, out := c.PutObjectRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutObjectAcl = "PutObjectAcl"
+
+// PutObjectAclRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectAcl operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutObjectAcl for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the PutObjectAcl
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutObjectAclRequest method.
+//    req, resp := client.PutObjectAclRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
+func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
+       op := &request.Operation{
+               Name:       opPutObjectAcl,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}/{Key+}?acl",
+       }
+
+       if input == nil {
+               input = &PutObjectAclInput{}
+       }
+
+       output = &PutObjectAclOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// PutObjectAcl API operation for Amazon Simple Storage Service.
+//
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectAcl for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeNoSuchKey "NoSuchKey"
+//   The specified key does not exist.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
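+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; the bucket,
+// key, and canned ACL values are placeholders):
+//
+//    _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-key"),
+//        ACL:    aws.String("public-read"),
+//    })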
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+       req, out := c.PutObjectAclRequest(input)
+       return out, req.Send()
+}
+
+// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
+       req, out := c.PutObjectAclRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opPutObjectTagging = "PutObjectTagging"
+
+// PutObjectTaggingRequest generates a "aws/request.Request" representing the
+// client's request for the PutObjectTagging operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See PutObjectTagging for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the PutObjectTagging
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the PutObjectTaggingRequest method.
+//    req, resp := client.PutObjectTaggingRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
+func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) {
+       op := &request.Operation{
+               Name:       opPutObjectTagging,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}/{Key+}?tagging",
+       }
+
+       if input == nil {
+               input = &PutObjectTaggingInput{}
+       }
+
+       output = &PutObjectTaggingOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// PutObjectTagging API operation for Amazon Simple Storage Service.
+//
+// Sets the supplied tag-set on an object that already exists in a bucket.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation PutObjectTagging for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
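+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; the bucket,
+// key, and tag values are placeholders):
+//
+//    _, err := svc.PutObjectTagging(&s3.PutObjectTaggingInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-key"),
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{{Key: aws.String("owner"), Value: aws.String("ops")}},
+//        },
+//    })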
+func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
+       req, out := c.PutObjectTaggingRequest(input)
+       return out, req.Send()
+}
+
+// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) {
+       req, out := c.PutObjectTaggingRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a "aws/request.Request" representing the
+// client's request for the RestoreObject operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See RestoreObject for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the RestoreObject
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the RestoreObjectRequest method.
+//    req, resp := client.RestoreObjectRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+       op := &request.Operation{
+               Name:       opRestoreObject,
+               HTTPMethod: "POST",
+               HTTPPath:   "/{Bucket}/{Key+}?restore",
+       }
+
+       if input == nil {
+               input = &RestoreObjectInput{}
+       }
+
+       output = &RestoreObjectOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// RestoreObject API operation for Amazon Simple Storage Service.
+//
+// Restores an archived copy of an object back into Amazon S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation RestoreObject for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError"
+//   This operation is not allowed against this storage tier.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
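+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; the bucket,
+// key, and restore window are placeholders):
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket: aws.String("example-bucket"),
+//        Key:    aws.String("example-archived-key"),
+//        RestoreRequest: &s3.RestoreRequest{
+//            Days: aws.Int64(2),
+//        },
+//    })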
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+       req, out := c.RestoreObjectRequest(input)
+       return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
+       req, out := c.RestoreObjectRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPart operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See UploadPart for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the UploadPart
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UploadPartRequest method.
+//    req, resp := client.UploadPartRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+       op := &request.Operation{
+               Name:       opUploadPart,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}/{Key+}",
+       }
+
+       if input == nil {
+               input = &UploadPartInput{}
+       }
+
+       output = &UploadPartOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// UploadPart API operation for Amazon Simple Storage Service.
+//
+// Uploads a part in a multipart upload.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload to stop being charged
+// for storage of the uploaded parts. Amazon S3 frees the storage for the parts
+// and stops charging you for it only after you complete or abort the upload.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPart for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
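+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; uploadID is
+// assumed to come from a prior CreateMultipartUpload call, and partData is a
+// placeholder []byte):
+//
+//    part, err := svc.UploadPart(&s3.UploadPartInput{
+//        Bucket:     aws.String("example-bucket"),
+//        Key:        aws.String("example-key"),
+//        UploadId:   aws.String(uploadID),
+//        PartNumber: aws.Int64(1),
+//        Body:       bytes.NewReader(partData),
+//    })
+//
+// The ETag in the returned UploadPartOutput is needed later when completing
+// the multipart upload.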
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+       req, out := c.UploadPartRequest(input)
+       return out, req.Send()
+}
+
+// UploadPartWithContext is the same as UploadPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+       req, out := c.UploadPartRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a "aws/request.Request" representing the
+// client's request for the UploadPartCopy operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See UploadPartCopy for usage and error information.
+//
+// Create a request object using this method when you want to inject custom
+// logic into the request's lifecycle using a custom handler, or when you want
+// to access properties on the request object before or after sending the
+// request. If you just want the service response, call the UploadPartCopy
+// method directly instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the UploadPartCopyRequest method.
+//    req, resp := client.UploadPartCopyRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+       op := &request.Operation{
+               Name:       opUploadPartCopy,
+               HTTPMethod: "PUT",
+               HTTPPath:   "/{Bucket}/{Key+}",
+       }
+
+       if input == nil {
+               input = &UploadPartCopyInput{}
+       }
+
+       output = &UploadPartCopyOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// UploadPartCopy API operation for Amazon Simple Storage Service.
+//
+// Uploads a part by copying data from an existing object as the data source.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for Amazon Simple Storage Service's
+// API operation UploadPartCopy for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
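+//
+// A minimal usage sketch (svc is assumed to be an *s3.S3 client; uploadID is
+// assumed to come from a prior CreateMultipartUpload call, and the bucket and
+// key names are placeholders):
+//
+//    _, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
+//        Bucket:     aws.String("example-dest-bucket"),
+//        Key:        aws.String("example-dest-key"),
+//        UploadId:   aws.String(uploadID),
+//        PartNumber: aws.Int64(1),
+//        CopySource: aws.String("example-src-bucket/example-src-key"),
+//    })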
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+       req, out := c.UploadPartCopyRequest(input)
+       return out, req.Send()
+}
+
+// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPartCopy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil, a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) {
+       req, out := c.UploadPartCopyRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+// Specifies the number of days after the initiation of an incomplete multipart
+// upload that Lifecycle waits before permanently removing all parts of the upload.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload
+type AbortIncompleteMultipartUpload struct {
+       _ struct{} `type:"structure"`
+
+       // Indicates the number of days that must pass since initiation for Lifecycle
+       // to abort an Incomplete Multipart Upload.
+       DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AbortIncompleteMultipartUpload) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortIncompleteMultipartUpload) GoString() string {
+       return s.String()
+}
+
+// SetDaysAfterInitiation sets the DaysAfterInitiation field's value.
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload {
+       s.DaysAfterInitiation = &v
+       return s
+}
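+
+// A brief illustration of the fluent setter pattern generated for the types
+// in this file (the seven-day value is only a placeholder):
+//
+//    abort := (&s3.AbortIncompleteMultipartUpload{}).SetDaysAfterInitiation(7)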
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest
+type AbortMultipartUploadInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // UploadId is a required field
+       UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AbortMultipartUploadInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+       if s.UploadId == nil {
+               invalidParams.Add(request.NewErrParamRequired("UploadId"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput {
+       s.Key = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput {
+       s.UploadId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput
+type AbortMultipartUploadOutput struct {
+       _ struct{} `type:"structure"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadOutput) GoString() string {
+       return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration
+type AccelerateConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // The accelerate configuration of the bucket.
+       Status *string `type:"string" enum:"BucketAccelerateStatus"`
+}
+
+// String returns the string representation
+func (s AccelerateConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccelerateConfiguration) GoString() string {
+       return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration {
+       s.Status = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy
+type AccessControlPolicy struct {
+       _ struct{} `type:"structure"`
+
+       // A list of grants.
+       Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+       Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s AccessControlPolicy) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessControlPolicy) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessControlPolicy) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"}
+       if s.Grants != nil {
+               for i, v := range s.Grants {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetGrants sets the Grants field's value.
+func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy {
+       s.Grants = v
+       return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy {
+       s.Owner = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator
+type AnalyticsAndOperator struct {
+       _ struct{} `type:"structure"`
+
+       // The prefix to use when evaluating an AND predicate.
+       Prefix *string `type:"string"`
+
+       // The list of tags to use when evaluating an AND predicate.
+       Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsAndOperator) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsAndOperator) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsAndOperator) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"}
+       if s.Tags != nil {
+               for i, v := range s.Tags {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator {
+       s.Prefix = &v
+       return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator {
+       s.Tags = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration
+type AnalyticsConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // The filter used to describe a set of objects for analysis. A filter must
+       // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator).
+       // If no filter is provided, all objects will be considered in any analysis.
+       Filter *AnalyticsFilter `type:"structure"`
+
+       // The identifier used to represent an analytics configuration.
+       //
+       // Id is a required field
+       Id *string `type:"string" required:"true"`
+
+       // If present, it indicates that data related to access patterns will be collected
+       // and made available to analyze the tradeoffs between different storage classes.
+       //
+       // StorageClassAnalysis is a required field
+       StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"}
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+       if s.StorageClassAnalysis == nil {
+               invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis"))
+       }
+       if s.Filter != nil {
+               if err := s.Filter.Validate(); err != nil {
+                       invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+               }
+       }
+       if s.StorageClassAnalysis != nil {
+               if err := s.StorageClassAnalysis.Validate(); err != nil {
+                       invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration {
+       s.Filter = v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration {
+       s.Id = &v
+       return s
+}
+
+// SetStorageClassAnalysis sets the StorageClassAnalysis field's value.
+func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration {
+       s.StorageClassAnalysis = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination
+type AnalyticsExportDestination struct {
+       _ struct{} `type:"structure"`
+
+       // A destination signifying output to an S3 bucket.
+       //
+       // S3BucketDestination is a required field
+       S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s AnalyticsExportDestination) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsExportDestination) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsExportDestination) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"}
+       if s.S3BucketDestination == nil {
+               invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
+       }
+       if s.S3BucketDestination != nil {
+               if err := s.S3BucketDestination.Validate(); err != nil {
+                       invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetS3BucketDestination sets the S3BucketDestination field's value.
+func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination {
+       s.S3BucketDestination = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter
+type AnalyticsFilter struct {
+       _ struct{} `type:"structure"`
+
+       // A conjunction (logical AND) of predicates, which is used in evaluating an
+       // analytics filter. The operator must have at least two predicates.
+       And *AnalyticsAndOperator `type:"structure"`
+
+       // The prefix to use when evaluating an analytics filter.
+       Prefix *string `type:"string"`
+
+       // The tag to use when evaluating an analytics filter.
+       Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s AnalyticsFilter) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsFilter) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsFilter) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"}
+       if s.And != nil {
+               if err := s.And.Validate(); err != nil {
+                       invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+               }
+       }
+       if s.Tag != nil {
+               if err := s.Tag.Validate(); err != nil {
+                       invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter {
+       s.And = v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter {
+       s.Prefix = &v
+       return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter {
+       s.Tag = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination
+type AnalyticsS3BucketDestination struct {
+       _ struct{} `type:"structure"`
+
+       // The Amazon Resource Name (ARN) of the bucket to which data is exported.
+       //
+       // Bucket is a required field
+       Bucket *string `type:"string" required:"true"`
+
+       // The account ID that owns the destination bucket. If no account ID is provided,
+       // the owner will not be validated prior to exporting data.
+       BucketAccountId *string `type:"string"`
+
+       // The file format used when exporting data to Amazon S3.
+       //
+       // Format is a required field
+       Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"`
+
+       // The prefix to use when exporting data. The exported data begins with this
+       // prefix.
+       Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AnalyticsS3BucketDestination) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AnalyticsS3BucketDestination) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AnalyticsS3BucketDestination) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Format == nil {
+               invalidParams.Add(request.NewErrParamRequired("Format"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination {
+       s.Bucket = &v
+       return s
+}
+
+// SetBucketAccountId sets the BucketAccountId field's value.
+func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination {
+       s.BucketAccountId = &v
+       return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination {
+       s.Format = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination {
+       s.Prefix = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket
+type Bucket struct {
+       _ struct{} `type:"structure"`
+
+       // Date the bucket was created.
+       CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+       // The name of the bucket.
+       Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Bucket) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Bucket) GoString() string {
+       return s.String()
+}
+
+// SetCreationDate sets the CreationDate field's value.
+func (s *Bucket) SetCreationDate(v time.Time) *Bucket {
+       s.CreationDate = &v
+       return s
+}
+
+// SetName sets the Name field's value.
+func (s *Bucket) SetName(v string) *Bucket {
+       s.Name = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration
+type BucketLifecycleConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Rules is a required field
+       Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s BucketLifecycleConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketLifecycleConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketLifecycleConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"}
+       if s.Rules == nil {
+               invalidParams.Add(request.NewErrParamRequired("Rules"))
+       }
+       if s.Rules != nil {
+               for i, v := range s.Rules {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration {
+       s.Rules = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus
+type BucketLoggingStatus struct {
+       _ struct{} `type:"structure"`
+
+       LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s BucketLoggingStatus) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketLoggingStatus) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketLoggingStatus) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"}
+       if s.LoggingEnabled != nil {
+               if err := s.LoggingEnabled.Validate(); err != nil {
+                       invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus {
+       s.LoggingEnabled = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration
+type CORSConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // CORSRules is a required field
+       CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s CORSConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CORSConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"}
+       if s.CORSRules == nil {
+               invalidParams.Add(request.NewErrParamRequired("CORSRules"))
+       }
+       if s.CORSRules != nil {
+               for i, v := range s.CORSRules {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetCORSRules sets the CORSRules field's value.
+func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration {
+       s.CORSRules = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule
+type CORSRule struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies which headers are allowed in a pre-flight OPTIONS request.
+       AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
+
+       // Identifies HTTP methods that the domain/origin specified in the rule is allowed
+       // to execute.
+       //
+       // AllowedMethods is a required field
+       AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"`
+
+       // One or more origins you want customers to be able to access the bucket from.
+       //
+       // AllowedOrigins is a required field
+       AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"`
+
+       // One or more headers in the response that you want customers to be able to
+       // access from their applications (for example, from a JavaScript XMLHttpRequest
+       // object).
+       ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"`
+
+       // The time in seconds that the browser caches the preflight response
+       // for the specified resource.
+       MaxAgeSeconds *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CORSRule) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CORSRule) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSRule) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "CORSRule"}
+       if s.AllowedMethods == nil {
+               invalidParams.Add(request.NewErrParamRequired("AllowedMethods"))
+       }
+       if s.AllowedOrigins == nil {
+               invalidParams.Add(request.NewErrParamRequired("AllowedOrigins"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAllowedHeaders sets the AllowedHeaders field's value.
+func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule {
+       s.AllowedHeaders = v
+       return s
+}
+
+// SetAllowedMethods sets the AllowedMethods field's value.
+func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule {
+       s.AllowedMethods = v
+       return s
+}
+
+// SetAllowedOrigins sets the AllowedOrigins field's value.
+func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule {
+       s.AllowedOrigins = v
+       return s
+}
+
+// SetExposeHeaders sets the ExposeHeaders field's value.
+func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule {
+       s.ExposeHeaders = v
+       return s
+}
+
+// SetMaxAgeSeconds sets the MaxAgeSeconds field's value.
+func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule {
+       s.MaxAgeSeconds = &v
+       return s
+}
+
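+// A minimal sketch of composing a CORSRule with the fluent setters and
+// wrapping it in a CORSConfiguration; the methods and origin shown are
+// hypothetical values:
+//
+//     rule := new(CORSRule).
+//             SetAllowedMethods(aws.StringSlice([]string{"GET", "PUT"})).
+//             SetAllowedOrigins(aws.StringSlice([]string{"https://example.com"})).
+//             SetMaxAgeSeconds(3000)
+//     if err := rule.Validate(); err != nil {
+//             // AllowedMethods and AllowedOrigins are required.
+//     }
+//     cfg := new(CORSConfiguration).SetCORSRules([]*CORSRule{rule})
+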
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration
+type CloudFunctionConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       CloudFunction *string `type:"string"`
+
+       // Bucket event for which to send notifications.
+       Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+       Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+       // Optional unique identifier for configurations in a notification configuration.
+       // If you don't provide one, Amazon S3 will assign an ID.
+       Id *string `type:"string"`
+
+       InvocationRole *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CloudFunctionConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloudFunctionConfiguration) GoString() string {
+       return s.String()
+}
+
+// SetCloudFunction sets the CloudFunction field's value.
+func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration {
+       s.CloudFunction = &v
+       return s
+}
+
+// SetEvent sets the Event field's value.
+func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration {
+       s.Event = &v
+       return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration {
+       s.Events = v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration {
+       s.Id = &v
+       return s
+}
+
+// SetInvocationRole sets the InvocationRole field's value.
+func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration {
+       s.InvocationRole = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix
+type CommonPrefix struct {
+       _ struct{} `type:"structure"`
+
+       Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CommonPrefix) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CommonPrefix) GoString() string {
+       return s.String()
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix {
+       s.Prefix = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest
+type CompleteMultipartUploadInput struct {
+       _ struct{} `type:"structure" payload:"MultipartUpload"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"`
+
+       // Confirms that the requester knows that they will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // UploadId is a required field
+       UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CompleteMultipartUploadInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+       if s.UploadId == nil {
+               invalidParams.Add(request.NewErrParamRequired("UploadId"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput {
+       s.Key = &v
+       return s
+}
+
+// SetMultipartUpload sets the MultipartUpload field's value.
+func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput {
+       s.MultipartUpload = v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput {
+       s.UploadId = &v
+       return s
+}
+
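+// A minimal sketch of assembling a CompleteMultipartUploadInput once all
+// parts have been uploaded; the bucket, key, upload ID, and parts slice are
+// assumed to come from the preceding CreateMultipartUpload/UploadPart calls:
+//
+//     input := new(CompleteMultipartUploadInput).
+//             SetBucket("my-bucket").
+//             SetKey("my-object").
+//             SetUploadId(uploadID).
+//             SetMultipartUpload(&CompletedMultipartUpload{Parts: parts})
+//     if err := input.Validate(); err != nil {
+//             // Bucket, Key, and UploadId are required.
+//     }
+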
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput
+type CompleteMultipartUploadOutput struct {
+       _ struct{} `type:"structure"`
+
+       Bucket *string `type:"string"`
+
+       // Entity tag of the object.
+       ETag *string `type:"string"`
+
+       // If the object expiration is configured, this will contain the expiration
+       // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+       Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+       Key *string `min:"1" type:"string"`
+
+       Location *string `type:"string"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // If present, specifies the ID of the AWS Key Management Service (KMS) master
+       // encryption key that was used for the object.
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The Server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+       // Version of the object.
+       VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadOutput) GoString() string {
+       return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput {
+       s.Bucket = &v
+       return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
+       s.ETag = &v
+       return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput {
+       s.Expiration = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput {
+       s.Key = &v
+       return s
+}
+
+// SetLocation sets the Location field's value.
+func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput {
+       s.Location = &v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload
+type CompletedMultipartUpload struct {
+       _ struct{} `type:"structure"`
+
+       Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s CompletedMultipartUpload) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedMultipartUpload) GoString() string {
+       return s.String()
+}
+
+// SetParts sets the Parts field's value.
+func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload {
+       s.Parts = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart
+type CompletedPart struct {
+       _ struct{} `type:"structure"`
+
+       // Entity tag returned when the part was uploaded.
+       ETag *string `type:"string"`
+
+       // Part number that identifies the part. This is a positive integer between
+       // 1 and 10,000.
+       PartNumber *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CompletedPart) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedPart) GoString() string {
+       return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CompletedPart) SetETag(v string) *CompletedPart {
+       s.ETag = &v
+       return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart {
+       s.PartNumber = &v
+       return s
+}
+
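+// A minimal sketch of recording a CompletedPart as each part finishes; in a
+// real flow the ETag and part number come from the corresponding UploadPart
+// request and response:
+//
+//     parts = append(parts, new(CompletedPart).
+//             SetETag(etag).
+//             SetPartNumber(1))
+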
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition
+type Condition struct {
+       _ struct{} `type:"structure"`
+
+       // The HTTP error code when the redirect is applied. In the event of an error,
+       // if the error code equals this value, then the specified redirect is applied.
+       // Required when parent element Condition is specified and sibling KeyPrefixEquals
+       // is not specified. If both are specified, then both must be true for the redirect
+       // to be applied.
+       HttpErrorCodeReturnedEquals *string `type:"string"`
+
+       // The object key name prefix when the redirect is applied. For example, to
+       // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+       // To redirect requests for all pages with the prefix docs/, the key prefix
+       // will be docs/, which identifies all objects in the docs/ folder. Required when
+       // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+       // is not specified. If both conditions are specified, both must be true for
+       // the redirect to be applied.
+       KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+       return s.String()
+}
+
+// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value.
+func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition {
+       s.HttpErrorCodeReturnedEquals = &v
+       return s
+}
+
+// SetKeyPrefixEquals sets the KeyPrefixEquals field's value.
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition {
+       s.KeyPrefixEquals = &v
+       return s
+}
+
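+// A minimal sketch of a Condition that redirects 404 responses under a
+// hypothetical docs/ prefix; as the field docs above note, when both fields
+// are set, both must match for the redirect to apply:
+//
+//     cond := new(Condition).
+//             SetHttpErrorCodeReturnedEquals("404").
+//             SetKeyPrefixEquals("docs/")
+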
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest
+type CopyObjectInput struct {
+       _ struct{} `type:"structure"`
+
+       // The canned ACL to apply to the object.
+       ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Specifies caching behavior along the request/reply chain.
+       CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+       // Specifies presentational information for the object.
+       ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+       // Specifies what content encodings have been applied to the object and thus
+       // what decoding mechanisms must be applied to obtain the media-type referenced
+       // by the Content-Type header field.
+       ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+       // The language the content is in.
+       ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+       // A standard MIME type describing the format of the object data.
+       ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+       // The name of the source bucket and key name of the source object, separated
+       // by a slash (/). Must be URL-encoded.
+       //
+       // CopySource is a required field
+       CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+       // Copies the object if its entity tag (ETag) matches the specified tag.
+       CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+       // Copies the object if it has been modified since the specified time.
+       CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Copies the object if its entity tag (ETag) is different than the specified
+       // ETag.
+       CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+       // Copies the object if it hasn't been modified since the specified time.
+       CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+       CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+       // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+       // the source object. The encryption key provided in this header must be one
+       // that was used when the source object was created.
+       CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+       // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+       // Amazon S3 uses this header for a message integrity check to ensure the encryption
+       // key was transmitted without error.
+       CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // The date and time at which the object is no longer cacheable.
+       Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+       GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+       // Allows grantee to read the object data and its metadata.
+       GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+       // Allows grantee to read the object ACL.
+       GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+       // Allows grantee to write the ACL for the applicable object.
+       GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // A map of metadata to store with the object in S3.
+       Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+       // Specifies whether the metadata is copied from the source object or replaced
+       // with metadata provided in the request.
+       MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+       // Confirms that the requester knows that they will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+       // data. This value is used to store the object and then it is discarded; Amazon
+       // does not store the encryption key. The key must be appropriate for use with
+       // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+       // header.
+       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+       // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+       // Amazon S3 uses this header for a message integrity check to ensure the encryption
+       // key was transmitted without error.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+       // requests for an object protected by AWS KMS will fail if not made via SSL
+       // or using SigV4. Documentation on configuring any of the officially supported
+       // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The Server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+       // The type of storage to use for the object. Defaults to 'STANDARD'.
+       StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+       // The tag-set for the destination object; this value must be used in
+       // conjunction with the TaggingDirective. The tag-set must be encoded as URL
+       // query parameters.
+       Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+       // Specifies whether the object tag-set is copied from the source object or
+       // replaced with the tag-set provided in the request.
+       TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"`
+
+       // If the bucket is configured as a website, redirects requests for this object
+       // to another object in the same bucket or to an external URL. Amazon S3 stores
+       // the value of this header in the object metadata.
+       WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopyObjectInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.CopySource == nil {
+               invalidParams.Add(request.NewErrParamRequired("CopySource"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput {
+       s.ACL = &v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
+       s.CacheControl = &v
+       return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput {
+       s.ContentDisposition = &v
+       return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput {
+       s.ContentEncoding = &v
+       return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput {
+       s.ContentLanguage = &v
+       return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput {
+       s.ContentType = &v
+       return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput {
+       s.CopySource = &v
+       return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput {
+       s.CopySourceIfMatch = &v
+       return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput {
+       s.CopySourceIfModifiedSince = &v
+       return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput {
+       s.CopySourceIfNoneMatch = &v
+       return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput {
+       s.CopySourceIfUnmodifiedSince = &v
+       return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput {
+       s.CopySourceSSECustomerAlgorithm = &v
+       return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput {
+       s.CopySourceSSECustomerKey = &v
+       return s
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput {
+       s.CopySourceSSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput {
+       s.Expires = &v
+       return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput {
+       s.GrantFullControl = &v
+       return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput {
+       s.GrantRead = &v
+       return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput {
+       s.GrantReadACP = &v
+       return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput {
+       s.GrantWriteACP = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput {
+       s.Key = &v
+       return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput {
+       s.Metadata = v
+       return s
+}
+
+// SetMetadataDirective sets the MetadataDirective field's value.
+func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput {
+       s.MetadataDirective = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput {
+       s.SSECustomerKey = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput {
+       s.StorageClass = &v
+       return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput {
+       s.Tagging = &v
+       return s
+}
+
+// SetTaggingDirective sets the TaggingDirective field's value.
+func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput {
+       s.TaggingDirective = &v
+       return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput {
+       s.WebsiteRedirectLocation = &v
+       return s
+}
+
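+// A minimal sketch of a CopyObjectInput; Bucket and Key name the destination,
+// while CopySource is the URL-encoded "sourcebucket/sourcekey" pair (all
+// values here are hypothetical):
+//
+//     input := new(CopyObjectInput).
+//             SetBucket("dest-bucket").
+//             SetKey("dest-key").
+//             SetCopySource("source-bucket/source-key")
+//     if err := input.Validate(); err != nil {
+//             // Bucket, CopySource, and Key are required.
+//     }
+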
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput
+type CopyObjectOutput struct {
+       _ struct{} `type:"structure" payload:"CopyObjectResult"`
+
+       CopyObjectResult *CopyObjectResult `type:"structure"`
+
+       CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+       // If the object expiration is configured, the response includes this header.
+       Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header confirming the encryption algorithm
+       // used.
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header to provide round trip message integrity
+       // verification of the customer-provided encryption key.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // If present, specifies the ID of the AWS Key Management Service (KMS) master
+       // encryption key that was used for the object.
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The Server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+       // Version ID of the newly created copy.
+       VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectOutput) GoString() string {
+       return s.String()
+}
+
+// SetCopyObjectResult sets the CopyObjectResult field's value.
+func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput {
+       s.CopyObjectResult = v
+       return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput {
+       s.CopySourceVersionId = &v
+       return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput {
+       s.Expiration = &v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult
+type CopyObjectResult struct {
+       _ struct{} `type:"structure"`
+
+       ETag *string `type:"string"`
+
+       LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s CopyObjectResult) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectResult) GoString() string {
+       return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult {
+       s.ETag = &v
+       return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult {
+       s.LastModified = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult
+type CopyPartResult struct {
+       _ struct{} `type:"structure"`
+
+       // Entity tag of the object.
+       ETag *string `type:"string"`
+
+       // Date and time at which the object was uploaded.
+       LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s CopyPartResult) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyPartResult) GoString() string {
+       return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *CopyPartResult) SetETag(v string) *CopyPartResult {
+       s.ETag = &v
+       return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult {
+       s.LastModified = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration
+type CreateBucketConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies the region where the bucket will be created. If you don't specify
+       // a region, the bucket will be created in US Standard (us-east-1).
+       LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s CreateBucketConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketConfiguration) GoString() string {
+       return s.String()
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration {
+       s.LocationConstraint = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest
+type CreateBucketInput struct {
+       _ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
+
+       // The canned ACL to apply to the bucket.
+       ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
+
+       // Allows grantee the read, write, read ACP, and write ACP permissions on the
+       // bucket.
+       GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+       // Allows grantee to list the objects in the bucket.
+       GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+       // Allows grantee to read the bucket ACL.
+       GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+       // Allows grantee to create, overwrite, and delete any object in the bucket.
+       GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+       // Allows grantee to write the ACL for the applicable bucket.
+       GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBucketInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput {
+       s.ACL = &v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
+func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
+       s.CreateBucketConfiguration = v
+       return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput {
+       s.GrantFullControl = &v
+       return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput {
+       s.GrantRead = &v
+       return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput {
+       s.GrantReadACP = &v
+       return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput {
+       s.GrantWrite = &v
+       return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput {
+       s.GrantWriteACP = &v
+       return s
+}
+
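+// A minimal sketch of a CreateBucketInput that pins the bucket to a region
+// through CreateBucketConfiguration; the bucket name and region are
+// hypothetical:
+//
+//     input := new(CreateBucketInput).
+//             SetBucket("my-bucket").
+//             SetCreateBucketConfiguration(
+//                     new(CreateBucketConfiguration).SetLocationConstraint("eu-west-1"))
+//     if err := input.Validate(); err != nil {
+//             // Bucket is the only required field.
+//     }
+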
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput
+type CreateBucketOutput struct {
+       _ struct{} `type:"structure"`
+
+       Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketOutput) GoString() string {
+       return s.String()
+}
+
+// SetLocation sets the Location field's value.
+func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput {
+       s.Location = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest
+type CreateMultipartUploadInput struct {
+       _ struct{} `type:"structure"`
+
+       // The canned ACL to apply to the object.
+       ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Specifies caching behavior along the request/reply chain.
+       CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+       // Specifies presentational information for the object.
+       ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+       // Specifies what content encodings have been applied to the object and thus
+       // what decoding mechanisms must be applied to obtain the media-type referenced
+       // by the Content-Type header field.
+       ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+       // The language the content is in.
+       ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+       // A standard MIME type describing the format of the object data.
+       ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+       // The date and time at which the object is no longer cacheable.
+       Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+       GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+       // Allows grantee to read the object data and its metadata.
+       GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+       // Allows grantee to read the object ACL.
+       GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+       // Allows grantee to write the ACL for the applicable object.
+       GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // A map of metadata to store with the object in S3.
+       Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+       // Confirms that the requester knows that they will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+       // data. This value is used to store the object and then it is discarded; Amazon
+       // does not store the encryption key. The key must be appropriate for use with
+       // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+       // header.
+       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+       // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+       // Amazon S3 uses this header for a message integrity check to ensure the encryption
+       // key was transmitted without error.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+       // requests for an object protected by AWS KMS will fail if not made via SSL
+       // or using SigV4. Documentation on configuring any of the officially supported
+       // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The Server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+       // The type of storage to use for the object. Defaults to 'STANDARD'.
+       StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+       // If the bucket is configured as a website, redirects requests for this object
+       // to another object in the same bucket or to an external URL. Amazon S3 stores
+       // the value of this header in the object metadata.
+       WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMultipartUploadInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput {
+       s.ACL = &v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput {
+       s.CacheControl = &v
+       return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput {
+       s.ContentDisposition = &v
+       return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput {
+       s.ContentEncoding = &v
+       return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput {
+       s.ContentLanguage = &v
+       return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput {
+       s.ContentType = &v
+       return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput {
+       s.Expires = &v
+       return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput {
+       s.GrantFullControl = &v
+       return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput {
+       s.GrantRead = &v
+       return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput {
+       s.GrantReadACP = &v
+       return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput {
+       s.GrantWriteACP = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput {
+       s.Key = &v
+       return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput {
+       s.Metadata = v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput {
+       s.SSECustomerKey = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput {
+       s.StorageClass = &v
+       return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
+       s.WebsiteRedirectLocation = &v
+       return s
+}
+
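+// A minimal sketch of a CreateMultipartUploadInput requesting SSE-KMS; the
+// bucket, key, and KMS key ID are hypothetical:
+//
+//     input := new(CreateMultipartUploadInput).
+//             SetBucket("my-bucket").
+//             SetKey("large-object").
+//             SetServerSideEncryption("aws:kms").
+//             SetSSEKMSKeyId(kmsKeyID)
+//     if err := input.Validate(); err != nil {
+//             // Bucket and Key are required.
+//     }
+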
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput
+type CreateMultipartUploadOutput struct {
+       _ struct{} `type:"structure"`
+
+       // Date when the multipart upload becomes eligible for abort by a lifecycle rule.
+       AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
+
+       // ID of the lifecycle rule that makes the multipart upload eligible for the
+       // abort operation.
+       AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+       // Name of the bucket to which the multipart upload was initiated.
+       Bucket *string `locationName:"Bucket" type:"string"`
+
+       // Object key for which the multipart upload was initiated.
+       Key *string `min:"1" type:"string"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header confirming the encryption algorithm
+       // used.
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header to provide round trip message integrity
+       // verification of the customer-provided encryption key.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // If present, specifies the ID of the AWS Key Management Service (KMS) master
+       // encryption key that was used for the object.
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+       // ID for the initiated multipart upload.
+       UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadOutput) GoString() string {
+       return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput {
+       s.AbortDate = &v
+       return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput {
+       s.AbortRuleId = &v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput {
+       s.Key = &v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput {
+       s.UploadId = &v
+       return s
+}
+
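Aside: because every generated setter returns its receiver, a request can be composed as a single chain. Below is a minimal sketch of initiating a multipart upload that way, assuming default credentials are available; the region, bucket, and key are placeholder values, not part of the vendored source.

// Editor's illustrative sketch; not part of the generated SDK file.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Placeholder region; real values depend on your account.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := s3.New(sess)

	// Chain the generated setters to build the request.
	input := (&s3.CreateMultipartUploadInput{}).
		SetBucket("example-bucket").
		SetKey("example-key").
		SetServerSideEncryption(s3.ServerSideEncryptionAes256)

	out, err := svc.CreateMultipartUpload(input)
	if err != nil {
		log.Fatal(err)
	}
	// The returned UploadId is what subsequent UploadPart calls reference.
	log.Println("upload id:", aws.StringValue(out.UploadId))
}
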
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete
+type Delete struct {
+       _ struct{} `type:"structure"`
+
+       // Objects is a required field
+       Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
+
+       // Element to enable quiet mode for the request. When you add this element,
+       // you must set its value to true.
+       Quiet *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s Delete) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Delete) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Delete) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "Delete"}
+       if s.Objects == nil {
+               invalidParams.Add(request.NewErrParamRequired("Objects"))
+       }
+       if s.Objects != nil {
+               for i, v := range s.Objects {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetObjects sets the Objects field's value.
+func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete {
+       s.Objects = v
+       return s
+}
+
+// SetQuiet sets the Quiet field's value.
+func (s *Delete) SetQuiet(v bool) *Delete {
+       s.Quiet = &v
+       return s
+}
+
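Aside: the Validate method above enforces the required Objects list and recurses into each ObjectIdentifier. A minimal sketch of building and checking a Delete payload; the keys are placeholders.

// Editor's illustrative sketch; not part of the generated SDK file.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Quiet mode suppresses per-object results for successful deletes.
	del := (&s3.Delete{}).
		SetObjects([]*s3.ObjectIdentifier{
			(&s3.ObjectIdentifier{}).SetKey("logs/2017/06/01.gz"),
			(&s3.ObjectIdentifier{}).SetKey("logs/2017/06/02.gz"),
		}).
		SetQuiet(true)

	// Validate returns nil here; an empty s3.Delete{} would instead return
	// an ErrInvalidParams naming the missing Objects field.
	fmt.Println("valid:", del.Validate() == nil)
}
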
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest
+type DeleteBucketAnalyticsConfigurationInput struct {
+       _ struct{} `type:"structure"`
+
+       // The name of the bucket from which an analytics configuration is deleted.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The identifier used to represent an analytics configuration.
+       //
+       // Id is a required field
+       Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAnalyticsConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAnalyticsConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput {
+       s.Id = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput
+type DeleteBucketAnalyticsConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketAnalyticsConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string {
+       return s.String()
+}
+
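Aside: the bucket-configuration inputs in this file all share the same required Bucket-plus-Id shape. A minimal sketch for the analytics case, assuming default credentials; the bucket name and configuration id are placeholders.

// Editor's illustrative sketch; not part of the generated SDK file.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	// Both fields are required; Validate would reject the request otherwise.
	_, err := svc.DeleteBucketAnalyticsConfiguration(
		(&s3.DeleteBucketAnalyticsConfigurationInput{}).
			SetBucket("example-bucket").
			SetId("report-1"))
	if err != nil {
		log.Fatal(err)
	}
}
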
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest
+type DeleteBucketCorsInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketCorsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketCorsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketCorsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput
+type DeleteBucketCorsOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketCorsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketCorsOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest
+type DeleteBucketInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest
+type DeleteBucketInventoryConfigurationInput struct {
+       _ struct{} `type:"structure"`
+
+       // The name of the bucket containing the inventory configuration to delete.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The ID used to identify the inventory configuration.
+       //
+       // Id is a required field
+       Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInventoryConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInventoryConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInventoryConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput {
+       s.Id = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput
+type DeleteBucketInventoryConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInventoryConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInventoryConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest
+type DeleteBucketLifecycleInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketLifecycleInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketLifecycleInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketLifecycleInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput
+type DeleteBucketLifecycleOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketLifecycleOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketLifecycleOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest
+type DeleteBucketMetricsConfigurationInput struct {
+       _ struct{} `type:"structure"`
+
+       // The name of the bucket containing the metrics configuration to delete.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The ID used to identify the metrics configuration.
+       //
+       // Id is a required field
+       Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketMetricsConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketMetricsConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketMetricsConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput {
+       s.Id = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput
+type DeleteBucketMetricsConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketMetricsConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketMetricsConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput
+type DeleteBucketOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest
+type DeleteBucketPolicyInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketPolicyInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketPolicyInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketPolicyInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput
+type DeleteBucketPolicyOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketPolicyOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketPolicyOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest
+type DeleteBucketReplicationInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketReplicationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketReplicationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketReplicationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput
+type DeleteBucketReplicationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketReplicationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketReplicationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest
+type DeleteBucketTaggingInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketTaggingInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketTaggingInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketTaggingInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput
+type DeleteBucketTaggingOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketTaggingOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketTaggingOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest
+type DeleteBucketWebsiteInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketWebsiteInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput
+type DeleteBucketWebsiteOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry
+type DeleteMarkerEntry struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies whether this version is (true) or is not (false) the latest
+       // version of the object.
+       IsLatest *bool `type:"boolean"`
+
+       // The object key.
+       Key *string `min:"1" type:"string"`
+
+       // Date and time the object was last modified.
+       LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+       Owner *Owner `type:"structure"`
+
+       // Version ID of an object.
+       VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerEntry) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerEntry) GoString() string {
+       return s.String()
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry {
+       s.IsLatest = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry {
+       s.Key = &v
+       return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry {
+       s.LastModified = &v
+       return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry {
+       s.Owner = v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest
+type DeleteObjectInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // The concatenation of the authentication device's serial number, a space,
+       // and the value that is displayed on your authentication device.
+       MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // VersionId used to reference a specific version of the object.
+       VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput {
+       s.Key = &v
+       return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput {
+       s.MFA = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput
+type DeleteObjectOutput struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies whether the versioned object that was permanently deleted was (true)
+       // or was not (false) a delete marker.
+       DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // Returns the version ID of the delete marker created as a result of the DELETE
+       // operation.
+       VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectOutput) GoString() string {
+       return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput {
+       s.DeleteMarker = &v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput {
+       s.VersionId = &v
+       return s
+}
+
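Aside: supplying a VersionId deletes that specific version outright instead of creating a delete marker. A minimal sketch, assuming default credentials; bucket, key, and version id are placeholders.

// Editor's illustrative sketch; not part of the generated SDK file.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.DeleteObject((&s3.DeleteObjectInput{}).
		SetBucket("example-bucket").
		SetKey("reports/2017.csv").
		SetVersionId("PLACEHOLDER-VERSION-ID"))
	if err != nil {
		log.Fatal(err)
	}
	// True when the removed version was itself a delete marker.
	log.Println("was delete marker:", aws.BoolValue(out.DeleteMarker))
}
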
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest
+type DeleteObjectTaggingInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // The versionId of the object that the tag-set will be removed from.
+       VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectTaggingInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectTaggingInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectTaggingInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput {
+       s.Key = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput
+type DeleteObjectTaggingOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The versionId of the object the tag-set was removed from.
+       VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectTaggingOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectTaggingOutput) GoString() string {
+       return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest
+type DeleteObjectsInput struct {
+       _ struct{} `type:"structure" payload:"Delete"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Delete is a required field
+       Delete *Delete `locationName:"Delete" type:"structure" required:"true"`
+
+       // The concatenation of the authentication device's serial number, a space,
+       // and the value that is displayed on your authentication device.
+       MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Delete == nil {
+               invalidParams.Add(request.NewErrParamRequired("Delete"))
+       }
+       if s.Delete != nil {
+               if err := s.Delete.Validate(); err != nil {
+                       invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetDelete sets the Delete field's value.
+func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput {
+       s.Delete = v
+       return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput {
+       s.MFA = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput
+type DeleteObjectsOutput struct {
+       _ struct{} `type:"structure"`
+
+       Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+       Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsOutput) GoString() string {
+       return s.String()
+}
+
+// SetDeleted sets the Deleted field's value.
+func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput {
+       s.Deleted = v
+       return s
+}
+
+// SetErrors sets the Errors field's value.
+func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput {
+       s.Errors = v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput {
+       s.RequestCharged = &v
+       return s
+}
+
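Aside: DeleteObjects batches up to 1,000 keys per request, and a 200 response can still carry per-key failures in the output's Errors list. A minimal sketch, assuming default credentials; all names are placeholders.

// Editor's illustrative sketch; not part of the generated SDK file.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.DeleteObjects((&s3.DeleteObjectsInput{}).
		SetBucket("example-bucket").
		SetDelete((&s3.Delete{}).SetObjects([]*s3.ObjectIdentifier{
			(&s3.ObjectIdentifier{}).SetKey("tmp/a"),
			(&s3.ObjectIdentifier{}).SetKey("tmp/b"),
		})))
	if err != nil {
		log.Fatal(err)
	}
	// Check per-key failures even when the call as a whole succeeded.
	for _, e := range out.Errors {
		log.Printf("failed %s: %s", aws.StringValue(e.Key), aws.StringValue(e.Code))
	}
	log.Printf("deleted %d objects", len(out.Deleted))
}
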
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject
+type DeletedObject struct {
+       _ struct{} `type:"structure"`
+
+       DeleteMarker *bool `type:"boolean"`
+
+       DeleteMarkerVersionId *string `type:"string"`
+
+       Key *string `min:"1" type:"string"`
+
+       VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeletedObject) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletedObject) GoString() string {
+       return s.String()
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject {
+       s.DeleteMarker = &v
+       return s
+}
+
+// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value.
+func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject {
+       s.DeleteMarkerVersionId = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *DeletedObject) SetKey(v string) *DeletedObject {
+       s.Key = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *DeletedObject) SetVersionId(v string) *DeletedObject {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination
+type Destination struct {
+       _ struct{} `type:"structure"`
+
+       // Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+       // replicas of the object identified by the rule.
+       //
+       // Bucket is a required field
+       Bucket *string `type:"string" required:"true"`
+
+       // The class of storage used to store the object.
+       StorageClass *string `type:"string" enum:"StorageClass"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Destination) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "Destination"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *Destination) SetBucket(v string) *Destination {
+       s.Bucket = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Destination) SetStorageClass(v string) *Destination {
+       s.StorageClass = &v
+       return s
+}
+
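Aside: Destination is embedded in a bucket replication rule, and its Bucket field takes the replica bucket's ARN rather than a bare name. A minimal sketch with placeholder values.

// Editor's illustrative sketch; not part of the generated SDK file.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Replication destinations take a bucket ARN, not a bucket name.
	dest := (&s3.Destination{}).
		SetBucket("arn:aws:s3:::example-replica-bucket").
		SetStorageClass(s3.StorageClassStandardIa)
	fmt.Println("valid:", dest.Validate() == nil)
}
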
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error
+type Error struct {
+       _ struct{} `type:"structure"`
+
+       Code *string `type:"string"`
+
+       Key *string `min:"1" type:"string"`
+
+       Message *string `type:"string"`
+
+       VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Error) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Error) GoString() string {
+       return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *Error) SetCode(v string) *Error {
+       s.Code = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Error) SetKey(v string) *Error {
+       s.Key = &v
+       return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *Error) SetMessage(v string) *Error {
+       s.Message = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *Error) SetVersionId(v string) *Error {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument
+type ErrorDocument struct {
+       _ struct{} `type:"structure"`
+
+       // The object key name to use when a 4XX class error occurs.
+       //
+       // Key is a required field
+       Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDocument) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorDocument) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ErrorDocument) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ErrorDocument) SetKey(v string) *ErrorDocument {
+       s.Key = &v
+       return s
+}
+
+// Container for key value pair that defines the criteria for the filter rule.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule
+type FilterRule struct {
+       _ struct{} `type:"structure"`
+
+       // Object key name prefix or suffix identifying one or more objects to which
+       // the filtering rule applies. The maximum prefix length is 1,024 characters.
+       // Overlapping prefixes and suffixes are not supported. For more information,
+       // see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+       Name *string `type:"string" enum:"FilterRuleName"`
+
+       Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s FilterRule) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FilterRule) GoString() string {
+       return s.String()
+}
+
+// SetName sets the Name field's value.
+func (s *FilterRule) SetName(v string) *FilterRule {
+       s.Name = &v
+       return s
+}
+
+// SetValue sets the Value field's value.
+func (s *FilterRule) SetValue(v string) *FilterRule {
+       s.Value = &v
+       return s
+}
+
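Aside: a FilterRule pairs an enum-constrained Name ("prefix" or "suffix") with a Value. A minimal sketch of a prefix rule using the generated enum constant; the prefix itself is a placeholder.

// Editor's illustrative sketch; not part of the generated SDK file.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Matches event notifications for keys under "images/".
	rule := (&s3.FilterRule{}).
		SetName(s3.FilterRuleNamePrefix).
		SetValue("images/")
	fmt.Println(aws.StringValue(rule.Name), "=", aws.StringValue(rule.Value))
}
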
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest
+type GetBucketAccelerateConfigurationInput struct {
+       _ struct{} `type:"structure"`
+
+       // Name of the bucket for which the accelerate configuration is retrieved.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAccelerateConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccelerateConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAccelerateConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput
+type GetBucketAccelerateConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The accelerate configuration of the bucket.
+       Status *string `type:"string" enum:"BucketAccelerateStatus"`
+}
+
+// String returns the string representation
+func (s GetBucketAccelerateConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccelerateConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput {
+       s.Status = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest
+type GetBucketAclInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAclInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAclInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAclInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput
+type GetBucketAclOutput struct {
+       _ struct{} `type:"structure"`
+
+       // A list of grants.
+       Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+       Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketAclOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAclOutput) GoString() string {
+       return s.String()
+}
+
+// SetGrants sets the Grants field's value.
+func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput {
+       s.Grants = v
+       return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput {
+       s.Owner = v
+       return s
+}
+
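Aside: a minimal sketch of fetching a bucket ACL and walking the returned grants, assuming default credentials; the bucket name is a placeholder.

// Editor's illustrative sketch; not part of the generated SDK file.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.Must(session.NewSession()))

	out, err := svc.GetBucketAcl((&s3.GetBucketAclInput{}).SetBucket("example-bucket"))
	if err != nil {
		log.Fatal(err)
	}
	if out.Owner != nil {
		log.Println("owner:", aws.StringValue(out.Owner.DisplayName))
	}
	for _, g := range out.Grants {
		log.Println("grant:", aws.StringValue(g.Permission))
	}
}
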
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest
+type GetBucketAnalyticsConfigurationInput struct {
+       _ struct{} `type:"structure"`
+
+       // The name of the bucket from which an analytics configuration is retrieved.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The identifier used to represent an analytics configuration.
+       //
+       // Id is a required field
+       Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAnalyticsConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAnalyticsConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAnalyticsConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput {
+       s.Id = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput
+type GetBucketAnalyticsConfigurationOutput struct {
+       _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
+
+       // The configuration and any analyses for the analytics filter.
+       AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketAnalyticsConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAnalyticsConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
+func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput {
+       s.AnalyticsConfiguration = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest
+type GetBucketCorsInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketCorsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketCorsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketCorsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput
+type GetBucketCorsOutput struct {
+       _ struct{} `type:"structure"`
+
+       CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketCorsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketCorsOutput) GoString() string {
+       return s.String()
+}
+
+// SetCORSRules sets the CORSRules field's value.
+func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput {
+       s.CORSRules = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest
+type GetBucketInventoryConfigurationInput struct {
+       _ struct{} `type:"structure"`
+
+       // The name of the bucket containing the inventory configuration to retrieve.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The ID used to identify the inventory configuration.
+       //
+       // Id is a required field
+       Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketInventoryConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketInventoryConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketInventoryConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput {
+       s.Id = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput
+type GetBucketInventoryConfigurationOutput struct {
+       _ struct{} `type:"structure" payload:"InventoryConfiguration"`
+
+       // Specifies the inventory configuration.
+       InventoryConfiguration *InventoryConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketInventoryConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketInventoryConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// SetInventoryConfiguration sets the InventoryConfiguration field's value.
+func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput {
+       s.InventoryConfiguration = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest
+type GetBucketLifecycleConfigurationInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLifecycleConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput
+type GetBucketLifecycleConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+
+       Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// SetRules sets the Rules field's value.
+func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput {
+       s.Rules = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest
+type GetBucketLifecycleInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLifecycleInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput
+type GetBucketLifecycleOutput struct {
+       _ struct{} `type:"structure"`
+
+       Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleOutput) GoString() string {
+       return s.String()
+}
+
+// SetRules sets the Rules field's value.
+func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput {
+       s.Rules = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest
+type GetBucketLocationInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLocationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput
+type GetBucketLocationOutput struct {
+       _ struct{} `type:"structure"`
+
+       LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationOutput) GoString() string {
+       return s.String()
+}
+
+// SetLocationConstraint sets the LocationConstraint field's value.
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput {
+       s.LocationConstraint = &v
+       return s
+}
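+
+// Illustrative usage sketch (not part of the generated API): reading a
+// bucket's region, assuming an existing *S3 client "svc" and a hypothetical
+// bucket name. Note that the service returns an empty LocationConstraint
+// for buckets in the US East (N. Virginia) region.
+//
+//	out, err := svc.GetBucketLocation(&s3.GetBucketLocationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.LocationConstraint))
+//	}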
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest
+type GetBucketLoggingInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLoggingInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput
+type GetBucketLoggingOutput struct {
+       _ struct{} `type:"structure"`
+
+       LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingOutput) GoString() string {
+       return s.String()
+}
+
+// SetLoggingEnabled sets the LoggingEnabled field's value.
+func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput {
+       s.LoggingEnabled = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest
+type GetBucketMetricsConfigurationInput struct {
+       _ struct{} `type:"structure"`
+
+       // The name of the bucket containing the metrics configuration to retrieve.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The ID used to identify the metrics configuration.
+       //
+       // Id is a required field
+       Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricsConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricsConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketMetricsConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput {
+       s.Id = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput
+type GetBucketMetricsConfigurationOutput struct {
+       _ struct{} `type:"structure" payload:"MetricsConfiguration"`
+
+       // Specifies the metrics configuration.
+       MetricsConfiguration *MetricsConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketMetricsConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketMetricsConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// SetMetricsConfiguration sets the MetricsConfiguration field's value.
+func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput {
+       s.MetricsConfiguration = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest
+type GetBucketNotificationConfigurationRequest struct {
+       _ struct{} `type:"structure"`
+
+       // Name of the bucket to get the notification configuration for.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketNotificationConfigurationRequest) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketNotificationConfigurationRequest) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketNotificationConfigurationRequest) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest
+type GetBucketPolicyInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketPolicyInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput
+type GetBucketPolicyOutput struct {
+       _ struct{} `type:"structure" payload:"Policy"`
+
+       // The bucket policy as a JSON document.
+       Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyOutput) GoString() string {
+       return s.String()
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput {
+       s.Policy = &v
+       return s
+}
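+
+// Illustrative usage sketch (not part of the generated API): the policy is
+// returned as a raw JSON string, so it can be printed directly or
+// unmarshalled with encoding/json. Assumes an existing *S3 client "svc" and
+// a hypothetical bucket name.
+//
+//	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil {
+//		fmt.Println(aws.StringValue(out.Policy))
+//	}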
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest
+type GetBucketReplicationInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketReplicationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketReplicationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketReplicationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput
+type GetBucketReplicationOutput struct {
+       _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
+
+       // Container for replication rules. You can add as many as 1,000 rules. Total
+       // replication configuration size can be up to 2 MB.
+       ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketReplicationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketReplicationOutput) GoString() string {
+       return s.String()
+}
+
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput {
+       s.ReplicationConfiguration = v
+       return s
+}
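+
+// Illustrative usage sketch (not part of the generated API): iterating the
+// replication rules returned for a bucket, assuming an existing *S3 client
+// "svc" and a hypothetical bucket name.
+//
+//	out, err := svc.GetBucketReplication(&s3.GetBucketReplicationInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.ReplicationConfiguration != nil {
+//		for _, rule := range out.ReplicationConfiguration.Rules {
+//			fmt.Println(aws.StringValue(rule.Status))
+//		}
+//	}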
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest
+type GetBucketRequestPaymentInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketRequestPaymentInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketRequestPaymentInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketRequestPaymentInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput
+type GetBucketRequestPaymentOutput struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies who pays for the download and request fees.
+       Payer *string `type:"string" enum:"Payer"`
+}
+
+// String returns the string representation
+func (s GetBucketRequestPaymentOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketRequestPaymentOutput) GoString() string {
+       return s.String()
+}
+
+// SetPayer sets the Payer field's value.
+func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput {
+       s.Payer = &v
+       return s
+}
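+
+// Illustrative usage sketch (not part of the generated API): checking who
+// pays for requests against a bucket, assuming an existing *S3 client "svc"
+// and a hypothetical bucket name.
+//
+//	out, err := svc.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && aws.StringValue(out.Payer) == s3.PayerRequester {
+//		// downloads are billed to the requester, not the bucket owner
+//	}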
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest
+type GetBucketTaggingInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketTaggingInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketTaggingInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketTaggingInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput
+type GetBucketTaggingOutput struct {
+       _ struct{} `type:"structure"`
+
+       // TagSet is a required field
+       TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketTaggingOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketTaggingOutput) GoString() string {
+       return s.String()
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput {
+       s.TagSet = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest
+type GetBucketVersioningInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketVersioningInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput
+type GetBucketVersioningOutput struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+       // This element is returned only if the bucket has at some point been
+       // configured with MFA delete; otherwise it is absent from the response.
+       MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"`
+
+       // The versioning state of the bucket.
+       Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningOutput) GoString() string {
+       return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput {
+       s.MFADelete = &v
+       return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput {
+       s.Status = &v
+       return s
+}
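+
+// Illustrative usage sketch (not part of the generated API): a bucket that
+// has never had versioning configured returns neither Status nor MFADelete,
+// so both fields must be nil-checked. Assumes an existing *S3 client "svc"
+// and a hypothetical bucket name.
+//
+//	out, err := svc.GetBucketVersioning(&s3.GetBucketVersioningInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && aws.StringValue(out.Status) == s3.BucketVersioningStatusEnabled {
+//		// every overwrite and delete on this bucket creates a new version
+//	}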
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest
+type GetBucketWebsiteInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketWebsiteInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketWebsiteInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketWebsiteInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput {
+       s.Bucket = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput
+type GetBucketWebsiteOutput struct {
+       _ struct{} `type:"structure"`
+
+       ErrorDocument *ErrorDocument `type:"structure"`
+
+       IndexDocument *IndexDocument `type:"structure"`
+
+       RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+       RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation
+func (s GetBucketWebsiteOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketWebsiteOutput) GoString() string {
+       return s.String()
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput {
+       s.ErrorDocument = v
+       return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput {
+       s.IndexDocument = v
+       return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput {
+       s.RedirectAllRequestsTo = v
+       return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput {
+       s.RoutingRules = v
+       return s
+}
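+
+// Illustrative usage sketch (not part of the generated API): reading the
+// index-document suffix of a website-enabled bucket. All sub-structures in
+// the output are optional, so nil checks are required. Assumes an existing
+// *S3 client "svc" and a hypothetical bucket name.
+//
+//	out, err := svc.GetBucketWebsite(&s3.GetBucketWebsiteInput{
+//		Bucket: aws.String("example-bucket"),
+//	})
+//	if err == nil && out.IndexDocument != nil {
+//		fmt.Println(aws.StringValue(out.IndexDocument.Suffix)) // e.g. "index.html"
+//	}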
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest
+type GetObjectAclInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Confirms that the requester knows that they will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // VersionId used to reference a specific version of the object.
+       VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectAclInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectAclInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectAclInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
+       s.Key = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput {
+       s.VersionId = &v
+       return s
+}
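+
+// Illustrative usage sketch (not part of the generated API): the Set*
+// methods return the receiver, so an input can be built fluently. Assumes
+// an existing *S3 client "svc"; bucket and key names are hypothetical.
+//
+//	out, err := svc.GetObjectAcl(new(s3.GetObjectAclInput).
+//		SetBucket("example-bucket").
+//		SetKey("example-key"))
+//	if err == nil {
+//		for _, grant := range out.Grants {
+//			fmt.Println(aws.StringValue(grant.Permission))
+//		}
+//	}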
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput
+type GetObjectAclOutput struct {
+       _ struct{} `type:"structure"`
+
+       // A list of grants.
+       Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+       Owner *Owner `type:"structure"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s GetObjectAclOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectAclOutput) GoString() string {
+       return s.String()
+}
+
+// SetGrants sets the Grants field's value.
+func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput {
+       s.Grants = v
+       return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput {
+       s.Owner = v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest
+type GetObjectInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Return the object only if its entity tag (ETag) is the same as the one specified,
+       // otherwise return a 412 (precondition failed).
+       IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+       // Return the object only if it has been modified since the specified time,
+       // otherwise return a 304 (not modified).
+       IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Return the object only if its entity tag (ETag) is different from the one
+       // specified, otherwise return a 304 (not modified).
+       IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+       // Return the object only if it has not been modified since the specified time,
+       // otherwise return a 412 (precondition failed).
+       IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Part number of the object being read. This is a positive integer between
+       // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified.
+       // Useful for downloading just a part of an object.
+       PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+       // Downloads the specified range bytes of an object. For more information about
+       // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+       Range *string `location:"header" locationName:"Range" type:"string"`
+
+       // Confirms that the requester knows that they will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // Sets the Cache-Control header of the response.
+       ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+       // Sets the Content-Disposition header of the response
+       ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+       // Sets the Content-Encoding header of the response.
+       ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+       // Sets the Content-Language header of the response.
+       ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+       // Sets the Content-Type header of the response.
+       ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+       // Sets the Expires header of the response.
+       ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+
+       // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+       // data. This value is used to store the object and then it is discarded; Amazon
+       // does not store the encryption key. The key must be appropriate for use with
+       // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+       // header.
+       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+       // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+       // Amazon S3 uses this header for a message integrity check to ensure the encryption
+       // key was transmitted without error.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // VersionId used to reference a specific version of the object.
+       VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectInput) SetBucket(v string) *GetObjectInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput {
+       s.IfMatch = &v
+       return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput {
+       s.IfModifiedSince = &v
+       return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput {
+       s.IfNoneMatch = &v
+       return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput {
+       s.IfUnmodifiedSince = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectInput) SetKey(v string) *GetObjectInput {
+       s.Key = &v
+       return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput {
+       s.PartNumber = &v
+       return s
+}
+
+// SetRange sets the Range field's value.
+func (s *GetObjectInput) SetRange(v string) *GetObjectInput {
+       s.Range = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetResponseCacheControl sets the ResponseCacheControl field's value.
+func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput {
+       s.ResponseCacheControl = &v
+       return s
+}
+
+// SetResponseContentDisposition sets the ResponseContentDisposition field's value.
+func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput {
+       s.ResponseContentDisposition = &v
+       return s
+}
+
+// SetResponseContentEncoding sets the ResponseContentEncoding field's value.
+func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput {
+       s.ResponseContentEncoding = &v
+       return s
+}
+
+// SetResponseContentLanguage sets the ResponseContentLanguage field's value.
+func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput {
+       s.ResponseContentLanguage = &v
+       return s
+}
+
+// SetResponseContentType sets the ResponseContentType field's value.
+func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput {
+       s.ResponseContentType = &v
+       return s
+}
+
+// SetResponseExpires sets the ResponseExpires field's value.
+func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput {
+       s.ResponseExpires = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
+       s.SSECustomerKey = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput {
+       s.VersionId = &v
+       return s
+}
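+
+// Illustrative usage sketch (not part of the generated API): the Range and
+// If-* fields above map directly onto the corresponding HTTP request
+// headers, so a conditional, ranged download looks like an ordinary GET.
+// Assumes an existing *S3 client "svc"; names and the ETag are hypothetical.
+//
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket:      aws.String("example-bucket"),
+//		Key:         aws.String("example-key"),
+//		Range:       aws.String("bytes=0-1023"),   // first KiB only
+//		IfNoneMatch: aws.String(`"example-etag"`), // skip if unchanged
+//	})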
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput
+type GetObjectOutput struct {
+       _ struct{} `type:"structure" payload:"Body"`
+
+       AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+       // Object data.
+       Body io.ReadCloser `type:"blob"`
+
+       // Specifies caching behavior along the request/reply chain.
+       CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+       // Specifies presentational information for the object.
+       ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+       // Specifies what content encodings have been applied to the object and thus
+       // what decoding mechanisms must be applied to obtain the media-type referenced
+       // by the Content-Type header field.
+       ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+       // The language the content is in.
+       ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+       // Size of the body in bytes.
+       ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+       // The portion of the object returned in the response.
+       ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+       // A standard MIME type describing the format of the object data.
+       ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+       // Specifies whether the object retrieved was (true) or was not (false) a Delete
+       // Marker. If false, this response header does not appear in the response.
+       DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+       // An ETag is an opaque identifier assigned by a web server to a specific version
+       // of a resource found at a URL.
+       ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+       // If the object expiration is configured (see PUT Bucket lifecycle), the response
+       // includes this header. It includes the expiry-date and rule-id key value pairs
+       // providing object expiration information. The value of the rule-id is URL
+       // encoded.
+       Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+       // The date and time at which the object is no longer cacheable.
+       Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+       // Last modified date of the object
+       LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+       // A map of metadata to store with the object in S3.
+       Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+       // This is set to the number of metadata entries not returned in x-amz-meta
+       // headers. This can happen if you create metadata using an API like SOAP that
+       // supports more flexible metadata than the REST API. For example, using SOAP,
+       // you can create metadata whose values are not legal HTTP headers.
+       MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+       // The count of parts this object has.
+       PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+       ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // Provides information about object restoration operation and expiration time
+       // of the restored object copy.
+       Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header confirming the encryption algorithm
+       // used.
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header to provide round trip message integrity
+       // verification of the customer-provided encryption key.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // If present, specifies the ID of the AWS Key Management Service (KMS) master
+       // encryption key that was used for the object.
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The Server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+       StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+       // The number of tags, if any, on the object.
+       TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"`
+
+       // Version of the object.
+       VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+       // If the bucket is configured as a website, redirects requests for this object
+       // to another object in the same bucket or to an external URL. Amazon S3 stores
+       // the value of this header in the object metadata.
+       WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectOutput) GoString() string {
+       return s.String()
+}
+
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput {
+       s.AcceptRanges = &v
+       return s
+}
+
+// SetBody sets the Body field's value.
+func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput {
+       s.Body = v
+       return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput {
+       s.CacheControl = &v
+       return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput {
+       s.ContentDisposition = &v
+       return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput {
+       s.ContentEncoding = &v
+       return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput {
+       s.ContentLanguage = &v
+       return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput {
+       s.ContentLength = &v
+       return s
+}
+
+// SetContentRange sets the ContentRange field's value.
+func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput {
+       s.ContentRange = &v
+       return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput {
+       s.ContentType = &v
+       return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput {
+       s.DeleteMarker = &v
+       return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput {
+       s.ETag = &v
+       return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput {
+       s.Expiration = &v
+       return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput {
+       s.Expires = &v
+       return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput {
+       s.LastModified = &v
+       return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput {
+       s.Metadata = v
+       return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput {
+       s.MissingMeta = &v
+       return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput {
+       s.PartsCount = &v
+       return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput {
+       s.ReplicationStatus = &v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput {
+       s.Restore = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput {
+       s.StorageClass = &v
+       return s
+}
+
+// SetTagCount sets the TagCount field's value.
+func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput {
+       s.TagCount = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput {
+       s.VersionId = &v
+       return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput {
+       s.WebsiteRedirectLocation = &v
+       return s
+}
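+
+// Illustrative note (not part of the generated API): Body is an
+// io.ReadCloser rather than a byte slice, so large objects stream from the
+// network instead of being buffered in memory; the caller owns the body and
+// must close it. A minimal sketch, assuming "out" is a *GetObjectOutput
+// obtained from a successful GetObject call:
+//
+//	defer out.Body.Close()
+//	data, err := ioutil.ReadAll(out.Body) // requires "io/ioutil"
+//	if err == nil {
+//		fmt.Println(len(data), "bytes read")
+//	}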
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest
+type GetObjectTaggingInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectTaggingInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTaggingInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectTaggingInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput {
+       s.Key = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput
+type GetObjectTaggingOutput struct {
+       _ struct{} `type:"structure"`
+
+       // TagSet is a required field
+       TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+
+       VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectTaggingOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTaggingOutput) GoString() string {
+       return s.String()
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput {
+       s.TagSet = v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput {
+       s.VersionId = &v
+       return s
+}
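+
+// Illustrative usage sketch (not part of the generated API): listing the
+// tags on an object, assuming an existing *S3 client "svc"; bucket and key
+// names are hypothetical.
+//
+//	out, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("example-key"),
+//	})
+//	if err == nil {
+//		for _, tag := range out.TagSet {
+//			fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
+//		}
+//	}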
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest
+type GetObjectTorrentInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Confirms that the requester knows that they will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s GetObjectTorrentInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTorrentInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectTorrentInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput {
+       s.Key = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput
+type GetObjectTorrentOutput struct {
+       _ struct{} `type:"structure" payload:"Body"`
+
+       Body io.ReadCloser `type:"blob"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s GetObjectTorrentOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTorrentOutput) GoString() string {
+       return s.String()
+}
+
+// SetBody sets the Body field's value.
+func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput {
+       s.Body = v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters
+type GlacierJobParameters struct {
+       _ struct{} `type:"structure"`
+
+       // Glacier retrieval tier at which the restore will be processed.
+       //
+       // Tier is a required field
+       Tier *string `type:"string" required:"true" enum:"Tier"`
+}
+
+// String returns the string representation
+func (s GlacierJobParameters) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GlacierJobParameters) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GlacierJobParameters) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"}
+       if s.Tier == nil {
+               invalidParams.Add(request.NewErrParamRequired("Tier"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetTier sets the Tier field's value.
+func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
+       s.Tier = &v
+       return s
+}
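+
+// Illustrative usage sketch (not part of the generated API): these
+// parameters are not sent on their own; they travel inside a RestoreRequest
+// passed to RestoreObject. Assumes an existing *S3 client "svc"; bucket and
+// key names are hypothetical.
+//
+//	_, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//		Bucket: aws.String("example-bucket"),
+//		Key:    aws.String("archived-key"),
+//		RestoreRequest: &s3.RestoreRequest{
+//			Days: aws.Int64(1),
+//			GlacierJobParameters: &s3.GlacierJobParameters{
+//				Tier: aws.String(s3.TierStandard),
+//			},
+//		},
+//	})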
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant
+type Grant struct {
+       _ struct{} `type:"structure"`
+
+       Grantee *Grantee `type:"structure"`
+
+       // Specifies the permission given to the grantee.
+       Permission *string `type:"string" enum:"Permission"`
+}
+
+// String returns the string representation
+func (s Grant) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Grant) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grant) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "Grant"}
+       if s.Grantee != nil {
+               if err := s.Grantee.Validate(); err != nil {
+                       invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetGrantee sets the Grantee field's value.
+func (s *Grant) SetGrantee(v *Grantee) *Grant {
+       s.Grantee = v
+       return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *Grant) SetPermission(v string) *Grant {
+       s.Permission = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee
+type Grantee struct {
+       _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+
+       // Screen name of the grantee.
+       DisplayName *string `type:"string"`
+
+       // Email address of the grantee.
+       EmailAddress *string `type:"string"`
+
+       // The canonical user ID of the grantee.
+       ID *string `type:"string"`
+
+       // Type of grantee.
+       //
+       // Type is a required field
+       Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"`
+
+       // URI of the grantee group.
+       URI *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Grantee) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Grantee) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grantee) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "Grantee"}
+       if s.Type == nil {
+               invalidParams.Add(request.NewErrParamRequired("Type"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Grantee) SetDisplayName(v string) *Grantee {
+       s.DisplayName = &v
+       return s
+}
+
+// SetEmailAddress sets the EmailAddress field's value.
+func (s *Grantee) SetEmailAddress(v string) *Grantee {
+       s.EmailAddress = &v
+       return s
+}
+
+// SetID sets the ID field's value.
+func (s *Grantee) SetID(v string) *Grantee {
+       s.ID = &v
+       return s
+}
+
+// SetType sets the Type field's value.
+func (s *Grantee) SetType(v string) *Grantee {
+       s.Type = &v
+       return s
+}
+
+// SetURI sets the URI field's value.
+func (s *Grantee) SetURI(v string) *Grantee {
+       s.URI = &v
+       return s
+}
+
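+// Illustrative sketch (not part of the generated API): Grant.Validate
+// delegates to the nested Grantee, so a Grantee missing its required Type
+// surfaces as a nested parameter error. The canonical ID is a placeholder;
+// "CanonicalUser" and "READ" come from the Type and Permission enums.
+func exampleGrantValidation() error {
+       grantee := (&Grantee{}).
+               SetType("CanonicalUser").
+               SetID("example-canonical-user-id")
+       g := (&Grant{}).SetGrantee(grantee).SetPermission("READ")
+       return g.Validate()
+}
+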
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest
+type HeadBucketInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s HeadBucketInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadBucketInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
+       s.Bucket = &v
+       return s
+}
+
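+// Illustrative sketch (not part of the generated API): HeadBucketInput
+// carries only the required bucket name and is typically used to probe
+// whether a bucket exists and is accessible. The name is a placeholder.
+func exampleHeadBucketInput() error {
+       in := (&HeadBucketInput{}).SetBucket("example-bucket")
+       return in.Validate()
+}
+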
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput
+type HeadBucketOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s HeadBucketOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest
+type HeadObjectInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Return the object only if its entity tag (ETag) is the same as the one specified;
+       // otherwise return a 412 (precondition failed).
+       IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+       // Return the object only if it has been modified since the specified time;
+       // otherwise return a 304 (not modified).
+       IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Return the object only if its entity tag (ETag) is different from the one
+       // specified; otherwise return a 304 (not modified).
+       IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+       // Return the object only if it has not been modified since the specified time;
+       // otherwise return a 412 (precondition failed).
+       IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Part number of the object being read. This is a positive integer between
+       // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified.
+       // Useful for querying the size of the part and the number of parts in this
+       // object.
+       PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"`
+
+       // Downloads the specified range bytes of an object. For more information about
+       // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+       Range *string `location:"header" locationName:"Range" type:"string"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+       // data. This value is used to store the object and then it is discarded; Amazon
+       // does not store the encryption key. The key must be appropriate for use with
+       // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+       // header.
+       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+       // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+       // Amazon S3 uses this header for a message integrity check to ensure the encryption
+       // key was transmitted without error.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // VersionId used to reference a specific version of the object.
+       VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetIfMatch sets the IfMatch field's value.
+func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
+       s.IfMatch = &v
+       return s
+}
+
+// SetIfModifiedSince sets the IfModifiedSince field's value.
+func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput {
+       s.IfModifiedSince = &v
+       return s
+}
+
+// SetIfNoneMatch sets the IfNoneMatch field's value.
+func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput {
+       s.IfNoneMatch = &v
+       return s
+}
+
+// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value.
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput {
+       s.IfUnmodifiedSince = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput {
+       s.Key = &v
+       return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput {
+       s.PartNumber = &v
+       return s
+}
+
+// SetRange sets the Range field's value.
+func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput {
+       s.Range = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput {
+       s.SSECustomerKey = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput {
+       s.VersionId = &v
+       return s
+}
+
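+// Illustrative sketch (not part of the generated API): a conditional,
+// ranged HEAD request. The ETag and byte range are placeholders; Validate
+// only enforces the required Bucket and Key, not the header formats.
+func exampleHeadObjectInput() error {
+       in := (&HeadObjectInput{}).
+               SetBucket("example-bucket").
+               SetKey("example-key").
+               SetIfNoneMatch(`"9b2cf535f27731c974343645a3985328"`).
+               SetRange("bytes=0-1023")
+       return in.Validate()
+}
+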
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput
+type HeadObjectOutput struct {
+       _ struct{} `type:"structure"`
+
+       AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+       // Specifies caching behavior along the request/reply chain.
+       CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+       // Specifies presentational information for the object.
+       ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+       // Specifies what content encodings have been applied to the object and thus
+       // what decoding mechanisms must be applied to obtain the media-type referenced
+       // by the Content-Type header field.
+       ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+       // The language the content is in.
+       ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+       // Size of the body in bytes.
+       ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+       // A standard MIME type describing the format of the object data.
+       ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+       // Specifies whether the object retrieved was (true) or was not (false) a Delete
+       // Marker. If false, this response header does not appear in the response.
+       DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+       // An ETag is an opaque identifier assigned by a web server to a specific version
+       // of a resource found at a URL.
+       ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+       // If the object expiration is configured (see PUT Bucket lifecycle), the response
+       // includes this header. It includes the expiry-date and rule-id key value pairs
+       // providing object expiration information. The value of the rule-id is URL
+       // encoded.
+       Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+       // The date and time at which the object is no longer cacheable.
+       Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+       // Last modified date of the object.
+       LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+       // A map of metadata to store with the object in S3.
+       Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+       // This is set to the number of metadata entries not returned in x-amz-meta
+       // headers. This can happen if you create metadata using an API like SOAP that
+       // supports more flexible metadata than the REST API. For example, using SOAP,
+       // you can create metadata whose values are not legal HTTP headers.
+       MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+       // The count of parts this object has.
+       PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"`
+
+       ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // Provides information about object restoration operation and expiration time
+       // of the restored object copy.
+       Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header confirming the encryption algorithm
+       // used.
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header to provide round trip message integrity
+       // verification of the customer-provided encryption key.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // If present, specifies the ID of the AWS Key Management Service (KMS) master
+       // encryption key that was used for the object.
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The Server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+       StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+       // Version of the object.
+       VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+       // If the bucket is configured as a website, redirects requests for this object
+       // to another object in the same bucket or to an external URL. Amazon S3 stores
+       // the value of this header in the object metadata.
+       WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectOutput) GoString() string {
+       return s.String()
+}
+
+// SetAcceptRanges sets the AcceptRanges field's value.
+func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput {
+       s.AcceptRanges = &v
+       return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput {
+       s.CacheControl = &v
+       return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput {
+       s.ContentDisposition = &v
+       return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput {
+       s.ContentEncoding = &v
+       return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput {
+       s.ContentLanguage = &v
+       return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput {
+       s.ContentLength = &v
+       return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput {
+       s.ContentType = &v
+       return s
+}
+
+// SetDeleteMarker sets the DeleteMarker field's value.
+func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput {
+       s.DeleteMarker = &v
+       return s
+}
+
+// SetETag sets the ETag field's value.
+func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput {
+       s.ETag = &v
+       return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput {
+       s.Expiration = &v
+       return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput {
+       s.Expires = &v
+       return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput {
+       s.LastModified = &v
+       return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput {
+       s.Metadata = v
+       return s
+}
+
+// SetMissingMeta sets the MissingMeta field's value.
+func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput {
+       s.MissingMeta = &v
+       return s
+}
+
+// SetPartsCount sets the PartsCount field's value.
+func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput {
+       s.PartsCount = &v
+       return s
+}
+
+// SetReplicationStatus sets the ReplicationStatus field's value.
+func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput {
+       s.ReplicationStatus = &v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetRestore sets the Restore field's value.
+func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput {
+       s.Restore = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput {
+       s.StorageClass = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput {
+       s.VersionId = &v
+       return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput {
+       s.WebsiteRedirectLocation = &v
+       return s
+}
+
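+// Illustrative sketch (not part of the generated API): user metadata is
+// returned as a map of *string values keyed without the x-amz-meta- prefix,
+// so entries need a nil check before dereferencing.
+func exampleReadUserMetadata(out *HeadObjectOutput, key string) string {
+       if v, ok := out.Metadata[key]; ok && v != nil {
+               return *v
+       }
+       return ""
+}
+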
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument
+type IndexDocument struct {
+       _ struct{} `type:"structure"`
+
+       // A suffix that is appended to a request that is for a directory on the website
+       // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/
+       // the data that is returned will be for the object with the key name images/index.html)
+       // The suffix must not be empty and must not include a slash character.
+       //
+       // Suffix is a required field
+       Suffix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s IndexDocument) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IndexDocument) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IndexDocument) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "IndexDocument"}
+       if s.Suffix == nil {
+               invalidParams.Add(request.NewErrParamRequired("Suffix"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetSuffix sets the Suffix field's value.
+func (s *IndexDocument) SetSuffix(v string) *IndexDocument {
+       s.Suffix = &v
+       return s
+}
+
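+// Illustrative sketch (not part of the generated API): per the comment
+// above, the suffix must be non-empty and must not contain a slash;
+// "index.html" is the conventional choice for website configurations.
+func exampleIndexDocument() error {
+       d := (&IndexDocument{}).SetSuffix("index.html")
+       return d.Validate()
+}
+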
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator
+type Initiator struct {
+       _ struct{} `type:"structure"`
+
+       // Name of the Principal.
+       DisplayName *string `type:"string"`
+
+       // If the principal is an AWS account, it provides the Canonical User ID. If
+       // the principal is an IAM User, it provides a user ARN value.
+       ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Initiator) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Initiator) GoString() string {
+       return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Initiator) SetDisplayName(v string) *Initiator {
+       s.DisplayName = &v
+       return s
+}
+
+// SetID sets the ID field's value.
+func (s *Initiator) SetID(v string) *Initiator {
+       s.ID = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration
+type InventoryConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Contains information about where to publish the inventory results.
+       //
+       // Destination is a required field
+       Destination *InventoryDestination `type:"structure" required:"true"`
+
+       // Specifies an inventory filter. The inventory only includes objects that meet
+       // the filter's criteria.
+       Filter *InventoryFilter `type:"structure"`
+
+       // The ID used to identify the inventory configuration.
+       //
+       // Id is a required field
+       Id *string `type:"string" required:"true"`
+
+       // Specifies which object version(s) to include in the inventory results.
+       //
+       // IncludedObjectVersions is a required field
+       IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"`
+
+       // Specifies whether the inventory is enabled or disabled.
+       //
+       // IsEnabled is a required field
+       IsEnabled *bool `type:"boolean" required:"true"`
+
+       // Contains the optional fields that are included in the inventory results.
+       OptionalFields []*string `locationNameList:"Field" type:"list"`
+
+       // Specifies the schedule for generating inventory results.
+       //
+       // Schedule is a required field
+       Schedule *InventorySchedule `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"}
+       if s.Destination == nil {
+               invalidParams.Add(request.NewErrParamRequired("Destination"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+       if s.IncludedObjectVersions == nil {
+               invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions"))
+       }
+       if s.IsEnabled == nil {
+               invalidParams.Add(request.NewErrParamRequired("IsEnabled"))
+       }
+       if s.Schedule == nil {
+               invalidParams.Add(request.NewErrParamRequired("Schedule"))
+       }
+       if s.Destination != nil {
+               if err := s.Destination.Validate(); err != nil {
+                       invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+               }
+       }
+       if s.Filter != nil {
+               if err := s.Filter.Validate(); err != nil {
+                       invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+               }
+       }
+       if s.Schedule != nil {
+               if err := s.Schedule.Validate(); err != nil {
+                       invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration {
+       s.Destination = v
+       return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration {
+       s.Filter = v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration {
+       s.Id = &v
+       return s
+}
+
+// SetIncludedObjectVersions sets the IncludedObjectVersions field's value.
+func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration {
+       s.IncludedObjectVersions = &v
+       return s
+}
+
+// SetIsEnabled sets the IsEnabled field's value.
+func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration {
+       s.IsEnabled = &v
+       return s
+}
+
+// SetOptionalFields sets the OptionalFields field's value.
+func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration {
+       s.OptionalFields = v
+       return s
+}
+
+// SetSchedule sets the Schedule field's value.
+func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration {
+       s.Schedule = v
+       return s
+}
+
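+// Illustrative sketch (not part of the generated API): wiring the required
+// pieces of an inventory configuration together. The destination bucket ARN
+// and configuration ID are placeholders; "CSV", "All", and "Daily" are
+// values from the InventoryFormat, InventoryIncludedObjectVersions, and
+// InventoryFrequency enums.
+func exampleInventoryConfiguration() error {
+       dest := (&InventoryDestination{}).SetS3BucketDestination(
+               (&InventoryS3BucketDestination{}).
+                       SetBucket("arn:aws:s3:::example-inventory-bucket").
+                       SetFormat("CSV"))
+       cfg := (&InventoryConfiguration{}).
+               SetDestination(dest).
+               SetId("example-inventory-id").
+               SetIncludedObjectVersions("All").
+               SetIsEnabled(true).
+               SetSchedule((&InventorySchedule{}).SetFrequency("Daily"))
+       return cfg.Validate()
+}
+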
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination
+type InventoryDestination struct {
+       _ struct{} `type:"structure"`
+
+       // Contains the bucket name, file format, bucket owner (optional), and prefix
+       // (optional) where inventory results are published.
+       //
+       // S3BucketDestination is a required field
+       S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryDestination) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryDestination) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryDestination) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"}
+       if s.S3BucketDestination == nil {
+               invalidParams.Add(request.NewErrParamRequired("S3BucketDestination"))
+       }
+       if s.S3BucketDestination != nil {
+               if err := s.S3BucketDestination.Validate(); err != nil {
+                       invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetS3BucketDestination sets the S3BucketDestination field's value.
+func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination {
+       s.S3BucketDestination = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter
+type InventoryFilter struct {
+       _ struct{} `type:"structure"`
+
+       // The prefix that an object must have to be included in the inventory results.
+       //
+       // Prefix is a required field
+       Prefix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s InventoryFilter) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryFilter) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryFilter) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"}
+       if s.Prefix == nil {
+               invalidParams.Add(request.NewErrParamRequired("Prefix"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter {
+       s.Prefix = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination
+type InventoryS3BucketDestination struct {
+       _ struct{} `type:"structure"`
+
+       // The ID of the account that owns the destination bucket.
+       AccountId *string `type:"string"`
+
+       // The Amazon Resource Name (ARN) of the bucket where inventory results will
+       // be published.
+       //
+       // Bucket is a required field
+       Bucket *string `type:"string" required:"true"`
+
+       // Specifies the output format of the inventory results.
+       //
+       // Format is a required field
+       Format *string `type:"string" required:"true" enum:"InventoryFormat"`
+
+       // The prefix that is prepended to all inventory results.
+       Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s InventoryS3BucketDestination) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventoryS3BucketDestination) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventoryS3BucketDestination) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Format == nil {
+               invalidParams.Add(request.NewErrParamRequired("Format"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAccountId sets the AccountId field's value.
+func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination {
+       s.AccountId = &v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination {
+       s.Bucket = &v
+       return s
+}
+
+// SetFormat sets the Format field's value.
+func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination {
+       s.Format = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination {
+       s.Prefix = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule
+type InventorySchedule struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies how frequently inventory results are produced.
+       //
+       // Frequency is a required field
+       Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"`
+}
+
+// String returns the string representation
+func (s InventorySchedule) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s InventorySchedule) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *InventorySchedule) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"}
+       if s.Frequency == nil {
+               invalidParams.Add(request.NewErrParamRequired("Frequency"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetFrequency sets the Frequency field's value.
+func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule {
+       s.Frequency = &v
+       return s
+}
+
+// Container for object key name prefix and suffix filtering rules.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter
+type KeyFilter struct {
+       _ struct{} `type:"structure"`
+
+       // A list of containers for key-value pairs that define the criteria for the
+       // filter rule.
+       FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s KeyFilter) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeyFilter) GoString() string {
+       return s.String()
+}
+
+// SetFilterRules sets the FilterRules field's value.
+func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter {
+       s.FilterRules = v
+       return s
+}
+
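+// Illustrative sketch (not part of the generated API): a key filter that
+// matches objects under a placeholder prefix. FilterRule is defined earlier
+// in this file; "prefix" and "suffix" are the two supported rule names.
+func exampleKeyFilter() *KeyFilter {
+       rule := (&FilterRule{}).SetName("prefix").SetValue("images/")
+       return (&KeyFilter{}).SetFilterRules([]*FilterRule{rule})
+}
+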
+// Container for specifying the AWS Lambda notification configuration.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration
+type LambdaFunctionConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Events is a required field
+       Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+       // Container for object key name filtering rules. For information about key
+       // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+       Filter *NotificationConfigurationFilter `type:"structure"`
+
+       // Optional unique identifier for configurations in a notification configuration.
+       // If you don't provide one, Amazon S3 will assign an ID.
+       Id *string `type:"string"`
+
+       // Lambda cloud function ARN that Amazon S3 can invoke when it detects events
+       // of the specified type.
+       //
+       // LambdaFunctionArn is a required field
+       LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LambdaFunctionConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LambdaFunctionConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LambdaFunctionConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"}
+       if s.Events == nil {
+               invalidParams.Add(request.NewErrParamRequired("Events"))
+       }
+       if s.LambdaFunctionArn == nil {
+               invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration {
+       s.Events = v
+       return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration {
+       s.Filter = v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration {
+       s.Id = &v
+       return s
+}
+
+// SetLambdaFunctionArn sets the LambdaFunctionArn field's value.
+func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration {
+       s.LambdaFunctionArn = &v
+       return s
+}
+
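+// Illustrative sketch (not part of the generated API): a Lambda notification
+// for object-created events. The function ARN is a placeholder and
+// "s3:ObjectCreated:*" is one of the supported event names.
+func exampleLambdaFunctionConfiguration() error {
+       event := "s3:ObjectCreated:*"
+       c := (&LambdaFunctionConfiguration{}).
+               SetEvents([]*string{&event}).
+               SetLambdaFunctionArn("arn:aws:lambda:us-east-1:123456789012:function:example")
+       return c.Validate()
+}
+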
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration
+type LifecycleConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Rules is a required field
+       Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"}
+       if s.Rules == nil {
+               invalidParams.Add(request.NewErrParamRequired("Rules"))
+       }
+       if s.Rules != nil {
+               for i, v := range s.Rules {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetRules sets the Rules field's value.
+func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration {
+       s.Rules = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration
+type LifecycleExpiration struct {
+       _ struct{} `type:"structure"`
+
+       // Indicates at what date the object is to be moved or deleted. Should be in
+       // GMT ISO 8601 Format.
+       Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+       // Indicates the lifetime, in days, of the objects that are subject to the rule.
+       // The value must be a non-zero positive integer.
+       Days *int64 `type:"integer"`
+
+       // Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+       // versions. If set to true, the delete marker will be expired; if set to false,
+       // the policy takes no action. This cannot be specified with Days or Date in
+       // a Lifecycle Expiration Policy.
+       ExpiredObjectDeleteMarker *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s LifecycleExpiration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleExpiration) GoString() string {
+       return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration {
+       s.Date = &v
+       return s
+}
+
+// SetDays sets the Days field's value.
+func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration {
+       s.Days = &v
+       return s
+}
+
+// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value.
+func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration {
+       s.ExpiredObjectDeleteMarker = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule
+type LifecycleRule struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies the days since the initiation of an Incomplete Multipart Upload
+       // that Lifecycle will wait before permanently removing all parts of the upload.
+       AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+       Expiration *LifecycleExpiration `type:"structure"`
+
+       // The Filter is used to identify objects that a Lifecycle Rule applies to.
+       // A Filter must have exactly one of Prefix, Tag, or And specified.
+       Filter *LifecycleRuleFilter `type:"structure"`
+
+       // Unique identifier for the rule. The value cannot be longer than 255 characters.
+       ID *string `type:"string"`
+
+       // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+       // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+       // configuration action on a bucket that has versioning enabled (or suspended)
+       // to request that Amazon S3 delete noncurrent object versions at a specific
+       // period in the object's lifetime.
+       NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+       NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
+
+       // Prefix identifying one or more objects to which the rule applies. This is
+       // deprecated; use Filter instead.
+       Prefix *string `deprecated:"true" type:"string"`
+
+       // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+       // is not currently being applied.
+       //
+       // Status is a required field
+       Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+       Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleRule) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRule) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRule) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"}
+       if s.Status == nil {
+               invalidParams.Add(request.NewErrParamRequired("Status"))
+       }
+       if s.Filter != nil {
+               if err := s.Filter.Validate(); err != nil {
+                       invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule {
+       s.AbortIncompleteMultipartUpload = v
+       return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule {
+       s.Expiration = v
+       return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule {
+       s.Filter = v
+       return s
+}
+
+// SetID sets the ID field's value.
+func (s *LifecycleRule) SetID(v string) *LifecycleRule {
+       s.ID = &v
+       return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule {
+       s.NoncurrentVersionExpiration = v
+       return s
+}
+
+// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value.
+func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule {
+       s.NoncurrentVersionTransitions = v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule {
+       s.Prefix = &v
+       return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *LifecycleRule) SetStatus(v string) *LifecycleRule {
+       s.Status = &v
+       return s
+}
+
+// SetTransitions sets the Transitions field's value.
+func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule {
+       s.Transitions = v
+       return s
+}
+
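+// Illustrative sketch (not part of the generated API): a minimal enabled
+// rule that expires current versions after 30 days under a placeholder
+// prefix. Validate enforces only the required Status and, when present,
+// the nested Filter.
+func exampleLifecycleRule() error {
+       r := (&LifecycleRule{}).
+               SetStatus("Enabled").
+               SetFilter((&LifecycleRuleFilter{}).SetPrefix("logs/")).
+               SetExpiration((&LifecycleExpiration{}).SetDays(30))
+       return r.Validate()
+}
+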
+// This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+// more predicates. The Lifecycle Rule will apply to any object matching all
+// of the predicates configured inside the And operator.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator
+type LifecycleRuleAndOperator struct {
+       _ struct{} `type:"structure"`
+
+       Prefix *string `type:"string"`
+
+       // All of these tags must exist in the object's tag set in order for the rule
+       // to apply.
+       Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleRuleAndOperator) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRuleAndOperator) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRuleAndOperator) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"}
+       if s.Tags != nil {
+               for i, v := range s.Tags {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator {
+       s.Prefix = &v
+       return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator {
+       s.Tags = v
+       return s
+}
+
+// The Filter is used to identify objects that a Lifecycle Rule applies to.
+// A Filter must have exactly one of Prefix, Tag, or And specified.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter
+type LifecycleRuleFilter struct {
+       _ struct{} `type:"structure"`
+
+       // This is used in a Lifecycle Rule Filter to apply a logical AND to two or
+       // more predicates. The Lifecycle Rule will apply to any object matching all
+       // of the predicates configured inside the And operator.
+       And *LifecycleRuleAndOperator `type:"structure"`
+
+       // Prefix identifying one or more objects to which the rule applies.
+       Prefix *string `type:"string"`
+
+       // This tag must exist in the object's tag set in order for the rule to apply.
+       Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s LifecycleRuleFilter) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRuleFilter) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRuleFilter) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"}
+       if s.And != nil {
+               if err := s.And.Validate(); err != nil {
+                       invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+               }
+       }
+       if s.Tag != nil {
+               if err := s.Tag.Validate(); err != nil {
+                       invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter {
+       s.And = v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter {
+       s.Prefix = &v
+       return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter {
+       s.Tag = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest
+type ListBucketAnalyticsConfigurationsInput struct {
+       _ struct{} `type:"structure"`
+
+       // The name of the bucket from which analytics configurations are retrieved.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The ContinuationToken that marks the place in the listing where this request
+       // should begin.
+       ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketAnalyticsConfigurationsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketAnalyticsConfigurationsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketAnalyticsConfigurationsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput {
+       s.ContinuationToken = &v
+       return s
+}
+
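+// Illustrative sketch (not part of the generated API): these listings page
+// via continuation tokens. A caller copies NextContinuationToken from a
+// truncated response into the next request; the bucket name is a placeholder,
+// and the inventory listing below pages the same way.
+func exampleListAnalyticsPage(nextToken string) *ListBucketAnalyticsConfigurationsInput {
+       in := (&ListBucketAnalyticsConfigurationsInput{}).SetBucket("example-bucket")
+       if nextToken != "" {
+               in.SetContinuationToken(nextToken)
+       }
+       return in
+}
+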
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput
+type ListBucketAnalyticsConfigurationsOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The list of analytics configurations for a bucket.
+       AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"`
+
+       // The ContinuationToken that represents where this request began.
+       ContinuationToken *string `type:"string"`
+
+       // Indicates whether the returned list of analytics configurations is complete.
+       // A value of true indicates that the list is not complete and the NextContinuationToken
+       // will be provided for a subsequent request.
+       IsTruncated *bool `type:"boolean"`
+
+       // NextContinuationToken is sent when IsTruncated is true, which indicates that
+       // there are more analytics configurations to list. The next request must include
+       // this NextContinuationToken. The token is obfuscated and is not a usable value.
+       NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketAnalyticsConfigurationsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketAnalyticsConfigurationsOutput) GoString() string {
+       return s.String()
+}
+
+// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput {
+       s.AnalyticsConfigurationList = v
+       return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
+       s.ContinuationToken = &v
+       return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput {
+       s.IsTruncated = &v
+       return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput {
+       s.NextContinuationToken = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest
+type ListBucketInventoryConfigurationsInput struct {
+       _ struct{} `type:"structure"`
+
+       // The name of the bucket containing the inventory configurations to retrieve.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The marker used to continue an inventory configuration listing that has been
+       // truncated. Use the NextContinuationToken from a previously truncated list
+       // response to continue the listing. The continuation token is an opaque value
+       // that Amazon S3 understands.
+       ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketInventoryConfigurationsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketInventoryConfigurationsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketInventoryConfigurationsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput {
+       s.ContinuationToken = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput
+type ListBucketInventoryConfigurationsOutput struct {
+       _ struct{} `type:"structure"`
+
+       // If sent in the request, the marker that is used as a starting point for this
+       // inventory configuration list response.
+       ContinuationToken *string `type:"string"`
+
+       // The list of inventory configurations for a bucket.
+       InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"`
+
+       // Indicates whether the returned list of inventory configurations is truncated
+       // in this response. A value of true indicates that the list is truncated.
+       IsTruncated *bool `type:"boolean"`
+
+       // The marker used to continue this inventory configuration listing. Use the
+       // NextContinuationToken from this response to continue the listing in a subsequent
+       // request. The continuation token is an opaque value that Amazon S3 understands.
+       NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketInventoryConfigurationsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketInventoryConfigurationsOutput) GoString() string {
+       return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+       s.ContinuationToken = &v
+       return s
+}
+
+// SetInventoryConfigurationList sets the InventoryConfigurationList field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput {
+       s.InventoryConfigurationList = v
+       return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput {
+       s.IsTruncated = &v
+       return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput {
+       s.NextContinuationToken = &v
+       return s
+}
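+
+// inventoryConfigurationsPage is an editor-added usage sketch, not part of
+// the generated SDK: it fetches one page of inventory configurations,
+// resuming from an optional NextContinuationToken returned by a previous
+// call. Draining every page follows the same loop shown above for analytics
+// configurations.
+func inventoryConfigurationsPage(svc *S3, bucket string, token *string) (*ListBucketInventoryConfigurationsOutput, error) {
+       input := &ListBucketInventoryConfigurationsInput{}
+       input.SetBucket(bucket)
+       if token != nil {
+               // Resume a truncated listing with the opaque token.
+               input.SetContinuationToken(*token)
+       }
+       return svc.ListBucketInventoryConfigurations(input)
+}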
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest
+type ListBucketMetricsConfigurationsInput struct {
+       _ struct{} `type:"structure"`
+
+       // The name of the bucket containing the metrics configurations to retrieve.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The marker that is used to continue a metrics configuration listing that
+       // has been truncated. Use the NextContinuationToken from a previously truncated
+       // list response to continue the listing. The continuation token is an opaque
+       // value that Amazon S3 understands.
+       ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListBucketMetricsConfigurationsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput {
+       s.ContinuationToken = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput
+type ListBucketMetricsConfigurationsOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The marker that is used as a starting point for this metrics configuration
+       // list response. This value is present if it was sent in the request.
+       ContinuationToken *string `type:"string"`
+
+       // Indicates whether the returned list of metrics configurations is truncated.
+       // A value of true indicates that the list is not complete and that the
+       // NextContinuationToken will be provided for a subsequent request.
+       IsTruncated *bool `type:"boolean"`
+
+       // The list of metrics configurations for a bucket.
+       MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"`
+
+       // The marker used to continue a metrics configuration listing that has been
+       // truncated. Use the NextContinuationToken from a previously truncated list
+       // response to continue the listing. The continuation token is an opaque value
+       // that Amazon S3 understands.
+       NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) GoString() string {
+       return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+       s.ContinuationToken = &v
+       return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput {
+       s.IsTruncated = &v
+       return s
+}
+
+// SetMetricsConfigurationList sets the MetricsConfigurationList field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput {
+       s.MetricsConfigurationList = v
+       return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+       s.NextContinuationToken = &v
+       return s
+}
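+
+// metricsConfigurationIDs is an editor-added usage sketch, not part of the
+// generated SDK: it collects the Id of every metrics configuration on a
+// bucket, following NextContinuationToken until IsTruncated is false.
+func metricsConfigurationIDs(svc *S3, bucket string) ([]string, error) {
+       input := &ListBucketMetricsConfigurationsInput{}
+       input.SetBucket(bucket)
+       var ids []string
+       for {
+               out, err := svc.ListBucketMetricsConfigurations(input)
+               if err != nil {
+                       return nil, err
+               }
+               for _, mc := range out.MetricsConfigurationList {
+                       if mc.Id != nil {
+                               ids = append(ids, *mc.Id)
+                       }
+               }
+               if out.IsTruncated == nil || !*out.IsTruncated {
+                       return ids, nil
+               }
+               input.SetContinuationToken(*out.NextContinuationToken)
+       }
+}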
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput
+type ListBucketsInput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsInput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput
+type ListBucketsOutput struct {
+       _ struct{} `type:"structure"`
+
+       Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+       Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsOutput) GoString() string {
+       return s.String()
+}
+
+// SetBuckets sets the Buckets field's value.
+func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput {
+       s.Buckets = v
+       return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput {
+       s.Owner = v
+       return s
+}
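+
+// bucketNames is an editor-added usage sketch, not part of the generated SDK:
+// ListBuckets takes no parameters and returns every bucket owned by the
+// authenticated sender of the request, so no pagination is involved.
+func bucketNames(svc *S3) ([]string, error) {
+       out, err := svc.ListBuckets(&ListBucketsInput{})
+       if err != nil {
+               return nil, err
+       }
+       names := make([]string, 0, len(out.Buckets))
+       for _, b := range out.Buckets {
+               if b.Name != nil {
+                       names = append(names, *b.Name)
+               }
+       }
+       return names, nil
+}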
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest
+type ListMultipartUploadsInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Character you use to group keys.
+       Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+       // Requests Amazon S3 to encode the object keys in the response and specifies
+       // the encoding method to use. An object key may contain any Unicode character;
+       // however, an XML 1.0 parser cannot parse some characters, such as those
+       // with an ASCII value from 0 to 10. For characters that are not supported
+       // in XML 1.0, you can add this parameter to request that Amazon S3 encode
+       // the keys in the response.
+       EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+       // Together with upload-id-marker, this parameter specifies the multipart upload
+       // after which listing should begin.
+       KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+       // Sets the maximum number of multipart uploads, from 1 to 1,000, to return
+       // in the response body. 1,000 is the maximum number of uploads that can be
+       // returned in a response.
+       MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"`
+
+       // Lists in-progress uploads only for those keys that begin with the specified
+       // prefix.
+       Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+       // Together with key-marker, specifies the multipart upload after which listing
+       // should begin. If key-marker is not specified, the upload-id-marker parameter
+       // is ignored.
+       UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMultipartUploadsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput {
+       s.Delimiter = &v
+       return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput {
+       s.EncodingType = &v
+       return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput {
+       s.KeyMarker = &v
+       return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput {
+       s.MaxUploads = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput {
+       s.Prefix = &v
+       return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput {
+       s.UploadIdMarker = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput
+type ListMultipartUploadsOutput struct {
+       _ struct{} `type:"structure"`
+
+       // Name of the bucket to which the multipart upload was initiated.
+       Bucket *string `type:"string"`
+
+       CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+       Delimiter *string `type:"string"`
+
+       // Encoding type used by Amazon S3 to encode object keys in the response.
+       EncodingType *string `type:"string" enum:"EncodingType"`
+
+       // Indicates whether the returned list of multipart uploads is truncated. A
+       // value of true indicates that the list was truncated. The list can be truncated
+       // if the number of multipart uploads exceeds the limit allowed or specified
+       // by max uploads.
+       IsTruncated *bool `type:"boolean"`
+
+       // The key at or after which the listing began.
+       KeyMarker *string `type:"string"`
+
+       // Maximum number of multipart uploads that could have been included in the
+       // response.
+       MaxUploads *int64 `type:"integer"`
+
+       // When a list is truncated, this element specifies the value that should be
+       // used for the key-marker request parameter in a subsequent request.
+       NextKeyMarker *string `type:"string"`
+
+       // When a list is truncated, this element specifies the value that should be
+       // used for the upload-id-marker request parameter in a subsequent request.
+       NextUploadIdMarker *string `type:"string"`
+
+       // When a prefix is provided in the request, this field contains the specified
+       // prefix. The result contains only keys starting with the specified prefix.
+       Prefix *string `type:"string"`
+
+       // Upload ID after which listing began.
+       UploadIdMarker *string `type:"string"`
+
+       Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsOutput) GoString() string {
+       return s.String()
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput {
+       s.Bucket = &v
+       return s
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput {
+       s.CommonPrefixes = v
+       return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput {
+       s.Delimiter = &v
+       return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput {
+       s.EncodingType = &v
+       return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput {
+       s.IsTruncated = &v
+       return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput {
+       s.KeyMarker = &v
+       return s
+}
+
+// SetMaxUploads sets the MaxUploads field's value.
+func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput {
+       s.MaxUploads = &v
+       return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput {
+       s.NextKeyMarker = &v
+       return s
+}
+
+// SetNextUploadIdMarker sets the NextUploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput {
+       s.NextUploadIdMarker = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput {
+       s.Prefix = &v
+       return s
+}
+
+// SetUploadIdMarker sets the UploadIdMarker field's value.
+func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput {
+       s.UploadIdMarker = &v
+       return s
+}
+
+// SetUploads sets the Uploads field's value.
+func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput {
+       s.Uploads = v
+       return s
+}
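+
+// allMultipartUploads is an editor-added usage sketch, not part of the
+// generated SDK: it relies on the generated ListMultipartUploadsPages
+// paginator, which advances key-marker and upload-id-marker internally using
+// NextKeyMarker and NextUploadIdMarker as documented above.
+func allMultipartUploads(svc *S3, bucket string) ([]*MultipartUpload, error) {
+       input := &ListMultipartUploadsInput{}
+       input.SetBucket(bucket)
+       var uploads []*MultipartUpload
+       err := svc.ListMultipartUploadsPages(input, func(page *ListMultipartUploadsOutput, lastPage bool) bool {
+               uploads = append(uploads, page.Uploads...)
+               return true // keep paging until the last page
+       })
+       return uploads, err
+}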
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest
+type ListObjectVersionsInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // A delimiter is a character you use to group keys.
+       Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+       // Requests Amazon S3 to encode the object keys in the response and specifies
+       // the encoding method to use. An object key may contain any Unicode character;
+       // however, an XML 1.0 parser cannot parse some characters, such as those
+       // with an ASCII value from 0 to 10. For characters that are not supported
+       // in XML 1.0, you can add this parameter to request that Amazon S3 encode
+       // the keys in the response.
+       EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+       // Specifies the key to start with when listing objects in a bucket.
+       KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+       // Sets the maximum number of keys returned in the response. The response might
+       // contain fewer keys but will never contain more.
+       MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+       // Limits the response to keys that begin with the specified prefix.
+       Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+       // Specifies the object version you want to start listing from.
+       VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectVersionsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectVersionsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectVersionsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
+       s.Delimiter = &v
+       return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput {
+       s.EncodingType = &v
+       return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput {
+       s.KeyMarker = &v
+       return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput {
+       s.MaxKeys = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput {
+       s.Prefix = &v
+       return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput {
+       s.VersionIdMarker = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput
+type ListObjectVersionsOutput struct {
+       _ struct{} `type:"structure"`
+
+       CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+       DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
+
+       Delimiter *string `type:"string"`
+
+       // Encoding type used by Amazon S3 to encode object keys in the response.
+       EncodingType *string `type:"string" enum:"EncodingType"`
+
+       // A flag that indicates whether or not Amazon S3 returned all of the results
+       // that satisfied the search criteria. If your results were truncated, you can
+       // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+       // response parameters as a starting place in another request to return the
+       // rest of the results.
+       IsTruncated *bool `type:"boolean"`
+
+       // Marks the last Key returned in a truncated response.
+       KeyMarker *string `type:"string"`
+
+       MaxKeys *int64 `type:"integer"`
+
+       Name *string `type:"string"`
+
+       // Use this value for the key marker request parameter in a subsequent request.
+       NextKeyMarker *string `type:"string"`
+
+       // Use this value for the next version id marker parameter in a subsequent request.
+       NextVersionIdMarker *string `type:"string"`
+
+       Prefix *string `type:"string"`
+
+       VersionIdMarker *string `type:"string"`
+
+       Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListObjectVersionsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectVersionsOutput) GoString() string {
+       return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput {
+       s.CommonPrefixes = v
+       return s
+}
+
+// SetDeleteMarkers sets the DeleteMarkers field's value.
+func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput {
+       s.DeleteMarkers = v
+       return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput {
+       s.Delimiter = &v
+       return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput {
+       s.EncodingType = &v
+       return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput {
+       s.IsTruncated = &v
+       return s
+}
+
+// SetKeyMarker sets the KeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput {
+       s.KeyMarker = &v
+       return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput {
+       s.MaxKeys = &v
+       return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput {
+       s.Name = &v
+       return s
+}
+
+// SetNextKeyMarker sets the NextKeyMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput {
+       s.NextKeyMarker = &v
+       return s
+}
+
+// SetNextVersionIdMarker sets the NextVersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput {
+       s.NextVersionIdMarker = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput {
+       s.Prefix = &v
+       return s
+}
+
+// SetVersionIdMarker sets the VersionIdMarker field's value.
+func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput {
+       s.VersionIdMarker = &v
+       return s
+}
+
+// SetVersions sets the Versions field's value.
+func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput {
+       s.Versions = v
+       return s
+}
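+
+// countObjectVersions is an editor-added usage sketch, not part of the
+// generated SDK: it uses the generated ListObjectVersionsPages paginator,
+// which continues truncated listings via NextKeyMarker and
+// NextVersionIdMarker, and tallies versions and delete markers separately.
+func countObjectVersions(svc *S3, bucket, prefix string) (versions, deleteMarkers int, err error) {
+       input := &ListObjectVersionsInput{}
+       input.SetBucket(bucket)
+       input.SetPrefix(prefix)
+       err = svc.ListObjectVersionsPages(input, func(page *ListObjectVersionsOutput, lastPage bool) bool {
+               versions += len(page.Versions)
+               deleteMarkers += len(page.DeleteMarkers)
+               return true
+       })
+       return versions, deleteMarkers, err
+}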
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest
+type ListObjectsInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // A delimiter is a character you use to group keys.
+       Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+       // Requests Amazon S3 to encode the object keys in the response and specifies
+       // the encoding method to use. An object key may contain any Unicode character;
+       // however, an XML 1.0 parser cannot parse some characters, such as those
+       // with an ASCII value from 0 to 10. For characters that are not supported
+       // in XML 1.0, you can add this parameter to request that Amazon S3 encode
+       // the keys in the response.
+       EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+       // Specifies the key to start with when listing objects in a bucket.
+       Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+       // Sets the maximum number of keys returned in the response. The response might
+       // contain fewer keys but will never contain more.
+       MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+       // Limits the response to keys that begin with the specified prefix.
+       Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // list objects request. Bucket owners need not specify this parameter in their
+       // requests.
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s ListObjectsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
+       s.Delimiter = &v
+       return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput {
+       s.EncodingType = &v
+       return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput {
+       s.Marker = &v
+       return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput {
+       s.MaxKeys = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput {
+       s.Prefix = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput
+type ListObjectsOutput struct {
+       _ struct{} `type:"structure"`
+
+       CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+       Contents []*Object `type:"list" flattened:"true"`
+
+       Delimiter *string `type:"string"`
+
+       // Encoding type used by Amazon S3 to encode object keys in the response.
+       EncodingType *string `type:"string" enum:"EncodingType"`
+
+       // A flag that indicates whether or not Amazon S3 returned all of the results
+       // that satisfied the search criteria.
+       IsTruncated *bool `type:"boolean"`
+
+       Marker *string `type:"string"`
+
+       MaxKeys *int64 `type:"integer"`
+
+       Name *string `type:"string"`
+
+       // When the response is truncated (the IsTruncated element value in the response
+       // is true), you can use the key name in this field as the marker in the subsequent
+       // request to get the next set of objects. Amazon S3 lists objects in alphabetical
+       // order. Note: this element is returned only if you have the delimiter request
+       // parameter specified. If the response does not include the NextMarker and it
+       // is truncated, you can use the value of the last Key in the response as the
+       // marker in the subsequent request to get the next set of object keys.
+       NextMarker *string `type:"string"`
+
+       Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsOutput) GoString() string {
+       return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput {
+       s.CommonPrefixes = v
+       return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput {
+       s.Contents = v
+       return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput {
+       s.Delimiter = &v
+       return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput {
+       s.EncodingType = &v
+       return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput {
+       s.IsTruncated = &v
+       return s
+}
+
+// SetMarker sets the Marker field's value.
+func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput {
+       s.Marker = &v
+       return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput {
+       s.MaxKeys = &v
+       return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput {
+       s.Name = &v
+       return s
+}
+
+// SetNextMarker sets the NextMarker field's value.
+func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput {
+       s.NextMarker = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput {
+       s.Prefix = &v
+       return s
+}
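+
+// listAllKeysV1 is an editor-added usage sketch, not part of the generated
+// SDK: it pages a V1 listing by hand to illustrate the NextMarker caveat
+// documented above. NextMarker is only returned when a delimiter is set, so
+// a truncated response without it falls back to the last Key of the page.
+func listAllKeysV1(svc *S3, bucket string) ([]string, error) {
+       input := &ListObjectsInput{}
+       input.SetBucket(bucket)
+       var keys []string
+       for {
+               out, err := svc.ListObjects(input)
+               if err != nil {
+                       return nil, err
+               }
+               for _, obj := range out.Contents {
+                       keys = append(keys, *obj.Key)
+               }
+               if out.IsTruncated == nil || !*out.IsTruncated {
+                       return keys, nil
+               }
+               if out.NextMarker != nil {
+                       input.SetMarker(*out.NextMarker)
+               } else if n := len(out.Contents); n > 0 {
+                       input.SetMarker(*out.Contents[n-1].Key)
+               } else {
+                       return keys, nil
+               }
+       }
+}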
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request
+type ListObjectsV2Input struct {
+       _ struct{} `type:"structure"`
+
+       // Name of the bucket to list.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // ContinuationToken indicates to Amazon S3 that the list is being continued
+       // on this bucket with a token. ContinuationToken is obfuscated and is not a
+       // real key.
+       ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+       // A delimiter is a character you use to group keys.
+       Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+       // Encoding type used by Amazon S3 to encode object keys in the response.
+       EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+       // The owner field is not present in listV2 responses by default. If you want
+       // the owner field returned with each key in the result, set the fetch-owner
+       // field to true.
+       FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+       // Sets the maximum number of keys returned in the response. The response might
+       // contain fewer keys but will never contain more.
+       MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+       // Limits the response to keys that begin with the specified prefix.
+       Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // list objects request in V2 style. Bucket owners need not specify this parameter
+       // in their requests.
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // StartAfter is where you want Amazon S3 to start listing from. Amazon S3
+       // starts listing after this specified key. StartAfter can be any key in the
+       // bucket.
+       StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Input) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Input) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsV2Input) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
+       s.Bucket = &v
+       return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
+       s.ContinuationToken = &v
+       return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input {
+       s.Delimiter = &v
+       return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input {
+       s.EncodingType = &v
+       return s
+}
+
+// SetFetchOwner sets the FetchOwner field's value.
+func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input {
+       s.FetchOwner = &v
+       return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input {
+       s.MaxKeys = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input {
+       s.Prefix = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input {
+       s.StartAfter = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output
+type ListObjectsV2Output struct {
+       _ struct{} `type:"structure"`
+
+       // CommonPrefixes contains all (if there are any) keys between Prefix and the
+       // next occurrence of the string specified by delimiter.
+       CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+       // Metadata about each object returned.
+       Contents []*Object `type:"list" flattened:"true"`
+
+       // ContinuationToken indicates to Amazon S3 that the list is being continued
+       // on this bucket with a token. ContinuationToken is obfuscated and is not a
+       // real key.
+       ContinuationToken *string `type:"string"`
+
+       // A delimiter is a character you use to group keys.
+       Delimiter *string `type:"string"`
+
+       // Encoding type used by Amazon S3 to encode object keys in the response.
+       EncodingType *string `type:"string" enum:"EncodingType"`
+
+       // A flag that indicates whether or not Amazon S3 returned all of the results
+       // that satisfied the search criteria.
+       IsTruncated *bool `type:"boolean"`
+
+       // KeyCount is the number of keys returned with this request. KeyCount will
+       // always be less than or equal to the MaxKeys field. For example, if you ask
+       // for 50 keys, your result will include 50 keys or fewer.
+       KeyCount *int64 `type:"integer"`
+
+       // Sets the maximum number of keys returned in the response. The response might
+       // contain fewer keys but will never contain more.
+       MaxKeys *int64 `type:"integer"`
+
+       // Name of the bucket to list.
+       Name *string `type:"string"`
+
+       // NextContinuationToken is sent when IsTruncated is true, which means there
+       // are more keys in the bucket that can be listed. Subsequent list requests
+       // to Amazon S3 can be continued with this NextContinuationToken.
+       // NextContinuationToken is obfuscated and is not a real key.
+       NextContinuationToken *string `type:"string"`
+
+       // Limits the response to keys that begin with the specified prefix.
+       Prefix *string `type:"string"`
+
+       // StartAfter is where you want Amazon S3 to start listing from. Amazon S3
+       // starts listing after this specified key. StartAfter can be any key in the
+       // bucket.
+       StartAfter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Output) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Output) GoString() string {
+       return s.String()
+}
+
+// SetCommonPrefixes sets the CommonPrefixes field's value.
+func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output {
+       s.CommonPrefixes = v
+       return s
+}
+
+// SetContents sets the Contents field's value.
+func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output {
+       s.Contents = v
+       return s
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output {
+       s.ContinuationToken = &v
+       return s
+}
+
+// SetDelimiter sets the Delimiter field's value.
+func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output {
+       s.Delimiter = &v
+       return s
+}
+
+// SetEncodingType sets the EncodingType field's value.
+func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output {
+       s.EncodingType = &v
+       return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output {
+       s.IsTruncated = &v
+       return s
+}
+
+// SetKeyCount sets the KeyCount field's value.
+func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output {
+       s.KeyCount = &v
+       return s
+}
+
+// SetMaxKeys sets the MaxKeys field's value.
+func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output {
+       s.MaxKeys = &v
+       return s
+}
+
+// SetName sets the Name field's value.
+func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output {
+       s.Name = &v
+       return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output {
+       s.NextContinuationToken = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output {
+       s.Prefix = &v
+       return s
+}
+
+// SetStartAfter sets the StartAfter field's value.
+func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output {
+       s.StartAfter = &v
+       return s
+}
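+
+// listAllKeysV2 is an editor-added usage sketch, not part of the generated
+// SDK: the generated ListObjectsV2Pages paginator feeds NextContinuationToken
+// back as continuation-token until IsTruncated is false, a straightforward
+// way to list large buckets.
+func listAllKeysV2(svc *S3, bucket, prefix string) ([]string, error) {
+       input := &ListObjectsV2Input{}
+       input.SetBucket(bucket)
+       input.SetPrefix(prefix)
+       var keys []string
+       err := svc.ListObjectsV2Pages(input, func(page *ListObjectsV2Output, lastPage bool) bool {
+               for _, obj := range page.Contents {
+                       keys = append(keys, *obj.Key)
+               }
+               return true
+       })
+       return keys, err
+}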
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest
+type ListPartsInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Sets the maximum number of parts to return.
+       MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+       // Specifies the part after which listing should begin. Only parts with higher
+       // part numbers will be listed.
+       PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // Upload ID identifying the multipart upload whose parts are being listed.
+       //
+       // UploadId is a required field
+       UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPartsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListPartsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+       if s.UploadId == nil {
+               invalidParams.Add(request.NewErrParamRequired("UploadId"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
+       s.Key = &v
+       return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput {
+       s.MaxParts = &v
+       return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput {
+       s.PartNumberMarker = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput {
+       s.UploadId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput
+type ListPartsOutput struct {
+       _ struct{} `type:"structure"`
+
+       // Date when the multipart upload will become eligible for the abort operation
+       // by lifecycle.
+       AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
+
+       // ID of the lifecycle rule that makes a multipart upload eligible for the
+       // abort operation.
+       AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+       // Name of the bucket to which the multipart upload was initiated.
+       Bucket *string `type:"string"`
+
+       // Identifies who initiated the multipart upload.
+       Initiator *Initiator `type:"structure"`
+
+       // Indicates whether the returned list of parts is truncated.
+       IsTruncated *bool `type:"boolean"`
+
+       // Object key for which the multipart upload was initiated.
+       Key *string `min:"1" type:"string"`
+
+       // Maximum number of parts that were allowed in the response.
+       MaxParts *int64 `type:"integer"`
+
+       // When a list is truncated, this element specifies the last part in the list,
+       // as well as the value to use for the part-number-marker request parameter
+       // in a subsequent request.
+       NextPartNumberMarker *int64 `type:"integer"`
+
+       Owner *Owner `type:"structure"`
+
+       // Part number after which listing begins.
+       PartNumberMarker *int64 `type:"integer"`
+
+       Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // The class of storage used to store the object.
+       StorageClass *string `type:"string" enum:"StorageClass"`
+
+       // Upload ID identifying the multipart upload whose parts are being listed.
+       UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListPartsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsOutput) GoString() string {
+       return s.String()
+}
+
+// SetAbortDate sets the AbortDate field's value.
+func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput {
+       s.AbortDate = &v
+       return s
+}
+
+// SetAbortRuleId sets the AbortRuleId field's value.
+func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput {
+       s.AbortRuleId = &v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput {
+       s.Bucket = &v
+       return s
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput {
+       s.Initiator = v
+       return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput {
+       s.IsTruncated = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput {
+       s.Key = &v
+       return s
+}
+
+// SetMaxParts sets the MaxParts field's value.
+func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput {
+       s.MaxParts = &v
+       return s
+}
+
+// SetNextPartNumberMarker sets the NextPartNumberMarker field's value.
+func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput {
+       s.NextPartNumberMarker = &v
+       return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput {
+       s.Owner = v
+       return s
+}
+
+// SetPartNumberMarker sets the PartNumberMarker field's value.
+func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput {
+       s.PartNumberMarker = &v
+       return s
+}
+
+// SetParts sets the Parts field's value.
+func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput {
+       s.Parts = v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput {
+       s.StorageClass = &v
+       return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput {
+       s.UploadId = &v
+       return s
+}
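+
+// allParts is an editor-added usage sketch, not part of the generated SDK: it
+// uses the generated ListPartsPages paginator, which advances
+// part-number-marker from NextPartNumberMarker on each truncated page.
+func allParts(svc *S3, bucket, key, uploadID string) ([]*Part, error) {
+       input := &ListPartsInput{}
+       input.SetBucket(bucket)
+       input.SetKey(key)
+       input.SetUploadId(uploadID)
+       var parts []*Part
+       err := svc.ListPartsPages(input, func(page *ListPartsOutput, lastPage bool) bool {
+               parts = append(parts, page.Parts...)
+               return true
+       })
+       return parts, err
+}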
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled
+type LoggingEnabled struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies the bucket where you want Amazon S3 to store server access logs.
+       // You can have your logs delivered to any bucket that you own, including the
+       // same bucket that is being logged. You can also configure multiple buckets
+       // to deliver their logs to the same target bucket. In this case you should
+       // choose a different TargetPrefix for each source bucket so that the delivered
+       // log files can be distinguished by key.
+       TargetBucket *string `type:"string"`
+
+       TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
+
+       // This element lets you specify a prefix for the keys that the log files will
+       // be stored under.
+       TargetPrefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s LoggingEnabled) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoggingEnabled) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LoggingEnabled) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"}
+       if s.TargetGrants != nil {
+               for i, v := range s.TargetGrants {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetTargetBucket sets the TargetBucket field's value.
+func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled {
+       s.TargetBucket = &v
+       return s
+}
+
+// SetTargetGrants sets the TargetGrants field's value.
+func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled {
+       s.TargetGrants = v
+       return s
+}
+
+// SetTargetPrefix sets the TargetPrefix field's value.
+func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled {
+       s.TargetPrefix = &v
+       return s
+}
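+
+// accessLoggingFor is an editor-added usage sketch, not part of the generated
+// SDK: it builds a LoggingEnabled value whose TargetPrefix is keyed by source
+// bucket, so logs from several buckets delivered to one target bucket stay
+// distinguishable, as the TargetBucket documentation above suggests. The
+// prefix layout is hypothetical.
+func accessLoggingFor(sourceBucket, targetBucket string) *LoggingEnabled {
+       le := &LoggingEnabled{}
+       le.SetTargetBucket(targetBucket)
+       le.SetTargetPrefix("logs/" + sourceBucket + "/") // hypothetical per-source prefix
+       return le
+}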
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator
+type MetricsAndOperator struct {
+       _ struct{} `type:"structure"`
+
+       // The prefix used when evaluating an AND predicate.
+       Prefix *string `type:"string"`
+
+       // The list of tags used when evaluating an AND predicate.
+       Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s MetricsAndOperator) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsAndOperator) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsAndOperator) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"}
+       if s.Tags != nil {
+               for i, v := range s.Tags {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator {
+       s.Prefix = &v
+       return s
+}
+
+// SetTags sets the Tags field's value.
+func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator {
+       s.Tags = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration
+type MetricsConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies a metrics configuration filter. The metrics configuration will
+       // only include objects that meet the filter's criteria. A filter must be a
+       // prefix, a tag, or a conjunction (MetricsAndOperator).
+       Filter *MetricsFilter `type:"structure"`
+
+       // The ID used to identify the metrics configuration.
+       //
+       // Id is a required field
+       Id *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s MetricsConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"}
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+       if s.Filter != nil {
+               if err := s.Filter.Validate(); err != nil {
+                       invalidParams.AddNested("Filter", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetFilter sets the Filter field's value.
+func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration {
+       s.Filter = v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration {
+       s.Id = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter
+type MetricsFilter struct {
+       _ struct{} `type:"structure"`
+
+       // A conjunction (logical AND) of predicates, which is used in evaluating a
+       // metrics filter. The operator must have at least two predicates, and an object
+       // must match all of the predicates in order for the filter to apply.
+       And *MetricsAndOperator `type:"structure"`
+
+       // The prefix used when evaluating a metrics filter.
+       Prefix *string `type:"string"`
+
+       // The tag used when evaluating a metrics filter.
+       Tag *Tag `type:"structure"`
+}
+
+// String returns the string representation
+func (s MetricsFilter) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MetricsFilter) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *MetricsFilter) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"}
+       if s.And != nil {
+               if err := s.And.Validate(); err != nil {
+                       invalidParams.AddNested("And", err.(request.ErrInvalidParams))
+               }
+       }
+       if s.Tag != nil {
+               if err := s.Tag.Validate(); err != nil {
+                       invalidParams.AddNested("Tag", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAnd sets the And field's value.
+func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter {
+       s.And = v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter {
+       s.Prefix = &v
+       return s
+}
+
+// SetTag sets the Tag field's value.
+func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter {
+       s.Tag = v
+       return s
+}
+
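+// exampleMetricsConfiguration is an editor's sketch, not part of the generated
+// SDK: it chains the setters above to build the one filter shape that combines
+// a prefix with tags (the AND conjunction). Tag and its SetKey/SetValue
+// setters are defined elsewhere in this file; the Id and values are
+// placeholders.
+func exampleMetricsConfiguration() (*MetricsConfiguration, error) {
+       cfg := (&MetricsConfiguration{}).
+               SetId("docs-metrics").
+               SetFilter((&MetricsFilter{}).
+                       SetAnd((&MetricsAndOperator{}).
+                               SetPrefix("documents/").
+                               SetTags([]*Tag{(&Tag{}).SetKey("team").SetValue("docs")})))
+       // Validate reports every missing or invalid field in one error.
+       if err := cfg.Validate(); err != nil {
+               return nil, err
+       }
+       return cfg, nil
+}
+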
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload
+type MultipartUpload struct {
+       _ struct{} `type:"structure"`
+
+       // Date and time at which the multipart upload was initiated.
+       Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+       // Identifies who initiated the multipart upload.
+       Initiator *Initiator `type:"structure"`
+
+       // Key of the object for which the multipart upload was initiated.
+       Key *string `min:"1" type:"string"`
+
+       Owner *Owner `type:"structure"`
+
+       // The class of storage used to store the object.
+       StorageClass *string `type:"string" enum:"StorageClass"`
+
+       // Upload ID that identifies the multipart upload.
+       UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s MultipartUpload) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MultipartUpload) GoString() string {
+       return s.String()
+}
+
+// SetInitiated sets the Initiated field's value.
+func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload {
+       s.Initiated = &v
+       return s
+}
+
+// SetInitiator sets the Initiator field's value.
+func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload {
+       s.Initiator = v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *MultipartUpload) SetKey(v string) *MultipartUpload {
+       s.Key = &v
+       return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload {
+       s.Owner = v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload {
+       s.StorageClass = &v
+       return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload {
+       s.UploadId = &v
+       return s
+}
+
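+// describeMultipartUpload is an editor's sketch, not part of the generated
+// SDK: every field above is a pointer, so reads go through the nil-safe
+// aws.StringValue/aws.TimeValue helpers from the vendored aws package (add
+// that import if this file does not already carry it).
+func describeMultipartUpload(u *MultipartUpload) string {
+       return fmt.Sprintf("%s (upload %s, initiated %s)",
+               aws.StringValue(u.Key),
+               aws.StringValue(u.UploadId),
+               aws.TimeValue(u.Initiated).Format(time.RFC3339))
+}
+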
+// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+// configuration action on a bucket that has versioning enabled (or suspended)
+// to request that Amazon S3 delete noncurrent object versions at a specific
+// period in the object's lifetime.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration
+type NoncurrentVersionExpiration struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies the number of days an object is noncurrent before Amazon S3 can
+       // perform the associated action. For information about the noncurrent days
+       // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+       // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+       NoncurrentDays *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s NoncurrentVersionExpiration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NoncurrentVersionExpiration) GoString() string {
+       return s.String()
+}
+
+// SetNoncurrentDays sets the NoncurrentDays field's value.
+func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration {
+       s.NoncurrentDays = &v
+       return s
+}
+
+// Container for the transition rule that describes when noncurrent objects
+// transition to the STANDARD_IA or GLACIER storage class. If your bucket is
+// versioning-enabled (or versioning is suspended), you can set this action
+// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
+// or GLACIER storage class at a specific period in the object's lifetime.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition
+type NoncurrentVersionTransition struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies the number of days an object is noncurrent before Amazon S3 can
+       // perform the associated action. For information about the noncurrent days
+       // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+       // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html).
+       NoncurrentDays *int64 `type:"integer"`
+
+       // The class of storage used to store the object.
+       StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
+
+// String returns the string representation
+func (s NoncurrentVersionTransition) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NoncurrentVersionTransition) GoString() string {
+       return s.String()
+}
+
+// SetNoncurrentDays sets the NoncurrentDays field's value.
+func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition {
+       s.NoncurrentDays = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition {
+       s.StorageClass = &v
+       return s
+}
+
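+// exampleNoncurrentVersionRule is an editor's sketch, not part of the
+// generated SDK: it wires both noncurrent-version actions above into a
+// LifecycleRule (defined elsewhere in this file, with setter names assumed
+// from the same pattern) for a versioned bucket: transition to GLACIER after
+// 30 days, delete after 365.
+func exampleNoncurrentVersionRule() *LifecycleRule {
+       return (&LifecycleRule{}).
+               SetID("expire-old-versions").
+               SetStatus("Enabled").
+               SetPrefix("logs/").
+               SetNoncurrentVersionTransitions([]*NoncurrentVersionTransition{
+                       (&NoncurrentVersionTransition{}).SetNoncurrentDays(30).SetStorageClass("GLACIER"),
+               }).
+               SetNoncurrentVersionExpiration((&NoncurrentVersionExpiration{}).SetNoncurrentDays(365))
+}
+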
+// Container for specifying the notification configuration of the bucket. If
+// this element is empty, notifications are turned off on the bucket.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration
+type NotificationConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"`
+
+       QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"`
+
+       TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s NotificationConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *NotificationConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"}
+       if s.LambdaFunctionConfigurations != nil {
+               for i, v := range s.LambdaFunctionConfigurations {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+       if s.QueueConfigurations != nil {
+               for i, v := range s.QueueConfigurations {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+       if s.TopicConfigurations != nil {
+               for i, v := range s.TopicConfigurations {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value.
+func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration {
+       s.LambdaFunctionConfigurations = v
+       return s
+}
+
+// SetQueueConfigurations sets the QueueConfigurations field's value.
+func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration {
+       s.QueueConfigurations = v
+       return s
+}
+
+// SetTopicConfigurations sets the TopicConfigurations field's value.
+func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration {
+       s.TopicConfigurations = v
+       return s
+}
+
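+// exampleNotificationConfiguration is an editor's sketch, not part of the
+// generated SDK: it routes object-created events to an SNS topic through the
+// TopicConfiguration type defined elsewhere in this file. The ARN is a
+// placeholder; aws.String comes from the vendored aws package.
+func exampleNotificationConfiguration() *NotificationConfiguration {
+       return (&NotificationConfiguration{}).
+               SetTopicConfigurations([]*TopicConfiguration{
+                       (&TopicConfiguration{}).
+                               SetTopicArn("arn:aws:sns:us-east-1:123456789012:bucket-events").
+                               SetEvents([]*string{aws.String("s3:ObjectCreated:*")}),
+               })
+}
+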
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated
+type NotificationConfigurationDeprecated struct {
+       _ struct{} `type:"structure"`
+
+       CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"`
+
+       QueueConfiguration *QueueConfigurationDeprecated `type:"structure"`
+
+       TopicConfiguration *TopicConfigurationDeprecated `type:"structure"`
+}
+
+// String returns the string representation
+func (s NotificationConfigurationDeprecated) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfigurationDeprecated) GoString() string {
+       return s.String()
+}
+
+// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated {
+       s.CloudFunctionConfiguration = v
+       return s
+}
+
+// SetQueueConfiguration sets the QueueConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated {
+       s.QueueConfiguration = v
+       return s
+}
+
+// SetTopicConfiguration sets the TopicConfiguration field's value.
+func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated {
+       s.TopicConfiguration = v
+       return s
+}
+
+// Container for object key name filtering rules. For information about key
+// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter
+type NotificationConfigurationFilter struct {
+       _ struct{} `type:"structure"`
+
+       // Container for object key name prefix and suffix filtering rules.
+       Key *KeyFilter `locationName:"S3Key" type:"structure"`
+}
+
+// String returns the string representation
+func (s NotificationConfigurationFilter) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfigurationFilter) GoString() string {
+       return s.String()
+}
+
+// SetKey sets the Key field's value.
+func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter {
+       s.Key = v
+       return s
+}
+
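+// exampleKeyFilter is an editor's sketch, not part of the generated SDK: it
+// scopes a notification to .jpg objects under images/ via the KeyFilter and
+// FilterRule types defined elsewhere in this file.
+func exampleKeyFilter() *NotificationConfigurationFilter {
+       return (&NotificationConfigurationFilter{}).
+               SetKey((&KeyFilter{}).SetFilterRules([]*FilterRule{
+                       (&FilterRule{}).SetName("prefix").SetValue("images/"),
+                       (&FilterRule{}).SetName("suffix").SetValue(".jpg"),
+               }))
+}
+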
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object
+type Object struct {
+       _ struct{} `type:"structure"`
+
+       ETag *string `type:"string"`
+
+       Key *string `min:"1" type:"string"`
+
+       LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+       Owner *Owner `type:"structure"`
+
+       Size *int64 `type:"integer"`
+
+       // The class of storage used to store the object.
+       StorageClass *string `type:"string" enum:"ObjectStorageClass"`
+}
+
+// String returns the string representation
+func (s Object) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Object) GoString() string {
+       return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *Object) SetETag(v string) *Object {
+       s.ETag = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *Object) SetKey(v string) *Object {
+       s.Key = &v
+       return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Object) SetLastModified(v time.Time) *Object {
+       s.LastModified = &v
+       return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *Object) SetOwner(v *Owner) *Object {
+       s.Owner = v
+       return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Object) SetSize(v int64) *Object {
+       s.Size = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Object) SetStorageClass(v string) *Object {
+       s.StorageClass = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier
+type ObjectIdentifier struct {
+       _ struct{} `type:"structure"`
+
+       // Key name of the object to delete.
+       //
+       // Key is a required field
+       Key *string `min:"1" type:"string" required:"true"`
+
+       // VersionId for the specific version of the object to delete.
+       VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ObjectIdentifier) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectIdentifier) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ObjectIdentifier) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"}
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier {
+       s.Key = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier {
+       s.VersionId = &v
+       return s
+}
+
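+// exampleBatchDelete is an editor's sketch, not part of the generated SDK:
+// ObjectIdentifier is the element type of a batch delete, and VersionId is
+// only set to remove one specific version. Delete and DeleteObjectsInput are
+// defined elsewhere in this file; bucket, keys, and version ID are
+// placeholders.
+func exampleBatchDelete() *DeleteObjectsInput {
+       return (&DeleteObjectsInput{}).
+               SetBucket("my-bucket").
+               SetDelete((&Delete{}).SetObjects([]*ObjectIdentifier{
+                       (&ObjectIdentifier{}).SetKey("reports/2017-05.csv"),
+                       (&ObjectIdentifier{}).SetKey("reports/2017-06.csv").SetVersionId("3HL4kqCxf3vjVBH40Nrjfkd"),
+               }))
+}
+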
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion
+type ObjectVersion struct {
+       _ struct{} `type:"structure"`
+
+       ETag *string `type:"string"`
+
+       // Specifies whether the object is (true) or is not (false) the latest version
+       // of the object.
+       IsLatest *bool `type:"boolean"`
+
+       // The object key.
+       Key *string `min:"1" type:"string"`
+
+       // Date and time the object was last modified.
+       LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+       Owner *Owner `type:"structure"`
+
+       // Size in bytes of the object.
+       Size *int64 `type:"integer"`
+
+       // The class of storage used to store the object.
+       StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"`
+
+       // Version ID of an object.
+       VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ObjectVersion) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectVersion) GoString() string {
+       return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *ObjectVersion) SetETag(v string) *ObjectVersion {
+       s.ETag = &v
+       return s
+}
+
+// SetIsLatest sets the IsLatest field's value.
+func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion {
+       s.IsLatest = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *ObjectVersion) SetKey(v string) *ObjectVersion {
+       s.Key = &v
+       return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion {
+       s.LastModified = &v
+       return s
+}
+
+// SetOwner sets the Owner field's value.
+func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion {
+       s.Owner = v
+       return s
+}
+
+// SetSize sets the Size field's value.
+func (s *ObjectVersion) SetSize(v int64) *ObjectVersion {
+       s.Size = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion {
+       s.StorageClass = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner
+type Owner struct {
+       _ struct{} `type:"structure"`
+
+       DisplayName *string `type:"string"`
+
+       ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Owner) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Owner) GoString() string {
+       return s.String()
+}
+
+// SetDisplayName sets the DisplayName field's value.
+func (s *Owner) SetDisplayName(v string) *Owner {
+       s.DisplayName = &v
+       return s
+}
+
+// SetID sets the ID field's value.
+func (s *Owner) SetID(v string) *Owner {
+       s.ID = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part
+type Part struct {
+       _ struct{} `type:"structure"`
+
+       // Entity tag returned when the part was uploaded.
+       ETag *string `type:"string"`
+
+       // Date and time at which the part was uploaded.
+       LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+       // Part number identifying the part. This is a positive integer between 1 and
+       // 10,000.
+       PartNumber *int64 `type:"integer"`
+
+       // Size of the uploaded part data.
+       Size *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s Part) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Part) GoString() string {
+       return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *Part) SetETag(v string) *Part {
+       s.ETag = &v
+       return s
+}
+
+// SetLastModified sets the LastModified field's value.
+func (s *Part) SetLastModified(v time.Time) *Part {
+       s.LastModified = &v
+       return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *Part) SetPartNumber(v int64) *Part {
+       s.PartNumber = &v
+       return s
+}
+
+// SetSize sets the Size field's value.
+func (s *Part) SetSize(v int64) *Part {
+       s.Size = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest
+type PutBucketAccelerateConfigurationInput struct {
+       _ struct{} `type:"structure" payload:"AccelerateConfiguration"`
+
+       // Specifies the Accelerate Configuration you want to set for the bucket.
+       //
+       // AccelerateConfiguration is a required field
+       AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"`
+
+       // Name of the bucket for which the accelerate configuration is set.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketAccelerateConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAccelerateConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAccelerateConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"}
+       if s.AccelerateConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration"))
+       }
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAccelerateConfiguration sets the AccelerateConfiguration field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput {
+       s.AccelerateConfiguration = v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
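+// examplePutAccelerate is an editor's sketch, not part of the generated SDK:
+// AccelerateConfiguration (defined elsewhere in this file) carries a single
+// Status field, "Enabled" or "Suspended".
+func examplePutAccelerate() *PutBucketAccelerateConfigurationInput {
+       return (&PutBucketAccelerateConfigurationInput{}).
+               SetBucket("my-bucket").
+               SetAccelerateConfiguration((&AccelerateConfiguration{}).SetStatus("Enabled"))
+}
+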
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput
+type PutBucketAccelerateConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAccelerateConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAccelerateConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest
+type PutBucketAclInput struct {
+       _ struct{} `type:"structure" payload:"AccessControlPolicy"`
+
+       // The canned ACL to apply to the bucket.
+       ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+       AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Allows grantee the read, write, read ACP, and write ACP permissions on the
+       // bucket.
+       GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+       // Allows grantee to list the objects in the bucket.
+       GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+       // Allows grantee to read the bucket ACL.
+       GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+       // Allows grantee to create, overwrite, and delete any object in the bucket.
+       GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+       // Allows grantee to write the ACL for the applicable bucket.
+       GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation
+func (s PutBucketAclInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAclInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAclInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.AccessControlPolicy != nil {
+               if err := s.AccessControlPolicy.Validate(); err != nil {
+                       invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput {
+       s.ACL = &v
+       return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput {
+       s.AccessControlPolicy = v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput {
+       s.GrantFullControl = &v
+       return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput {
+       s.GrantRead = &v
+       return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput {
+       s.GrantReadACP = &v
+       return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput {
+       s.GrantWrite = &v
+       return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput {
+       s.GrantWriteACP = &v
+       return s
+}
+
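+// examplePutBucketAcl is an editor's sketch, not part of the generated SDK:
+// the canned-ACL header and the explicit x-amz-grant-* headers are
+// alternatives, so the sketch sets only the canned form.
+func examplePutBucketAcl() *PutBucketAclInput {
+       return (&PutBucketAclInput{}).
+               SetBucket("my-bucket").
+               SetACL("public-read") // one of the BucketCannedACL enum values
+}
+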
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput
+type PutBucketAclOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAclOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAclOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest
+type PutBucketAnalyticsConfigurationInput struct {
+       _ struct{} `type:"structure" payload:"AnalyticsConfiguration"`
+
+       // The configuration and any analyses for the analytics filter.
+       //
+       // AnalyticsConfiguration is a required field
+       AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"`
+
+       // The name of the bucket in which the analytics configuration is stored.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The identifier used to represent an analytics configuration.
+       //
+       // Id is a required field
+       Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketAnalyticsConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAnalyticsConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"}
+       if s.AnalyticsConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration"))
+       }
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+       if s.AnalyticsConfiguration != nil {
+               if err := s.AnalyticsConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput {
+       s.AnalyticsConfiguration = v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput {
+       s.Id = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput
+type PutBucketAnalyticsConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAnalyticsConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAnalyticsConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest
+type PutBucketCorsInput struct {
+       _ struct{} `type:"structure" payload:"CORSConfiguration"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // CORSConfiguration is a required field
+       CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketCorsInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketCorsInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketCorsInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.CORSConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("CORSConfiguration"))
+       }
+       if s.CORSConfiguration != nil {
+               if err := s.CORSConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetCORSConfiguration sets the CORSConfiguration field's value.
+func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput {
+       s.CORSConfiguration = v
+       return s
+}
+
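+// examplePutBucketCors is an editor's sketch, not part of the generated SDK:
+// CORSConfiguration and CORSRule are defined elsewhere in this file, and each
+// rule needs at least its allowed methods and origins; aws.String comes from
+// the vendored aws package.
+func examplePutBucketCors() *PutBucketCorsInput {
+       return (&PutBucketCorsInput{}).
+               SetBucket("my-bucket").
+               SetCORSConfiguration((&CORSConfiguration{}).SetCORSRules([]*CORSRule{
+                       (&CORSRule{}).
+                               SetAllowedMethods([]*string{aws.String("GET")}).
+                               SetAllowedOrigins([]*string{aws.String("https://example.com")}),
+               }))
+}
+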
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput
+type PutBucketCorsOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketCorsOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketCorsOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest
+type PutBucketInventoryConfigurationInput struct {
+       _ struct{} `type:"structure" payload:"InventoryConfiguration"`
+
+       // The name of the bucket where the inventory configuration will be stored.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The ID used to identify the inventory configuration.
+       //
+       // Id is a required field
+       Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+
+       // Specifies the inventory configuration.
+       //
+       // InventoryConfiguration is a required field
+       InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketInventoryConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketInventoryConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketInventoryConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+       if s.InventoryConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration"))
+       }
+       if s.InventoryConfiguration != nil {
+               if err := s.InventoryConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput {
+       s.Id = &v
+       return s
+}
+
+// SetInventoryConfiguration sets the InventoryConfiguration field's value.
+func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput {
+       s.InventoryConfiguration = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput
+type PutBucketInventoryConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketInventoryConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketInventoryConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest
+type PutBucketLifecycleConfigurationInput struct {
+       _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.LifecycleConfiguration != nil {
+               if err := s.LifecycleConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput {
+       s.LifecycleConfiguration = v
+       return s
+}
+
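+// examplePutLifecycle is an editor's sketch, not part of the generated SDK:
+// it wraps the noncurrent-version rule sketched earlier in this file in the
+// BucketLifecycleConfiguration payload and validates the whole request before
+// it would be sent.
+func examplePutLifecycle() (*PutBucketLifecycleConfigurationInput, error) {
+       input := (&PutBucketLifecycleConfigurationInput{}).
+               SetBucket("my-bucket").
+               SetLifecycleConfiguration((&BucketLifecycleConfiguration{}).
+                       SetRules([]*LifecycleRule{exampleNoncurrentVersionRule()}))
+       if err := input.Validate(); err != nil {
+               return nil, err
+       }
+       return input, nil
+}
+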
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput
+type PutBucketLifecycleConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest
+type PutBucketLifecycleInput struct {
+       _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.LifecycleConfiguration != nil {
+               if err := s.LifecycleConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
+func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput {
+       s.LifecycleConfiguration = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput
+type PutBucketLifecycleOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest
+type PutBucketLoggingInput struct {
+       _ struct{} `type:"structure" payload:"BucketLoggingStatus"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // BucketLoggingStatus is a required field
+       BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketLoggingInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLoggingInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLoggingInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.BucketLoggingStatus == nil {
+               invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
+       }
+       if s.BucketLoggingStatus != nil {
+               if err := s.BucketLoggingStatus.Validate(); err != nil {
+                       invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetBucketLoggingStatus sets the BucketLoggingStatus field's value.
+func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput {
+       s.BucketLoggingStatus = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput
+type PutBucketLoggingOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLoggingOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLoggingOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest
+type PutBucketMetricsConfigurationInput struct {
+       _ struct{} `type:"structure" payload:"MetricsConfiguration"`
+
+       // The name of the bucket for which the metrics configuration is set.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The ID used to identify the metrics configuration.
+       //
+       // Id is a required field
+       Id *string `location:"querystring" locationName:"id" type:"string" required:"true"`
+
+       // Specifies the metrics configuration.
+       //
+       // MetricsConfiguration is a required field
+       MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketMetricsConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketMetricsConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketMetricsConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Id == nil {
+               invalidParams.Add(request.NewErrParamRequired("Id"))
+       }
+       if s.MetricsConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration"))
+       }
+       if s.MetricsConfiguration != nil {
+               if err := s.MetricsConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput {
+       s.Id = &v
+       return s
+}
+
+// SetMetricsConfiguration sets the MetricsConfiguration field's value.
+func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput {
+       s.MetricsConfiguration = v
+       return s
+}
+
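+// examplePutMetrics is an editor's sketch, not part of the generated SDK: the
+// Id in the query string must match the Id inside the payload, so it is read
+// back from the configuration built by the metrics sketch earlier in this
+// file (aws.StringValue comes from the vendored aws package).
+func examplePutMetrics() (*PutBucketMetricsConfigurationInput, error) {
+       cfg, err := exampleMetricsConfiguration()
+       if err != nil {
+               return nil, err
+       }
+       return (&PutBucketMetricsConfigurationInput{}).
+               SetBucket("my-bucket").
+               SetId(aws.StringValue(cfg.Id)).
+               SetMetricsConfiguration(cfg), nil
+}
+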
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput
+type PutBucketMetricsConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketMetricsConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketMetricsConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest
+type PutBucketNotificationConfigurationInput struct {
+       _ struct{} `type:"structure" payload:"NotificationConfiguration"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Container for specifying the notification configuration of the bucket. If
+       // this element is empty, notifications are turned off on the bucket.
+       //
+       // NotificationConfiguration is a required field
+       NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationConfigurationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationConfigurationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketNotificationConfigurationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.NotificationConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
+       }
+       if s.NotificationConfiguration != nil {
+               if err := s.NotificationConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetNotificationConfiguration sets the NotificationConfiguration field's value.
+func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput {
+       s.NotificationConfiguration = v
+       return s
+}
+
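+// examplePutNotification is an editor's sketch, not part of the generated
+// SDK: it sends the notification configuration sketched earlier in this
+// file; an empty NotificationConfiguration would instead turn notifications
+// off for the bucket.
+func examplePutNotification() (*PutBucketNotificationConfigurationInput, error) {
+       input := (&PutBucketNotificationConfigurationInput{}).
+               SetBucket("my-bucket").
+               SetNotificationConfiguration(exampleNotificationConfiguration())
+       if err := input.Validate(); err != nil {
+               return nil, err
+       }
+       return input, nil
+}
+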
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput
+type PutBucketNotificationConfigurationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationConfigurationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationConfigurationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest
+type PutBucketNotificationInput struct {
+       _ struct{} `type:"structure" payload:"NotificationConfiguration"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // NotificationConfiguration is a required field
+       NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketNotificationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.NotificationConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetNotificationConfiguration sets the NotificationConfiguration field's value.
+func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput {
+       s.NotificationConfiguration = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput
+type PutBucketNotificationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest
+type PutBucketPolicyInput struct {
+       _ struct{} `type:"structure" payload:"Policy"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The bucket policy as a JSON document.
+       //
+       // Policy is a required field
+       Policy *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketPolicyInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketPolicyInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketPolicyInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Policy == nil {
+               invalidParams.Add(request.NewErrParamRequired("Policy"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput {
+       s.Policy = &v
+       return s
+}
+
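+// examplePutBucketPolicy is an editor's sketch, not part of the generated
+// SDK: the policy travels as a raw JSON string, so it can be written as a
+// literal or produced by json.Marshal. The statement below is a placeholder.
+func examplePutBucketPolicy() *PutBucketPolicyInput {
+       policy := `{
+               "Version": "2012-10-17",
+               "Statement": [{
+                       "Effect": "Allow",
+                       "Principal": "*",
+                       "Action": "s3:GetObject",
+                       "Resource": "arn:aws:s3:::my-bucket/*"
+               }]
+       }`
+       return (&PutBucketPolicyInput{}).
+               SetBucket("my-bucket").
+               SetPolicy(policy)
+}
+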
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput
+type PutBucketPolicyOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketPolicyOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketPolicyOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest
+type PutBucketReplicationInput struct {
+       _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Container for replication rules. You can add as many as 1,000 rules. Total
+       // replication configuration size can be up to 2 MB.
+       //
+       // ReplicationConfiguration is a required field
+       ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketReplicationInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketReplicationInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketReplicationInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.ReplicationConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration"))
+       }
+       if s.ReplicationConfiguration != nil {
+               if err := s.ReplicationConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
+func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput {
+       s.ReplicationConfiguration = v
+       return s
+}
+
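+// A minimal usage sketch (assumptions: "svc" is an *S3 client; Destination,
+// ReplicationRule, and ReplicationConfiguration are defined elsewhere in this
+// file; the ARNs, bucket names, and prefix are placeholders):
+//
+//	rule := &ReplicationRule{}
+//	rule.SetDestination((&Destination{}).SetBucket("arn:aws:s3:::dest-bucket"))
+//	rule.SetPrefix("logs/").SetStatus("Enabled")
+//	cfg := (&ReplicationConfiguration{}).
+//		SetRole("arn:aws:iam::123456789012:role/replication-role").
+//		SetRules([]*ReplicationRule{rule})
+//	input := (&PutBucketReplicationInput{}).SetBucket("source-bucket").SetReplicationConfiguration(cfg)
+//	_, err := svc.PutBucketReplication(input)
+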
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput
+type PutBucketReplicationOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketReplicationOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketReplicationOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest
+type PutBucketRequestPaymentInput struct {
+       _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // RequestPaymentConfiguration is a required field
+       RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketRequestPaymentInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketRequestPaymentInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketRequestPaymentInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.RequestPaymentConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration"))
+       }
+       if s.RequestPaymentConfiguration != nil {
+               if err := s.RequestPaymentConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value.
+func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput {
+       s.RequestPaymentConfiguration = v
+       return s
+}
+
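+// A minimal usage sketch (assumptions: "svc" is an *S3 client; "Requester" is
+// one of the Payer enum values; the bucket name is a placeholder):
+//
+//	input := &PutBucketRequestPaymentInput{}
+//	input.SetBucket("my-bucket")
+//	input.SetRequestPaymentConfiguration((&RequestPaymentConfiguration{}).SetPayer("Requester"))
+//	_, err := svc.PutBucketRequestPayment(input)
+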
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput
+type PutBucketRequestPaymentOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketRequestPaymentOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketRequestPaymentOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest
+type PutBucketTaggingInput struct {
+       _ struct{} `type:"structure" payload:"Tagging"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Tagging is a required field
+       Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketTaggingInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketTaggingInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketTaggingInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Tagging == nil {
+               invalidParams.Add(request.NewErrParamRequired("Tagging"))
+       }
+       if s.Tagging != nil {
+               if err := s.Tagging.Validate(); err != nil {
+                       invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput {
+       s.Tagging = v
+       return s
+}
+
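+// A minimal usage sketch (assumptions: "svc" is an *S3 client; Tag and Tagging
+// are defined elsewhere in this file; names and values are placeholders):
+//
+//	tagging := (&Tagging{}).SetTagSet([]*Tag{(&Tag{}).SetKey("env").SetValue("prod")})
+//	input := (&PutBucketTaggingInput{}).SetBucket("my-bucket").SetTagging(tagging)
+//	_, err := svc.PutBucketTagging(input)
+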
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput
+type PutBucketTaggingOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketTaggingOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketTaggingOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest
+type PutBucketVersioningInput struct {
+       _ struct{} `type:"structure" payload:"VersioningConfiguration"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The concatenation of the authentication device's serial number, a space,
+       // and the value that is displayed on your authentication device.
+       MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+       // VersioningConfiguration is a required field
+       VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketVersioningInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketVersioningInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketVersioningInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.VersioningConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetMFA sets the MFA field's value.
+func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
+       s.MFA = &v
+       return s
+}
+
+// SetVersioningConfiguration sets the VersioningConfiguration field's value.
+func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput {
+       s.VersioningConfiguration = v
+       return s
+}
+
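+// A minimal usage sketch (assumptions: "svc" is an *S3 client;
+// VersioningConfiguration is defined elsewhere in this file and "Enabled" is
+// one of its Status enum values):
+//
+//	input := (&PutBucketVersioningInput{}).SetBucket("my-bucket")
+//	input.SetVersioningConfiguration((&VersioningConfiguration{}).SetStatus("Enabled"))
+//	// SetMFA is only needed when changing the bucket's MFA Delete setting.
+//	_, err := svc.PutBucketVersioning(input)
+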
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput
+type PutBucketVersioningOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketVersioningOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketVersioningOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest
+type PutBucketWebsiteInput struct {
+       _ struct{} `type:"structure" payload:"WebsiteConfiguration"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // WebsiteConfiguration is a required field
+       WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketWebsiteInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketWebsiteInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketWebsiteInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.WebsiteConfiguration == nil {
+               invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration"))
+       }
+       if s.WebsiteConfiguration != nil {
+               if err := s.WebsiteConfiguration.Validate(); err != nil {
+                       invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetWebsiteConfiguration sets the WebsiteConfiguration field's value.
+func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput {
+       s.WebsiteConfiguration = v
+       return s
+}
+
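+// A minimal usage sketch (assumptions: "svc" is an *S3 client;
+// WebsiteConfiguration, IndexDocument, and ErrorDocument are defined elsewhere
+// in this file; the document names are placeholders):
+//
+//	site := &WebsiteConfiguration{}
+//	site.SetIndexDocument((&IndexDocument{}).SetSuffix("index.html"))
+//	site.SetErrorDocument((&ErrorDocument{}).SetKey("error.html"))
+//	input := (&PutBucketWebsiteInput{}).SetBucket("my-bucket").SetWebsiteConfiguration(site)
+//	_, err := svc.PutBucketWebsite(input)
+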
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput
+type PutBucketWebsiteOutput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketWebsiteOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketWebsiteOutput) GoString() string {
+       return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest
+type PutObjectAclInput struct {
+       _ struct{} `type:"structure" payload:"AccessControlPolicy"`
+
+       // The canned ACL to apply to the object.
+       ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+       AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Allows grantee the read, write, read ACP, and write ACP permissions on the
+       // bucket.
+       GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+       // Allows grantee to list the objects in the bucket.
+       GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+       // Allows grantee to read the bucket ACL.
+       GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+       // Allows grantee to create, overwrite, and delete any object in the bucket.
+       GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+       // Allows grantee to write the ACL for the applicable bucket.
+       GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // VersionId used to reference a specific version of the object.
+       VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectAclInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectAclInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+       if s.AccessControlPolicy != nil {
+               if err := s.AccessControlPolicy.Validate(); err != nil {
+                       invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput {
+       s.ACL = &v
+       return s
+}
+
+// SetAccessControlPolicy sets the AccessControlPolicy field's value.
+func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput {
+       s.AccessControlPolicy = v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput {
+       s.GrantFullControl = &v
+       return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput {
+       s.GrantRead = &v
+       return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput {
+       s.GrantReadACP = &v
+       return s
+}
+
+// SetGrantWrite sets the GrantWrite field's value.
+func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput {
+       s.GrantWrite = &v
+       return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput {
+       s.GrantWriteACP = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput {
+       s.Key = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput {
+       s.VersionId = &v
+       return s
+}
+
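+// A minimal usage sketch applying a canned ACL (assumptions: "svc" is an *S3
+// client; "public-read" is one of the ObjectCannedACL enum values; the bucket
+// and key are placeholders):
+//
+//	input := (&PutObjectAclInput{}).SetBucket("my-bucket").SetKey("photos/cat.jpg").SetACL("public-read")
+//	_, err := svc.PutObjectAcl(input)
+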
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput
+type PutObjectAclOutput struct {
+       _ struct{} `type:"structure"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s PutObjectAclOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclOutput) GoString() string {
+       return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest
+type PutObjectInput struct {
+       _ struct{} `type:"structure" payload:"Body"`
+
+       // The canned ACL to apply to the object.
+       ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+       // Object data.
+       Body io.ReadSeeker `type:"blob"`
+
+       // Name of the bucket to which the PUT operation was initiated.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Specifies caching behavior along the request/reply chain.
+       CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+       // Specifies presentational information for the object.
+       ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+       // Specifies what content encodings have been applied to the object and thus
+       // what decoding mechanisms must be applied to obtain the media-type referenced
+       // by the Content-Type header field.
+       ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+       // The language the content is in.
+       ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+       // Size of the body in bytes. This parameter is useful when the size of the
+       // body cannot be determined automatically.
+       ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+       // A standard MIME type describing the format of the object data.
+       ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+       // The date and time at which the object is no longer cacheable.
+       Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+       GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+       // Allows grantee to read the object data and its metadata.
+       GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+       // Allows grantee to read the object ACL.
+       GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+       // Allows grantee to write the ACL for the applicable object.
+       GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+       // Object key for which the PUT operation was initiated.
+       //
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // A map of metadata to store with the object in S3.
+       Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // Specifies the algorithm to use to when encrypting the object (e.g., AES256).
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+       // data. This value is used to store the object and then it is discarded; Amazon
+       // does not store the encryption key. The key must be appropriate for use with
+       // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+       // header.
+       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+       // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+       // Amazon S3 uses this header for a message integrity check to ensure the encryption
+       // key was transmitted without error.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+       // requests for an object protected by AWS KMS will fail if not made via SSL
+       // or using SigV4. Documentation on configuring any of the officially supported
+       // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The Server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+       // The type of storage to use for the object. Defaults to 'STANDARD'.
+       StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+       // The tag-set for the object. The tag-set must be encoded as URL query parameters.
+       Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
+       // If the bucket is configured as a website, redirects requests for this object
+       // to another object in the same bucket or to an external URL. Amazon S3 stores
+       // the value of this header in the object metadata.
+       WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetACL sets the ACL field's value.
+func (s *PutObjectInput) SetACL(v string) *PutObjectInput {
+       s.ACL = &v
+       return s
+}
+
+// SetBody sets the Body field's value.
+func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput {
+       s.Body = v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetCacheControl sets the CacheControl field's value.
+func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
+       s.CacheControl = &v
+       return s
+}
+
+// SetContentDisposition sets the ContentDisposition field's value.
+func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput {
+       s.ContentDisposition = &v
+       return s
+}
+
+// SetContentEncoding sets the ContentEncoding field's value.
+func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput {
+       s.ContentEncoding = &v
+       return s
+}
+
+// SetContentLanguage sets the ContentLanguage field's value.
+func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput {
+       s.ContentLanguage = &v
+       return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput {
+       s.ContentLength = &v
+       return s
+}
+
+// SetContentType sets the ContentType field's value.
+func (s *PutObjectInput) SetContentType(v string) *PutObjectInput {
+       s.ContentType = &v
+       return s
+}
+
+// SetExpires sets the Expires field's value.
+func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput {
+       s.Expires = &v
+       return s
+}
+
+// SetGrantFullControl sets the GrantFullControl field's value.
+func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput {
+       s.GrantFullControl = &v
+       return s
+}
+
+// SetGrantRead sets the GrantRead field's value.
+func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput {
+       s.GrantRead = &v
+       return s
+}
+
+// SetGrantReadACP sets the GrantReadACP field's value.
+func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput {
+       s.GrantReadACP = &v
+       return s
+}
+
+// SetGrantWriteACP sets the GrantWriteACP field's value.
+func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput {
+       s.GrantWriteACP = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectInput) SetKey(v string) *PutObjectInput {
+       s.Key = &v
+       return s
+}
+
+// SetMetadata sets the Metadata field's value.
+func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput {
+       s.Metadata = v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
+       s.SSECustomerKey = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput {
+       s.StorageClass = &v
+       return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectInput) SetTagging(v string) *PutObjectInput {
+       s.Tagging = &v
+       return s
+}
+
+// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
+func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput {
+       s.WebsiteRedirectLocation = &v
+       return s
+}
+
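+// A minimal usage sketch (assumptions: "svc" is an *S3 client and the caller
+// imports "bytes"; *bytes.Reader satisfies the io.ReadSeeker required by Body;
+// the bucket and key are placeholders):
+//
+//	input := (&PutObjectInput{}).SetBucket("my-bucket").SetKey("greetings/hello.txt")
+//	input.SetBody(bytes.NewReader([]byte("hello, world")))
+//	input.SetContentType("text/plain").SetServerSideEncryption("AES256")
+//	out, err := svc.PutObject(input)
+//	// On success, out.ETag (and out.VersionId on versioned buckets) is populated.
+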
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput
+type PutObjectOutput struct {
+       _ struct{} `type:"structure"`
+
+       // Entity tag for the uploaded object.
+       ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+       // If the object expiration is configured, this will contain the expiration
+       // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+       Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header confirming the encryption algorithm
+       // used.
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header to provide round trip message integrity
+       // verification of the customer-provided encryption key.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // If present, specifies the ID of the AWS Key Management Service (KMS) master
+       // encryption key that was used for the object.
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The Server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+       // Version of the object.
+       VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectOutput) GoString() string {
+       return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput {
+       s.ETag = &v
+       return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput {
+       s.Expiration = &v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
+       s.VersionId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest
+type PutObjectTaggingInput struct {
+       _ struct{} `type:"structure" payload:"Tagging"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Tagging is a required field
+       Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+
+       VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectTaggingInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+       if s.Tagging == nil {
+               invalidParams.Add(request.NewErrParamRequired("Tagging"))
+       }
+       if s.Tagging != nil {
+               if err := s.Tagging.Validate(); err != nil {
+                       invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
+       s.Key = &v
+       return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
+       s.Tagging = v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+       s.VersionId = &v
+       return s
+}
+
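+// A minimal usage sketch (assumptions: "svc" is an *S3 client; Tag and Tagging
+// are defined elsewhere in this file; the bucket, key, and tags are placeholders):
+//
+//	tagging := (&Tagging{}).SetTagSet([]*Tag{(&Tag{}).SetKey("team").SetValue("storage")})
+//	input := (&PutObjectTaggingInput{}).SetBucket("my-bucket").SetKey("reports/q1.csv").SetTagging(tagging)
+//	_, err := svc.PutObjectTagging(input)
+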
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput
+type PutObjectTaggingOutput struct {
+       _ struct{} `type:"structure"`
+
+       VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingOutput) GoString() string {
+       return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+       s.VersionId = &v
+       return s
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration
+type QueueConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Events is a required field
+       Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+       // Container for object key name filtering rules. For information about key
+       // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+       Filter *NotificationConfigurationFilter `type:"structure"`
+
+       // Optional unique identifier for configurations in a notification configuration.
+       // If you don't provide one, Amazon S3 will assign an ID.
+       Id *string `type:"string"`
+
+       // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+       // events of the specified type.
+       //
+       // QueueArn is a required field
+       QueueArn *string `locationName:"Queue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s QueueConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueueConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
+       if s.Events == nil {
+               invalidParams.Add(request.NewErrParamRequired("Events"))
+       }
+       if s.QueueArn == nil {
+               invalidParams.Add(request.NewErrParamRequired("QueueArn"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration {
+       s.Events = v
+       return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration {
+       s.Filter = v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfiguration) SetId(v string) *QueueConfiguration {
+       s.Id = &v
+       return s
+}
+
+// SetQueueArn sets the QueueArn field's value.
+func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration {
+       s.QueueArn = &v
+       return s
+}
+
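+// A minimal wiring sketch (assumptions: NotificationConfiguration and its
+// SetQueueConfigurations method are defined elsewhere in this file; the queue
+// ARN is a placeholder and "s3:ObjectCreated:*" is one of the Event enum values):
+//
+//	q := (&QueueConfiguration{}).SetQueueArn("arn:aws:sqs:us-east-1:123456789012:s3-events")
+//	q.SetEvents([]*string{aws.String("s3:ObjectCreated:*")})
+//	nc := (&NotificationConfiguration{}).SetQueueConfigurations([]*QueueConfiguration{q})
+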
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated
+type QueueConfigurationDeprecated struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket event for which to send notifications.
+       Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+       Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+       // Optional unique identifier for configurations in a notification configuration.
+       // If you don't provide one, Amazon S3 will assign an ID.
+       Id *string `type:"string"`
+
+       Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+       return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated {
+       s.Event = &v
+       return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated {
+       s.Events = v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+       s.Id = &v
+       return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+       s.Queue = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect
+type Redirect struct {
+       _ struct{} `type:"structure"`
+
+       // The host name to use in the redirect request.
+       HostName *string `type:"string"`
+
+       // The HTTP redirect code to use on the response. Not required if one of the
+       // siblings is present.
+       HttpRedirectCode *string `type:"string"`
+
+       // Protocol to use (http, https) when redirecting requests. The default is the
+       // protocol that is used in the original request.
+       Protocol *string `type:"string" enum:"Protocol"`
+
+       // The object key prefix to use in the redirect request. For example, to redirect
+       // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+       // documents/, you can set a condition block with KeyPrefixEquals set to docs/
+       // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+       // if one of the siblings is present. Can be present only if ReplaceKeyWith
+       // is not provided.
+       ReplaceKeyPrefixWith *string `type:"string"`
+
+       // The specific object key to use in the redirect request. For example, redirect
+       // requests to error.html. Not required if one of the siblings is present. Can
+       // be present only if ReplaceKeyPrefixWith is not provided.
+       ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+       return s.String()
+}
+
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+       s.HostName = &v
+       return s
+}
+
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+       s.HttpRedirectCode = &v
+       return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+       s.Protocol = &v
+       return s
+}
+
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+       s.ReplaceKeyPrefixWith = &v
+       return s
+}
+
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
+       s.ReplaceKeyWith = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo
+type RedirectAllRequestsTo struct {
+       _ struct{} `type:"structure"`
+
+       // Name of the host where requests will be redirected.
+       //
+       // HostName is a required field
+       HostName *string `type:"string" required:"true"`
+
+       // Protocol to use (http, https) when redirecting requests. The default is the
+       // protocol that is used in the original request.
+       Protocol *string `type:"string" enum:"Protocol"`
+}
+
+// String returns the string representation
+func (s RedirectAllRequestsTo) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RedirectAllRequestsTo) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RedirectAllRequestsTo) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"}
+       if s.HostName == nil {
+               invalidParams.Add(request.NewErrParamRequired("HostName"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetHostName sets the HostName field's value.
+func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo {
+       s.HostName = &v
+       return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo {
+       s.Protocol = &v
+       return s
+}
+
+// Container for replication rules. You can add as many as 1,000 rules. Total
+// replication configuration size can be up to 2 MB.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration
+type ReplicationConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating
+       // the objects.
+       //
+       // Role is a required field
+       Role *string `type:"string" required:"true"`
+
+       // Container for information about a particular replication rule. Replication
+       // configuration must have at least one rule and can contain up to 1,000 rules.
+       //
+       // Rules is a required field
+       Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
+       if s.Role == nil {
+               invalidParams.Add(request.NewErrParamRequired("Role"))
+       }
+       if s.Rules == nil {
+               invalidParams.Add(request.NewErrParamRequired("Rules"))
+       }
+       if s.Rules != nil {
+               for i, v := range s.Rules {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetRole sets the Role field's value.
+func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration {
+       s.Role = &v
+       return s
+}
+
+// SetRules sets the Rules field's value.
+func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration {
+       s.Rules = v
+       return s
+}
+
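+// A short validation sketch grounded in the Validate method above: validation
+// recurses into each rule, so a malformed rule surfaces as a nested error
+// alongside any top-level ones:
+//
+//	cfg := (&ReplicationConfiguration{}).SetRules([]*ReplicationRule{{}})
+//	err := cfg.Validate() // reports the missing Role plus Rules[0]'s missing fields
+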
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule
+type ReplicationRule struct {
+       _ struct{} `type:"structure"`
+
+       // Destination is a required field
+       Destination *Destination `type:"structure" required:"true"`
+
+       // Unique identifier for the rule. The value cannot be longer than 255 characters.
+       ID *string `type:"string"`
+
+       // Object key name prefix identifying one or more objects to which the rule applies.
+       // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
+       // are not supported.
+       //
+       // Prefix is a required field
+       Prefix *string `type:"string" required:"true"`
+
+       // The rule is ignored if status is not Enabled.
+       //
+       // Status is a required field
+       Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
+}
+
+// String returns the string representation
+func (s ReplicationRule) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationRule) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationRule) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"}
+       if s.Destination == nil {
+               invalidParams.Add(request.NewErrParamRequired("Destination"))
+       }
+       if s.Prefix == nil {
+               invalidParams.Add(request.NewErrParamRequired("Prefix"))
+       }
+       if s.Status == nil {
+               invalidParams.Add(request.NewErrParamRequired("Status"))
+       }
+       if s.Destination != nil {
+               if err := s.Destination.Validate(); err != nil {
+                       invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule {
+       s.Destination = v
+       return s
+}
+
+// SetID sets the ID field's value.
+func (s *ReplicationRule) SetID(v string) *ReplicationRule {
+       s.ID = &v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule {
+       s.Prefix = &v
+       return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *ReplicationRule) SetStatus(v string) *ReplicationRule {
+       s.Status = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration
+type RequestPaymentConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies who pays for the download and request fees.
+       //
+       // Payer is a required field
+       Payer *string `type:"string" required:"true" enum:"Payer"`
+}
+
+// String returns the string representation
+func (s RequestPaymentConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestPaymentConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RequestPaymentConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"}
+       if s.Payer == nil {
+               invalidParams.Add(request.NewErrParamRequired("Payer"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetPayer sets the Payer field's value.
+func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration {
+       s.Payer = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest
+type RestoreObjectInput struct {
+       _ struct{} `type:"structure" payload:"RestoreRequest"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Confirms that the requester knows that she or he will be charged for the
+       // request. Bucket owners need not specify this parameter in their requests.
+       // Documentation on downloading objects from requester pays buckets can be found
+       // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
+
+       VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreObjectInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreObjectInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+       if s.RestoreRequest != nil {
+               if err := s.RestoreRequest.Validate(); err != nil {
+                       invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
+       s.Key = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetRestoreRequest sets the RestoreRequest field's value.
+func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput {
+       s.RestoreRequest = v
+       return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
+       s.VersionId = &v
+       return s
+}
+
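+// A minimal usage sketch (assumptions: "svc" is an *S3 client; the
+// GlacierJobParameters type and its SetTier method, with "Standard" as a Tier
+// value, are defined elsewhere in this file; the bucket and key are placeholders):
+//
+//	req := (&RestoreRequest{}).SetDays(7)
+//	req.SetGlacierJobParameters((&GlacierJobParameters{}).SetTier("Standard"))
+//	input := (&RestoreObjectInput{}).SetBucket("my-bucket").SetKey("archive/2016.tar")
+//	input.SetRestoreRequest(req)
+//	_, err := svc.RestoreObject(input)
+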
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput
+type RestoreObjectOutput struct {
+       _ struct{} `type:"structure"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+       return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest
+type RestoreRequest struct {
+       _ struct{} `type:"structure"`
+
+       // Lifetime of the active copy in days.
+       //
+       // Days is a required field
+       Days *int64 `type:"integer" required:"true"`
+
+       // Glacier-related parameters pertaining to this job.
+       GlacierJobParameters *GlacierJobParameters `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+       if s.Days == nil {
+               invalidParams.Add(request.NewErrParamRequired("Days"))
+       }
+       if s.GlacierJobParameters != nil {
+               if err := s.GlacierJobParameters.Validate(); err != nil {
+                       invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
+       s.Days = &v
+       return s
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters field's value.
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
+       s.GlacierJobParameters = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule
+type RoutingRule struct {
+       _ struct{} `type:"structure"`
+
+       // A container for describing a condition that must be met for the specified
+       // redirect to apply. For example: 1. If the request is for pages in the /docs
+       // folder, redirect to the /documents folder. 2. If the request results in an
+       // HTTP 4xx error, redirect the request to another host where you might process
+       // the error.
+       Condition *Condition `type:"structure"`
+
+       // Container for redirect information. You can redirect requests to another
+       // host, to another page, or with another protocol. In the event of an error,
+       // you can specify a different error code to return.
+       //
+       // Redirect is a required field
+       Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+       if s.Redirect == nil {
+               invalidParams.Add(request.NewErrParamRequired("Redirect"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetCondition sets the Condition field's value.
+func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
+       s.Condition = v
+       return s
+}
+
+// SetRedirect sets the Redirect field's value.
+func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
+       s.Redirect = v
+       return s
+}
+
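+// exampleRoutingRule is an illustrative sketch, not generated API surface:
+// it mirrors the first example in the Condition documentation above, sending
+// requests for keys under docs/ to the equivalent key under documents/.
+// Condition.SetKeyPrefixEquals and Redirect.SetReplaceKeyPrefixWith are
+// assumed to be the setters defined earlier in this file.
+func exampleRoutingRule() *RoutingRule {
+       return (&RoutingRule{}).
+               SetCondition((&Condition{}).SetKeyPrefixEquals("docs/")).
+               SetRedirect((&Redirect{}).SetReplaceKeyPrefixWith("documents/"))
+}
+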
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule
+type Rule struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies the number of days after the initiation of an incomplete multipart
+       // upload that Lifecycle will wait before permanently removing all parts of
+       // the upload.
+       AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+       Expiration *LifecycleExpiration `type:"structure"`
+
+       // Unique identifier for the rule. The value cannot be longer than 255 characters.
+       ID *string `type:"string"`
+
+       // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+       // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+       // configuration action on a bucket that has versioning enabled (or suspended)
+       // to request that Amazon S3 delete noncurrent object versions at a specific
+       // period in the object's lifetime.
+       NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+       // Container for the transition rule that describes when noncurrent objects
+       // transition to the STANDARD_IA or GLACIER storage class. If your bucket is
+       // versioning-enabled (or versioning is suspended), you can set this action
+       // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
+       // or GLACIER storage class at a specific period in the object's lifetime.
+       NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
+
+       // Prefix identifying one or more objects to which the rule applies.
+       //
+       // Prefix is a required field
+       Prefix *string `type:"string" required:"true"`
+
+       // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+       // is not currently being applied.
+       //
+       // Status is a required field
+       Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+       Transition *Transition `type:"structure"`
+}
+
+// String returns the string representation
+func (s Rule) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Rule) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Rule) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "Rule"}
+       if s.Prefix == nil {
+               invalidParams.Add(request.NewErrParamRequired("Prefix"))
+       }
+       if s.Status == nil {
+               invalidParams.Add(request.NewErrParamRequired("Status"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value.
+func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule {
+       s.AbortIncompleteMultipartUpload = v
+       return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule {
+       s.Expiration = v
+       return s
+}
+
+// SetID sets the ID field's value.
+func (s *Rule) SetID(v string) *Rule {
+       s.ID = &v
+       return s
+}
+
+// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value.
+func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule {
+       s.NoncurrentVersionExpiration = v
+       return s
+}
+
+// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value.
+func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule {
+       s.NoncurrentVersionTransition = v
+       return s
+}
+
+// SetPrefix sets the Prefix field's value.
+func (s *Rule) SetPrefix(v string) *Rule {
+       s.Prefix = &v
+       return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *Rule) SetStatus(v string) *Rule {
+       s.Status = &v
+       return s
+}
+
+// SetTransition sets the Transition field's value.
+func (s *Rule) SetTransition(v *Transition) *Rule {
+       s.Transition = v
+       return s
+}
+
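+// exampleLifecycleRule is an illustrative sketch, not generated API surface:
+// it assembles a Rule with the required Prefix and Status plus an optional
+// transition and expiration. The identifier, prefix, and day counts are
+// placeholders; LifecycleExpiration.SetDays is assumed to be the setter
+// defined earlier in this file.
+func exampleLifecycleRule() *Rule {
+       return (&Rule{}).
+               SetID("archive-logs"). // placeholder rule identifier
+               SetPrefix("logs/").    // required: keys the rule applies to
+               SetStatus(ExpirationStatusEnabled).
+               SetTransition((&Transition{}).
+                       SetDays(30).
+                       SetStorageClass(TransitionStorageClassGlacier)).
+               SetExpiration((&LifecycleExpiration{}).SetDays(365))
+}
+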
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis
+type StorageClassAnalysis struct {
+       _ struct{} `type:"structure"`
+
+       // A container used to describe how data related to the storage class analysis
+       // should be exported.
+       DataExport *StorageClassAnalysisDataExport `type:"structure"`
+}
+
+// String returns the string representation
+func (s StorageClassAnalysis) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StorageClassAnalysis) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StorageClassAnalysis) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"}
+       if s.DataExport != nil {
+               if err := s.DataExport.Validate(); err != nil {
+                       invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDataExport sets the DataExport field's value.
+func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis {
+       s.DataExport = v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport
+type StorageClassAnalysisDataExport struct {
+       _ struct{} `type:"structure"`
+
+       // The place to store the data for an analysis.
+       //
+       // Destination is a required field
+       Destination *AnalyticsExportDestination `type:"structure" required:"true"`
+
+       // The version of the output schema to use when exporting data. Must be V_1.
+       //
+       // OutputSchemaVersion is a required field
+       OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"`
+}
+
+// String returns the string representation
+func (s StorageClassAnalysisDataExport) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s StorageClassAnalysisDataExport) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *StorageClassAnalysisDataExport) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"}
+       if s.Destination == nil {
+               invalidParams.Add(request.NewErrParamRequired("Destination"))
+       }
+       if s.OutputSchemaVersion == nil {
+               invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion"))
+       }
+       if s.Destination != nil {
+               if err := s.Destination.Validate(); err != nil {
+                       invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDestination sets the Destination field's value.
+func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport {
+       s.Destination = v
+       return s
+}
+
+// SetOutputSchemaVersion sets the OutputSchemaVersion field's value.
+func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport {
+       s.OutputSchemaVersion = &v
+       return s
+}
+
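+// exampleStorageClassAnalysis is an illustrative sketch, not generated API
+// surface: it wires the required Destination and OutputSchemaVersion into a
+// StorageClassAnalysis. The bucket ARN is a placeholder, and the
+// AnalyticsS3BucketDestination setters are assumed to be those defined
+// earlier in this file.
+func exampleStorageClassAnalysis() *StorageClassAnalysis {
+       dest := (&AnalyticsS3BucketDestination{}).
+               SetBucket("arn:aws:s3:::example-results-bucket"). // placeholder ARN
+               SetFormat(AnalyticsS3ExportFileFormatCsv)
+       return (&StorageClassAnalysis{}).
+               SetDataExport((&StorageClassAnalysisDataExport{}).
+                       SetDestination((&AnalyticsExportDestination{}).
+                               SetS3BucketDestination(dest)).
+                       SetOutputSchemaVersion(StorageClassAnalysisSchemaVersionV1))
+}
+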
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag
+type Tag struct {
+       _ struct{} `type:"structure"`
+
+       // Name of the tag.
+       //
+       // Key is a required field
+       Key *string `min:"1" type:"string" required:"true"`
+
+       // Value of the tag.
+       //
+       // Value is a required field
+       Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "Tag"}
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+       if s.Value == nil {
+               invalidParams.Add(request.NewErrParamRequired("Value"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetKey sets the Key field's value.
+func (s *Tag) SetKey(v string) *Tag {
+       s.Key = &v
+       return s
+}
+
+// SetValue sets the Value field's value.
+func (s *Tag) SetValue(v string) *Tag {
+       s.Value = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging
+type Tagging struct {
+       _ struct{} `type:"structure"`
+
+       // TagSet is a required field
+       TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s Tagging) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tagging) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tagging) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "Tagging"}
+       if s.TagSet == nil {
+               invalidParams.Add(request.NewErrParamRequired("TagSet"))
+       }
+       if s.TagSet != nil {
+               for i, v := range s.TagSet {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetTagSet sets the TagSet field's value.
+func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
+       s.TagSet = v
+       return s
+}
+
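+// exampleTagging is an illustrative sketch, not generated API surface: it
+// builds the required TagSet from Tag values whose keys and values are
+// placeholders. Each Tag satisfies the Validate rules above (a non-nil Key
+// of at least one character and a non-nil Value).
+func exampleTagging() *Tagging {
+       return (&Tagging{}).SetTagSet([]*Tag{
+               (&Tag{}).SetKey("environment").SetValue("staging"),
+               (&Tag{}).SetKey("team").SetValue("storage"),
+       })
+}
+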
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant
+type TargetGrant struct {
+       _ struct{} `type:"structure"`
+
+       Grantee *Grantee `type:"structure"`
+
+       // Logging permissions assigned to the Grantee for the bucket.
+       Permission *string `type:"string" enum:"BucketLogsPermission"`
+}
+
+// String returns the string representation
+func (s TargetGrant) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TargetGrant) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TargetGrant) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "TargetGrant"}
+       if s.Grantee != nil {
+               if err := s.Grantee.Validate(); err != nil {
+                       invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetGrantee sets the Grantee field's value.
+func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant {
+       s.Grantee = v
+       return s
+}
+
+// SetPermission sets the Permission field's value.
+func (s *TargetGrant) SetPermission(v string) *TargetGrant {
+       s.Permission = &v
+       return s
+}
+
+// Container for specifying the configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Notification Service (Amazon SNS) topic.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration
+type TopicConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Events is a required field
+       Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+       // Container for object key name filtering rules. For information about key
+       // name filtering, see Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html).
+       Filter *NotificationConfigurationFilter `type:"structure"`
+
+       // Optional unique identifier for configurations in a notification configuration.
+       // If you don't provide one, Amazon S3 will assign an ID.
+       Id *string `type:"string"`
+
+       // Amazon SNS topic ARN to which Amazon S3 will publish a message when it
+       // detects events of the specified type.
+       //
+       // TopicArn is a required field
+       TopicArn *string `locationName:"Topic" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TopicConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TopicConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"}
+       if s.Events == nil {
+               invalidParams.Add(request.NewErrParamRequired("Events"))
+       }
+       if s.TopicArn == nil {
+               invalidParams.Add(request.NewErrParamRequired("TopicArn"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetEvents sets the Events field's value.
+func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration {
+       s.Events = v
+       return s
+}
+
+// SetFilter sets the Filter field's value.
+func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration {
+       s.Filter = v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *TopicConfiguration) SetId(v string) *TopicConfiguration {
+       s.Id = &v
+       return s
+}
+
+// SetTopicArn sets the TopicArn field's value.
+func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration {
+       s.TopicArn = &v
+       return s
+}
+
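+// exampleTopicConfiguration is an illustrative sketch, not generated API
+// surface: it fills the two required fields, Events and TopicArn, using one
+// of the Event enum values defined later in this file. The topic ARN is a
+// placeholder.
+func exampleTopicConfiguration() *TopicConfiguration {
+       created := EventS3ObjectCreated // constants cannot be addressed directly
+       return (&TopicConfiguration{}).
+               SetTopicArn("arn:aws:sns:us-east-1:123456789012:example-topic").
+               SetEvents([]*string{&created})
+}
+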
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated
+type TopicConfigurationDeprecated struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket event for which to send notifications.
+       Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+       Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+       // Optional unique identifier for configurations in a notification configuration.
+       // If you don't provide one, Amazon S3 will assign an ID.
+       Id *string `type:"string"`
+
+       // Amazon SNS topic to which Amazon S3 will publish a message to report the
+       // specified events for the bucket.
+       Topic *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TopicConfigurationDeprecated) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfigurationDeprecated) GoString() string {
+       return s.String()
+}
+
+// SetEvent sets the Event field's value.
+func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated {
+       s.Event = &v
+       return s
+}
+
+// SetEvents sets the Events field's value.
+func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated {
+       s.Events = v
+       return s
+}
+
+// SetId sets the Id field's value.
+func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated {
+       s.Id = &v
+       return s
+}
+
+// SetTopic sets the Topic field's value.
+func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated {
+       s.Topic = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition
+type Transition struct {
+       _ struct{} `type:"structure"`
+
+       // Indicates the date on which the object is to be moved or deleted. Should
+       // be in ISO 8601 format (GMT).
+       Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+       // Indicates the lifetime, in days, of the objects that are subject to the rule.
+       // The value must be a non-zero positive integer.
+       Days *int64 `type:"integer"`
+
+       // The class of storage used to store the object.
+       StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
+
+// String returns the string representation
+func (s Transition) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Transition) GoString() string {
+       return s.String()
+}
+
+// SetDate sets the Date field's value.
+func (s *Transition) SetDate(v time.Time) *Transition {
+       s.Date = &v
+       return s
+}
+
+// SetDays sets the Days field's value.
+func (s *Transition) SetDays(v int64) *Transition {
+       s.Days = &v
+       return s
+}
+
+// SetStorageClass sets the StorageClass field's value.
+func (s *Transition) SetStorageClass(v string) *Transition {
+       s.StorageClass = &v
+       return s
+}
+
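+// exampleDateTransition is an illustrative sketch, not generated API surface:
+// it shows the date-based form of a Transition; the Days-based form appears
+// in exampleLifecycleRule above. The cut-over date is a placeholder.
+func exampleDateTransition() *Transition {
+       cutover := time.Date(2018, time.January, 1, 0, 0, 0, 0, time.UTC)
+       return (&Transition{}).
+               SetDate(cutover).
+               SetStorageClass(TransitionStorageClassStandardIa)
+}
+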
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest
+type UploadPartCopyInput struct {
+       _ struct{} `type:"structure"`
+
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // The name of the source bucket and the key name of the source object, separated
+       // by a slash (/). Must be URL-encoded.
+       //
+       // CopySource is a required field
+       CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+       // Copies the object if its entity tag (ETag) matches the specified tag.
+       CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+       // Copies the object if it has been modified since the specified time.
+       CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+       // Copies the object if its entity tag (ETag) is different than the specified
+       // ETag.
+       CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+       // Copies the object if it hasn't been modified since the specified time.
+       CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+       // The range of bytes to copy from the source object. The range value must use
+       // the form bytes=first-last, where first and last are the zero-based byte
+       // offsets to copy. For example, bytes=0-9 indicates that you want to copy the
+       // first ten bytes of the source. You can copy a range only if the source object
+       // is greater than 5 GB.
+       CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+       // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+       CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+       // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+       // the source object. The encryption key provided in this header must be one
+       // that was used when the source object was created.
+       CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+       // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+       // Amazon S3 uses this header for a message integrity check to ensure the encryption
+       // key was transmitted without error.
+       CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Part number of the part being copied. This is a positive integer between
+       // 1 and 10,000.
+       //
+       // PartNumber is a required field
+       PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+       // Confirms that the requester knows that they will be charged for the request.
+       // Bucket owners need not specify this parameter in their requests. Documentation
+       // on downloading objects from Requester Pays buckets can be found at
+       // http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+       // data. This value is used to store the object and then it is discarded; Amazon
+       // S3 does not store the encryption key. The key must be appropriate for use with
+       // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+       // header. This must be the same encryption key specified in the initiate multipart
+       // upload request.
+       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+       // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+       // Amazon S3 uses this header for a message integrity check to ensure the encryption
+       // key was transmitted without error.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // Upload ID identifying the multipart upload whose part is being copied.
+       //
+       // UploadId is a required field
+       UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartCopyInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.CopySource == nil {
+               invalidParams.Add(request.NewErrParamRequired("CopySource"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+       if s.PartNumber == nil {
+               invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+       }
+       if s.UploadId == nil {
+               invalidParams.Add(request.NewErrParamRequired("UploadId"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetCopySource sets the CopySource field's value.
+func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput {
+       s.CopySource = &v
+       return s
+}
+
+// SetCopySourceIfMatch sets the CopySourceIfMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput {
+       s.CopySourceIfMatch = &v
+       return s
+}
+
+// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput {
+       s.CopySourceIfModifiedSince = &v
+       return s
+}
+
+// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput {
+       s.CopySourceIfNoneMatch = &v
+       return s
+}
+
+// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value.
+func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput {
+       s.CopySourceIfUnmodifiedSince = &v
+       return s
+}
+
+// SetCopySourceRange sets the CopySourceRange field's value.
+func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput {
+       s.CopySourceRange = &v
+       return s
+}
+
+// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+       s.CopySourceSSECustomerAlgorithm = &v
+       return s
+}
+
+// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput {
+       s.CopySourceSSECustomerKey = &v
+       return s
+}
+
+// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+       s.CopySourceSSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput {
+       s.Key = &v
+       return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput {
+       s.PartNumber = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput {
+       s.SSECustomerKey = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput {
+       s.UploadId = &v
+       return s
+}
+
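+// exampleUploadPartCopy is an illustrative sketch, not generated API surface:
+// it copies the first 5 MiB of a source object as part 1 of an existing
+// multipart upload. The bucket, key, and upload ID values are placeholders;
+// a real upload ID comes from CreateMultipartUpload.
+func exampleUploadPartCopy() *UploadPartCopyInput {
+       return (&UploadPartCopyInput{}).
+               SetBucket("example-destination-bucket").
+               SetKey("large-object").
+               SetCopySource("example-source-bucket/large-object"). // URL-encoded bucket/key
+               SetCopySourceRange("bytes=0-5242879"). // zero-based, inclusive offsets
+               SetPartNumber(1).
+               SetUploadId("EXAMPLE-UPLOAD-ID")
+}
+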
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput
+type UploadPartCopyOutput struct {
+       _ struct{} `type:"structure" payload:"CopyPartResult"`
+
+       CopyPartResult *CopyPartResult `type:"structure"`
+
+       // The version of the source object that was copied, if you have enabled versioning
+       // on the source bucket.
+       CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header confirming the encryption algorithm
+       // used.
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header to provide round trip message integrity
+       // verification of the customer-provided encryption key.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // If present, specifies the ID of the AWS Key Management Service (KMS) master
+       // encryption key that was used for the object.
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyOutput) GoString() string {
+       return s.String()
+}
+
+// SetCopyPartResult sets the CopyPartResult field's value.
+func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput {
+       s.CopyPartResult = v
+       return s
+}
+
+// SetCopySourceVersionId sets the CopySourceVersionId field's value.
+func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput {
+       s.CopySourceVersionId = &v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest
+type UploadPartInput struct {
+       _ struct{} `type:"structure" payload:"Body"`
+
+       // Object data.
+       Body io.ReadSeeker `type:"blob"`
+
+       // Name of the bucket to which the multipart upload was initiated.
+       //
+       // Bucket is a required field
+       Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+       // Size of the body in bytes. This parameter is useful when the size of the
+       // body cannot be determined automatically.
+       ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+       // Object key for which the multipart upload was initiated.
+       //
+       // Key is a required field
+       Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+       // Part number of the part being uploaded. This is a positive integer between
+       // 1 and 10,000.
+       //
+       // PartNumber is a required field
+       PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+       // Confirms that the requester knows that they will be charged for the request.
+       // Bucket owners need not specify this parameter in their requests. Documentation
+       // on downloading objects from Requester Pays buckets can be found at
+       // http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+       RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+       // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+       // data. This value is used to store the object and then it is discarded; Amazon
+       // S3 does not store the encryption key. The key must be appropriate for use with
+       // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+       // header. This must be the same encryption key specified in the initiate multipart
+       // upload request.
+       SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+       // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+       // Amazon S3 uses this header for a message integrity check to ensure the encryption
+       // key was transmitted without error.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // Upload ID identifying the multipart upload whose part is being uploaded.
+       //
+       // UploadId is a required field
+       UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"}
+       if s.Bucket == nil {
+               invalidParams.Add(request.NewErrParamRequired("Bucket"))
+       }
+       if s.Key == nil {
+               invalidParams.Add(request.NewErrParamRequired("Key"))
+       }
+       if s.Key != nil && len(*s.Key) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+       }
+       if s.PartNumber == nil {
+               invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+       }
+       if s.UploadId == nil {
+               invalidParams.Add(request.NewErrParamRequired("UploadId"))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetBody sets the Body field's value.
+func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput {
+       s.Body = v
+       return s
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
+       s.Bucket = &v
+       return s
+}
+
+// SetContentLength sets the ContentLength field's value.
+func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput {
+       s.ContentLength = &v
+       return s
+}
+
+// SetKey sets the Key field's value.
+func (s *UploadPartInput) SetKey(v string) *UploadPartInput {
+       s.Key = &v
+       return s
+}
+
+// SetPartNumber sets the PartNumber field's value.
+func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput {
+       s.PartNumber = &v
+       return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput {
+       s.RequestPayer = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKey sets the SSECustomerKey field's value.
+func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput {
+       s.SSECustomerKey = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetUploadId sets the UploadId field's value.
+func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput {
+       s.UploadId = &v
+       return s
+}
+
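+// exampleUploadPart is an illustrative sketch, not generated API surface: it
+// fills the required Bucket, Key, PartNumber, and UploadId fields and attaches
+// the part data as an io.ReadSeeker supplied by the caller. All string values
+// are placeholders.
+func exampleUploadPart(body io.ReadSeeker) *UploadPartInput {
+       return (&UploadPartInput{}).
+               SetBucket("example-bucket").
+               SetKey("large-object").
+               SetPartNumber(1).
+               SetUploadId("EXAMPLE-UPLOAD-ID").
+               SetBody(body)
+}
+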
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput
+type UploadPartOutput struct {
+       _ struct{} `type:"structure"`
+
+       // Entity tag for the uploaded object.
+       ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+       // If present, indicates that the requester was successfully charged for the
+       // request.
+       RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header confirming the encryption algorithm
+       // used.
+       SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+       // If server-side encryption with a customer-provided encryption key was requested,
+       // the response will include this header to provide round trip message integrity
+       // verification of the customer-provided encryption key.
+       SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+       // If present, specifies the ID of the AWS Key Management Service (KMS) master
+       // encryption key that was used for the object.
+       SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+       // The server-side encryption algorithm used when storing this object in S3
+       // (e.g., AES256, aws:kms).
+       ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartOutput) GoString() string {
+       return s.String()
+}
+
+// SetETag sets the ETag field's value.
+func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput {
+       s.ETag = &v
+       return s
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput {
+       s.RequestCharged = &v
+       return s
+}
+
+// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value.
+func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput {
+       s.SSECustomerAlgorithm = &v
+       return s
+}
+
+// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
+func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput {
+       s.SSECustomerKeyMD5 = &v
+       return s
+}
+
+// SetSSEKMSKeyId sets the SSEKMSKeyId field's value.
+func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput {
+       s.SSEKMSKeyId = &v
+       return s
+}
+
+// SetServerSideEncryption sets the ServerSideEncryption field's value.
+func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput {
+       s.ServerSideEncryption = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration
+type VersioningConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+       // This element is only returned if the bucket has been configured with MFA
+       // delete. If the bucket has never been so configured, this element is not returned.
+       MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"`
+
+       // The versioning state of the bucket.
+       Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s VersioningConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VersioningConfiguration) GoString() string {
+       return s.String()
+}
+
+// SetMFADelete sets the MFADelete field's value.
+func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration {
+       s.MFADelete = &v
+       return s
+}
+
+// SetStatus sets the Status field's value.
+func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration {
+       s.Status = &v
+       return s
+}
+
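+// exampleVersioningConfiguration is an illustrative sketch, not generated API
+// surface: it turns bucket versioning on with one of the
+// BucketVersioningStatus enum values defined later in this file.
+func exampleVersioningConfiguration() *VersioningConfiguration {
+       return (&VersioningConfiguration{}).
+               SetStatus(BucketVersioningStatusEnabled)
+}
+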
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration
+type WebsiteConfiguration struct {
+       _ struct{} `type:"structure"`
+
+       ErrorDocument *ErrorDocument `type:"structure"`
+
+       IndexDocument *IndexDocument `type:"structure"`
+
+       RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+       RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation
+func (s WebsiteConfiguration) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WebsiteConfiguration) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WebsiteConfiguration) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"}
+       if s.ErrorDocument != nil {
+               if err := s.ErrorDocument.Validate(); err != nil {
+                       invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams))
+               }
+       }
+       if s.IndexDocument != nil {
+               if err := s.IndexDocument.Validate(); err != nil {
+                       invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams))
+               }
+       }
+       if s.RedirectAllRequestsTo != nil {
+               if err := s.RedirectAllRequestsTo.Validate(); err != nil {
+                       invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams))
+               }
+       }
+       if s.RoutingRules != nil {
+               for i, v := range s.RoutingRules {
+                       if v == nil {
+                               continue
+                       }
+                       if err := v.Validate(); err != nil {
+                               invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams))
+                       }
+               }
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetErrorDocument sets the ErrorDocument field's value.
+func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration {
+       s.ErrorDocument = v
+       return s
+}
+
+// SetIndexDocument sets the IndexDocument field's value.
+func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration {
+       s.IndexDocument = v
+       return s
+}
+
+// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value.
+func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration {
+       s.RedirectAllRequestsTo = v
+       return s
+}
+
+// SetRoutingRules sets the RoutingRules field's value.
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration {
+       s.RoutingRules = v
+       return s
+}
+
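+// exampleWebsiteConfiguration is an illustrative sketch, not generated API
+// surface: it combines index and error documents with the routing rule built
+// in exampleRoutingRule above. The document names are placeholders, and
+// IndexDocument.SetSuffix and ErrorDocument.SetKey are assumed to be the
+// setters defined earlier in this file.
+func exampleWebsiteConfiguration() *WebsiteConfiguration {
+       return (&WebsiteConfiguration{}).
+               SetIndexDocument((&IndexDocument{}).SetSuffix("index.html")).
+               SetErrorDocument((&ErrorDocument{}).SetKey("error.html")).
+               SetRoutingRules([]*RoutingRule{exampleRoutingRule()})
+}
+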
+const (
+       // AnalyticsS3ExportFileFormatCsv is an AnalyticsS3ExportFileFormat enum value
+       AnalyticsS3ExportFileFormatCsv = "CSV"
+)
+
+const (
+       // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value
+       BucketAccelerateStatusEnabled = "Enabled"
+
+       // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value
+       BucketAccelerateStatusSuspended = "Suspended"
+)
+
+const (
+       // BucketCannedACLPrivate is a BucketCannedACL enum value
+       BucketCannedACLPrivate = "private"
+
+       // BucketCannedACLPublicRead is a BucketCannedACL enum value
+       BucketCannedACLPublicRead = "public-read"
+
+       // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value
+       BucketCannedACLPublicReadWrite = "public-read-write"
+
+       // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value
+       BucketCannedACLAuthenticatedRead = "authenticated-read"
+)
+
+const (
+       // BucketLocationConstraintEu is a BucketLocationConstraint enum value
+       BucketLocationConstraintEu = "EU"
+
+       // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value
+       BucketLocationConstraintEuWest1 = "eu-west-1"
+
+       // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value
+       BucketLocationConstraintUsWest1 = "us-west-1"
+
+       // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value
+       BucketLocationConstraintUsWest2 = "us-west-2"
+
+       // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value
+       BucketLocationConstraintApSouth1 = "ap-south-1"
+
+       // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value
+       BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
+
+       // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value
+       BucketLocationConstraintApSoutheast2 = "ap-southeast-2"
+
+       // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value
+       BucketLocationConstraintApNortheast1 = "ap-northeast-1"
+
+       // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value
+       BucketLocationConstraintSaEast1 = "sa-east-1"
+
+       // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value
+       BucketLocationConstraintCnNorth1 = "cn-north-1"
+
+       // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value
+       BucketLocationConstraintEuCentral1 = "eu-central-1"
+)
+
+const (
+       // BucketLogsPermissionFullControl is a BucketLogsPermission enum value
+       BucketLogsPermissionFullControl = "FULL_CONTROL"
+
+       // BucketLogsPermissionRead is a BucketLogsPermission enum value
+       BucketLogsPermissionRead = "READ"
+
+       // BucketLogsPermissionWrite is a BucketLogsPermission enum value
+       BucketLogsPermissionWrite = "WRITE"
+)
+
+const (
+       // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value
+       BucketVersioningStatusEnabled = "Enabled"
+
+       // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value
+       BucketVersioningStatusSuspended = "Suspended"
+)
+
+// Requests Amazon S3 to encode the object keys in the response and specifies
+// the encoding method to use. An object key may contain any Unicode character;
+// however, an XML 1.0 parser cannot parse some characters, such as characters
+// with an ASCII value from 0 to 10. For characters that are not supported in
+// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+// keys in the response.
+const (
+       // EncodingTypeUrl is an EncodingType enum value
+       EncodingTypeUrl = "url"
+)
+
+// Bucket event for which to send notifications.
+const (
+       // EventS3ReducedRedundancyLostObject is an Event enum value
+       EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+
+       // EventS3ObjectCreated is an Event enum value
+       EventS3ObjectCreated = "s3:ObjectCreated:*"
+
+       // EventS3ObjectCreatedPut is an Event enum value
+       EventS3ObjectCreatedPut = "s3:ObjectCreated:Put"
+
+       // EventS3ObjectCreatedPost is an Event enum value
+       EventS3ObjectCreatedPost = "s3:ObjectCreated:Post"
+
+       // EventS3ObjectCreatedCopy is an Event enum value
+       EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+
+       // EventS3ObjectCreatedCompleteMultipartUpload is an Event enum value
+       EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+
+       // EventS3ObjectRemoved is an Event enum value
+       EventS3ObjectRemoved = "s3:ObjectRemoved:*"
+
+       // EventS3ObjectRemovedDelete is an Event enum value
+       EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+
+       // EventS3ObjectRemovedDeleteMarkerCreated is an Event enum value
+       EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+)
+
+const (
+       // ExpirationStatusEnabled is an ExpirationStatus enum value
+       ExpirationStatusEnabled = "Enabled"
+
+       // ExpirationStatusDisabled is an ExpirationStatus enum value
+       ExpirationStatusDisabled = "Disabled"
+)
+
+const (
+       // FilterRuleNamePrefix is a FilterRuleName enum value
+       FilterRuleNamePrefix = "prefix"
+
+       // FilterRuleNameSuffix is a FilterRuleName enum value
+       FilterRuleNameSuffix = "suffix"
+)
+
+const (
+       // InventoryFormatCsv is an InventoryFormat enum value
+       InventoryFormatCsv = "CSV"
+)
+
+const (
+       // InventoryFrequencyDaily is an InventoryFrequency enum value
+       InventoryFrequencyDaily = "Daily"
+
+       // InventoryFrequencyWeekly is an InventoryFrequency enum value
+       InventoryFrequencyWeekly = "Weekly"
+)
+
+const (
+       // InventoryIncludedObjectVersionsAll is an InventoryIncludedObjectVersions enum value
+       InventoryIncludedObjectVersionsAll = "All"
+
+       // InventoryIncludedObjectVersionsCurrent is an InventoryIncludedObjectVersions enum value
+       InventoryIncludedObjectVersionsCurrent = "Current"
+)
+
+const (
+       // InventoryOptionalFieldSize is an InventoryOptionalField enum value
+       InventoryOptionalFieldSize = "Size"
+
+       // InventoryOptionalFieldLastModifiedDate is an InventoryOptionalField enum value
+       InventoryOptionalFieldLastModifiedDate = "LastModifiedDate"
+
+       // InventoryOptionalFieldStorageClass is an InventoryOptionalField enum value
+       InventoryOptionalFieldStorageClass = "StorageClass"
+
+       // InventoryOptionalFieldEtag is an InventoryOptionalField enum value
+       InventoryOptionalFieldEtag = "ETag"
+
+       // InventoryOptionalFieldIsMultipartUploaded is an InventoryOptionalField enum value
+       InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded"
+
+       // InventoryOptionalFieldReplicationStatus is an InventoryOptionalField enum value
+       InventoryOptionalFieldReplicationStatus = "ReplicationStatus"
+)
+
+const (
+       // MFADeleteEnabled is an MFADelete enum value
+       MFADeleteEnabled = "Enabled"
+
+       // MFADeleteDisabled is an MFADelete enum value
+       MFADeleteDisabled = "Disabled"
+)
+
+const (
+       // MFADeleteStatusEnabled is an MFADeleteStatus enum value
+       MFADeleteStatusEnabled = "Enabled"
+
+       // MFADeleteStatusDisabled is an MFADeleteStatus enum value
+       MFADeleteStatusDisabled = "Disabled"
+)
+
+const (
+       // MetadataDirectiveCopy is a MetadataDirective enum value
+       MetadataDirectiveCopy = "COPY"
+
+       // MetadataDirectiveReplace is a MetadataDirective enum value
+       MetadataDirectiveReplace = "REPLACE"
+)
+
+const (
+       // ObjectCannedACLPrivate is an ObjectCannedACL enum value
+       ObjectCannedACLPrivate = "private"
+
+       // ObjectCannedACLPublicRead is an ObjectCannedACL enum value
+       ObjectCannedACLPublicRead = "public-read"
+
+       // ObjectCannedACLPublicReadWrite is an ObjectCannedACL enum value
+       ObjectCannedACLPublicReadWrite = "public-read-write"
+
+       // ObjectCannedACLAuthenticatedRead is an ObjectCannedACL enum value
+       ObjectCannedACLAuthenticatedRead = "authenticated-read"
+
+       // ObjectCannedACLAwsExecRead is an ObjectCannedACL enum value
+       ObjectCannedACLAwsExecRead = "aws-exec-read"
+
+       // ObjectCannedACLBucketOwnerRead is an ObjectCannedACL enum value
+       ObjectCannedACLBucketOwnerRead = "bucket-owner-read"
+
+       // ObjectCannedACLBucketOwnerFullControl is an ObjectCannedACL enum value
+       ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control"
+)
+
+const (
+       // ObjectStorageClassStandard is an ObjectStorageClass enum value
+       ObjectStorageClassStandard = "STANDARD"
+
+       // ObjectStorageClassReducedRedundancy is an ObjectStorageClass enum value
+       ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+
+       // ObjectStorageClassGlacier is an ObjectStorageClass enum value
+       ObjectStorageClassGlacier = "GLACIER"
+)
+
+const (
+       // ObjectVersionStorageClassStandard is an ObjectVersionStorageClass enum value
+       ObjectVersionStorageClassStandard = "STANDARD"
+)
+
+const (
+       // PayerRequester is a Payer enum value
+       PayerRequester = "Requester"
+
+       // PayerBucketOwner is a Payer enum value
+       PayerBucketOwner = "BucketOwner"
+)
+
+const (
+       // PermissionFullControl is a Permission enum value
+       PermissionFullControl = "FULL_CONTROL"
+
+       // PermissionWrite is a Permission enum value
+       PermissionWrite = "WRITE"
+
+       // PermissionWriteAcp is a Permission enum value
+       PermissionWriteAcp = "WRITE_ACP"
+
+       // PermissionRead is a Permission enum value
+       PermissionRead = "READ"
+
+       // PermissionReadAcp is a Permission enum value
+       PermissionReadAcp = "READ_ACP"
+)
+
+const (
+       // ProtocolHttp is a Protocol enum value
+       ProtocolHttp = "http"
+
+       // ProtocolHttps is a Protocol enum value
+       ProtocolHttps = "https"
+)
+
+const (
+       // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value
+       ReplicationRuleStatusEnabled = "Enabled"
+
+       // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value
+       ReplicationRuleStatusDisabled = "Disabled"
+)
+
+const (
+       // ReplicationStatusComplete is a ReplicationStatus enum value
+       ReplicationStatusComplete = "COMPLETE"
+
+       // ReplicationStatusPending is a ReplicationStatus enum value
+       ReplicationStatusPending = "PENDING"
+
+       // ReplicationStatusFailed is a ReplicationStatus enum value
+       ReplicationStatusFailed = "FAILED"
+
+       // ReplicationStatusReplica is a ReplicationStatus enum value
+       ReplicationStatusReplica = "REPLICA"
+)
+
+// If present, indicates that the requester was successfully charged for the
+// request.
+const (
+       // RequestChargedRequester is a RequestCharged enum value
+       RequestChargedRequester = "requester"
+)
+
+// Confirms that the requester knows that she or he will be charged for the
+// request. Bucket owners need not specify this parameter in their requests.
+// Documentation on downloading objects from requester pays buckets can be found
+// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+const (
+       // RequestPayerRequester is a RequestPayer enum value
+       RequestPayerRequester = "requester"
+)
+
+const (
+       // ServerSideEncryptionAes256 is a ServerSideEncryption enum value
+       ServerSideEncryptionAes256 = "AES256"
+
+       // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value
+       ServerSideEncryptionAwsKms = "aws:kms"
+)
+
+const (
+       // StorageClassStandard is a StorageClass enum value
+       StorageClassStandard = "STANDARD"
+
+       // StorageClassReducedRedundancy is a StorageClass enum value
+       StorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+
+       // StorageClassStandardIa is a StorageClass enum value
+       StorageClassStandardIa = "STANDARD_IA"
+)
+
+const (
+       // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value
+       StorageClassAnalysisSchemaVersionV1 = "V_1"
+)
+
+const (
+       // TaggingDirectiveCopy is a TaggingDirective enum value
+       TaggingDirectiveCopy = "COPY"
+
+       // TaggingDirectiveReplace is a TaggingDirective enum value
+       TaggingDirectiveReplace = "REPLACE"
+)
+
+const (
+       // TierStandard is a Tier enum value
+       TierStandard = "Standard"
+
+       // TierBulk is a Tier enum value
+       TierBulk = "Bulk"
+
+       // TierExpedited is a Tier enum value
+       TierExpedited = "Expedited"
+)
+
+const (
+       // TransitionStorageClassGlacier is a TransitionStorageClass enum value
+       TransitionStorageClassGlacier = "GLACIER"
+
+       // TransitionStorageClassStandardIa is a TransitionStorageClass enum value
+       TransitionStorageClassStandardIa = "STANDARD_IA"
+)
+
+const (
+       // TypeCanonicalUser is a Type enum value
+       TypeCanonicalUser = "CanonicalUser"
+
+       // TypeAmazonCustomerByEmail is a Type enum value
+       TypeAmazonCustomerByEmail = "AmazonCustomerByEmail"
+
+       // TypeGroup is a Type enum value
+       TypeGroup = "Group"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
new file mode 100644 (file)
index 0000000..bc68a46
--- /dev/null
@@ -0,0 +1,106 @@
+package s3
+
+import (
+       "io/ioutil"
+       "regexp"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/awsutil"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+
+// NormalizeBucketLocation is a utility function which will update the
+// passed in value to always be a region ID. Generally this would be used
+// with the GetBucketLocation API operation.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+func NormalizeBucketLocation(loc string) string {
+       switch loc {
+       case "":
+               loc = "us-east-1"
+       case "EU":
+               loc = "eu-west-1"
+       }
+
+       return loc
+}
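+
+// A usage sketch (error handling elided), assuming svc is an *S3 client:
+//
+//     out, _ := svc.GetBucketLocation(&GetBucketLocationInput{
+//         Bucket: aws.String("mybucket"),
+//     })
+//     region := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))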
+
+// NormalizeBucketLocationHandler is a request handler which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+//     req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
+//         Bucket: aws.String(bucket),
+//     })
+//     req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+//     err := req.Send()
+var NormalizeBucketLocationHandler = request.NamedHandler{
+       Name: "awssdk.s3.NormalizeBucketLocation",
+       Fn: func(req *request.Request) {
+               if req.Error != nil {
+                       return
+               }
+
+               out := req.Data.(*GetBucketLocationOutput)
+               loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
+               out.LocationConstraint = aws.String(loc)
+       },
+}
+
+// WithNormalizeBucketLocation is a request option which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+//     result, err := svc.GetBucketLocationWithContext(ctx,
+//         &s3.GetBucketLocationInput{
+//             Bucket: aws.String(bucket),
+//         },
+//         s3.WithNormalizeBucketLocation,
+//     )
+func WithNormalizeBucketLocation(r *request.Request) {
+       r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+}
+
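+// buildGetBucketLocation extracts the LocationConstraint value from the raw
+// GetBucketLocation response body, whose XML shape is not handled by the
+// standard REST XML unmarshaler.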
+func buildGetBucketLocation(r *request.Request) {
+       if r.DataFilled() {
+               out := r.Data.(*GetBucketLocationOutput)
+               b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+               if err != nil {
+                       r.Error = awserr.New("SerializationError", "failed reading response body", err)
+                       return
+               }
+
+               match := reBucketLocation.FindSubmatch(b)
+               if len(match) > 1 {
+                       loc := string(match[1])
+                       out.LocationConstraint = aws.String(loc)
+               }
+       }
+}
+
+func populateLocationConstraint(r *request.Request) {
+       if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
+               in := r.Params.(*CreateBucketInput)
+               if in.CreateBucketConfiguration == nil {
+                       r.Params = awsutil.CopyOf(r.Params)
+                       in = r.Params.(*CreateBucketInput)
+                       in.CreateBucketConfiguration = &CreateBucketConfiguration{
+                               LocationConstraint: r.Config.Region,
+                       }
+               }
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
new file mode 100644 (file)
index 0000000..9fc5df9
--- /dev/null
@@ -0,0 +1,36 @@
+package s3
+
+import (
+       "crypto/md5"
+       "encoding/base64"
+       "io"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *request.Request) {
+       h := md5.New()
+
+       // Hash the body. Seek back to the first position after reading to reset
+       // the body for transmission. Copy errors may be assumed to be from the
+       // body.
+       _, err := io.Copy(h, r.Body)
+       if err != nil {
+               r.Error = awserr.New("ContentMD5", "failed to read body", err)
+               return
+       }
+       _, err = r.Body.Seek(0, 0)
+       if err != nil {
+               r.Error = awserr.New("ContentMD5", "failed to seek body", err)
+               return
+       }
+
+       // encode the md5 checksum in base64 and set the request header.
+       sum := h.Sum(nil)
+       sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+       base64.StdEncoding.Encode(sum64, sum)
+       r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
+}
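+
+// For reference, the header value computed above is equivalent to this
+// standalone sketch over an in-memory body:
+//
+//     sum := md5.Sum(body)
+//     headerValue := base64.StdEncoding.EncodeToString(sum[:])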
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644 (file)
index 0000000..8463347
--- /dev/null
@@ -0,0 +1,46 @@
+package s3
+
+import (
+       "github.com/aws/aws-sdk-go/aws/client"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+       initClient = defaultInitClientFn
+       initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+       // Support building custom endpoints based on config
+       c.Handlers.Build.PushFront(updateEndpointForS3Config)
+
+       // Require SSL when using SSE keys
+       c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+       c.Handlers.Build.PushBack(computeSSEKeys)
+
+       // S3 uses custom error unmarshaling logic
+       c.Handlers.UnmarshalError.Clear()
+       c.Handlers.UnmarshalError.PushBack(unmarshalError)
+}
+
+func defaultInitRequestFn(r *request.Request) {
+       // Add request handlers for specific platforms.
+       // e.g. 100-continue support for PUT requests using Go 1.6
+       platformRequestHandlers(r)
+
+       switch r.Operation.Name {
+       case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
+               opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
+               opPutBucketReplication:
+               // These S3 operations require Content-MD5 to be set
+               r.Handlers.Build.PushBack(contentMD5)
+       case opGetBucketLocation:
+               // GetBucketLocation has custom parsing logic
+               r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+       case opCreateBucket:
+               // Auto-populate LocationConstraint with current region
+               r.Handlers.Validate.PushFront(populateLocationConstraint)
+       case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+               r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarshalError)
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100644 (file)
index 0000000..f045fd0
--- /dev/null
@@ -0,0 +1,78 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See the s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// Using the Client
+//
+// To use the client for Amazon Simple Storage Service you will first need
+// to create a new instance of it.
+//
+// When creating a client for an AWS service you'll first need to have a Session
+// already created. The Session provides configuration that can be shared
+// between multiple service clients. Additional configuration can be applied to
+// the Session and service's client when they are constructed. The aws package's
+// Config type contains several fields such as Region for the AWS Region the
+// client should make API requests to. The optional Config value can be provided
+// as the variadic argument for Sessions and client creation.
+//
+// Once the service's client is created you can use it to make API requests to
+// the AWS service. These clients are safe to use concurrently.
+//
+//   // Create a session to share configuration, and load external configuration.
+//   sess := session.Must(session.NewSession())
+//
+//   // Create the service's client with the session.
+//   svc := s3.New(sess)
+//
+// See the SDK's documentation for more information on how to use service clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws package's Config type for more information on configuration options.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client type, S3, for more
+// information on creating the service's client.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+//
+// Once the client is created you can make an API request to the service.
+// Each API method takes an input parameter, and returns the service response
+// and an error.
+//
+// The API method's documentation will list the error codes the service can
+// return for the operation, if the service models the API operation's errors.
+// These errors will also be available as const strings prefixed with "ErrCode".
+//
+//   result, err := svc.AbortMultipartUpload(params)
+//   if err != nil {
+//       // Cast err to awserr.Error to handle specific error codes.
+//       aerr, ok := err.(awserr.Error)
+//       if ok && aerr.Code() == <error code to check for> {
+//           // Specific error code handling
+//       }
+//       return err
+//   }
+//
+//   fmt.Println("AbortMultipartUpload result:")
+//   fmt.Println(result)
+//
+// Using the Client with Context
+//
+// The service's client also provides methods to make API requests with a Context
+// value. This allows you to control the timeout, and cancellation of pending
+// requests. These methods also take request Option as variadic parameter to apply
+// additional configuration to the API request.
+//
+//   ctx := context.Background()
+//
+//   result, err := svc.AbortMultipartUploadWithContext(ctx, params)
+//
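+// A deadline can be applied with the standard context package; a sketch,
+// assuming a 30 second limit suits the operation:
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//   defer cancel()
+//
+//   result, err := svc.AbortMultipartUploadWithContext(ctx, params)
+//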
+// See the request package documentation for more information on using Context pattern
+// with the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644 (file)
index 0000000..b794a63
--- /dev/null
@@ -0,0 +1,109 @@
+// Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader supports io.Reader
+// for streaming uploads, and will also take advantage of io.ReadSeeker
+// for optimizations if the Body satisfies that type. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+//   // The session the S3 Uploader will use
+//   sess := session.Must(session.NewSession())
+//
+//   // Create an uploader with the session and default options
+//   uploader := s3manager.NewUploader(sess)
+//
+//   f, err  := os.Open(filename)
+//   if err != nil {
+//       return fmt.Errorf("failed to open file %q, %v", filename, err)
+//   }
+//
+//   // Upload the file to S3.
+//   result, err := uploader.Upload(&s3manager.UploadInput{
+//       Bucket: aws.String(myBucket),
+//       Key:    aws.String(myString),
+//       Body:   f,
+//   })
+//   if err != nil {
+//       return fmt.Errorf("failed to upload file, %v", err)
+//   }
+//   fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of Objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+//   // The session the S3 Downloader will use
+//   sess := session.Must(session.NewSession())
+//
+//   // Create a downloader with the session and default options
+//   downloader := s3manager.NewDownloader(sess)
+//
+//   // Create a file to write the S3 Object contents to.
+//   f, err := os.Create(filename)
+//   if err != nil {
+//       return fmt.Errorf("failed to create file %q, %v", filename, err)
+//   }
+//
+//   // Write the contents of S3 Object to the file
+//   n, err := downloader.Download(f, &s3.GetObjectInput{
+//       Bucket: aws.String(myBucket),
+//       Key:    aws.String(myString),
+//   })
+//   if err != nil {
+//       return fmt.Errorf("failed to upload file, %v", err)
+//   }
+//   fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+//   sess := session.Must(session.NewSession())
+//
+//   bucket := "my-bucket"
+//   region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+//   if err != nil {
+//       if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+//            fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+//       }
+//       return err
+//   }
+//   fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+//    sess := session.Must(session.NewSession())
+//
+//    // Create the decryption client.
+//    svc := s3crypto.NewDecryptionClient(sess)
+//
+//    // The object will be downloaded from S3 and decrypted locally. Metadata
+//    // about the object's encryption will instruct the decryption client how
+//    // to decrypt the content of the object. By default KMS is used for keys.
+//    result, err := svc.GetObject(&s3.GetObjectInput {
+//        Bucket: aws.String(myBucket),
+//        Key: aws.String(myKey),
+//    })
+//
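+//    // If the call succeeded, the decrypted body can be read as usual; a
+//    // sketch, assuming the caller drains and closes it:
+//    if err == nil {
+//        defer result.Body.Close()
+//        plaintext, _ := ioutil.ReadAll(result.Body)
+//        _ = plaintext
+//    }
+//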
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+//
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
new file mode 100644 (file)
index 0000000..931cb17
--- /dev/null
@@ -0,0 +1,48 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+const (
+
+       // ErrCodeBucketAlreadyExists for service response error code
+       // "BucketAlreadyExists".
+       //
+       // The requested bucket name is not available. The bucket namespace is shared
+       // by all users of the system. Please select a different name and try again.
+       ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
+
+       // ErrCodeBucketAlreadyOwnedByYou for service response error code
+       // "BucketAlreadyOwnedByYou".
+       ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou"
+
+       // ErrCodeNoSuchBucket for service response error code
+       // "NoSuchBucket".
+       //
+       // The specified bucket does not exist.
+       ErrCodeNoSuchBucket = "NoSuchBucket"
+
+       // ErrCodeNoSuchKey for service response error code
+       // "NoSuchKey".
+       //
+       // The specified key does not exist.
+       ErrCodeNoSuchKey = "NoSuchKey"
+
+       // ErrCodeNoSuchUpload for service response error code
+       // "NoSuchUpload".
+       //
+       // The specified multipart upload does not exist.
+       ErrCodeNoSuchUpload = "NoSuchUpload"
+
+       // ErrCodeObjectAlreadyInActiveTierError for service response error code
+       // "ObjectAlreadyInActiveTierError".
+       //
+       // This operation is not allowed against this storage tier.
+       ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError"
+
+       // ErrCodeObjectNotInActiveTierError for service response error code
+       // "ObjectNotInActiveTierError".
+       //
+       // The source object of the COPY operation is not in the active tier and is
+       // only stored in Amazon Glacier.
+       ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644 (file)
index 0000000..ec3ffe4
--- /dev/null
@@ -0,0 +1,162 @@
+package s3
+
+import (
+       "fmt"
+       "net/url"
+       "regexp"
+       "strings"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/awsutil"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// An operationBlacklist is a list of operation names with which a
+// request handler should not be executed.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+       for i := 0; i < len(b); i++ {
+               if b[i] == r.Operation.Name {
+                       return false
+               }
+       }
+       return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+       opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Request handler to automatically add the bucket name to the endpoint domain
+// if possible. This style of addressing is valid for all bucket names which are
+// DNS compatible and do not contain ".".
+func updateEndpointForS3Config(r *request.Request) {
+       forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+       accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+       if accelerate && accelerateOpBlacklist.Continue(r) {
+               if forceHostStyle {
+                       if r.Config.Logger != nil {
+                               r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+                       }
+               }
+               updateEndpointForAccelerate(r)
+       } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+               updateEndpointForHostStyle(r)
+       }
+}
+
+func updateEndpointForHostStyle(r *request.Request) {
+       bucket, ok := bucketNameFromReqParams(r.Params)
+       if !ok {
+               // Ignore operation requests if the bucket name was not provided.
+               // If this is an input validation error the validation handler
+               // will report it.
+               return
+       }
+
+       if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+               // bucket name must be valid to put into the host
+               return
+       }
+
+       moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+var (
+       accelElem = []byte("s3-accelerate.dualstack.")
+)
+
+func updateEndpointForAccelerate(r *request.Request) {
+       bucket, ok := bucketNameFromReqParams(r.Params)
+       if !ok {
+               // Ignore operation requests if the bucket name was not provided.
+               // If this is an input validation error the validation handler
+               // will report it.
+               return
+       }
+
+       if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+               r.Error = awserr.New("InvalidParameterException",
+                       fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket),
+                       nil)
+               return
+       }
+
+       parts := strings.Split(r.HTTPRequest.URL.Host, ".")
+       if len(parts) < 3 {
+               r.Error = awserr.New("InvalidParameterExecption",
+                       fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s",
+                               r.HTTPRequest.URL.Host), nil)
+               return
+       }
+
+       if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") {
+               parts[0] = "s3-accelerate"
+       }
+       for i := 1; i+1 < len(parts); i++ {
+               if parts[i] == aws.StringValue(r.Config.Region) {
+                       parts = append(parts[:i], parts[i+1:]...)
+                       break
+               }
+       }
+
+       r.HTTPRequest.URL.Host = strings.Join(parts, ".")
+
+       moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
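+
+// For example, with region "us-west-2", a host of "s3.us-west-2.amazonaws.com"
+// is rewritten to "s3-accelerate.amazonaws.com" before the bucket name is
+// prepended by moveBucketToHost.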
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+       b, _ := awsutil.ValuesAtPath(params, "Bucket")
+       if len(b) == 0 {
+               return "", false
+       }
+
+       if bucket, ok := b[0].(*string); ok {
+               if bucketStr := aws.StringValue(bucket); bucketStr != "" {
+                       return bucketStr, true
+               }
+       }
+
+       return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+       // Bucket might be DNS compatible but dots in the hostname will fail
+       // certificate validation, so do not use host-style.
+       if u.Scheme == "https" && strings.Contains(bucket, ".") {
+               return false
+       }
+
+       // if the bucket is DNS compatible
+       return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+       return reDomain.MatchString(bucket) &&
+               !reIPAddress.MatchString(bucket) &&
+               !strings.Contains(bucket, "..")
+}
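+
+// For example:
+//
+//     dnsCompatibleBucketName("my-bucket")   // true
+//     dnsCompatibleBucketName("My_Bucket")   // false: uppercase and underscore
+//     dnsCompatibleBucketName("192.168.0.1") // false: IP address form
+//     dnsCompatibleBucketName("a..b")        // false: consecutive dots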
+
+// moveBucketToHost moves the bucket name from the URI path to the URL host.
+func moveBucketToHost(u *url.URL, bucket string) {
+       u.Host = bucket + "." + u.Host
+       u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+       if u.Path == "" {
+               u.Path = "/"
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
new file mode 100644 (file)
index 0000000..8e6f330
--- /dev/null
@@ -0,0 +1,8 @@
+// +build !go1.6
+
+package s3
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func platformRequestHandlers(r *request.Request) {
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100644 (file)
index 0000000..14d05f7
--- /dev/null
@@ -0,0 +1,28 @@
+// +build go1.6
+
+package s3
+
+import (
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func platformRequestHandlers(r *request.Request) {
+       if r.Operation.HTTPMethod == "PUT" {
+               // 100-Continue should only be used on put requests.
+               r.Handlers.Sign.PushBack(add100Continue)
+       }
+}
+
+func add100Continue(r *request.Request) {
+       if aws.BoolValue(r.Config.S3Disable100Continue) {
+               return
+       }
+       if r.HTTPRequest.ContentLength < 1024*1024*2 {
+               // Ignore requests smaller than 2MB. This helps prevent delaying
+               // requests unnecessarily.
+               return
+       }
+
+       r.HTTPRequest.Header.Set("Expect", "100-Continue")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644 (file)
index 0000000..614e477
--- /dev/null
@@ -0,0 +1,93 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/client"
+       "github.com/aws/aws-sdk-go/aws/client/metadata"
+       "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/aws/signer/v4"
+       "github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type S3 struct {
+       *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+       ServiceName = "s3"        // Service endpoint prefix API calls made to.
+       EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a S3 client from just a session.
+//     svc := s3.New(mySession)
+//
+//     // Create a S3 client with additional configuration
+//     svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+       c := p.ClientConfig(EndpointsID, cfgs...)
+       return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 {
+       svc := &S3{
+               Client: client.New(
+                       cfg,
+                       metadata.ClientInfo{
+                               ServiceName:   ServiceName,
+                               SigningName:   signingName,
+                               SigningRegion: signingRegion,
+                               Endpoint:      endpoint,
+                               APIVersion:    "2006-03-01",
+                       },
+                       handlers,
+               ),
+       }
+
+       // Handlers
+       svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+       svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+       svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+       svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+       svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+       // Run custom client initialization if present
+       if initClient != nil {
+               initClient(svc.Client)
+       }
+
+       return svc
+}
+
+// newRequest creates a new request for an S3 operation and runs any
+// custom request initialization.
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+       req := c.NewRequest(op, params, data)
+
+       // Run custom request initialization if present
+       if initRequest != nil {
+               initRequest(req)
+       }
+
+       return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100644 (file)
index 0000000..268ea2f
--- /dev/null
@@ -0,0 +1,44 @@
+package s3
+
+import (
+       "crypto/md5"
+       "encoding/base64"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/awsutil"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
+func validateSSERequiresSSL(r *request.Request) {
+       if r.HTTPRequest.URL.Scheme != "https" {
+               p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey")
+               if len(p) > 0 {
+                       r.Error = errSSERequiresSSL
+               }
+       }
+}
+
+func computeSSEKeys(r *request.Request) {
+       headers := []string{
+               "x-amz-server-side-encryption-customer-key",
+               "x-amz-copy-source-server-side-encryption-customer-key",
+       }
+
+       for _, h := range headers {
+               md5h := h + "-md5"
+               if key := r.HTTPRequest.Header.Get(h); key != "" {
+                       // Base64-encode the value
+                       b64v := base64.StdEncoding.EncodeToString([]byte(key))
+                       r.HTTPRequest.Header.Set(h, b64v)
+
+                       // Add MD5 if it wasn't computed
+                       if r.HTTPRequest.Header.Get(md5h) == "" {
+                               sum := md5.Sum([]byte(key))
+                               b64sum := base64.StdEncoding.EncodeToString(sum[:])
+                               r.HTTPRequest.Header.Set(md5h, b64sum)
+                       }
+               }
+       }
+}
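+
+// The resulting headers have this shape for a raw key K (values depend on the
+// key bytes):
+//
+//     x-amz-server-side-encryption-customer-key:     base64(K)
+//     x-amz-server-side-encryption-customer-key-md5: base64(md5(K))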
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
new file mode 100644 (file)
index 0000000..5a78fd3
--- /dev/null
@@ -0,0 +1,35 @@
+package s3
+
+import (
+       "bytes"
+       "io/ioutil"
+       "net/http"
+
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
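+// copyMultipartStatusOKUnmarshalError checks a 200 OK response body for an
+// embedded S3 error document, which S3 can return for copy and multipart
+// completion operations even when the HTTP status indicates success.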
+func copyMultipartStatusOKUnmarshalError(r *request.Request) {
+       b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+       if err != nil {
+               r.Error = awserr.New("SerializationError", "unable to read response body", err)
+               return
+       }
+       body := bytes.NewReader(b)
+       r.HTTPResponse.Body = ioutil.NopCloser(body)
+       defer body.Seek(0, 0)
+
+       if body.Len() == 0 {
+               // If there is no body, don't attempt to parse it.
+               return
+       }
+
+       unmarshalError(r)
+       if err, ok := r.Error.(awserr.Error); ok && err != nil {
+               if err.Code() == "SerializationError" {
+                       r.Error = nil
+                       return
+               }
+               r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100644 (file)
index 0000000..bcca862
--- /dev/null
@@ -0,0 +1,103 @@
+package s3
+
+import (
+       "encoding/xml"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "strings"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awserr"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+       XMLName xml.Name `xml:"Error"`
+       Code    string   `xml:"Code"`
+       Message string   `xml:"Message"`
+}
+
+func unmarshalError(r *request.Request) {
+       defer r.HTTPResponse.Body.Close()
+       defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+
+       hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
+
+       // Bucket exists in a different region, and request needs
+       // to be made to the correct region.
+       if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+               r.Error = requestFailure{
+                       RequestFailure: awserr.NewRequestFailure(
+                               awserr.New("BucketRegionError",
+                                       fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
+                                               aws.StringValue(r.Config.Region)),
+                                       nil),
+                               r.HTTPResponse.StatusCode,
+                               r.RequestID,
+                       ),
+                       hostID: hostID,
+               }
+               return
+       }
+
+       var errCode, errMsg string
+
+       // Attempt to parse error from body if it is known
+       resp := &xmlErrorResponse{}
+       err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
+       if err != nil && err != io.EOF {
+               errCode = "SerializationError"
+               errMsg = "failed to decode S3 XML error response"
+       } else {
+               errCode = resp.Code
+               errMsg = resp.Message
+               err = nil
+       }
+
+       // Fallback to status code converted to message if still no error code
+       if len(errCode) == 0 {
+               statusText := http.StatusText(r.HTTPResponse.StatusCode)
+               errCode = strings.Replace(statusText, " ", "", -1)
+               errMsg = statusText
+       }
+
+       r.Error = requestFailure{
+               RequestFailure: awserr.NewRequestFailure(
+                       awserr.New(errCode, errMsg, err),
+                       r.HTTPResponse.StatusCode,
+                       r.RequestID,
+               ),
+               hostID: hostID,
+       }
+}
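+
+// For reference, a typical S3 error body decoded into xmlErrorResponse has
+// this shape (additional fields are ignored):
+//
+//     <Error>
+//         <Code>NoSuchKey</Code>
+//         <Message>The specified key does not exist.</Message>
+//     </Error>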
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+       awserr.RequestFailure
+
+       // HostID returns the S3 Host ID, needed for debugging and for contacting support.
+       HostID() string
+}
+
+type requestFailure struct {
+       awserr.RequestFailure
+
+       hostID string
+}
+
+func (r requestFailure) Error() string {
+       extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+               r.StatusCode(), r.RequestID(), r.hostID)
+       return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r requestFailure) String() string {
+       return r.Error()
+}
+func (r requestFailure) HostID() string {
+       return r.hostID
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644 (file)
index 0000000..cccfa8c
--- /dev/null
@@ -0,0 +1,214 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package s3
+
+import (
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// WaitUntilBucketExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+       return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+       w := request.Waiter{
+               Name:        "WaitUntilBucketExists",
+               MaxAttempts: 20,
+               Delay:       request.ConstantWaiterDelay(5 * time.Second),
+               Acceptors: []request.WaiterAcceptor{
+                       {
+                               State:    request.SuccessWaiterState,
+                               Matcher:  request.StatusWaiterMatch,
+                               Expected: 200,
+                       },
+                       {
+                               State:    request.SuccessWaiterState,
+                               Matcher:  request.StatusWaiterMatch,
+                               Expected: 301,
+                       },
+                       {
+                               State:    request.SuccessWaiterState,
+                               Matcher:  request.StatusWaiterMatch,
+                               Expected: 403,
+                       },
+                       {
+                               State:    request.RetryWaiterState,
+                               Matcher:  request.StatusWaiterMatch,
+                               Expected: 404,
+                       },
+               },
+               Logger: c.Config.Logger,
+               NewRequest: func(opts []request.Option) (*request.Request, error) {
+                       var inCpy *HeadBucketInput
+                       if input != nil {
+                               tmp := *input
+                               inCpy = &tmp
+                       }
+                       req, _ := c.HeadBucketRequest(inCpy)
+                       req.SetContext(ctx)
+                       req.ApplyOptions(opts...)
+                       return req, nil
+               },
+       }
+       w.ApplyOptions(opts...)
+
+       return w.WaitWithContext(ctx)
+}
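+
+// A usage sketch, assuming svc is an *S3 client; WithWaiterMaxAttempts is a
+// request package waiter option assumed available in this SDK version:
+//
+//     err := svc.WaitUntilBucketExistsWithContext(aws.BackgroundContext(),
+//         &HeadBucketInput{Bucket: aws.String("my-bucket")},
+//         request.WithWaiterMaxAttempts(10),
+//     )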
+
+// WaitUntilBucketNotExists uses the Amazon S3 API operation
+// HeadBucket to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
+       return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+       w := request.Waiter{
+               Name:        "WaitUntilBucketNotExists",
+               MaxAttempts: 20,
+               Delay:       request.ConstantWaiterDelay(5 * time.Second),
+               Acceptors: []request.WaiterAcceptor{
+                       {
+                               State:    request.SuccessWaiterState,
+                               Matcher:  request.StatusWaiterMatch,
+                               Expected: 404,
+                       },
+               },
+               Logger: c.Config.Logger,
+               NewRequest: func(opts []request.Option) (*request.Request, error) {
+                       var inCpy *HeadBucketInput
+                       if input != nil {
+                               tmp := *input
+                               inCpy = &tmp
+                       }
+                       req, _ := c.HeadBucketRequest(inCpy)
+                       req.SetContext(ctx)
+                       req.ApplyOptions(opts...)
+                       return req, nil
+               },
+       }
+       w.ApplyOptions(opts...)
+
+       return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
+       return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
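+// populateLocationConstraint defaults CreateBucketConfiguration to the
+// client's configured region when creating a bucket outside us-east-1 and no
+// constraint was provided by the caller.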
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+       w := request.Waiter{
+               Name:        "WaitUntilObjectExists",
+               MaxAttempts: 20,
+               Delay:       request.ConstantWaiterDelay(5 * time.Second),
+               Acceptors: []request.WaiterAcceptor{
+                       {
+                               State:    request.SuccessWaiterState,
+                               Matcher:  request.StatusWaiterMatch,
+                               Expected: 200,
+                       },
+                       {
+                               State:    request.RetryWaiterState,
+                               Matcher:  request.StatusWaiterMatch,
+                               Expected: 404,
+                       },
+               },
+               Logger: c.Config.Logger,
+               NewRequest: func(opts []request.Option) (*request.Request, error) {
+                       var inCpy *HeadObjectInput
+                       if input != nil {
+                               tmp := *input
+                               inCpy = &tmp
+                       }
+                       req, _ := c.HeadObjectRequest(inCpy)
+                       req.SetContext(ctx)
+                       req.ApplyOptions(opts...)
+                       return req, nil
+               },
+       }
+       w.ApplyOptions(opts...)
+
+       return w.WaitWithContext(ctx)
+}
+
+// WaitUntilObjectNotExists uses the Amazon S3 API operation
+// HeadObject to wait for a condition to be met before returning.
+// If the condition is not met within the max attempt window an error will
+// be returned.
+func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
+       return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists,
+// with support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+       w := request.Waiter{
+               Name:        "WaitUntilObjectNotExists",
+               MaxAttempts: 20,
+               Delay:       request.ConstantWaiterDelay(5 * time.Second),
+               Acceptors: []request.WaiterAcceptor{
+                       {
+                               State:    request.SuccessWaiterState,
+                               Matcher:  request.StatusWaiterMatch,
+                               Expected: 404,
+                       },
+               },
+               Logger: c.Config.Logger,
+               NewRequest: func(opts []request.Option) (*request.Request, error) {
+                       var inCpy *HeadObjectInput
+                       if input != nil {
+                               tmp := *input
+                               inCpy = &tmp
+                       }
+                       req, _ := c.HeadObjectRequest(inCpy)
+                       req.SetContext(ctx)
+                       req.ApplyOptions(opts...)
+                       return req, nil
+               },
+       }
+       w.ApplyOptions(opts...)
+
+       return w.WaitWithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
new file mode 100644 (file)
index 0000000..2de6528
--- /dev/null
@@ -0,0 +1,2365 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+       "time"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/awsutil"
+       "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const opAssumeRole = "AssumeRole"
+
+// AssumeRoleRequest generates an "aws/request.Request" representing the
+// client's request for the AssumeRole operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See AssumeRole for usage and error information.
+//
+// Create a request object using this method when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AssumeRole method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the AssumeRoleRequest method.
+//    req, resp := client.AssumeRoleRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
+       op := &request.Operation{
+               Name:       opAssumeRole,
+               HTTPMethod: "POST",
+               HTTPPath:   "/",
+       }
+
+       if input == nil {
+               input = &AssumeRoleInput{}
+       }
+
+       output = &AssumeRoleOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// AssumeRole API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) that you can use to access
+// AWS resources that you might not normally have access to. Typically, you
+// use AssumeRole for cross-account access or federation. For a comparison of
+// AssumeRole with the other APIs that produce temporary credentials, see Requesting
+// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// Important: You cannot call AssumeRole by using AWS root account credentials;
+// access is denied. You must use credentials for an IAM user or an IAM role
+// to call AssumeRole.
+//
+// For cross-account access, imagine that you own multiple accounts and need
+// to access resources in each account. You could create long-term credentials
+// in each account to access those resources. However, managing all those credentials
+// and remembering which one can access which account can be time consuming.
+// Instead, you can create one set of long-term credentials in one account and
+// then use temporary security credentials to access all the other accounts
+// by assuming roles in those accounts. For more information about roles, see
+// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html)
+// in the IAM User Guide.
+//
+// For federation, you can, for example, grant single sign-on access to the
+// AWS Management Console. If you already have an identity and authentication
+// system in your corporate network, you don't have to recreate user identities
+// in AWS in order to grant those user identities access to AWS. Instead, after
+// a user has been authenticated, you call AssumeRole (and specify the role
+// with the appropriate permissions) to get temporary security credentials for
+// that user. With those temporary security credentials, you construct a sign-in
+// URL that users can use to access the console. For more information, see Common
+// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction)
+// in the IAM User Guide.
+//
+// The temporary security credentials are valid for the duration that you specified
+// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a
+// maximum of 3600 seconds (1 hour). The default is 1 hour.
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any AWS service with the following exception: you cannot call
+// the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// To assume a role, your AWS account must be trusted by the role. The trust
+// relationship is defined in the role's trust policy when the role is created.
+// That trust policy states which accounts are allowed to delegate access to
+// this account's role.
+//
+// The user who wants to access the role must also have permissions delegated
+// from the role's administrator. If the user is in a different account than
+// the role, then the user's administrator must attach a policy that allows
+// the user to call AssumeRole on the ARN of the role in the other account.
+// If the user is in the same account as the role, then you can either attach
+// a policy to the user (identical to the previous different account user),
+// or you can add the user as a principal directly in the role's trust policy
+//
+// Using MFA with AssumeRole
+//
+// You can optionally include multi-factor authentication (MFA) information
+// when you call AssumeRole. This is useful for cross-account scenarios in which
+// you want to make sure that the user who is assuming the role has been authenticated
+// using an AWS MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication; if the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication
+// might look like the following example.
+//
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
+// in the IAM User Guide.
+//
+// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that
+// the MFA device produces.
+//
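+// A sketch of an MFA-protected AssumeRole call; the ARN and token values are
+// placeholders:
+//
+//    result, err := svc.AssumeRole(&AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"),
+//        RoleSessionName: aws.String("example-session"),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/user"),
+//        TokenCode:       aws.String("123456"),
+//    })
+//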
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRole for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+//   The request was rejected because the policy document was too large. The error
+//   message describes how big the policy document is, in packed form, as a percentage
+//   of what the API allows.
+//
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
+func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
+       req, out := c.AssumeRoleRequest(input)
+       return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
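+//
+// A minimal sketch (svc and params are assumed placeholders) of adding a
+// cancellation deadline to the call:
+//
+//    ctx, cancel := context.WithTimeout(aws.BackgroundContext(), 5*time.Second)
+//    defer cancel()
+//    resp, err := svc.AssumeRoleWithContext(ctx, params)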
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+       req, out := c.AssumeRoleRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
+
+// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithSAML operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See AssumeRoleWithSAML for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AssumeRoleWithSAML method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the AssumeRoleWithSAMLRequest method.
+//    req, resp := client.AssumeRoleWithSAMLRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
+       op := &request.Operation{
+               Name:       opAssumeRoleWithSAML,
+               HTTPMethod: "POST",
+               HTTPPath:   "/",
+       }
+
+       if input == nil {
+               input = &AssumeRoleWithSAMLInput{}
+       }
+
+       output = &AssumeRoleWithSAMLOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// AssumeRoleWithSAML API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// via a SAML authentication response. This operation provides a mechanism for
+// tying an enterprise identity store or directory to role-based AWS access
+// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of
+// an access key ID, a secret access key, and a security token. Applications
+// can use these temporary security credentials to sign calls to AWS services.
+//
+// The temporary security credentials are valid for the duration that you specified
+// when calling AssumeRoleWithSAML, or until the time specified in the SAML authentication
+// response's SessionNotOnOrAfter value, whichever is shorter. The duration
+// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour).
+// The default is 1 hour.
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used
+// to make API calls to any AWS service with the following exception: you cannot
+// call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by the intersection of both the access policy
+// of the role that is being assumed, and the policy that you pass. This means
+// that both policies must grant the permission for the action to be allowed.
+// This gives you a way to further restrict the permissions for the resulting
+// temporary security credentials. You cannot use the passed policy to grant
+// permissions that are in excess of those allowed by the access policy of the
+// role that is being assumed. For more information, see Permissions for AssumeRole,
+// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithSAML, you must configure your
+// SAML identity provider (IdP) to issue the claims required by AWS. Additionally,
+// you must use AWS Identity and Access Management (IAM) to create a SAML provider
+// entity in your AWS account that represents your identity provider, and create
+// an IAM role that specifies this SAML provider in its trust policy.
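+//
+// A minimal sketch, not part of the upstream documentation; the ARNs, the
+// assertion value, and svc (an *STS client) are assumed placeholders:
+//
+//    params := &AssumeRoleWithSAMLInput{
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example"), // hypothetical
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/example"),          // hypothetical
+//        SAMLAssertion: aws.String(base64Assertion), // base64-encoded response from your IdP
+//    }
+//    resp, err := svc.AssumeRoleWithSAML(params)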
+//
+// Calling AssumeRoleWithSAML does not require the use of AWS security credentials.
+// The identity of the caller is validated by using keys in the metadata document
+// that is uploaded for the SAML provider entity for your identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail
+// logs. The entry includes the value in the NameID element of the SAML assertion.
+// We recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the Persistent
+// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
+//
+// For more information, see the following resources:
+//
+//    * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
+//    in the IAM User Guide.
+//
+//    * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
+//    in the IAM User Guide.
+//
+//    * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
+//    in the IAM User Guide.
+//
+//    * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
+//    in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithSAML for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+//   The request was rejected because the policy document was too large. The error
+//   message describes how big the policy document is, in packed form, as a percentage
+//   of what the API allows.
+//
+//   * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+//   The identity provider (IdP) reported that authentication failed. This might
+//   be because the claim is invalid.
+//
+//   If this error is returned for the AssumeRoleWithWebIdentity operation, it
+//   can also mean that the claim has expired or has been explicitly revoked.
+//
+//   * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+//   The web identity token that was passed could not be validated by AWS. Get
+//   a new identity token from the identity provider and then retry the request.
+//
+//   * ErrCodeExpiredTokenException "ExpiredTokenException"
+//   The web identity token that was passed is expired or is not valid. Get a
+//   new identity token from the identity provider and then retry the request.
+//
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
+func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
+       req, out := c.AssumeRoleWithSAMLRequest(input)
+       return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+       req, out := c.AssumeRoleWithSAMLRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
+
+// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See AssumeRoleWithWebIdentity for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the AssumeRoleWithWebIdentity method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
+//    req, resp := client.AssumeRoleWithWebIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
+       op := &request.Operation{
+               Name:       opAssumeRoleWithWebIdentity,
+               HTTPMethod: "POST",
+               HTTPPath:   "/",
+       }
+
+       if input == nil {
+               input = &AssumeRoleWithWebIdentityInput{}
+       }
+
+       output = &AssumeRoleWithWebIdentityOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials for users who have been authenticated
+// in a mobile or web application with a web identity provider, such as Amazon
+// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+// identity provider.
+//
+// For mobile applications, we recommend that you use Amazon Cognito. You can
+// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/)
+// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely
+// identify a user and supply the user with a consistent identity throughout
+// the lifetime of an application.
+//
+// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
+// in the AWS SDK for Android Developer Guide and Amazon Cognito Overview
+// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
+// in the AWS SDK for iOS Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of AWS security
+// credentials. Therefore, you can distribute an application (for example, on
+// mobile devices) that requests temporary security credentials without including
+// long-term AWS credentials in the application, and without deploying server-based
+// proxy services that use long-term AWS credentials. Instead, the identity
+// of the caller is validated by using a token from the web identity provider.
+// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce
+// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to AWS service APIs.
+//
+// The credentials are valid for the duration that you specified when calling
+// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to
+// a maximum of 3600 seconds (1 hour). The default is 1 hour.
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can
+// be used to make API calls to any AWS service with the following exception:
+// you cannot call the STS service's GetFederationToken or GetSessionToken APIs.
+//
+// Optionally, you can pass an IAM access policy to this operation. If you choose
+// not to pass a policy, the temporary security credentials that are returned
+// by the operation have the permissions that are defined in the access policy
+// of the role that is being assumed. If you pass a policy to this operation,
+// the temporary security credentials that are returned by the operation have
+// the permissions that are allowed by both the access policy of the role that
+// is being assumed, and the policy that you pass. This gives you a way to further
+// restrict the permissions for the resulting temporary security credentials.
+// You cannot use the passed policy to grant permissions that are in excess
+// of those allowed by the access policy of the role that is being assumed.
+// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+// in the IAM User Guide.
+//
+// Before your application can call AssumeRoleWithWebIdentity, you must have
+// an identity token from a supported identity provider and create a role that
+// the application can assume. The role that your application assumes must trust
+// the identity provider that is associated with the identity token. In other
+// words, the identity provider must be specified in the role's trust policy.
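+//
+// A minimal sketch, not part of the upstream documentation; the token, the
+// ARN, and svc (an *STS client) are assumed placeholders:
+//
+//    params := &AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/example"), // hypothetical
+//        RoleSessionName:  aws.String("example-session"),
+//        WebIdentityToken: aws.String(providerToken), // token issued by the web identity provider
+//    }
+//    resp, err := svc.AssumeRoleWithWebIdentity(params)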
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail
+// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
+// of the provided Web Identity Token. We recommend that you avoid using any
+// personally identifiable information (PII) in this field. For example, you
+// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
+// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
+//
+// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
+// API, see the following resources:
+//
+//    * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
+//    and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+//    * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
+//    This interactive website lets you walk through the process of authenticating
+//    via Login with Amazon, Facebook, or Google, getting temporary security
+//    credentials, and then using those credentials to make a request to AWS.
+//
+//    * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android
+//    (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample
+//    apps that show how to invoke the identity providers, and then how to use
+//    the information from these providers to get and use temporary security
+//    credentials.
+//
+//    * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313).
+//    This article discusses web identity federation and shows an example of
+//    how to use web identity federation to get access to content in Amazon
+//    S3.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation AssumeRoleWithWebIdentity for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+//   The request was rejected because the policy document was too large. The error
+//   message describes how big the policy document is, in packed form, as a percentage
+//   of what the API allows.
+//
+//   * ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
+//   The identity provider (IdP) reported that authentication failed. This might
+//   be because the claim is invalid.
+//
+//   If this error is returned for the AssumeRoleWithWebIdentity operation, it
+//   can also mean that the claim has expired or has been explicitly revoked.
+//
+//   * ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
+//   The request could not be fulfilled because the non-AWS identity provider
+//   (IDP) that was asked to verify the incoming identity token could not be reached.
+//   This is often a transient error caused by network conditions. Retry the request
+//   a limited number of times so that you don't exceed the request rate. If the
+//   error persists, the non-AWS identity provider might be down or not responding.
+//
+//   * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
+//   The web identity token that was passed could not be validated by AWS. Get
+//   a new identity token from the identity provider and then retry the request.
+//
+//   * ErrCodeExpiredTokenException "ExpiredTokenException"
+//   The web identity token that was passed is expired or is not valid. Get a
+//   new identity token from the identity provider and then retry the request.
+//
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
+func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
+       req, out := c.AssumeRoleWithWebIdentityRequest(input)
+       return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+       req, out := c.AssumeRoleWithWebIdentityRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
+
+// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
+// client's request for the DecodeAuthorizationMessage operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See DecodeAuthorizationMessage for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the DecodeAuthorizationMessage method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the DecodeAuthorizationMessageRequest method.
+//    req, resp := client.DecodeAuthorizationMessageRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
+       op := &request.Operation{
+               Name:       opDecodeAuthorizationMessage,
+               HTTPMethod: "POST",
+               HTTPPath:   "/",
+       }
+
+       if input == nil {
+               input = &DecodeAuthorizationMessageInput{}
+       }
+
+       output = &DecodeAuthorizationMessageOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// DecodeAuthorizationMessage API operation for AWS Security Token Service.
+//
+// Decodes additional information about the authorization status of a request
+// from an encoded message returned in response to an AWS request.
+//
+// For example, if a user is not authorized to perform an action that he or
+// she has requested, the request returns a Client.UnauthorizedOperation response
+// (an HTTP 403 response). Some AWS actions additionally return an encoded message
+// that can provide details about this authorization failure.
+//
+// Only certain AWS actions return an encoded authorization message. The documentation
+// for an individual action indicates whether that action returns an encoded
+// message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// constitute privileged information that the user who requested the action
+// should not see. To decode an authorization status message, a user must be
+// granted permissions via an IAM policy to request the DecodeAuthorizationMessage
+// (sts:DecodeAuthorizationMessage) action.
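+//
+// A minimal sketch, not part of the upstream documentation; encodedMessage
+// and svc (an *STS client) are assumed placeholders:
+//
+//    resp, err := svc.DecodeAuthorizationMessage(&DecodeAuthorizationMessageInput{
+//        EncodedMessage: aws.String(encodedMessage), // taken from the failed request's error response
+//    })
+//    if err == nil {
+//        fmt.Println(*resp.DecodedMessage)
+//    }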
+//
+// The decoded message includes the following types of information:
+//
+//    * Whether the request was denied due to an explicit deny or due to the
+//    absence of an explicit allow. For more information, see Determining Whether
+//    a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
+//    in the IAM User Guide.
+//
+//    * The principal who made the request.
+//
+//    * The requested action.
+//
+//    * The requested resource.
+//
+//    * The values of condition keys in the context of the user's request.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation DecodeAuthorizationMessage for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
+//   The error returned if the message passed to DecodeAuthorizationMessage was
+//   invalid. This can happen if the token contains invalid characters, such as
+//   linebreaks.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
+func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
+       req, out := c.DecodeAuthorizationMessageRequest(input)
+       return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+       req, out := c.DecodeAuthorizationMessageRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetCallerIdentity = "GetCallerIdentity"
+
+// GetCallerIdentityRequest generates a "aws/request.Request" representing the
+// client's request for the GetCallerIdentity operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetCallerIdentity for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetCallerIdentity method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetCallerIdentityRequest method.
+//    req, resp := client.GetCallerIdentityRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
+       op := &request.Operation{
+               Name:       opGetCallerIdentity,
+               HTTPMethod: "POST",
+               HTTPPath:   "/",
+       }
+
+       if input == nil {
+               input = &GetCallerIdentityInput{}
+       }
+
+       output = &GetCallerIdentityOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetCallerIdentity API operation for AWS Security Token Service.
+//
+// Returns details about the IAM identity whose credentials are used to call
+// the API.
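+//
+// A minimal sketch, not part of the upstream documentation; svc (an *STS
+// client) is an assumed placeholder:
+//
+//    resp, err := svc.GetCallerIdentity(&GetCallerIdentityInput{})
+//    if err == nil {
+//        fmt.Println(*resp.Account, *resp.Arn, *resp.UserId)
+//    }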
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetCallerIdentity for usage and error information.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
+func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
+       req, out := c.GetCallerIdentityRequest(input)
+       return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+       req, out := c.GetCallerIdentityRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetFederationToken = "GetFederationToken"
+
+// GetFederationTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetFederationToken operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetFederationToken for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetFederationToken method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetFederationTokenRequest method.
+//    req, resp := client.GetFederationTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+       op := &request.Operation{
+               Name:       opGetFederationToken,
+               HTTPMethod: "POST",
+               HTTPPath:   "/",
+       }
+
+       if input == nil {
+               input = &GetFederationTokenInput{}
+       }
+
+       output = &GetFederationTokenOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+//    * You cannot use these credentials to call any IAM APIs.
+//
+//    * You cannot call any STS APIs except GetCallerIdentity.
+//
+// Permissions
+//
+// The permissions for the temporary security credentials returned by GetFederationToken
+// are determined by a combination of the following:
+//
+//    * The policy or policies that are attached to the IAM user whose credentials
+//    are used to call GetFederationToken.
+//
+//    * The policy that is passed as a parameter in the call.
+//
+// The passed policy is attached to the temporary security credentials that
+// result from the GetFederationToken API call--that is, to the federated user.
+// When the federated user makes an AWS request, AWS evaluates the policy attached
+// to the federated user in combination with the policy or policies attached
+// to the IAM user whose credentials were used to call GetFederationToken. AWS
+// allows the federated user's request only when both the federated user and
+// the IAM user are explicitly allowed to perform the requested action. The
+// passed policy cannot grant more permissions than those that are defined in
+// the IAM user policy.
+//
+// A typical use case is that the permissions of the IAM user whose credentials
+// are used to call GetFederationToken are designed to allow access to all the
+// actions and resources that any federated user will need. Then, for individual
+// users, you pass a policy to the operation that scopes down the permissions
+// to a level that's appropriate to that individual user, using a policy that
+// allows only a subset of permissions that are granted to the IAM user.
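+//
+// A minimal sketch, not part of the upstream documentation; the name, the
+// scope-down policy JSON, and svc (an *STS client) are assumed placeholders:
+//
+//    params := &GetFederationTokenInput{
+//        Name:   aws.String("example-user"),      // name of the federated user
+//        Policy: aws.String(scopeDownPolicyJSON), // per-user scope-down policy
+//    }
+//    resp, err := svc.GetFederationToken(params)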
+//
+// If you do not pass a policy, the resulting temporary security credentials
+// have no effective permissions. The only exception is when the temporary security
+// credentials are used to access a resource that has a resource-based policy
+// that specifically allows the federated user to access the resource.
+//
+// For more information about how permissions work, see Permissions for GetFederationToken
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+// For information about using GetFederationToken to create temporary security
+// credentials, see GetFederationToken—Federation Through a Custom Identity
+// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetFederationToken for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
+//   The request was rejected because the policy document was malformed. The error
+//   message describes the specific error.
+//
+//   * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
+//   The request was rejected because the policy document was too large. The error
+//   message describes how big the policy document is, in packed form, as a percentage
+//   of what the API allows.
+//
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
+       req, out := c.GetFederationTokenRequest(input)
+       return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+       req, out := c.GetFederationTokenRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+const opGetSessionToken = "GetSessionToken"
+
+// GetSessionTokenRequest generates a "aws/request.Request" representing the
+// client's request for the GetSessionToken operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// See GetSessionToken for usage and error information.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the GetSessionToken method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+//    // Example sending a request using the GetSessionTokenRequest method.
+//    req, resp := client.GetSessionTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
+       op := &request.Operation{
+               Name:       opGetSessionToken,
+               HTTPMethod: "POST",
+               HTTPPath:   "/",
+       }
+
+       if input == nil {
+               input = &GetSessionTokenInput{}
+       }
+
+       output = &GetSessionTokenOutput{}
+       req = c.newRequest(op, input, output)
+       return
+}
+
+// GetSessionToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary credentials for an AWS account or IAM user. The
+// credentials consist of an access key ID, a secret access key, and a security
+// token. Typically, you use GetSessionToken if you want to use MFA to protect
+// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled
+// IAM users would need to call GetSessionToken and submit an MFA code that
+// is associated with their MFA device. Using the temporary security credentials
+// that are returned from the call, IAM users can then make programmatic calls
+// to APIs that require MFA authentication. If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
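+//
+// A minimal sketch of an MFA-protected call, not part of the upstream
+// documentation; the serial number, token, and svc are assumed placeholders:
+//
+//    params := &GetSessionTokenInput{
+//        SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/user"), // hypothetical MFA device
+//        TokenCode:    aws.String("123456"), // current TOTP value from the device
+//    }
+//    resp, err := svc.GetSessionToken(params)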
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+//    * You cannot call any IAM APIs unless MFA authentication information is
+//    included in the request.
+//
+//    * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account
+// or IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+       req, out := c.GetSessionTokenRequest(input)
+       return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+       req, out := c.GetSessionTokenRequest(input)
+       req.SetContext(ctx)
+       req.ApplyOptions(opts...)
+       return out, req.Send()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
+type AssumeRoleInput struct {
+       _ struct{} `type:"structure"`
+
+       // The duration, in seconds, of the role session. The value can range from 900
+       // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+       // to 3600 seconds.
+       //
+       // This is separate from the duration of a console session that you might request
+       // using the returned credentials. The request to the federation endpoint for
+       // a console sign-in token takes a SessionDuration parameter that specifies
+       // the maximum length of the console session, separately from the DurationSeconds
+       // parameter on this API. For more information, see Creating a URL that Enables
+       // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+       // in the IAM User Guide.
+       DurationSeconds *int64 `min:"900" type:"integer"`
+
+       // A unique identifier that is used by third parties when assuming roles in
+       // their customers' accounts. For each role that the third party can assume,
+       // they should instruct their customers to ensure the role's trust policy checks
+       // for the external ID that the third party generated. Each time the third party
+       // assumes the role, they should pass the customer's external ID. The external
+       // ID helps third parties bind a role to the customer who created it. For more
+       // information about the external ID, see How to Use an
+       // External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+       // in the IAM User Guide.
+       //
+       // The regex used to validate this parameter is a string of characters consisting
+       // of upper- and lower-case alphanumeric characters with no spaces. You can
+       // also include underscores or any of the following characters: =,.@:\/-
+       ExternalId *string `min:"2" type:"string"`
+
+       // An IAM policy in JSON format.
+       //
+       // This parameter is optional. If you pass a policy, the temporary security
+       // credentials that are returned by the operation have the permissions that
+       // are allowed by both (the intersection of) the access policy of the role that
+       // is being assumed, and the policy that you pass. This gives you a way to further
+       // restrict the permissions for the resulting temporary security credentials.
+       // You cannot use the passed policy to grant permissions that are in excess
+       // of those allowed by the access policy of the role that is being assumed.
+       // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML,
+       // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+       // in the IAM User Guide.
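+       //
+       // For illustration only (a hypothetical, minimal scope-down policy; params
+       // is an assumed *AssumeRoleInput):
+       //
+       //    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":"s3:GetObject","Resource":"*"}]}`
+       //    params.SetPolicy(policy)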
+       //
+       // The format for this parameter, as described by its regex pattern, is a string
+       // of characters up to 2048 characters in length. The characters can be any
+       // ASCII character from the space character to the end of the valid character
+       // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+       // and carriage return (\u000D) characters.
+       //
+       // The policy plain text must be 2048 bytes or shorter. However, an internal
+       // conversion compresses it into a packed binary format with a separate limit.
+       // The PackedPolicySize response element indicates by percentage how close to
+       // the upper size limit the policy is, with 100% equaling the maximum allowed
+       // size.
+       Policy *string `min:"1" type:"string"`
+
+       // The Amazon Resource Name (ARN) of the role to assume.
+       //
+       // RoleArn is a required field
+       RoleArn *string `min:"20" type:"string" required:"true"`
+
+       // An identifier for the assumed role session.
+       //
+       // Use the role session name to uniquely identify a session when the same role
+       // is assumed by different principals or for different reasons. In cross-account
+       // scenarios, the role session name is visible to, and can be logged by the
+       // account that owns the role. The role session name is also used in the ARN
+       // of the assumed role principal. This means that subsequent cross-account API
+       // requests using the temporary security credentials will expose the role session
+       // name to the external account in their CloudTrail logs.
+       //
+       // The regex used to validate this parameter is a string of characters consisting
+       // of upper- and lower-case alphanumeric characters with no spaces. You can
+       // also include underscores or any of the following characters: =,.@-
+       //
+       // RoleSessionName is a required field
+       RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+       // The identification number of the MFA device that is associated with the user
+       // who is making the AssumeRole call. Specify this value if the trust policy
+       // of the role being assumed includes a condition that requires MFA authentication.
+       // The value is either the serial number for a hardware device (such as GAHT12345678)
+       // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+       //
+       // The regex used to validate this parameter is a string of characters consisting
+       // of upper- and lower-case alphanumeric characters with no spaces. You can
+       // also include underscores or any of the following characters: =,.@-
+       SerialNumber *string `min:"9" type:"string"`
+
+       // The value provided by the MFA device, if the trust policy of the role being
+       // assumed requires MFA (that is, if the policy includes a condition that tests
+       // for MFA). If the role being assumed requires MFA and if the TokenCode value
+       // is missing or expired, the AssumeRole call returns an "access denied" error.
+       //
+       // The format for this parameter, as described by its regex pattern, is a sequence
+       // of six numeric digits.
+       TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
+       if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+               invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+       }
+       if s.ExternalId != nil && len(*s.ExternalId) < 2 {
+               invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
+       }
+       if s.Policy != nil && len(*s.Policy) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+       }
+       if s.RoleArn == nil {
+               invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+       }
+       if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+               invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+       }
+       if s.RoleSessionName == nil {
+               invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+       }
+       if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+               invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+       }
+       if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+               invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+       }
+       if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+               invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
+       s.DurationSeconds = &v
+       return s
+}
+
+// SetExternalId sets the ExternalId field's value.
+func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
+       s.ExternalId = &v
+       return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
+       s.Policy = &v
+       return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
+       s.RoleArn = &v
+       return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
+       s.RoleSessionName = &v
+       return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
+       s.SerialNumber = &v
+       return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
+       s.TokenCode = &v
+       return s
+}
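Taken together, the struct, Validate, and the chained setters above support a fluent request-building pattern. A minimal usage sketch, not part of the vendored file; the role ARN and session name are placeholders:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

func main() {
    svc := sts.New(session.Must(session.NewSession()))

    // Each setter returns the receiver, so the input can be built in one chain.
    input := (&sts.AssumeRoleInput{}).
        SetRoleArn("arn:aws:iam::123456789012:role/example"). // placeholder ARN
        SetRoleSessionName("example-session").
        SetDurationSeconds(900) // the minimum the validator accepts

    // Validate runs the same client-side checks shown above, before any network call.
    if err := input.Validate(); err != nil {
        log.Fatal(err)
    }

    out, err := svc.AssumeRole(input)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))
}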
+
+// Contains the response to a successful AssumeRole request, including temporary
+// AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse
+type AssumeRoleOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+       // that you can use to refer to the resulting temporary security credentials.
+       // For example, you can reference these credentials as a principal in a resource-based
+       // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+       // that you specified when you called AssumeRole.
+       AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+       // The temporary security credentials, which include an access key ID, a secret
+       // access key, and a security (or session) token.
+       //
+       // Note: The size of the security token that STS APIs return is not fixed. We
+       // strongly recommend that you make no assumptions about the maximum size. As
+       // of this writing, the typical size is less than 4096 bytes, but that can vary.
+       // Also, future updates to AWS might require larger sizes.
+       Credentials *Credentials `type:"structure"`
+
+       // A percentage value that indicates the size of the policy in packed form.
+       // The service rejects any policy with a packed size greater than 100 percent,
+       // which means the policy exceeded the allowed space.
+       PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AssumeRoleOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleOutput) GoString() string {
+       return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
+       s.AssumedRoleUser = v
+       return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
+       s.Credentials = v
+       return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
+       s.PackedPolicySize = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest
+type AssumeRoleWithSAMLInput struct {
+       _ struct{} `type:"structure"`
+
+       // The duration, in seconds, of the role session. The value can range from 900
+       // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+       // to 3600 seconds. An expiration can also be specified in the SAML authentication
+       // response's SessionNotOnOrAfter value. The actual expiration time is whichever
+       // value is shorter.
+       //
+       // This is separate from the duration of a console session that you might request
+       // using the returned credentials. The request to the federation endpoint for
+       // a console sign-in token takes a SessionDuration parameter that specifies
+       // the maximum length of the console session, separately from the DurationSeconds
+       // parameter on this API. For more information, see Enabling SAML 2.0 Federated
+       // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html)
+       // in the IAM User Guide.
+       DurationSeconds *int64 `min:"900" type:"integer"`
+
+       // An IAM policy in JSON format.
+       //
+       // The policy parameter is optional. If you pass a policy, the temporary security
+       // credentials that are returned by the operation have the permissions that
+       // are allowed by both the access policy of the role that is being assumed,
+       // and the policy that you pass. This gives you a way to further restrict the
+       // permissions for the resulting temporary security credentials. You cannot
+       // use the passed policy to grant permissions that are in excess of those allowed
+       // by the access policy of the role that is being assumed. For more information,
+       // see Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+       // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+       // in the IAM User Guide.
+       //
+       // The format for this parameter, as described by its regex pattern, is a string
+       // of characters up to 2048 characters in length. The characters can be any
+       // ASCII character from the space character to the end of the valid character
+       // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+       // and carriage return (\u000D) characters.
+       //
+       // The policy plain text must be 2048 bytes or shorter. However, an internal
+       // conversion compresses it into a packed binary format with a separate limit.
+       // The PackedPolicySize response element indicates by percentage how close to
+       // the upper size limit the policy is, with 100% equaling the maximum allowed
+       // size.
+       Policy *string `min:"1" type:"string"`
+
+       // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+       // the IdP.
+       //
+       // PrincipalArn is a required field
+       PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+       // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+       //
+       // RoleArn is a required field
+       RoleArn *string `min:"20" type:"string" required:"true"`
+
+       // The base-64 encoded SAML authentication response provided by the IdP.
+       //
+       // For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+       // in the Using IAM guide.
+       //
+       // SAMLAssertion is a required field
+       SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+       if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+               invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+       }
+       if s.Policy != nil && len(*s.Policy) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+       }
+       if s.PrincipalArn == nil {
+               invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+       }
+       if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+               invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+       }
+       if s.RoleArn == nil {
+               invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+       }
+       if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+               invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+       }
+       if s.SAMLAssertion == nil {
+               invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+       }
+       if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+               invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+       s.DurationSeconds = &v
+       return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
+       s.Policy = &v
+       return s
+}
+
+// SetPrincipalArn sets the PrincipalArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
+       s.PrincipalArn = &v
+       return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
+       s.RoleArn = &v
+       return s
+}
+
+// SetSAMLAssertion sets the SAMLAssertion field's value.
+func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
+       s.SAMLAssertion = &v
+       return s
+}
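A sketch of a call using these fields, not part of the vendored file. The ARNs are placeholders, and the assertion is assumed here to be the raw XML of the IdP response, which the API expects base64-encoded; IdPs usually deliver it already encoded in the SAMLResponse form field, in which case it can be passed through as-is:

package sketch

import (
    "encoding/base64"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithSAML trades a SAML assertion for temporary credentials.
// rawAssertion is assumed to be the un-encoded XML assertion from the IdP.
func assumeWithSAML(svc *sts.STS, rawAssertion []byte) (*sts.AssumeRoleWithSAMLOutput, error) {
    return svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/MySAMLIdP"), // placeholder
        RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),          // placeholder
        SAMLAssertion: aws.String(base64.StdEncoding.EncodeToString(rawAssertion)),
    })
}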
+
+// Contains the response to a successful AssumeRoleWithSAML request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse
+type AssumeRoleWithSAMLOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The identifiers for the temporary security credentials that the operation
+       // returns.
+       AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+       // The value of the Recipient attribute of the SubjectConfirmationData element
+       // of the SAML assertion.
+       Audience *string `type:"string"`
+
+       // The temporary security credentials, which include an access key ID, a secret
+       // access key, and a security (or session) token.
+       //
+       // Note: The size of the security token that STS APIs return is not fixed. We
+       // strongly recommend that you make no assumptions about the maximum size. As
+       // of this writing, the typical size is less than 4096 bytes, but that can vary.
+       // Also, future updates to AWS might require larger sizes.
+       Credentials *Credentials `type:"structure"`
+
+       // The value of the Issuer element of the SAML assertion.
+       Issuer *string `type:"string"`
+
+       // A hash value based on the concatenation of the Issuer response value, the
+       // AWS account ID, and the friendly name (the last part of the ARN) of the SAML
+       // provider in IAM. The combination of NameQualifier and Subject can be used
+       // to uniquely identify a federated user.
+       //
+       // The following pseudocode shows how the hash value is calculated:
+       //
+       // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
+       // ) )
+       NameQualifier *string `type:"string"`
+
+       // A percentage value that indicates the size of the policy in packed form.
+       // The service rejects any policy with a packed size greater than 100 percent,
+       // which means the policy exceeded the allowed space.
+       PackedPolicySize *int64 `type:"integer"`
+
+       // The value of the NameID element in the Subject element of the SAML assertion.
+       Subject *string `type:"string"`
+
+       // The format of the name ID, as defined by the Format attribute in the NameID
+       // element of the SAML assertion. Typical examples of the format are transient
+       // or persistent.
+       //
+       // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
+       // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
+       // is returned as transient. If the format includes any other prefix, the format
+       // is returned with no modifications.
+       SubjectType *string `type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLOutput) GoString() string {
+       return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
+       s.AssumedRoleUser = v
+       return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
+       s.Audience = &v
+       return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
+       s.Credentials = v
+       return s
+}
+
+// SetIssuer sets the Issuer field's value.
+func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
+       s.Issuer = &v
+       return s
+}
+
+// SetNameQualifier sets the NameQualifier field's value.
+func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
+       s.NameQualifier = &v
+       return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
+       s.PackedPolicySize = &v
+       return s
+}
+
+// SetSubject sets the Subject field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
+       s.Subject = &v
+       return s
+}
+
+// SetSubjectType sets the SubjectType field's value.
+func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
+       s.SubjectType = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest
+type AssumeRoleWithWebIdentityInput struct {
+       _ struct{} `type:"structure"`
+
+       // The duration, in seconds, of the role session. The value can range from 900
+       // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+       // to 3600 seconds.
+       //
+       // This is separate from the duration of a console session that you might request
+       // using the returned credentials. The request to the federation endpoint for
+       // a console sign-in token takes a SessionDuration parameter that specifies
+       // the maximum length of the console session, separately from the DurationSeconds
+       // parameter on this API. For more information, see Creating a URL that Enables
+       // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+       // in the IAM User Guide.
+       DurationSeconds *int64 `min:"900" type:"integer"`
+
+       // An IAM policy in JSON format.
+       //
+       // The policy parameter is optional. If you pass a policy, the temporary security
+       // credentials that are returned by the operation have the permissions that
+       // are allowed by both the access policy of the role that is being assumed,
+       // and the policy that you pass. This gives you a way to further restrict the
+       // permissions for the resulting temporary security credentials. You cannot
+       // use the passed policy to grant permissions that are in excess of those allowed
+       // by the access policy of the role that is being assumed. For more information,
+       // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+       // in the IAM User Guide.
+       //
+       // The format for this parameter, as described by its regex pattern, is a string
+       // of characters up to 2048 characters in length. The characters can be any
+       // ASCII character from the space character to the end of the valid character
+       // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+       // and carriage return (\u000D) characters.
+       //
+       // The policy plain text must be 2048 bytes or shorter. However, an internal
+       // conversion compresses it into a packed binary format with a separate limit.
+       // The PackedPolicySize response element indicates by percentage how close to
+       // the upper size limit the policy is, with 100% equaling the maximum allowed
+       // size.
+       Policy *string `min:"1" type:"string"`
+
+       // The fully qualified host component of the domain name of the identity provider.
+       //
+       // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
+       // and graph.facebook.com are the only supported identity providers for OAuth
+       // 2.0 access tokens. Do not include URL schemes and port numbers.
+       //
+       // Do not specify this value for OpenID Connect ID tokens.
+       ProviderId *string `min:"4" type:"string"`
+
+       // The Amazon Resource Name (ARN) of the role that the caller is assuming.
+       //
+       // RoleArn is a required field
+       RoleArn *string `min:"20" type:"string" required:"true"`
+
+       // An identifier for the assumed role session. Typically, you pass the name
+       // or identifier that is associated with the user who is using your application.
+       // That way, the temporary security credentials that your application will use
+       // are associated with that user. This session name is included as part of the
+       // ARN and assumed role ID in the AssumedRoleUser response element.
+       //
+       // The regex used to validate this parameter is a string of characters consisting
+       // of upper- and lower-case alphanumeric characters with no spaces. You can
+       // also include underscores or any of the following characters: =,.@-
+       //
+       // RoleSessionName is a required field
+       RoleSessionName *string `min:"2" type:"string" required:"true"`
+
+       // The OAuth 2.0 access token or OpenID Connect ID token that is provided by
+       // the identity provider. Your application must get this token by authenticating
+       // the user who is using your application with a web identity provider before
+       // the application makes an AssumeRoleWithWebIdentity call.
+       //
+       // WebIdentityToken is a required field
+       WebIdentityToken *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithWebIdentityInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
+       if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+               invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+       }
+       if s.Policy != nil && len(*s.Policy) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+       }
+       if s.ProviderId != nil && len(*s.ProviderId) < 4 {
+               invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4))
+       }
+       if s.RoleArn == nil {
+               invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+       }
+       if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+               invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+       }
+       if s.RoleSessionName == nil {
+               invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
+       }
+       if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
+               invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
+       }
+       if s.WebIdentityToken == nil {
+               invalidParams.Add(request.NewErrParamRequired("WebIdentityToken"))
+       }
+       if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
+               invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
+       s.DurationSeconds = &v
+       return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
+       s.Policy = &v
+       return s
+}
+
+// SetProviderId sets the ProviderId field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
+       s.ProviderId = &v
+       return s
+}
+
+// SetRoleArn sets the RoleArn field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
+       s.RoleArn = &v
+       return s
+}
+
+// SetRoleSessionName sets the RoleSessionName field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
+       s.RoleSessionName = &v
+       return s
+}
+
+// SetWebIdentityToken sets the WebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
+       s.WebIdentityToken = &v
+       return s
+}
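AssumeRoleWithWebIdentity is one of the two operations that customizations.go, later in this diff, leaves unsigned, so it can be called before the process holds any AWS credentials. A sketch with a placeholder ARN and an anonymous-credentials config to make that explicit; the region is assumed:

package sketch

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithWebIdentity exchanges an OIDC ID token for temporary credentials.
func assumeWithWebIdentity(token string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
    // The request is unsigned, so anonymous credentials are sufficient.
    sess, err := session.NewSession(aws.NewConfig().
        WithRegion("us-east-1"). // assumed region
        WithCredentials(credentials.AnonymousCredentials))
    if err != nil {
        return nil, err
    }
    return sts.New(sess).AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
        RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-role"), // placeholder
        RoleSessionName:  aws.String("web-session"),
        WebIdentityToken: aws.String(token),
    })
}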
+
+// Contains the response to a successful AssumeRoleWithWebIdentity request,
+// including temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse
+type AssumeRoleWithWebIdentityOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
+       // that you can use to refer to the resulting temporary security credentials.
+       // For example, you can reference these credentials as a principal in a resource-based
+       // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
+       // that you specified when you called AssumeRole.
+       AssumedRoleUser *AssumedRoleUser `type:"structure"`
+
+       // The intended audience (also known as client ID) of the web identity token.
+       // This is traditionally the client identifier issued to the application that
+       // requested the web identity token.
+       Audience *string `type:"string"`
+
+       // The temporary security credentials, which include an access key ID, a secret
+       // access key, and a security token.
+       //
+       // Note: The size of the security token that STS APIs return is not fixed. We
+       // strongly recommend that you make no assumptions about the maximum size. As
+       // of this writing, the typical size is less than 4096 bytes, but that can vary.
+       // Also, future updates to AWS might require larger sizes.
+       Credentials *Credentials `type:"structure"`
+
+       // A percentage value that indicates the size of the policy in packed form.
+       // The service rejects any policy with a packed size greater than 100 percent,
+       // which means the policy exceeded the allowed space.
+       PackedPolicySize *int64 `type:"integer"`
+
+       // The issuing authority of the web identity token presented. For OpenID Connect
+       // ID Tokens this contains the value of the iss field. For OAuth 2.0 access
+       // tokens, this contains the value of the ProviderId parameter that was passed
+       // in the AssumeRoleWithWebIdentity request.
+       Provider *string `type:"string"`
+
+       // The unique user identifier that is returned by the identity provider. This
+       // identifier is associated with the WebIdentityToken that was submitted with
+       // the AssumeRoleWithWebIdentity call. The identifier is typically unique to
+       // the user and the application that acquired the WebIdentityToken (pairwise
+       // identifier). For OpenID Connect ID tokens, this field contains the value
+       // returned by the identity provider as the token's sub (Subject) claim.
+       SubjectFromWebIdentityToken *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithWebIdentityOutput) GoString() string {
+       return s.String()
+}
+
+// SetAssumedRoleUser sets the AssumedRoleUser field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
+       s.AssumedRoleUser = v
+       return s
+}
+
+// SetAudience sets the Audience field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
+       s.Audience = &v
+       return s
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
+       s.Credentials = v
+       return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
+       s.PackedPolicySize = &v
+       return s
+}
+
+// SetProvider sets the Provider field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
+       s.Provider = &v
+       return s
+}
+
+// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
+func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
+       s.SubjectFromWebIdentityToken = &v
+       return s
+}
+
+// The identifiers for the temporary security credentials that the operation
+// returns.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
+type AssumedRoleUser struct {
+       _ struct{} `type:"structure"`
+
+       // The ARN of the temporary security credentials that are returned from the
+       // AssumeRole action. For more information about ARNs and how to use them in
+       // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+       // in Using IAM.
+       //
+       // Arn is a required field
+       Arn *string `min:"20" type:"string" required:"true"`
+
+       // A unique identifier that contains the role ID and the role session name of
+       // the role that is being assumed. The role ID is generated by AWS when the
+       // role is created.
+       //
+       // AssumedRoleId is a required field
+       AssumedRoleId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumedRoleUser) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumedRoleUser) GoString() string {
+       return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
+       s.Arn = &v
+       return s
+}
+
+// SetAssumedRoleId sets the AssumedRoleId field's value.
+func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
+       s.AssumedRoleId = &v
+       return s
+}
+
+// AWS credentials for API authentication.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials
+type Credentials struct {
+       _ struct{} `type:"structure"`
+
+       // The access key ID that identifies the temporary security credentials.
+       //
+       // AccessKeyId is a required field
+       AccessKeyId *string `min:"16" type:"string" required:"true"`
+
+       // The date on which the current credentials expire.
+       //
+       // Expiration is a required field
+       Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+       // The secret access key that can be used to sign requests.
+       //
+       // SecretAccessKey is a required field
+       SecretAccessKey *string `type:"string" required:"true"`
+
+       // The token that users must pass to the service API to use the temporary credentials.
+       //
+       // SessionToken is a required field
+       SessionToken *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Credentials) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Credentials) GoString() string {
+       return s.String()
+}
+
+// SetAccessKeyId sets the AccessKeyId field's value.
+func (s *Credentials) SetAccessKeyId(v string) *Credentials {
+       s.AccessKeyId = &v
+       return s
+}
+
+// SetExpiration sets the Expiration field's value.
+func (s *Credentials) SetExpiration(v time.Time) *Credentials {
+       s.Expiration = &v
+       return s
+}
+
+// SetSecretAccessKey sets the SecretAccessKey field's value.
+func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
+       s.SecretAccessKey = &v
+       return s
+}
+
+// SetSessionToken sets the SessionToken field's value.
+func (s *Credentials) SetSessionToken(v string) *Credentials {
+       s.SessionToken = &v
+       return s
+}
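The Credentials value returned by the AssumeRole*, GetSessionToken, and GetFederationToken operations plugs directly into the SDK's static credentials provider, which is how a second, scoped-down session is typically built. A sketch, not part of the vendored file:

package sketch

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/sts"
)

// sessionFromSTS builds a new session from temporary STS credentials.
func sessionFromSTS(c *sts.Credentials) (*session.Session, error) {
    return session.NewSession(aws.NewConfig().WithCredentials(
        credentials.NewStaticCredentials(
            aws.StringValue(c.AccessKeyId),
            aws.StringValue(c.SecretAccessKey),
            aws.StringValue(c.SessionToken), // the session token is required with temporary keys
        )))
}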
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest
+type DecodeAuthorizationMessageInput struct {
+       _ struct{} `type:"structure"`
+
+       // The encoded message that was returned with the response.
+       //
+       // EncodedMessage is a required field
+       EncodedMessage *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DecodeAuthorizationMessageInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
+       if s.EncodedMessage == nil {
+               invalidParams.Add(request.NewErrParamRequired("EncodedMessage"))
+       }
+       if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetEncodedMessage sets the EncodedMessage field's value.
+func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
+       s.EncodedMessage = &v
+       return s
+}
+
+// A document that contains additional information about the authorization status
+// of a request from an encoded message that is returned in response to an AWS
+// request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse
+type DecodeAuthorizationMessageOutput struct {
+       _ struct{} `type:"structure"`
+
+       // An XML document that contains the decoded message.
+       DecodedMessage *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DecodeAuthorizationMessageOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DecodeAuthorizationMessageOutput) GoString() string {
+       return s.String()
+}
+
+// SetDecodedMessage sets the DecodedMessage field's value.
+func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
+       s.DecodedMessage = &v
+       return s
+}
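Putting the input and output types together: the encoded message would come from the "access denied" response of some other AWS call, and the decoded result is the XML document described above. A sketch, not part of the vendored file:

package sketch

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/sts"
)

// decodeDenial expands an encoded authorization-failure message.
func decodeDenial(svc *sts.STS, encoded string) (string, error) {
    out, err := svc.DecodeAuthorizationMessage(&sts.DecodeAuthorizationMessageInput{
        EncodedMessage: aws.String(encoded),
    })
    if err != nil {
        return "", err
    }
    return aws.StringValue(out.DecodedMessage), nil
}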
+
+// Identifiers for the federated user that is associated with the credentials.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser
+type FederatedUser struct {
+       _ struct{} `type:"structure"`
+
+       // The ARN that specifies the federated user that is associated with the credentials.
+       // For more information about ARNs and how to use them in policies, see IAM
+       // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
+       // in Using IAM.
+       //
+       // Arn is a required field
+       Arn *string `min:"20" type:"string" required:"true"`
+
+       // The string that identifies the federated user associated with the credentials,
+       // similar to the unique ID of an IAM user.
+       //
+       // FederatedUserId is a required field
+       FederatedUserId *string `min:"2" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s FederatedUser) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FederatedUser) GoString() string {
+       return s.String()
+}
+
+// SetArn sets the Arn field's value.
+func (s *FederatedUser) SetArn(v string) *FederatedUser {
+       s.Arn = &v
+       return s
+}
+
+// SetFederatedUserId sets the FederatedUserId field's value.
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
+       s.FederatedUserId = &v
+       return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest
+type GetCallerIdentityInput struct {
+       _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityInput) GoString() string {
+       return s.String()
+}
+
+// Contains the response to a successful GetCallerIdentity request, including
+// information about the entity making the request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse
+type GetCallerIdentityOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The AWS account ID number of the account that owns or contains the calling
+       // entity.
+       Account *string `type:"string"`
+
+       // The AWS ARN associated with the calling entity.
+       Arn *string `min:"20" type:"string"`
+
+       // The unique identifier of the calling entity. The exact value depends on the
+       // type of entity making the call. The values returned are those listed in the
+       // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
+       // found on the Policy Variables reference page in the IAM User Guide.
+       UserId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetCallerIdentityOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetCallerIdentityOutput) GoString() string {
+       return s.String()
+}
+
+// SetAccount sets the Account field's value.
+func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
+       s.Account = &v
+       return s
+}
+
+// SetArn sets the Arn field's value.
+func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
+       s.Arn = &v
+       return s
+}
+
+// SetUserId sets the UserId field's value.
+func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
+       s.UserId = &v
+       return s
+}
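Since the input carries no fields, GetCallerIdentity doubles as a cheap smoke test for whichever credentials the session resolved. A sketch, not part of the vendored file:

package sketch

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/sts"
)

// whoAmI prints the account and ARN behind the client's credentials.
func whoAmI(svc *sts.STS) error {
    out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
    if err != nil {
        return err
    }
    fmt.Printf("account=%s arn=%s\n", aws.StringValue(out.Account), aws.StringValue(out.Arn))
    return nil
}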
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest
+type GetFederationTokenInput struct {
+       _ struct{} `type:"structure"`
+
+       // The duration, in seconds, that the session should last. Acceptable durations
+       // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds
+       // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained
+       // using AWS account (root) credentials are restricted to a maximum of 3600
+       // seconds (one hour). If the specified duration is longer than one hour, the
+       // session obtained by using AWS account (root) credentials defaults to one
+       // hour.
+       DurationSeconds *int64 `min:"900" type:"integer"`
+
+       // The name of the federated user. The name is used as an identifier for the
+       // temporary security credentials (such as Bob). For example, you can reference
+       // the federated user name in a resource-based policy, such as in an Amazon
+       // S3 bucket policy.
+       //
+       // The regex used to validate this parameter is a string of characters consisting
+       // of upper- and lower-case alphanumeric characters with no spaces. You can
+       // also include underscores or any of the following characters: =,.@-
+       //
+       // Name is a required field
+       Name *string `min:"2" type:"string" required:"true"`
+
+       // An IAM policy in JSON format that is passed with the GetFederationToken call
+       // and evaluated along with the policy or policies that are attached to the
+       // IAM user whose credentials are used to call GetFederationToken. The passed
+       // policy is used to scope down the permissions that are available to the IAM
+       // user, by allowing only a subset of the permissions that are granted to the
+       // IAM user. The passed policy cannot grant more permissions than those granted
+       // to the IAM user. The final permissions for the federated user are the most
+       // restrictive set based on the intersection of the passed policy and the IAM
+       // user policy.
+       //
+       // If you do not pass a policy, the resulting temporary security credentials
+       // have no effective permissions. The only exception is when the temporary security
+       // credentials are used to access a resource that has a resource-based policy
+       // that specifically allows the federated user to access the resource.
+       //
+       // The format for this parameter, as described by its regex pattern, is a string
+       // of characters up to 2048 characters in length. The characters can be any
+       // ASCII character from the space character to the end of the valid character
+       // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+       // and carriage return (\u000D) characters.
+       //
+       // The policy plain text must be 2048 bytes or shorter. However, an internal
+       // conversion compresses it into a packed binary format with a separate limit.
+       // The PackedPolicySize response element indicates by percentage how close to
+       // the upper size limit the policy is, with 100% equaling the maximum allowed
+       // size.
+       //
+       // For more information about how permissions work, see Permissions for GetFederationToken
+       // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html).
+       Policy *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetFederationTokenInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"}
+       if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+               invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+       }
+       if s.Name == nil {
+               invalidParams.Add(request.NewErrParamRequired("Name"))
+       }
+       if s.Name != nil && len(*s.Name) < 2 {
+               invalidParams.Add(request.NewErrParamMinLen("Name", 2))
+       }
+       if s.Policy != nil && len(*s.Policy) < 1 {
+               invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
+       s.DurationSeconds = &v
+       return s
+}
+
+// SetName sets the Name field's value.
+func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
+       s.Name = &v
+       return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
+       s.Policy = &v
+       return s
+}
+
+// Contains the response to a successful GetFederationToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse
+type GetFederationTokenOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The temporary security credentials, which include an access key ID, a secret
+       // access key, and a security (or session) token.
+       //
+       // Note: The size of the security token that STS APIs return is not fixed. We
+       // strongly recommend that you make no assumptions about the maximum size. As
+       // of this writing, the typical size is less than 4096 bytes, but that can vary.
+       // Also, future updates to AWS might require larger sizes.
+       Credentials *Credentials `type:"structure"`
+
+       // Identifiers for the federated user associated with the credentials (such
+       // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
+       // can use the federated user's ARN in your resource-based policies, such as
+       // an Amazon S3 bucket policy.
+       FederatedUser *FederatedUser `type:"structure"`
+
+       // A percentage value indicating the size of the policy in packed form. The
+       // service rejects policies for which the packed size is greater than 100 percent
+       // of the allowed value.
+       PackedPolicySize *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s GetFederationTokenOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetFederationTokenOutput) GoString() string {
+       return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
+       s.Credentials = v
+       return s
+}
+
+// SetFederatedUser sets the FederatedUser field's value.
+func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
+       s.FederatedUser = v
+       return s
+}
+
+// SetPackedPolicySize sets the PackedPolicySize field's value.
+func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
+       s.PackedPolicySize = &v
+       return s
+}
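As the Policy documentation above stresses, a federated user has no effective permissions unless a policy is passed. A sketch that scopes the session down to read-only S3 access, not part of the vendored file; the policy JSON and user name are illustrative:

package sketch

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/sts"
)

// readOnlyS3 is an illustrative scope-down policy.
const readOnlyS3 = `{
  "Version": "2012-10-17",
  "Statement": [{"Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": "*"}]
}`

// federate issues credentials for a named federated user.
func federate(svc *sts.STS) (*sts.GetFederationTokenOutput, error) {
    return svc.GetFederationToken(&sts.GetFederationTokenInput{
        Name:            aws.String("Bob"),      // example name from the comment above
        Policy:          aws.String(readOnlyS3), // omit this and the user can do nothing
        DurationSeconds: aws.Int64(3600),
    })
}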
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest
+type GetSessionTokenInput struct {
+       _ struct{} `type:"structure"`
+
+       // The duration, in seconds, that the credentials should remain valid. Acceptable
+       // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600
+       // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions
+       // for AWS account owners are restricted to a maximum of 3600 seconds (one hour).
+       // If the duration is longer than one hour, the session for AWS account owners
+       // defaults to one hour.
+       DurationSeconds *int64 `min:"900" type:"integer"`
+
+       // The identification number of the MFA device that is associated with the IAM
+       // user who is making the GetSessionToken call. Specify this value if the IAM
+       // user has a policy that requires MFA authentication. The value is either the
+       // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
+       // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
+       // You can find the device for an IAM user by going to the AWS Management Console
+       // and viewing the user's security credentials.
+       //
+       // The regex used to validate this parameter is a string of characters consisting
+       // of upper- and lower-case alphanumeric characters with no spaces. You can
+       // also include underscores or any of the following characters: =,.@-
+       SerialNumber *string `min:"9" type:"string"`
+
+       // The value provided by the MFA device, if MFA is required. If any policy requires
+       // the IAM user to submit an MFA code, specify this value. If MFA authentication
+       // is required, and the user does not provide a code when requesting a set of
+       // temporary security credentials, the user will receive an "access denied"
+       // response when requesting resources that require MFA authentication.
+       //
+       // The format for this parameter, as described by its regex pattern, is a sequence
+       // of six numeric digits.
+       TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+       return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+       invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
+       if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+               invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+       }
+       if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+               invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+       }
+       if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+               invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+       }
+
+       if invalidParams.Len() > 0 {
+               return invalidParams
+       }
+       return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+       s.DurationSeconds = &v
+       return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+       s.SerialNumber = &v
+       return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+       s.TokenCode = &v
+       return s
+}
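For an MFA-protected session, SerialNumber and TokenCode travel together, matching the validation above (a serial of at least 9 characters, a 6-digit code). A sketch with a placeholder virtual device, not part of the vendored file:

package sketch

import (
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/service/sts"
)

// mfaSessionToken requests temporary credentials gated on an MFA code.
func mfaSessionToken(svc *sts.STS, code string) (*sts.Credentials, error) {
    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
        SerialNumber: aws.String("arn:aws:iam::123456789012:mfa/user"), // placeholder device ARN
        TokenCode:    aws.String(code),                                 // the six digits from the device
    })
    if err != nil {
        return nil, err
    }
    return out.Credentials, nil
}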
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
+type GetSessionTokenOutput struct {
+       _ struct{} `type:"structure"`
+
+       // The temporary security credentials, which include an access key ID, a secret
+       // access key, and a security (or session) token.
+       //
+       // Note: The size of the security token that STS APIs return is not fixed. We
+       // strongly recommend that you make no assumptions about the maximum size. As
+       // of this writing, the typical size is less than 4096 bytes, but that can vary.
+       // Also, future updates to AWS might require larger sizes.
+       Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+       return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+       return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
+       s.Credentials = v
+       return s
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
new file mode 100644 (file)
index 0000000..4010cc7
--- /dev/null
@@ -0,0 +1,12 @@
+package sts
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func init() {
+       initRequest = func(r *request.Request) {
+               switch r.Operation.Name {
+               case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity:
+                       r.Handlers.Sign.Clear() // these operations are unsigned
+               }
+       }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
new file mode 100644 (file)
index 0000000..d2af518
--- /dev/null
@@ -0,0 +1,124 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sts provides the client and types for making API
+// requests to AWS Security Token Service.
+//
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to STS. For example, the SDKs take care
+// of cryptographically signing requests, managing errors, and retrying requests
+// automatically. For information about the AWS SDKs, including how to download
+// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+//
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
+// (http://aws.amazon.com/documentation/).
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available
+// and are activated by default. For more information, see Activating and Deactivating
+// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To use the client for AWS Security Token Service you will first need
+// to create a new instance of it.
+//
+// When creating a client for an AWS service you'll first need to have a Session
+// already created. The Session provides configuration that can be shared
+// between multiple service clients. Additional configuration can be applied to
+// the Session and service's client when they are constructed. The aws package's
+// Config type contains several fields such as Region for the AWS Region the
+// client should make API requests to. The optional Config value can be provided
+// as the variadic argument for Sessions and client creation.
+//
+// Once the service's client is created you can use it to make API requests to
+// the AWS service. These clients are safe to use concurrently.
+//
+//   // Create a session to share configuration, and load external configuration.
+//   sess := session.Must(session.NewSession())
+//
+//   // Create the service's client with the session.
+//   svc := sts.New(sess)
+//
+// See the SDK's documentation for more information on how to use service clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws package's Config type for more information on configuration options.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating the service's client.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+//
+// Once the client is created you can make an API request to the service.
+// Each API method takes an input parameter, and returns the service response
+// and an error.
+//
+// The API method's documentation will list the error codes the service can
+// return for the operation, if the service models the API operation's errors.
+// These errors will also be available as const strings prefixed with "ErrCode".
+//
+//   result, err := svc.AssumeRole(params)
+//   if err != nil {
+//       // Cast err to awserr.Error to handle specific error codes.
+//       aerr, ok := err.(awserr.Error)
+//       if ok && aerr.Code() == <error code to check for> {
+//           // Specific error code handling
+//       }
+//       return err
+//   }
+//
+//   fmt.Println("AssumeRole result:")
+//   fmt.Println(result)
+//
+// Using the Client with Context
+//
+// The service's client also provides methods to make API requests with a Context
+// value. This allows you to control the timeout, and cancellation of pending
+// requests. These methods also take request Option as variadic parameter to apply
+// additional configuration to the API request.
+//
+//   ctx := context.Background()
+//
+//   result, err := svc.AssumeRoleWithContext(ctx, params)
+//
+// See the request package documentation for more information on using Context pattern
+// with the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
+package sts
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
new file mode 100644 (file)
index 0000000..e24884e
--- /dev/null
@@ -0,0 +1,73 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+const (
+
+       // ErrCodeExpiredTokenException for service response error code
+       // "ExpiredTokenException".
+       //
+       // The web identity token that was passed is expired or is not valid. Get a
+       // new identity token from the identity provider and then retry the request.
+       ErrCodeExpiredTokenException = "ExpiredTokenException"
+
+       // ErrCodeIDPCommunicationErrorException for service response error code
+       // "IDPCommunicationError".
+       //
+       // The request could not be fulfilled because the non-AWS identity provider
+       // (IDP) that was asked to verify the incoming identity token could not be reached.
+       // This is often a transient error caused by network conditions. Retry the request
+       // a limited number of times so that you don't exceed the request rate. If the
+       // error persists, the non-AWS identity provider might be down or not responding.
+       ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
+
+       // ErrCodeIDPRejectedClaimException for service response error code
+       // "IDPRejectedClaim".
+       //
+       // The identity provider (IdP) reported that authentication failed. This might
+       // be because the claim is invalid.
+       //
+       // If this error is returned for the AssumeRoleWithWebIdentity operation, it
+       // can also mean that the claim has expired or has been explicitly revoked.
+       ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
+
+       // ErrCodeInvalidAuthorizationMessageException for service response error code
+       // "InvalidAuthorizationMessageException".
+       //
+       // The error returned if the message passed to DecodeAuthorizationMessage was
+       // invalid. This can happen if the token contains invalid characters, such as
+       // linebreaks.
+       ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
+
+       // ErrCodeInvalidIdentityTokenException for service response error code
+       // "InvalidIdentityToken".
+       //
+       // The web identity token that was passed could not be validated by AWS. Get
+       // a new identity token from the identity provider and then retry the request.
+       ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
+
+       // ErrCodeMalformedPolicyDocumentException for service response error code
+       // "MalformedPolicyDocument".
+       //
+       // The request was rejected because the policy document was malformed. The error
+       // message describes the specific error.
+       ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
+
+       // ErrCodePackedPolicyTooLargeException for service response error code
+       // "PackedPolicyTooLarge".
+       //
+       // The request was rejected because the policy document was too large. The error
+       // message describes how big the policy document is, in packed form, as a percentage
+       // of what the API allows.
+       ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
+
+       // ErrCodeRegionDisabledException for service response error code
+       // "RegionDisabledException".
+       //
+       // STS is not activated in the requested region for the account that is being
+       // asked to generate credentials. The account administrator must use the IAM
+       // console to activate STS in that region. For more information, see Activating
+       // and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+       // in the IAM User Guide.
+       ErrCodeRegionDisabledException = "RegionDisabledException"
+)
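These constants are what an application compares against the Code() of an awserr.Error. The helper below is a hypothetical editor's sketch (not part of the generated file, and it assumes the awserr import) mapping a failed call to a remediation hint drawn from the comments above.

    // assumeRoleErrHint is a hypothetical helper showing how the ErrCode
    // constants pair with awserr.Error when inspecting a failure.
    func assumeRoleErrHint(err error) string {
        aerr, ok := err.(awserr.Error)
        if !ok {
            return "not a modeled service error"
        }
        switch aerr.Code() {
        case ErrCodeExpiredTokenException, ErrCodeInvalidIdentityTokenException:
            return "get a new identity token from the identity provider and retry"
        case ErrCodeIDPCommunicationErrorException:
            return "transient IdP outage; retry a limited number of times"
        case ErrCodeRegionDisabledException:
            return "activate STS in this region from the IAM console"
        default:
            return aerr.Message()
        }
    }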
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644 (file)
index 0000000..1ee5839
--- /dev/null
@@ -0,0 +1,93 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/client"
+       "github.com/aws/aws-sdk-go/aws/client/metadata"
+       "github.com/aws/aws-sdk-go/aws/request"
+       "github.com/aws/aws-sdk-go/aws/signer/v4"
+       "github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+       *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+       ServiceName = "sts"       // Service endpoint prefix API calls are made to.
+       EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+       c := p.ClientConfig(EndpointsID, cfgs...)
+       return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+       svc := &STS{
+               Client: client.New(
+                       cfg,
+                       metadata.ClientInfo{
+                               ServiceName:   ServiceName,
+                               SigningName:   signingName,
+                               SigningRegion: signingRegion,
+                               Endpoint:      endpoint,
+                               APIVersion:    "2011-06-15",
+                       },
+                       handlers,
+               ),
+       }
+
+       // Handlers
+       svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+       svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+       svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+       svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+       svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+       // Run custom client initialization if present
+       if initClient != nil {
+               initClient(svc.Client)
+       }
+
+       return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+       req := c.NewRequest(op, params, data)
+
+       // Run custom request initialization if present
+       if initRequest != nil {
+               initRequest(req)
+       }
+
+       return req
+}
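To make the variadic configuration that New's comment describes concrete, a short hedged editor's sketch: two clients derived from one session, where only the second overrides its region.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Inherits the session's shared configuration unchanged.
        stsDefault := sts.New(sess)

        // Same session, but the optional aws.Config argument overrides
        // the Region for this client only.
        stsWest := sts.New(sess, aws.NewConfig().WithRegion("us-west-2"))

        _, _ = stsDefault, stsWest // silence unused variables in this sketch
    }

The override works because ClientConfig merges the variadic configs over the session's base Config before newClient is called.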
diff --git a/vendor/github.com/bgentry/go-netrc/LICENSE b/vendor/github.com/bgentry/go-netrc/LICENSE
new file mode 100644 (file)
index 0000000..aade9a5
--- /dev/null
@@ -0,0 +1,20 @@
+Original version Copyright © 2010 Fazlul Shahriar <fshahriar@gmail.com>. Newer
+portions Copyright © 2014 Blake Gentry <blakesgentry@gmail.com>.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/bgentry/go-netrc/netrc/netrc.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go
new file mode 100644 (file)
index 0000000..ea49987
--- /dev/null
@@ -0,0 +1,510 @@
+package netrc
+
+import (
+       "bufio"
+       "bytes"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "os"
+       "strings"
+       "sync"
+       "unicode"
+       "unicode/utf8"
+)
+
+type tkType int
+
+const (
+       tkMachine tkType = iota
+       tkDefault
+       tkLogin
+       tkPassword
+       tkAccount
+       tkMacdef
+       tkComment
+       tkWhitespace
+)
+
+var keywords = map[string]tkType{
+       "machine":  tkMachine,
+       "default":  tkDefault,
+       "login":    tkLogin,
+       "password": tkPassword,
+       "account":  tkAccount,
+       "macdef":   tkMacdef,
+       "#":        tkComment,
+}
+
+type Netrc struct {
+       tokens     []*token
+       machines   []*Machine
+       macros     Macros
+       updateLock sync.Mutex
+}
+
+// FindMachine returns the Machine in n named by name. If a machine named by
+// name exists, it is returned. If no Machine with name name is found and there
+// is a ``default'' machine, the ``default'' machine is returned. Otherwise, nil
+// is returned.
+func (n *Netrc) FindMachine(name string) (m *Machine) {
+       // TODO(bgentry): not safe for concurrency
+       var def *Machine
+       for _, m = range n.machines {
+               if m.Name == name {
+                       return m
+               }
+               if m.IsDefault() {
+                       def = m
+               }
+       }
+       if def == nil {
+               return nil
+       }
+       return def
+}
+
+// MarshalText implements the encoding.TextMarshaler interface to encode a
+// Netrc into text format.
+func (n *Netrc) MarshalText() (text []byte, err error) {
+       // TODO(bgentry): not safe for concurrency
+       for i := range n.tokens {
+               switch n.tokens[i].kind {
+               case tkComment, tkDefault, tkWhitespace: // always append these types
+                       text = append(text, n.tokens[i].rawkind...)
+               default:
+                       if n.tokens[i].value != "" { // skip empty-value tokens
+                               text = append(text, n.tokens[i].rawkind...)
+                       }
+               }
+               if n.tokens[i].kind == tkMacdef {
+                       text = append(text, ' ')
+                       text = append(text, n.tokens[i].macroName...)
+               }
+               text = append(text, n.tokens[i].rawvalue...)
+       }
+       return
+}
+
+func (n *Netrc) NewMachine(name, login, password, account string) *Machine {
+       n.updateLock.Lock()
+       defer n.updateLock.Unlock()
+
+       prefix := "\n"
+       if len(n.tokens) == 0 {
+               prefix = ""
+       }
+       m := &Machine{
+               Name:     name,
+               Login:    login,
+               Password: password,
+               Account:  account,
+
+               nametoken: &token{
+                       kind:     tkMachine,
+                       rawkind:  []byte(prefix + "machine"),
+                       value:    name,
+                       rawvalue: []byte(" " + name),
+               },
+               logintoken: &token{
+                       kind:     tkLogin,
+                       rawkind:  []byte("\n\tlogin"),
+                       value:    login,
+                       rawvalue: []byte(" " + login),
+               },
+               passtoken: &token{
+                       kind:     tkPassword,
+                       rawkind:  []byte("\n\tpassword"),
+                       value:    password,
+                       rawvalue: []byte(" " + password),
+               },
+               accounttoken: &token{
+                       kind:     tkAccount,
+                       rawkind:  []byte("\n\taccount"),
+                       value:    account,
+                       rawvalue: []byte(" " + account),
+               },
+       }
+       n.insertMachineTokensBeforeDefault(m)
+       for i := range n.machines {
+               if n.machines[i].IsDefault() {
+                       n.machines = append(append(n.machines[:i], m), n.machines[i:]...)
+                       return m
+               }
+       }
+       n.machines = append(n.machines, m)
+       return m
+}
+
+func (n *Netrc) insertMachineTokensBeforeDefault(m *Machine) {
+       newtokens := []*token{m.nametoken}
+       if m.logintoken.value != "" {
+               newtokens = append(newtokens, m.logintoken)
+       }
+       if m.passtoken.value != "" {
+               newtokens = append(newtokens, m.passtoken)
+       }
+       if m.accounttoken.value != "" {
+               newtokens = append(newtokens, m.accounttoken)
+       }
+       for i := range n.tokens {
+               if n.tokens[i].kind == tkDefault {
+                       // found the default, now insert tokens before it
+                       n.tokens = append(n.tokens[:i], append(newtokens, n.tokens[i:]...)...)
+                       return
+               }
+       }
+       // didn't find a default, just add the newtokens to the end
+       n.tokens = append(n.tokens, newtokens...)
+       return
+}
+
+func (n *Netrc) RemoveMachine(name string) {
+       n.updateLock.Lock()
+       defer n.updateLock.Unlock()
+
+       for i := range n.machines {
+               if n.machines[i] != nil && n.machines[i].Name == name {
+                       m := n.machines[i]
+                       for _, t := range []*token{
+                               m.nametoken, m.logintoken, m.passtoken, m.accounttoken,
+                       } {
+                               n.removeToken(t)
+                       }
+                       n.machines = append(n.machines[:i], n.machines[i+1:]...)
+                       return
+               }
+       }
+}
+
+func (n *Netrc) removeToken(t *token) {
+       if t != nil {
+               for i := range n.tokens {
+                       if n.tokens[i] == t {
+                               n.tokens = append(n.tokens[:i], n.tokens[i+1:]...)
+                               return
+                       }
+               }
+       }
+}
+
+// Machine contains information about a remote machine.
+type Machine struct {
+       Name     string
+       Login    string
+       Password string
+       Account  string
+
+       nametoken    *token
+       logintoken   *token
+       passtoken    *token
+       accounttoken *token
+}
+
+// IsDefault returns true if the machine is a "default" token, denoted by an
+// empty name.
+func (m *Machine) IsDefault() bool {
+       return m.Name == ""
+}
+
+// UpdatePassword sets the password for the Machine m.
+func (m *Machine) UpdatePassword(newpass string) {
+       m.Password = newpass
+       updateTokenValue(m.passtoken, newpass)
+}
+
+// UpdateLogin sets the login for the Machine m.
+func (m *Machine) UpdateLogin(newlogin string) {
+       m.Login = newlogin
+       updateTokenValue(m.logintoken, newlogin)
+}
+
+// UpdateAccount sets the account for the Machine m.
+func (m *Machine) UpdateAccount(newaccount string) {
+       m.Account = newaccount
+       updateTokenValue(m.accounttoken, newaccount)
+}
+
+func updateTokenValue(t *token, value string) {
+       oldvalue := t.value
+       t.value = value
+       newraw := make([]byte, len(t.rawvalue))
+       copy(newraw, t.rawvalue)
+       t.rawvalue = append(
+               bytes.TrimSuffix(newraw, []byte(oldvalue)),
+               []byte(value)...,
+       )
+}
+
+// Macros contains all the macro definitions in a netrc file.
+type Macros map[string]string
+
+type token struct {
+       kind      tkType
+       macroName string
+       value     string
+       rawkind   []byte
+       rawvalue  []byte
+}
+
+// Error represents a netrc file parse error.
+type Error struct {
+       LineNum int    // Line number
+       Msg     string // Error message
+}
+
+// Error returns a string representation of error e.
+func (e *Error) Error() string {
+       return fmt.Sprintf("line %d: %s", e.LineNum, e.Msg)
+}
+
+func (e *Error) BadDefaultOrder() bool {
+       return e.Msg == errBadDefaultOrder
+}
+
+const errBadDefaultOrder = "default token must appear after all machine tokens"
+
+// scanLinesKeepPrefix is a split function for a Scanner that returns each line
+// of text. The returned token may include newlines if they are before the
+// first non-space character. The returned line may be empty. The end-of-line
+// marker is one optional carriage return followed by one mandatory newline. In
+// regular expression notation, it is `\r?\n`. The last non-empty line of
+// input will be returned even if it has no newline.
+func scanLinesKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+       if atEOF && len(data) == 0 {
+               return 0, nil, nil
+       }
+       // Skip leading spaces.
+       start := 0
+       for width := 0; start < len(data); start += width {
+               var r rune
+               r, width = utf8.DecodeRune(data[start:])
+               if !unicode.IsSpace(r) {
+                       break
+               }
+       }
+       if i := bytes.IndexByte(data[start:], '\n'); i >= 0 {
+               // We have a full newline-terminated line.
+               return start + i, data[0 : start+i], nil
+       }
+       // If we're at EOF, we have a final, non-terminated line. Return it.
+       if atEOF {
+               return len(data), data, nil
+       }
+       // Request more data.
+       return 0, nil, nil
+}
+
+// scanTokensKeepPrefix is a split function for a Scanner that returns each
+// space-separated word of text, with prefixing spaces included. It will never
+// return an empty string. The definition of space is set by unicode.IsSpace.
+//
+// Adapted from bufio.ScanWords().
+func scanTokensKeepPrefix(data []byte, atEOF bool) (advance int, token []byte, err error) {
+       // Skip leading spaces.
+       start := 0
+       for width := 0; start < len(data); start += width {
+               var r rune
+               r, width = utf8.DecodeRune(data[start:])
+               if !unicode.IsSpace(r) {
+                       break
+               }
+       }
+       if atEOF && len(data) == 0 || start == len(data) {
+               return len(data), data, nil
+       }
+       if len(data) > start && data[start] == '#' {
+               return scanLinesKeepPrefix(data, atEOF)
+       }
+       // Scan until space, marking end of word.
+       for width, i := 0, start; i < len(data); i += width {
+               var r rune
+               r, width = utf8.DecodeRune(data[i:])
+               if unicode.IsSpace(r) {
+                       return i, data[:i], nil
+               }
+       }
+       // If we're at EOF, we have a final, non-empty, non-terminated word. Return it.
+       if atEOF && len(data) > start {
+               return len(data), data, nil
+       }
+       // Request more data.
+       return 0, nil, nil
+}
+
+func newToken(rawb []byte) (*token, error) {
+       _, tkind, err := bufio.ScanWords(rawb, true)
+       if err != nil {
+               return nil, err
+       }
+       var ok bool
+       t := token{rawkind: rawb}
+       t.kind, ok = keywords[string(tkind)]
+       if !ok {
+               trimmed := strings.TrimSpace(string(tkind))
+               if trimmed == "" {
+                       t.kind = tkWhitespace // whitespace-only, should happen only at EOF
+                       return &t, nil
+               }
+               if strings.HasPrefix(trimmed, "#") {
+                       t.kind = tkComment // this is a comment
+                       return &t, nil
+               }
+               return &t, fmt.Errorf("keyword expected; got %s", tkind)
+       }
+       return &t, nil
+}
+
+func scanValue(scanner *bufio.Scanner, pos int) ([]byte, string, int, error) {
+       if scanner.Scan() {
+               raw := scanner.Bytes()
+               pos += bytes.Count(raw, []byte{'\n'})
+               return raw, strings.TrimSpace(string(raw)), pos, nil
+       }
+       if err := scanner.Err(); err != nil {
+               return nil, "", pos, &Error{pos, err.Error()}
+       }
+       return nil, "", pos, nil
+}
+
+func parse(r io.Reader, pos int) (*Netrc, error) {
+       b, err := ioutil.ReadAll(r)
+       if err != nil {
+               return nil, err
+       }
+
+       nrc := Netrc{machines: make([]*Machine, 0, 20), macros: make(Macros, 10)}
+
+       defaultSeen := false
+       var currentMacro *token
+       var m *Machine
+       var t *token
+       scanner := bufio.NewScanner(bytes.NewReader(b))
+       scanner.Split(scanTokensKeepPrefix)
+
+       for scanner.Scan() {
+               rawb := scanner.Bytes()
+               if len(rawb) == 0 {
+                       break
+               }
+               pos += bytes.Count(rawb, []byte{'\n'})
+               t, err = newToken(rawb)
+               if err != nil {
+                       if currentMacro == nil {
+                               return nil, &Error{pos, err.Error()}
+                       }
+                       currentMacro.rawvalue = append(currentMacro.rawvalue, rawb...)
+                       continue
+               }
+
+               if currentMacro != nil && bytes.Contains(rawb, []byte{'\n', '\n'}) {
+                       // if macro rawvalue + rawb would contain \n\n, then macro def is over
+                       currentMacro.value = strings.TrimLeft(string(currentMacro.rawvalue), "\r\n")
+                       nrc.macros[currentMacro.macroName] = currentMacro.value
+                       currentMacro = nil
+               }
+
+               switch t.kind {
+               case tkMacdef:
+                       if _, t.macroName, pos, err = scanValue(scanner, pos); err != nil {
+                               return nil, &Error{pos, err.Error()}
+                       }
+                       currentMacro = t
+               case tkDefault:
+                       if defaultSeen {
+                               return nil, &Error{pos, "multiple default tokens"}
+                       }
+                       if m != nil {
+                               nrc.machines, m = append(nrc.machines, m), nil
+                       }
+                       m = new(Machine)
+                       m.Name = ""
+                       defaultSeen = true
+               case tkMachine:
+                       if defaultSeen {
+                               return nil, &Error{pos, errBadDefaultOrder}
+                       }
+                       if m != nil {
+                               nrc.machines, m = append(nrc.machines, m), nil
+                       }
+                       m = new(Machine)
+                       if t.rawvalue, m.Name, pos, err = scanValue(scanner, pos); err != nil {
+                               return nil, &Error{pos, err.Error()}
+                       }
+                       t.value = m.Name
+                       m.nametoken = t
+               case tkLogin:
+                       if m == nil || m.Login != "" {
+                               return nil, &Error{pos, "unexpected token login"}
+                       }
+                       if t.rawvalue, m.Login, pos, err = scanValue(scanner, pos); err != nil {
+                               return nil, &Error{pos, err.Error()}
+                       }
+                       t.value = m.Login
+                       m.logintoken = t
+               case tkPassword:
+                       if m == nil || m.Password != "" {
+                               return nil, &Error{pos, "unexpected token password"}
+                       }
+                       if t.rawvalue, m.Password, pos, err = scanValue(scanner, pos); err != nil {
+                               return nil, &Error{pos, err.Error()}
+                       }
+                       t.value = m.Password
+                       m.passtoken = t
+               case tkAccount:
+                       if m == nil || m.Account != "" {
+                               return nil, &Error{pos, "unexpected token account"}
+                       }
+                       if t.rawvalue, m.Account, pos, err = scanValue(scanner, pos); err != nil {
+                               return nil, &Error{pos, err.Error()}
+                       }
+                       t.value = m.Account
+                       m.accounttoken = t
+               }
+
+               nrc.tokens = append(nrc.tokens, t)
+       }
+
+       if err := scanner.Err(); err != nil {
+               return nil, err
+       }
+
+       if m != nil {
+               nrc.machines, m = append(nrc.machines, m), nil
+       }
+       return &nrc, nil
+}
+
+// ParseFile opens the file at filename and then passes its io.Reader to
+// Parse().
+func ParseFile(filename string) (*Netrc, error) {
+       fd, err := os.Open(filename)
+       if err != nil {
+               return nil, err
+       }
+       defer fd.Close()
+       return Parse(fd)
+}
+
+// Parse parses the Reader r as a netrc file and returns the set of
+// machine information and macros defined in it. The ``default'' machine,
+// which is intended to be used when no machine name matches, is identified
+// by an empty machine name. There can be only one ``default'' machine.
+//
+// If there is a parsing error, an Error is returned.
+func Parse(r io.Reader) (*Netrc, error) {
+       return parse(r, 1)
+}
+
+// FindMachine parses the netrc file identified by filename and returns the
+// Machine named by name. If a problem occurs parsing the file at filename, an
+// error is returned. If a machine named by name exists, it is returned. If no
+// Machine with name name is found and there is a ``default'' machine, the
+// ``default'' machine is returned. Otherwise, nil is returned.
+func FindMachine(filename, name string) (m *Machine, err error) {
+       n, err := ParseFile(filename)
+       if err != nil {
+               return nil, err
+       }
+       return n.FindMachine(name), nil
+}
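A minimal editor's sketch of the exported API above (the path and machine name are placeholders): parse a netrc file, fall back to the ``default'' machine via FindMachine, rewrite a credential, and serialize the file back out with its original token layout preserved.

    package main

    import (
        "fmt"
        "io/ioutil"
        "log"

        "github.com/bgentry/go-netrc/netrc"
    )

    func main() {
        path := "/home/user/.netrc" // placeholder

        n, err := netrc.ParseFile(path)
        if err != nil {
            log.Fatal(err)
        }

        // Returns the named machine, the default machine if no name
        // matches, or nil if neither exists.
        m := n.FindMachine("api.example.com")
        if m == nil {
            log.Fatal("no matching or default machine")
        }

        // UpdatePassword keeps the token layout intact, so MarshalText
        // reproduces the file with only the password changed.
        m.UpdatePassword("new-secret")
        text, err := n.MarshalText()
        if err != nil {
            log.Fatal(err)
        }
        if err := ioutil.WriteFile(path, text, 0600); err != nil {
            log.Fatal(err)
        }
        fmt.Println("updated", path)
    }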
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644 (file)
index 0000000..c836416
--- /dev/null
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644 (file)
index 0000000..8a4a658
--- /dev/null
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+       "reflect"
+       "unsafe"
+)
+
+const (
+       // UnsafeDisabled is a build-time constant which specifies whether or
+       // not access to the unsafe package is available.
+       UnsafeDisabled = false
+
+       // ptrSize is the size of a pointer on the current arch.
+       ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+       // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+       // internal reflect.Value fields.  These values are valid before golang
+       // commit ecccf07e7f9d which changed the format.  They are also valid
+       // after commit 82f48826c6c7 which changed the format again to mirror
+       // the original format.  Code in the init function updates these offsets
+       // as necessary.
+       offsetPtr    = uintptr(ptrSize)
+       offsetScalar = uintptr(0)
+       offsetFlag   = uintptr(ptrSize * 2)
+
+       // flagKindWidth and flagKindShift indicate various bits that the
+       // reflect package uses internally to track kind information.
+       //
+       // flagRO indicates whether or not the value field of a reflect.Value is
+       // read-only.
+       //
+       // flagIndir indicates whether the value field of a reflect.Value is
+       // the actual data or a pointer to the data.
+       //
+       // These values are valid before golang commit 90a7c3c86944 which
+       // changed their positions.  Code in the init function updates these
+       // flags as necessary.
+       flagKindWidth = uintptr(5)
+       flagKindShift = uintptr(flagKindWidth - 1)
+       flagRO        = uintptr(1 << 0)
+       flagIndir     = uintptr(1 << 1)
+)
+
+func init() {
+       // Older versions of reflect.Value stored small integers directly in the
+       // ptr field (which is named val in the older versions).  Versions
+       // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+       // scalar for this purpose which unfortunately came before the flag
+       // field, so the offset of the flag field is different for those
+       // versions.
+       //
+       // This code constructs a new reflect.Value from a known small integer
+       // and checks if the size of the reflect.Value struct indicates it has
+       // the scalar field. When it does, the offsets are updated accordingly.
+       vv := reflect.ValueOf(0xf00)
+       if unsafe.Sizeof(vv) == (ptrSize * 4) {
+               offsetScalar = ptrSize * 2
+               offsetFlag = ptrSize * 3
+       }
+
+       // Commit 90a7c3c86944 changed the flag positions such that the low
+       // order bits are the kind.  This code extracts the kind from the flags
+       // field and ensures it's the correct type.  When it's not, the flag
+       // order has been changed to the newer format, so the flags are updated
+       // accordingly.
+       upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+       upfv := *(*uintptr)(upf)
+       flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+       if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+               flagKindShift = 0
+               flagRO = 1 << 5
+               flagIndir = 1 << 6
+
+               // Commit adf9b30e5594 modified the flags to separate the
+               // flagRO flag into two bits which specifies whether or not the
+               // field is embedded.  This causes flagIndir to move over a bit
+               // and means that flagRO is the combination of either of the
+               // original flagRO bit and the new bit.
+               //
+               // This code detects the change by extracting what used to be
+               // the indirect bit to ensure it's set.  When it's not, the flag
+               // order has been changed to the newer format, so the flags are
+               // updated accordingly.
+               if upfv&flagIndir == 0 {
+                       flagRO = 3 << 5
+                       flagIndir = 1 << 7
+               }
+       }
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data.  It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+       indirects := 1
+       vt := v.Type()
+       upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+       rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+       if rvf&flagIndir != 0 {
+               vt = reflect.PtrTo(v.Type())
+               indirects++
+       } else if offsetScalar != 0 {
+               // The value is in the scalar field when it's not one of the
+               // reference types.
+               switch vt.Kind() {
+               case reflect.Uintptr:
+               case reflect.Chan:
+               case reflect.Func:
+               case reflect.Map:
+               case reflect.Ptr:
+               case reflect.UnsafePointer:
+               default:
+                       upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+                               offsetScalar)
+               }
+       }
+
+       pv := reflect.NewAt(vt, upv)
+       rv = pv
+       for i := 0; i < indirects; i++ {
+               rv = rv.Elem()
+       }
+       return rv
+}
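To illustrate what the bypass buys, an editor's sketch (assuming the package-level spew.Sdump defined elsewhere in this package): fmt prints an unexported struct field's raw value but cannot call the field's Stringer, while spew can, because unsafeReflectValue rebuilds an addressable view of the protected value.

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    type id int

    func (v id) String() string { return fmt.Sprintf("id#%d", v) }

    type record struct {
        key id // unexported; fmt cannot invoke its String method here
    }

    func main() {
        r := record{key: 7}
        // spew reaches the unexported field through unsafeReflectValue and
        // invokes its Stringer, so the dump shows id#7 rather than just 7.
        fmt.Print(spew.Sdump(r))
    }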
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644 (file)
index 0000000..1fe3cf3
--- /dev/null
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+       // UnsafeDisabled is a build-time constant which specifies whether or
+       // not access to the unsafe package is available.
+       UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data.  However, doing this relies on access to
+// the unsafe package.  This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+       return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644 (file)
index 0000000..7c519ff
--- /dev/null
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "reflect"
+       "sort"
+       "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead.  This mirrors
+// the technique used in the fmt package.
+var (
+       panicBytes            = []byte("(PANIC=")
+       plusBytes             = []byte("+")
+       iBytes                = []byte("i")
+       trueBytes             = []byte("true")
+       falseBytes            = []byte("false")
+       interfaceBytes        = []byte("(interface {})")
+       commaNewlineBytes     = []byte(",\n")
+       newlineBytes          = []byte("\n")
+       openBraceBytes        = []byte("{")
+       openBraceNewlineBytes = []byte("{\n")
+       closeBraceBytes       = []byte("}")
+       asteriskBytes         = []byte("*")
+       colonBytes            = []byte(":")
+       colonSpaceBytes       = []byte(": ")
+       openParenBytes        = []byte("(")
+       closeParenBytes       = []byte(")")
+       spaceBytes            = []byte(" ")
+       pointerChainBytes     = []byte("->")
+       nilAngleBytes         = []byte("<nil>")
+       maxNewlineBytes       = []byte("<max depth reached>\n")
+       maxShortBytes         = []byte("<max>")
+       circularBytes         = []byte("<already shown>")
+       circularShortBytes    = []byte("<shown>")
+       invalidAngleBytes     = []byte("<invalid>")
+       openBracketBytes      = []byte("[")
+       closeBracketBytes     = []byte("]")
+       percentBytes          = []byte("%")
+       precisionBytes        = []byte(".")
+       openAngleBytes        = []byte("<")
+       closeAngleBytes       = []byte(">")
+       openMapBytes          = []byte("map[")
+       closeMapBytes         = []byte("]")
+       lenEqualsBytes        = []byte("len=")
+       capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+       if err := recover(); err != nil {
+               w.Write(panicBytes)
+               fmt.Fprintf(w, "%v", err)
+               w.Write(closeParenBytes)
+       }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+       // We need an interface to check if the type implements the error or
+       // Stringer interface.  However, the reflect package won't give us an
+       // interface on certain things like unexported struct fields in order
+       // to enforce visibility rules.  We use unsafe, when it's available,
+       // to bypass these restrictions since this package does not mutate the
+       // values.
+       if !v.CanInterface() {
+               if UnsafeDisabled {
+                       return false
+               }
+
+               v = unsafeReflectValue(v)
+       }
+
+       // Choose whether or not to do error and Stringer interface lookups against
+       // the base type or a pointer to the base type depending on settings.
+       // Technically calling one of these methods with a pointer receiver can
+// mutate the value, however, types which choose to satisfy an error or
+       // Stringer interface with a pointer receiver should not be mutating their
+       // state inside these interface methods.
+       if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+               v = unsafeReflectValue(v)
+       }
+       if v.CanAddr() {
+               v = v.Addr()
+       }
+
+       // Is it an error or Stringer?
+       switch iface := v.Interface().(type) {
+       case error:
+               defer catchPanic(w, v)
+               if cs.ContinueOnMethod {
+                       w.Write(openParenBytes)
+                       w.Write([]byte(iface.Error()))
+                       w.Write(closeParenBytes)
+                       w.Write(spaceBytes)
+                       return false
+               }
+
+               w.Write([]byte(iface.Error()))
+               return true
+
+       case fmt.Stringer:
+               defer catchPanic(w, v)
+               if cs.ContinueOnMethod {
+                       w.Write(openParenBytes)
+                       w.Write([]byte(iface.String()))
+                       w.Write(closeParenBytes)
+                       w.Write(spaceBytes)
+                       return false
+               }
+               w.Write([]byte(iface.String()))
+               return true
+       }
+       return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+       if val {
+               w.Write(trueBytes)
+       } else {
+               w.Write(falseBytes)
+       }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+       w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+       w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+       w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+       r := real(c)
+       w.Write(openParenBytes)
+       w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+       i := imag(c)
+       if i >= 0 {
+               w.Write(plusBytes)
+       }
+       w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+       w.Write(iBytes)
+       w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+       // Null pointer.
+       num := uint64(p)
+       if num == 0 {
+               w.Write(nilAngleBytes)
+               return
+       }
+
+       // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+       buf := make([]byte, 18)
+
+       // It's simpler to construct the hex string right to left.
+       base := uint64(16)
+       i := len(buf) - 1
+       for num >= base {
+               buf[i] = hexDigits[num%base]
+               num /= base
+               i--
+       }
+       buf[i] = hexDigits[num]
+
+       // Add '0x' prefix.
+       i--
+       buf[i] = 'x'
+       i--
+       buf[i] = '0'
+
+       // Strip unused leading bytes.
+       buf = buf[i:]
+       w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+       values  []reflect.Value
+       strings []string // either nil or same len as values
+       cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted.  It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+       vs := &valuesSorter{values: values, cs: cs}
+       if canSortSimply(vs.values[0].Kind()) {
+               return vs
+       }
+       if !cs.DisableMethods {
+               vs.strings = make([]string, len(values))
+               for i := range vs.values {
+                       b := bytes.Buffer{}
+                       if !handleMethods(cs, &b, vs.values[i]) {
+                               vs.strings = nil
+                               break
+                       }
+                       vs.strings[i] = b.String()
+               }
+       }
+       if vs.strings == nil && cs.SpewKeys {
+               vs.strings = make([]string, len(values))
+               for i := range vs.values {
+                       vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+               }
+       }
+       return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+       // This switch parallels valueSortLess, except for the default case.
+       switch kind {
+       case reflect.Bool:
+               return true
+       case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+               return true
+       case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+               return true
+       case reflect.Float32, reflect.Float64:
+               return true
+       case reflect.String:
+               return true
+       case reflect.Uintptr:
+               return true
+       case reflect.Array:
+               return true
+       }
+       return false
+}
+
+// Len returns the number of values in the slice.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+       return len(s.values)
+}
+
+// Swap swaps the values at the passed indices.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+       s.values[i], s.values[j] = s.values[j], s.values[i]
+       if s.strings != nil {
+               s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+       }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value.  It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+       switch a.Kind() {
+       case reflect.Bool:
+               return !a.Bool() && b.Bool()
+       case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+               return a.Int() < b.Int()
+       case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+               return a.Uint() < b.Uint()
+       case reflect.Float32, reflect.Float64:
+               return a.Float() < b.Float()
+       case reflect.String:
+               return a.String() < b.String()
+       case reflect.Uintptr:
+               return a.Uint() < b.Uint()
+       case reflect.Array:
+               // Compare the contents of both arrays.
+               l := a.Len()
+               for i := 0; i < l; i++ {
+                       av := a.Index(i)
+                       bv := b.Index(i)
+                       if av.Interface() == bv.Interface() {
+                               continue
+                       }
+                       return valueSortLess(av, bv)
+               }
+       }
+       return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j.  It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+       if s.strings == nil {
+               return valueSortLess(s.values[i], s.values[j])
+       }
+       return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer.  Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+       if len(values) == 0 {
+               return
+       }
+       sort.Sort(newValuesSorter(values, cs))
+}
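sortValues is what makes map output reproducible when SortKeys is set. A minimal hedged sketch, assuming the ConfigState.Sdump helper defined elsewhere in this package:

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    func main() {
        cs := spew.ConfigState{Indent: " ", SortKeys: true}
        m := map[string]int{"b": 2, "c": 3, "a": 1}

        // With SortKeys set, map keys pass through sortValues before
        // printing, so repeated dumps of the same map are identical.
        fmt.Print(cs.Sdump(m))
    }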
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644 (file)
index 0000000..2e3d22f
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+       "bytes"
+       "fmt"
+       "io"
+       "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values.  There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality.  Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation.  You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings.  See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+       // Indent specifies the string to use for each indentation level.  The
+       // global config instance that all top-level functions use set this to a
+       // single space by default.  If you would like more indentation, you might
+       // set this to a tab with "\t" or perhaps two spaces with "  ".
+       Indent string
+
+       // MaxDepth controls the maximum number of levels to descend into nested
+       // data structures.  The default, 0, means there is no limit.
+       //
+       // NOTE: Circular data structures are properly detected, so it is not
+       // necessary to set this value unless you specifically want to limit deeply
+       // nested data structures.
+       MaxDepth int
+
+       // DisableMethods specifies whether or not error and Stringer interfaces are
+       // invoked for types that implement them.
+       DisableMethods bool
+
+       // DisablePointerMethods specifies whether or not to check for and invoke
+       // error and Stringer interfaces on types which only accept a pointer
+       // receiver when the current type is not a pointer.
+       //
+       // NOTE: This might be an unsafe action since calling one of these methods
+       // with a pointer receiver could technically mutate the value, however,
+       // in practice, types which choose to satisfy an error or Stringer
+       // interface with a pointer receiver should not be mutating their state
+       // inside these interface methods.  As a result, this option relies on
+       // access to the unsafe package, so it will not have any effect when
+       // running in environments without access to the unsafe package such as
+       // Google App Engine or with the "safe" build tag specified.
+       DisablePointerMethods bool
+
+       // DisablePointerAddresses specifies whether to disable the printing of
+       // pointer addresses. This is useful when diffing data structures in tests.
+       DisablePointerAddresses bool
+
+       // DisableCapacities specifies whether to disable the printing of capacities
+       // for arrays, slices, maps and channels. This is useful when diffing
+       // data structures in tests.
+       DisableCapacities bool
+
+       // ContinueOnMethod specifies whether or not recursion should continue once
+       // a custom error or Stringer interface is invoked.  The default, false,
+       // means it will print the results of invoking the custom error or Stringer
+       // interface and return immediately instead of continuing to recurse into
+       // the internals of the data type.
+       //
+       // NOTE: This flag does not have any effect if method invocation is disabled
+       // via the DisableMethods or DisablePointerMethods options.
+       ContinueOnMethod bool
+
+       // SortKeys specifies map keys should be sorted before being printed. Use
+       // this to have a more deterministic, diffable output.  Note that only
+       // native types (bool, int, uint, floats, uintptr and string) and types
+       // that support the error or Stringer interfaces (if methods are
+       // enabled) are supported, with other types sorted according to the
+       // reflect.Value.String() output which guarantees display stability.
+       SortKeys bool
+
+       // SpewKeys specifies that, as a last resort attempt, map keys should
+       // be spewed to strings and sorted by those strings.  This is only
+       // considered if SortKeys is true.
+       SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the formatted string as a value that satisfies error.  See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+       return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+       return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+       return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+       return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+       return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+       return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+       return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+       return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+       return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+       return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+       return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+       fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+       * Pointers are dereferenced and followed
+       * Circular data structures are detected and handled properly
+       * Custom Stringer/error interfaces are optionally invoked, including
+         on unexported types
+       * Custom types which only implement the Stringer/error interfaces via
+         a pointer receiver are optionally invoked when passing non-pointer
+         variables
+       * Byte arrays and slices are dumped like the hexdump -C command which
+         includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+       fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+       var buf bytes.Buffer
+       fdump(c, &buf, a...)
+       return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+       formatters = make([]interface{}, len(args))
+       for index, arg := range args {
+               formatters[index] = newFormatter(c, arg)
+       }
+       return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+//     Indent: " "
+//     MaxDepth: 0
+//     DisableMethods: false
+//     DisablePointerMethods: false
+//     ContinueOnMethod: false
+//     SortKeys: false
+func NewDefaultConfig() *ConfigState {
+       return &ConfigState{Indent: " "}
+}
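+
+// exampleSortedDump is a minimal usage sketch of the configuration API
+// above: a dedicated ConfigState yields deterministic, diffable output
+// without mutating the global spew.Config.
+func exampleSortedDump() string {
+       cfg := NewDefaultConfig()
+       cfg.SortKeys = true // sort map keys so repeated runs diff cleanly
+       cfg.Indent = "\t"
+       return cfg.Sdump(map[string]int{"b": 2, "a": 1})
+}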
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644 (file)
index 0000000..aacaac6
--- /dev/null
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+       * Pointers are dereferenced and followed
+       * Circular data structures are detected and handled properly
+       * Custom Stringer/error interfaces are optionally invoked, including
+         on unexported types
+       * Custom types which only implement the Stringer/error interfaces via
+         a pointer receiver are optionally invoked when passing non-pointer
+         variables
+       * Byte arrays and slices are dumped like the hexdump -C command which
+         includes offsets, byte values in hex, and ASCII output (only when using
+         Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+       * Dump style which prints with newlines, customizable indentation,
+         and additional debug information such as types and all pointer addresses
+         used to indirect to the final value
+       * A custom Formatter interface that integrates cleanly with the standard fmt
+         package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+         similar to the default %v while providing the additional functionality
+         outlined above and passing unsupported format verbs such as %x and %q
+         along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew.  See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+       spew.Dump(myVar1, myVar2, ...)
+       spew.Fdump(someWriter, myVar1, myVar2, ...)
+       str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+       spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+       spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+       spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+       spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type.  For
+convenience, all of the top-level functions use a shared state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions.  This allows multiple configurations
+to be used concurrently.  See the ConfigState documentation for more details.
+
+The following configuration options are available:
+       * Indent
+               String to use for each indentation level for Dump functions.
+               It is a single space by default.  A popular alternative is "\t".
+
+       * MaxDepth
+               Maximum number of levels to descend into nested data structures.
+               There is no limit by default.
+
+       * DisableMethods
+               Disables invocation of error and Stringer interface methods.
+               Method invocation is enabled by default.
+
+       * DisablePointerMethods
+               Disables invocation of error and Stringer interface methods on types
+               which only accept pointer receivers from non-pointer variables.
+               Pointer method invocation is enabled by default.
+
+       * DisablePointerAddresses
+               DisablePointerAddresses specifies whether to disable the printing of
+               pointer addresses. This is useful when diffing data structures in tests.
+
+       * DisableCapacities
+               DisableCapacities specifies whether to disable the printing of
+               capacities for arrays, slices, maps and channels. This is useful when
+               diffing data structures in tests.
+
+       * ContinueOnMethod
+               Enables recursion into types after invoking error and Stringer interface
+               methods. Recursion after method invocation is disabled by default.
+
+       * SortKeys
+               Specifies map keys should be sorted before being printed. Use
+               this to have a more deterministic, diffable output.  Note that
+               only native types (bool, int, uint, floats, uintptr and string)
+               and types which implement error or Stringer interfaces are
+               supported with other types sorted according to the
+               reflect.Value.String() output which guarantees display
+               stability.  Natural map order is used by default.
+
+       * SpewKeys
+               Specifies that, as a last resort attempt, map keys should be
+               spewed to strings and sorted by those strings.  This is only
+               considered if SortKeys is true.
+
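+For example, a hypothetical test helper could enable SortKeys on a dedicated
+ConfigState so that maps print deterministically (myMap stands in for any
+map value):
+
+       cfg := spew.ConfigState{Indent: "\t", SortKeys: true}
+       diffable := cfg.Sdump(myMap)
+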
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+       spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer.  For example, to dump to standard error:
+
+       spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+       str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+       (main.Foo) {
+        unexportedField: (*main.Bar)(0xf84002e210)({
+         flag: (main.Flag) flagTwo,
+         data: (uintptr) <nil>
+        }),
+        ExportedField: (map[interface {}]interface {}) (len=1) {
+         (string) (len=3) "one": (bool) true
+        }
+       }
+
+Byte (and uint8) arrays and slices are displayed in the style of the
+hexdump -C command, as shown:
+       ([]uint8) (len=32 cap=32) {
+        00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+        00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+        00000020  31 32                                             |12|
+       }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The
+functions have syntax you are most likely already familiar with:
+
+       spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+       spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+       spew.Println(myVar, myVar2)
+       spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+       spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+         %v: <**>5
+        %+v: <**>(0xf8400420d0->0xf8400420c8)5
+        %#v: (**uint8)5
+       %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+         %v: <*>{1 <*><shown>}
+        %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+        %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+       %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output.  Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
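+
+// quickStartSketch is a minimal sketch of the Quick Start flow documented
+// above; myVar stands in for any value being inspected.
+func quickStartSketch(myVar interface{}) string {
+       Dump(myVar)         // full dump with types and pointer addresses to stdout
+       return Sdump(myVar) // the same output captured as a string
+}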
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644 (file)
index 0000000..df1d582
--- /dev/null
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+       "bytes"
+       "encoding/hex"
+       "fmt"
+       "io"
+       "os"
+       "reflect"
+       "regexp"
+       "strconv"
+       "strings"
+)
+
+var (
+       // uint8Type is a reflect.Type representing a uint8.  It is used to
+       // convert cgo types to uint8 slices for hexdumping.
+       uint8Type = reflect.TypeOf(uint8(0))
+
+       // cCharRE is a regular expression that matches a cgo char.
+       // It is used to detect character arrays to hexdump them.
+       cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+
+       // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+       // char.  It is used to detect unsigned character arrays to hexdump
+       // them.
+       cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+
+       // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+       // It is used to detect uint8_t arrays to hexdump them.
+       cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+       w                io.Writer
+       depth            int
+       pointers         map[uintptr]int
+       ignoreNextType   bool
+       ignoreNextIndent bool
+       cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+       if d.ignoreNextIndent {
+               d.ignoreNextIndent = false
+               return
+       }
+       d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+       if v.Kind() == reflect.Interface && !v.IsNil() {
+               v = v.Elem()
+       }
+       return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+       // Remove pointers at or below the current depth from the map used to detect
+       // circular refs.
+       for k, depth := range d.pointers {
+               if depth >= d.depth {
+                       delete(d.pointers, k)
+               }
+       }
+
+       // Keep a list of all dereferenced pointers to show later.
+       pointerChain := make([]uintptr, 0)
+
+       // Figure out how many levels of indirection there are by dereferencing
+       // pointers and unpacking interfaces down the chain while detecting circular
+       // references.
+       nilFound := false
+       cycleFound := false
+       indirects := 0
+       ve := v
+       for ve.Kind() == reflect.Ptr {
+               if ve.IsNil() {
+                       nilFound = true
+                       break
+               }
+               indirects++
+               addr := ve.Pointer()
+               pointerChain = append(pointerChain, addr)
+               if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+                       cycleFound = true
+                       indirects--
+                       break
+               }
+               d.pointers[addr] = d.depth
+
+               ve = ve.Elem()
+               if ve.Kind() == reflect.Interface {
+                       if ve.IsNil() {
+                               nilFound = true
+                               break
+                       }
+                       ve = ve.Elem()
+               }
+       }
+
+       // Display type information.
+       d.w.Write(openParenBytes)
+       d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+       d.w.Write([]byte(ve.Type().String()))
+       d.w.Write(closeParenBytes)
+
+       // Display pointer information.
+       if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+               d.w.Write(openParenBytes)
+               for i, addr := range pointerChain {
+                       if i > 0 {
+                               d.w.Write(pointerChainBytes)
+                       }
+                       printHexPtr(d.w, addr)
+               }
+               d.w.Write(closeParenBytes)
+       }
+
+       // Display dereferenced value.
+       d.w.Write(openParenBytes)
+       switch {
+       case nilFound:
+               d.w.Write(nilAngleBytes)
+
+       case cycleFound:
+               d.w.Write(circularBytes)
+
+       default:
+               d.ignoreNextType = true
+               d.dump(ve)
+       }
+       d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+       // Determine whether this type should be hex dumped or not.  Also,
+       // for types which should be hexdumped, try to use the underlying data
+       // first, then fall back to trying to convert them to a uint8 slice.
+       var buf []uint8
+       doConvert := false
+       doHexDump := false
+       numEntries := v.Len()
+       if numEntries > 0 {
+               vt := v.Index(0).Type()
+               vts := vt.String()
+               switch {
+               // C types that need to be converted.
+               case cCharRE.MatchString(vts):
+                       fallthrough
+               case cUnsignedCharRE.MatchString(vts):
+                       fallthrough
+               case cUint8tCharRE.MatchString(vts):
+                       doConvert = true
+
+               // Try to use existing uint8 slices and fall back to converting
+               // and copying if that fails.
+               case vt.Kind() == reflect.Uint8:
+                       // We need an addressable interface to convert the type
+                       // to a byte slice.  However, the reflect package won't
+                       // give us an interface on certain things like
+                       // unexported struct fields in order to enforce
+                       // visibility rules.  We use unsafe, when available, to
+                       // bypass these restrictions since this package does not
+                       // mutate the values.
+                       vs := v
+                       if !vs.CanInterface() || !vs.CanAddr() {
+                               vs = unsafeReflectValue(vs)
+                       }
+                       if !UnsafeDisabled {
+                               vs = vs.Slice(0, numEntries)
+
+                               // Use the existing uint8 slice if it can be
+                               // type asserted.
+                               iface := vs.Interface()
+                               if slice, ok := iface.([]uint8); ok {
+                                       buf = slice
+                                       doHexDump = true
+                                       break
+                               }
+                       }
+
+                       // The underlying data needs to be converted if it can't
+                       // be type asserted to a uint8 slice.
+                       doConvert = true
+               }
+
+               // Copy and convert the underlying type if needed.
+               if doConvert && vt.ConvertibleTo(uint8Type) {
+                       // Convert and copy each element into a uint8 byte
+                       // slice.
+                       buf = make([]uint8, numEntries)
+                       for i := 0; i < numEntries; i++ {
+                               vv := v.Index(i)
+                               buf[i] = uint8(vv.Convert(uint8Type).Uint())
+                       }
+                       doHexDump = true
+               }
+       }
+
+       // Hexdump the entire slice as needed.
+       if doHexDump {
+               indent := strings.Repeat(d.cs.Indent, d.depth)
+               str := indent + hex.Dump(buf)
+               str = strings.Replace(str, "\n", "\n"+indent, -1)
+               str = strings.TrimRight(str, d.cs.Indent)
+               d.w.Write([]byte(str))
+               return
+       }
+
+       // Recursively call dump for each item.
+       for i := 0; i < numEntries; i++ {
+               d.dump(d.unpackValue(v.Index(i)))
+               if i < (numEntries - 1) {
+                       d.w.Write(commaNewlineBytes)
+               } else {
+                       d.w.Write(newlineBytes)
+               }
+       }
+}
+
+// dump is the main workhorse for dumping a value.  It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately.  It is a recursive function; however, circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+       // Handle invalid reflect values immediately.
+       kind := v.Kind()
+       if kind == reflect.Invalid {
+               d.w.Write(invalidAngleBytes)
+               return
+       }
+
+       // Handle pointers specially.
+       if kind == reflect.Ptr {
+               d.indent()
+               d.dumpPtr(v)
+               return
+       }
+
+       // Print type information unless already handled elsewhere.
+       if !d.ignoreNextType {
+               d.indent()
+               d.w.Write(openParenBytes)
+               d.w.Write([]byte(v.Type().String()))
+               d.w.Write(closeParenBytes)
+               d.w.Write(spaceBytes)
+       }
+       d.ignoreNextType = false
+
+       // Display length and capacity if the built-in len and cap functions
+       // work with the value's kind and the len/cap itself is non-zero.
+       valueLen, valueCap := 0, 0
+       switch v.Kind() {
+       case reflect.Array, reflect.Slice, reflect.Chan:
+               valueLen, valueCap = v.Len(), v.Cap()
+       case reflect.Map, reflect.String:
+               valueLen = v.Len()
+       }
+       if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+               d.w.Write(openParenBytes)
+               if valueLen != 0 {
+                       d.w.Write(lenEqualsBytes)
+                       printInt(d.w, int64(valueLen), 10)
+               }
+               if !d.cs.DisableCapacities && valueCap != 0 {
+                       if valueLen != 0 {
+                               d.w.Write(spaceBytes)
+                       }
+                       d.w.Write(capEqualsBytes)
+                       printInt(d.w, int64(valueCap), 10)
+               }
+               d.w.Write(closeParenBytes)
+               d.w.Write(spaceBytes)
+       }
+
+       // Call Stringer/error interfaces if they exist and the handle methods flag
+       // is enabled.
+       if !d.cs.DisableMethods {
+               if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+                       if handled := handleMethods(d.cs, d.w, v); handled {
+                               return
+                       }
+               }
+       }
+
+       switch kind {
+       case reflect.Invalid:
+               // Do nothing.  We should never get here since invalid has already
+               // been handled above.
+
+       case reflect.Bool:
+               printBool(d.w, v.Bool())
+
+       case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+               printInt(d.w, v.Int(), 10)
+
+       case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+               printUint(d.w, v.Uint(), 10)
+
+       case reflect.Float32:
+               printFloat(d.w, v.Float(), 32)
+
+       case reflect.Float64:
+               printFloat(d.w, v.Float(), 64)
+
+       case reflect.Complex64:
+               printComplex(d.w, v.Complex(), 32)
+
+       case reflect.Complex128:
+               printComplex(d.w, v.Complex(), 64)
+
+       case reflect.Slice:
+               if v.IsNil() {
+                       d.w.Write(nilAngleBytes)
+                       break
+               }
+               fallthrough
+
+       case reflect.Array:
+               d.w.Write(openBraceNewlineBytes)
+               d.depth++
+               if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+                       d.indent()
+                       d.w.Write(maxNewlineBytes)
+               } else {
+                       d.dumpSlice(v)
+               }
+               d.depth--
+               d.indent()
+               d.w.Write(closeBraceBytes)
+
+       case reflect.String:
+               d.w.Write([]byte(strconv.Quote(v.String())))
+
+       case reflect.Interface:
+               // The only time we should get here is for nil interfaces due to
+               // unpackValue calls.
+               if v.IsNil() {
+                       d.w.Write(nilAngleBytes)
+               }
+
+       case reflect.Ptr:
+               // Do nothing.  We should never get here since pointers have already
+               // been handled above.
+
+       case reflect.Map:
+               // nil maps should be indicated as different from empty maps
+               if v.IsNil() {
+                       d.w.Write(nilAngleBytes)
+                       break
+               }
+
+               d.w.Write(openBraceNewlineBytes)
+               d.depth++
+               if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+                       d.indent()
+                       d.w.Write(maxNewlineBytes)
+               } else {
+                       numEntries := v.Len()
+                       keys := v.MapKeys()
+                       if d.cs.SortKeys {
+                               sortValues(keys, d.cs)
+                       }
+                       for i, key := range keys {
+                               d.dump(d.unpackValue(key))
+                               d.w.Write(colonSpaceBytes)
+                               d.ignoreNextIndent = true
+                               d.dump(d.unpackValue(v.MapIndex(key)))
+                               if i < (numEntries - 1) {
+                                       d.w.Write(commaNewlineBytes)
+                               } else {
+                                       d.w.Write(newlineBytes)
+                               }
+                       }
+               }
+               d.depth--
+               d.indent()
+               d.w.Write(closeBraceBytes)
+
+       case reflect.Struct:
+               d.w.Write(openBraceNewlineBytes)
+               d.depth++
+               if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+                       d.indent()
+                       d.w.Write(maxNewlineBytes)
+               } else {
+                       vt := v.Type()
+                       numFields := v.NumField()
+                       for i := 0; i < numFields; i++ {
+                               d.indent()
+                               vtf := vt.Field(i)
+                               d.w.Write([]byte(vtf.Name))
+                               d.w.Write(colonSpaceBytes)
+                               d.ignoreNextIndent = true
+                               d.dump(d.unpackValue(v.Field(i)))
+                               if i < (numFields - 1) {
+                                       d.w.Write(commaNewlineBytes)
+                               } else {
+                                       d.w.Write(newlineBytes)
+                               }
+                       }
+               }
+               d.depth--
+               d.indent()
+               d.w.Write(closeBraceBytes)
+
+       case reflect.Uintptr:
+               printHexPtr(d.w, uintptr(v.Uint()))
+
+       case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+               printHexPtr(d.w, v.Pointer())
+
+       // There were no other types at the time this code was written, but
+       // fall back to letting the default fmt package handle it in case any new
+       // types are added.
+       default:
+               if v.CanInterface() {
+                       fmt.Fprintf(d.w, "%v", v.Interface())
+               } else {
+                       fmt.Fprintf(d.w, "%v", v.String())
+               }
+       }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+       for _, arg := range a {
+               if arg == nil {
+                       w.Write(interfaceBytes)
+                       w.Write(spaceBytes)
+                       w.Write(nilAngleBytes)
+                       w.Write(newlineBytes)
+                       continue
+               }
+
+               d := dumpState{w: w, cs: cs}
+               d.pointers = make(map[uintptr]int)
+               d.dump(reflect.ValueOf(arg))
+               d.w.Write(newlineBytes)
+       }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+       fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+       var buf bytes.Buffer
+       fdump(&Config, &buf, a...)
+       return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+       * Pointers are dereferenced and followed
+       * Circular data structures are detected and handled properly
+       * Custom Stringer/error interfaces are optionally invoked, including
+         on unexported types
+       * Custom types which only implement the Stringer/error interfaces via
+         a pointer receiver are optionally invoked when passing non-pointer
+         variables
+       * Byte arrays and slices are dumped like the hexdump -C command which
+         includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+       fdump(&Config, os.Stdout, a...)
+}
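+
+// hexdumpSketch is a minimal sketch of the hexdump -C style output that the
+// Dump functions produce for byte slices, captured here via Fdump.
+func hexdumpSketch() string {
+       var buf bytes.Buffer
+       Fdump(&buf, []byte("spew")) // offsets, hex byte values, and ASCII column
+       return buf.String()
+}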
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644 (file)
index 0000000..c49875b
--- /dev/null
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+       "bytes"
+       "fmt"
+       "reflect"
+       "strconv"
+       "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation.  The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+       value          interface{}
+       fs             fmt.State
+       depth          int
+       pointers       map[uintptr]int
+       ignoreNextType bool
+       cs             *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type.  Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+       buf := bytes.NewBuffer(percentBytes)
+
+       for _, flag := range supportedFlags {
+               if f.fs.Flag(int(flag)) {
+                       buf.WriteRune(flag)
+               }
+       }
+
+       buf.WriteRune('v')
+
+       format = buf.String()
+       return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package.  This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+       buf := bytes.NewBuffer(percentBytes)
+
+       for _, flag := range supportedFlags {
+               if f.fs.Flag(int(flag)) {
+                       buf.WriteRune(flag)
+               }
+       }
+
+       if width, ok := f.fs.Width(); ok {
+               buf.WriteString(strconv.Itoa(width))
+       }
+
+       if precision, ok := f.fs.Precision(); ok {
+               buf.Write(precisionBytes)
+               buf.WriteString(strconv.Itoa(precision))
+       }
+
+       buf.WriteRune(verb)
+
+       format = buf.String()
+       return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+       if v.Kind() == reflect.Interface {
+               f.ignoreNextType = false
+               if !v.IsNil() {
+                       v = v.Elem()
+               }
+       }
+       return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+       // Display nil if top level pointer is nil.
+       showTypes := f.fs.Flag('#')
+       if v.IsNil() && (!showTypes || f.ignoreNextType) {
+               f.fs.Write(nilAngleBytes)
+               return
+       }
+
+       // Remove pointers at or below the current depth from the map used to detect
+       // circular refs.
+       for k, depth := range f.pointers {
+               if depth >= f.depth {
+                       delete(f.pointers, k)
+               }
+       }
+
+       // Keep a list of all dereferenced pointers to possibly show later.
+       pointerChain := make([]uintptr, 0)
+
+       // Figure out how many levels of indirection there are by dereferencing
+       // pointers and unpacking interfaces down the chain while detecting circular
+       // references.
+       nilFound := false
+       cycleFound := false
+       indirects := 0
+       ve := v
+       for ve.Kind() == reflect.Ptr {
+               if ve.IsNil() {
+                       nilFound = true
+                       break
+               }
+               indirects++
+               addr := ve.Pointer()
+               pointerChain = append(pointerChain, addr)
+               if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+                       cycleFound = true
+                       indirects--
+                       break
+               }
+               f.pointers[addr] = f.depth
+
+               ve = ve.Elem()
+               if ve.Kind() == reflect.Interface {
+                       if ve.IsNil() {
+                               nilFound = true
+                               break
+                       }
+                       ve = ve.Elem()
+               }
+       }
+
+       // Display type or indirection level depending on flags.
+       if showTypes && !f.ignoreNextType {
+               f.fs.Write(openParenBytes)
+               f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+               f.fs.Write([]byte(ve.Type().String()))
+               f.fs.Write(closeParenBytes)
+       } else {
+               if nilFound || cycleFound {
+                       indirects += strings.Count(ve.Type().String(), "*")
+               }
+               f.fs.Write(openAngleBytes)
+               f.fs.Write([]byte(strings.Repeat("*", indirects)))
+               f.fs.Write(closeAngleBytes)
+       }
+
+       // Display pointer information depending on flags.
+       if f.fs.Flag('+') && (len(pointerChain) > 0) {
+               f.fs.Write(openParenBytes)
+               for i, addr := range pointerChain {
+                       if i > 0 {
+                               f.fs.Write(pointerChainBytes)
+                       }
+                       printHexPtr(f.fs, addr)
+               }
+               f.fs.Write(closeParenBytes)
+       }
+
+       // Display dereferenced value.
+       switch {
+       case nilFound:
+               f.fs.Write(nilAngleBytes)
+
+       case cycleFound:
+               f.fs.Write(circularShortBytes)
+
+       default:
+               f.ignoreNextType = true
+               f.format(ve)
+       }
+}
+
+// format is the main workhorse for providing the Formatter interface.  It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately.  It is a recursive function;
+// however, circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+       // Handle invalid reflect values immediately.
+       kind := v.Kind()
+       if kind == reflect.Invalid {
+               f.fs.Write(invalidAngleBytes)
+               return
+       }
+
+       // Handle pointers specially.
+       if kind == reflect.Ptr {
+               f.formatPtr(v)
+               return
+       }
+
+       // Print type information unless already handled elsewhere.
+       if !f.ignoreNextType && f.fs.Flag('#') {
+               f.fs.Write(openParenBytes)
+               f.fs.Write([]byte(v.Type().String()))
+               f.fs.Write(closeParenBytes)
+       }
+       f.ignoreNextType = false
+
+       // Call Stringer/error interfaces if they exist and the handle methods
+       // flag is enabled.
+       if !f.cs.DisableMethods {
+               if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+                       if handled := handleMethods(f.cs, f.fs, v); handled {
+                               return
+                       }
+               }
+       }
+
+       switch kind {
+       case reflect.Invalid:
+               // Do nothing.  We should never get here since invalid has already
+               // been handled above.
+
+       case reflect.Bool:
+               printBool(f.fs, v.Bool())
+
+       case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+               printInt(f.fs, v.Int(), 10)
+
+       case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+               printUint(f.fs, v.Uint(), 10)
+
+       case reflect.Float32:
+               printFloat(f.fs, v.Float(), 32)
+
+       case reflect.Float64:
+               printFloat(f.fs, v.Float(), 64)
+
+       case reflect.Complex64:
+               printComplex(f.fs, v.Complex(), 32)
+
+       case reflect.Complex128:
+               printComplex(f.fs, v.Complex(), 64)
+
+       case reflect.Slice:
+               if v.IsNil() {
+                       f.fs.Write(nilAngleBytes)
+                       break
+               }
+               fallthrough
+
+       case reflect.Array:
+               f.fs.Write(openBracketBytes)
+               f.depth++
+               if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+                       f.fs.Write(maxShortBytes)
+               } else {
+                       numEntries := v.Len()
+                       for i := 0; i < numEntries; i++ {
+                               if i > 0 {
+                                       f.fs.Write(spaceBytes)
+                               }
+                               f.ignoreNextType = true
+                               f.format(f.unpackValue(v.Index(i)))
+                       }
+               }
+               f.depth--
+               f.fs.Write(closeBracketBytes)
+
+       case reflect.String:
+               f.fs.Write([]byte(v.String()))
+
+       case reflect.Interface:
+               // The only time we should get here is for nil interfaces due to
+               // unpackValue calls.
+               if v.IsNil() {
+                       f.fs.Write(nilAngleBytes)
+               }
+
+       case reflect.Ptr:
+               // Do nothing.  We should never get here since pointers have already
+               // been handled above.
+
+       case reflect.Map:
+               // nil maps should be indicated as different from empty maps
+               if v.IsNil() {
+                       f.fs.Write(nilAngleBytes)
+                       break
+               }
+
+               f.fs.Write(openMapBytes)
+               f.depth++
+               if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+                       f.fs.Write(maxShortBytes)
+               } else {
+                       keys := v.MapKeys()
+                       if f.cs.SortKeys {
+                               sortValues(keys, f.cs)
+                       }
+                       for i, key := range keys {
+                               if i > 0 {
+                                       f.fs.Write(spaceBytes)
+                               }
+                               f.ignoreNextType = true
+                               f.format(f.unpackValue(key))
+                               f.fs.Write(colonBytes)
+                               f.ignoreNextType = true
+                               f.format(f.unpackValue(v.MapIndex(key)))
+                       }
+               }
+               f.depth--
+               f.fs.Write(closeMapBytes)
+
+       case reflect.Struct:
+               numFields := v.NumField()
+               f.fs.Write(openBraceBytes)
+               f.depth++
+               if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+                       f.fs.Write(maxShortBytes)
+               } else {
+                       vt := v.Type()
+                       for i := 0; i < numFields; i++ {
+                               if i > 0 {
+                                       f.fs.Write(spaceBytes)
+                               }
+                               vtf := vt.Field(i)
+                               if f.fs.Flag('+') || f.fs.Flag('#') {
+                                       f.fs.Write([]byte(vtf.Name))
+                                       f.fs.Write(colonBytes)
+                               }
+                               f.format(f.unpackValue(v.Field(i)))
+                       }
+               }
+               f.depth--
+               f.fs.Write(closeBraceBytes)
+
+       case reflect.Uintptr:
+               printHexPtr(f.fs, uintptr(v.Uint()))
+
+       case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+               printHexPtr(f.fs, v.Pointer())
+
+       // There were no other types at the time this code was written, but
+       // fall back to letting the default fmt package handle it if any get added.
+       default:
+               format := f.buildDefaultFormat()
+               if v.CanInterface() {
+                       fmt.Fprintf(f.fs, format, v.Interface())
+               } else {
+                       fmt.Fprintf(f.fs, format, v.String())
+               }
+       }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+       f.fs = fs
+
+       // Use standard formatting for verbs that are not v.
+       if verb != 'v' {
+               format := f.constructOrigFormat(verb)
+               fmt.Fprintf(fs, format, f.value)
+               return
+       }
+
+       if f.value == nil {
+               if fs.Flag('#') {
+                       fs.Write(interfaceBytes)
+               }
+               fs.Write(nilAngleBytes)
+               return
+       }
+
+       f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+       fs := &formatState{value: v, cs: cs}
+       fs.pointers = make(map[uintptr]int)
+       return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+       return newFormatter(&Config, v)
+}
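+
+// formatterSketch is a minimal sketch of using NewFormatter directly with
+// the standard fmt package, per the documentation above.
+func formatterSketch(v interface{}) string {
+       return fmt.Sprintf("%+v", NewFormatter(v)) // compact form plus pointer addresses
+}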
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644 (file)
index 0000000..32c0e33
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+       "fmt"
+       "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the formatted string as a value that satisfies error.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+       return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+       return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+       return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+       return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+       return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+       return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+       return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+       return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+       return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//     fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+       return fmt.Sprintln(convertArgs(a)...)
+}
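+
+// As an illustrative sketch (not part of the upstream API surface), the
+// wrappers above are drop-in replacements for their fmt counterparts:
+//
+//	spew.Printf("v: %v\n", v)    // formats v via a default Formatter
+//	s := spew.Sprintf("%+v", v)  // %+v also includes pointer addresses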
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+       formatters = make([]interface{}, len(args))
+       for index, arg := range args {
+               formatters[index] = NewFormatter(arg)
+       }
+       return formatters
+}
diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE
new file mode 100644 (file)
index 0000000..37ec93a
--- /dev/null
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile
new file mode 100644 (file)
index 0000000..ac034e5
--- /dev/null
@@ -0,0 +1,12 @@
+.PHONY: build test bench vet
+
+build: vet bench
+
+test:
+       go test -v -cover -race
+
+bench:
+       go test -v -cover -race -test.bench=. -test.benchmem
+
+vet:
+       go vet
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644 (file)
index 0000000..22a4234
--- /dev/null
@@ -0,0 +1,734 @@
+INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Features
+
+- Load multiple data sources (`[]byte`, files, and `io.ReadCloser`) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+       go get gopkg.in/ini.v1
+
+To use the latest changes:
+
+       go get github.com/go-ini/ini
+
+Please add the `-u` flag to update in the future.
+
+### Testing
+
+If you want to test on your machine, please apply the `-t` flag:
+
+       go get -t gopkg.in/ini.v1
+
+Please add the `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte`, a file name of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing any other type simply returns an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide at the beginning how many data sources to load, you can still **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files, some of which may not exist at load time, and you don't know exactly which ones, use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is, whenever such a file becomes available by the time you call the `Reload` method, it is loaded as usual.
+
+#### Ignore case of key names
+
+When you do not care about the case of section and key names, use `InsensitiveLoad` to force all names to lowercase while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 are exactly the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 are exactly the same key object
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### MySQL-like boolean keys
+
+MySQL's configuration allows keys without a value, as follows:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you're going to deal with such cases, you can set advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of such keys is always `true`, and when you save to a file, they are kept in the same format as you read them.
+
+#### Comment
+
+Take care that the following formats are treated as comments:
+
+1. Lines beginning with `#` or `;`
+2. Anything after `#` or `;`
+3. Text after a section name (i.e. anything after `[some section name]`)
+
+If you want to save a value containing `#` or `;`, quote it with ``` ` ``` or ``` """ ```.
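+
+An illustrative example (made-up keys) of these rules:
+
+```ini
+; this whole line is a comment
+key1 = value1  ; everything after ';' is a comment
+key2 = `va#ue` ; backticks keep the '#' in the value
+```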
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just pass an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code makes your life easier:
+
+```go
+section := cfg.Section("section name")
+```
+
+What happens when the section somehow does not exist? Don't panic: it automatically creates and returns a new section to you, as the sketch below shows.
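+
+A minimal sketch of that behavior (the section name is made up):
+
+```go
+sec := cfg.Section("does-not-exist-yet") // created on the fly
+fmt.Println(sec.Name())                  // "does-not-exist-yet"
+```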
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+The same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check if a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a clone hash of keys and corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate a key's value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+       if len(in) == 0 {
+               return "default"
+       }
+       return in
+})
+```
+
+If you do not want any auto-transformation of the values (such as recursive reads), you can get the raw value directly (this gives much better performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check if raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get values of other types:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must also accept one argument as a default value,
+// used when the key is not found or the value fails to parse as the given type.
+// The exception is MustString, which always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value is three lines long?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------  end  --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+       continuation lines?
+lots_of_lines = 1 \
+       2 \
+       3 \
+       4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Well, I hate continuation lines, how do I disable that?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+       IgnoreContinuation: true,
+}, "filename")
+```
+
+Holy crap! 
+
+Note that single quotes around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+That's all? Hmm, no.
+
+#### Helper methods of working with values
+
+To get a value from given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value is returned if the key's value is not among the candidates you give, and the default value does not need to be one of the candidates.
+
+To validate a value in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To use the type's zero value for invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid values from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+Or to return nothing but an error when there are invalid inputs:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save the configuration is to write it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is to write to anything that implements the `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the "=" sign between keys and values; to disable that:
+
+```go
+ini.PrettyFormat = false
+``` 
+
+## Advanced Usage
+
+### Recursive Values
+
+For all key values there is a special syntax `%(<name>)s`, where `<name>` is a key name from the same section or the default section. `%(<name>)s` is replaced by the corresponding value (the empty string if the key is not found). You can nest this syntax up to 99 levels deep.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String()           // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String()       // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in section names to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library tries its parent sections until there is no parent left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String()   // https://gopkg.in/ini.v1
+```
+
+#### Retrieve parent keys available to a child section
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Unparseable Sections
+
+Sometimes you have sections that contain raw content rather than key-value pairs. To handle such cases, use `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
+------  end  --- */
+```
+
+### Auto-increment Key Names
+
+If a key's name is `-` in the data source, it is treated as special syntax for auto-increment key names starting from 1, and each section keeps its own counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings()   // []string{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+       Content string
+       Cities  []string
+}
+
+type Person struct {
+       Name string
+       Age  int `ini:"age"`
+       Male bool
+       Born time.Time
+       Note
+       Created time.Time `ini:"-"`
+}
+
+func main() {
+       cfg, err := ini.Load("path/to/ini")
+       // ...
+       p := new(Person)
+       err = cfg.MapTo(p)
+       // ...
+
+       // Things can be simpler.
+       err = ini.MapTo(p, "path/to/ini")
+       // ...
+
+       // Just map a section? Fine.
+       n := new(Note)
+       err = cfg.Section("Note").MapTo(n)
+       // ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct. The value is kept as-is if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+       Name: "Joe",
+}
+// ...
+```
+
+It's really cool, but what's the point if you can't get my file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+       Dates  []time.Time `delim:"|"`
+       Places []string    `ini:"places,omitempty"`
+       None   []int       `ini:",omitempty"`
+}
+
+type Author struct {
+       Name      string `ini:"NAME"`
+       Male      bool
+       Age       int
+       GPA       float64
+       NeverMind string `ini:"-"`
+       *Embeded
+}
+
+func main() {
+       a := &Author{"Unknwon", true, 21, 2.8, "",
+               &Embeded{
+                       []time.Time{time.Now(), time.Now()},
+                       []string{"HangZhou", "Boston"},
+                       []int{},
+               }}
+       cfg := ini.Empty()
+	err := ini.ReflectFrom(cfg, a)
+       // ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct fields and actual section and key names.
+
+There are two built-in name mappers:
+
+- `AllCapsUnderscore`: converts field names to the format `ALL_CAPS_UNDERSCORE`, then matches sections and keys.
+- `TitleUnderscore`: converts field names to the format `title_underscore`, then matches sections and keys.
+
+To use them:
+
+```go
+type Info struct {
+       PackageName string
+}
+
+func main() {
+	err := ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+       // ...
+
+       cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+       // ...
+       info := new(Info)
+       cfg.NameMapper = ini.AllCapsUnderscore
+       err = cfg.MapTo(info)
+       // ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function; a minimal sketch follows.
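+
+A minimal sketch (reusing the hypothetical `Info` struct from above):
+
+```go
+cfg := ini.Empty()
+err := ini.ReflectFromWithMapper(cfg, &Info{PackageName: "ini"}, ini.TitleUnderscore)
+// err handling elided; the field PackageName is written as key "package_name".
+```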
+
+#### Value Mapper
+
+To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
+
+```go
+type Env struct {
+       Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+       cfg.ValueMapper = os.ExpandEnv
+       // ...
+       env := &Env{}
+       err = cfg.Section("env").MapTo(env)
+}
+```
+
+This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+       Age string
+}
+
+type Parent struct {
+       Name string
+       Child
+}
+
+type Config struct {
+       City string
+       Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if, yes, I'm paranoid, and I want the embedded struct in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+       Age string
+}
+
+type Parent struct {
+       Name string
+       Child `ini:"Parent"`
+}
+
+type Config struct {
+       City string
+       Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does the `BlockMode` field do?
+
+By default, the library lets you read and write values, so it needs a lock to keep your data safe. But if you are certain you only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster.
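+
+A minimal sketch (assuming a hypothetical `app.ini` that you only read):
+
+```go
+cfg, err := ini.Load("app.ini")
+// ... handle err
+cfg.BlockMode = false // read-only from here on, so locking is skipped
+val := cfg.Section("").Key("key name").String()
+```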
+
+### Why another INI library?
+
+Many people use my other INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I wanted more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+Making those changes meant breaking the API, so it was safer to keep it in a new place and start using `gopkg.in` to version the package. (PS: the import path is shorter, too.)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
new file mode 100644 (file)
index 0000000..3b4fb66
--- /dev/null
@@ -0,0 +1,721 @@
+Package ini provides INI file read and write functionality in Go.
+
+## Features
+
+- Load multiple data sources (`[]byte`, files, and `io.ReadCloser`) with overwrites.
+- Read recursive values.
+- Read parent-child sections.
+- Read auto-increment key names.
+- Read multiple-line values.
+- Tons of helper methods.
+- Convert values to Go types while reading.
+- Read and **write** comments of sections and keys.
+- Manipulate sections, keys, and comments with ease.
+- Sections and keys keep their original order when parsing and saving.
+
+## Installation
+
+To use a tagged revision:
+
+	go get gopkg.in/ini.v1
+
+To use the latest changes:
+
+	go get github.com/go-ini/ini
+
+Add the `-u` flag to update in the future.
+
+### Testing
+
+If you want to run the tests on your own machine, apply the `-t` flag:
+
+	go get -t gopkg.in/ini.v1
+
+Add the `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte`, a file path of type `string`, or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing any other type returns an error directly.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+Or start from an empty file:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide at the start which data sources to load, you can still use **Append()** to load them when needed.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+When you want to load a set of files but cannot be sure which of them exist, call `LooseLoad` to ignore the missing ones (`Load` would return an error for nonexistent files):
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+Even better: if files that did not exist earlier suddenly appear by the time you call `Reload`, they are loaded normally.
+
+#### Ignore case of key names
+
+When mixed-case section and key names get annoying, you can use `InsensitiveLoad` to force all section and key names to lowercase while reading:
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 point to the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 point to the same key object
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### MySQL-like boolean keys
+
+MySQL configuration files may contain boolean keys without a concrete value:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default this is treated as a missing value and cannot be parsed, but you can handle such keys with advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of such keys is always `true`, and only the key name is written out when saving to a file.
+
+#### About comments
+
+The following content is treated as comments:
+
+1. Any line beginning with `#` or `;`
+2. Anything after `#` or `;`
+3. Text after a section header (i.e. anything after `[section name]`)
+
+If you want a value that contains `#` or `;`, wrap it with ``` ` ``` or ``` """ ```.
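+
+A quick illustration (made-up keys) of these rules:
+
+```ini
+; this whole line is a comment
+key1 = value1  ; everything after ';' is a comment
+key2 = `va#ue` ; backticks keep the '#' in the value
+```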
+
+### Working with sections
+
+To get a specific section:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+To get the default section, use an empty string as the section name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you are quite sure a section exists, the following shortcut is handy:
+
+```go
+section := cfg.Section("section name")
+```
+
+What if you guessed wrong and the section does not actually exist? No problem: a new section object is automatically created and returned to you.
+
+To create a section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get all section objects or names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+As with sections, you can also fetch a key directly and skip error handling:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To check whether a key exists:
+
+```go
+yes := cfg.Section("").HasKey("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get all keys or key names under a section:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a clone of all key-value pairs under a section:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a value of type string:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate the value with a custom function while fetching it:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+       if len(in) == 0 {
+               return "default"
+       }
+       return in
+})
+```
+
+If you do not need any automatic transformation of the value (such as recursive reads), you can fetch the raw value directly (this gives the best performance):
+
+```go
+val := cfg.Section("").Key("key name").Value()
+```
+
+To check whether a raw value exists:
+
+```go
+yes := cfg.Section("").HasValue("test value")
+```
+
+To get values of other types:
+
+```go
+// Rules for boolean values:
+// true when the value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
+// false when the value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must accept one argument of the same type as a default value,
+// which is returned directly when the key does not exist or conversion fails.
+// MustString, however, always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value spans several lines?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Hmm? No big deal!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------  end  --- */
+```
+
+Awesome! And what if one line is not enough and I want to continue on a second line?
+
+```ini
+[advance]
+two_lines = how about \
+       continuation lines?
+lots_of_lines = 1 \
+       2 \
+       3 \
+       4
+```
+
+A piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+But sometimes joined lines feel dull; how do I stop lines from being joined automatically?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{
+       IgnoreContinuation: true,
+}, "filename")
+```
+
+Wow, that's handy!
+
+Note that single quotes around a value are stripped automatically:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+Is that all? Ha, of course not.
+
+#### Helper methods for values
+
+To set candidate values when fetching:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []uint{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []uint64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+If the fetched value is not one of the candidates, the default value is returned instead; the default value does not need to be one of the candidates.
+
+To validate that the fetched value is within a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+##### Auto-split values into a slice
+
+To replace invalid inputs with the type's zero value:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0]
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+To exclude invalid inputs from the result slice:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> [2.2]
+vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",")
+vals = cfg.Section("").Key("INTS").ValidInts(",")
+vals = cfg.Section("").Key("INT64S").ValidInt64s(",")
+vals = cfg.Section("").Key("UINTS").ValidUints(",")
+vals = cfg.Section("").Key("UINT64S").ValidUint64s(",")
+vals = cfg.Section("").Key("TIMES").ValidTimes(",")
+```
+
+To return an error directly when there is an invalid input:
+
+```go
+// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4]
+// Input: how, 2.2, are, you -> error
+vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",")
+vals = cfg.Section("").Key("INTS").StrictInts(",")
+vals = cfg.Section("").Key("INT64S").StrictInt64s(",")
+vals = cfg.Section("").Key("UINTS").StrictUints(",")
+vals = cfg.Section("").Key("UINT64S").StrictUint64s(",")
+vals = cfg.Section("").Key("TIMES").StrictTimes(",")
+```
+
+### Saving your configuration
+
+The moment has finally come: time to save your configuration.
+
+The plain way is to write the configuration out to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+A fancier way is to write to any object that implements the `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+By default, spaces are used to align the equals signs between keys and values to prettify the output; the following code disables that:
+
+```go
+ini.PrettyFormat = false
+``` 
+
+## Advanced Usage
+
+### Recursive values
+
+While fetching any key's value, the special syntax `%(<name>)s` is applied, where `<name>` is a key name from the same section or the default section. The string `%(<name>)s` is replaced by the corresponding value, or by the empty string if the named key is not found. You can nest up to 99 levels of recursion.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String()           // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String()       // github.com/go-ini/ini
+```
+
+### Parent-child sections
+
+You can use `.` in section names to express a parent-child relationship between two or more sections. If a key is not found in a child section, the lookup is retried in its parent sections until no parent remains.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String()   // https://gopkg.in/ini.v1
+```
+
+#### Get all parent keys available to a child section
+
+```go
+cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
+```
+
+### Unparseable sections
+
+Some special sections contain raw text without a fixed format instead of the usual key-value pairs; handle them with `LoadOptions.UnparseableSections`:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`)
+
+body := cfg.Section("COMMENTS").Body()
+
+/* --- start ---
+<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>
+------  end  --- */
+```
+
+### Auto-increment key names
+
+If a key's name is `-` in the data source, it is treated as special syntax for auto-increment key names, counting from 1, with an independent counter per section.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings()   // []string{"#1", "#2", "#3"}
+```
+
+### Mapping to a struct
+
+Want a more object-oriented way to play with INI? Good idea.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+       Content string
+       Cities  []string
+}
+
+type Person struct {
+       Name string
+       Age  int `ini:"age"`
+       Male bool
+       Born time.Time
+       Note
+       Created time.Time `ini:"-"`
+}
+
+func main() {
+       cfg, err := ini.Load("path/to/ini")
+       // ...
+       p := new(Person)
+       err = cfg.MapTo(p)
+       // ...
+
+	// Things can be this simple.
+       err = ini.MapTo(p, "path/to/ini")
+       // ...
+
+	// Hmm? Only need to map one section? Fine.
+       n := new(Note)
+       err = cfg.Section("Note").MapTo(n)
+       // ...
+}
+```
+
+How do I give a struct field a default value? Easy: just assign the field before mapping. If the key is not found or has the wrong type, the value stays unchanged.
+
+```go
+// ...
+p := &Person{
+       Name: "Joe",
+}
+// ...
+```
+
+Playing with INI like this is really cool! But what's the use if you can't give me back my original config file from the struct?
+
+### Reflecting from a struct
+
+Did I ever say you can't?
+
+```go
+type Embeded struct {
+       Dates  []time.Time `delim:"|"`
+       Places []string    `ini:"places,omitempty"`
+       None   []int       `ini:",omitempty"`
+}
+
+type Author struct {
+       Name      string `ini:"NAME"`
+       Male      bool
+       Age       int
+       GPA       float64
+       NeverMind string `ini:"-"`
+       *Embeded
+}
+
+func main() {
+       a := &Author{"Unknwon", true, 21, 2.8, "",
+               &Embeded{
+                       []time.Time{time.Now(), time.Now()},
+                       []string{"HangZhou", "Boston"},
+                       []int{},
+               }}
+       cfg := ini.Empty()
+	err := ini.ReflectFrom(cfg, a)
+       // ...
+}
+```
+
+Look, magic happened.
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+places = HangZhou,Boston
+```
+
+#### Name Mapper
+
+To save your time and simplify your code, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which maps struct field names to section and key names.
+
+There are two built-in name mappers:
+
+- `AllCapsUnderscore`: converts field names to the format `ALL_CAPS_UNDERSCORE` before matching section and key names.
+- `TitleUnderscore`: converts field names to the format `title_underscore` before matching section and key names.
+
+Usage:
+
+```go
+type Info struct {
+       PackageName string
+}
+
+func main() {
+	err := ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+       // ...
+
+       cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+       // ...
+       info := new(Info)
+       cfg.NameMapper = ini.AllCapsUnderscore
+       err = cfg.MapTo(info)
+       // ...
+}
+```
+
+The same rules apply when using the `ini.ReflectFromWithMapper` function.
+
+#### Value Mapper
+
+A value mapper lets you use a custom function to automatically expand values, for example reading environment variables at runtime:
+
+```go
+type Env struct {
+       Foo string `ini:"foo"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
+       cfg.ValueMapper = os.ExpandEnv
+       // ...
+       env := &Env{}
+       err = cfg.Section("env").MapTo(env)
+}
+```
+
+In this example, the value of `env.Foo` becomes the runtime value of the environment variable `MY_VAR`.
+
+#### Other notes on map/reflect
+
+Every embedded struct is treated as a separate section by default, and no parent-child section relation is created automatically:
+
+```go
+type Child struct {
+       Age string
+}
+
+type Parent struct {
+       Name string
+       Child
+}
+
+type Config struct {
+       City string
+       Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+Fine. But I insist that the embedded struct live in the same section. Well, all roads lead to Rome.
+
+```go
+type Child struct {
+       Age string
+}
+
+type Parent struct {
+       Name string
+       Child `ini:"Parent"`
+}
+
+type Config struct {
+       City string
+       Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does the `BlockMode` field do?
+
+By default, the library takes a lock on read and write operations to keep your data safe. But if you are certain you only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
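+
+A minimal sketch (with a hypothetical, read-only `app.ini`):
+
+```go
+cfg, err := ini.Load("app.ini")
+// ... handle err
+cfg.BlockMode = false // read-only from here on, so locking is skipped
+val := cfg.Section("").Key("key name").String()
+```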
+
+### Why write another INI library?
+
+Many people use my other library [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted to write more Go-style code. And when you set `cfg.BlockMode = false`, this library gains roughly **10-30%** in performance.
+
+To make those changes I had to break the API, so the safest move was to start in a new repository and version the package with `gopkg.in`. (The truth: the import path got shorter.)
diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go
new file mode 100644 (file)
index 0000000..80afe74
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2016 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+       "fmt"
+)
+
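+// ErrDelimiterNotFound is returned when a line in a data source
+// contains no key-value delimiter.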
+type ErrDelimiterNotFound struct {
+       Line string
+}
+
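+// IsErrDelimiterNotFound reports whether err is an ErrDelimiterNotFound.
+// A minimal usage sketch:
+//
+//	if _, err := ini.Load(src); err != nil && ini.IsErrDelimiterNotFound(err) {
+//		// a line in src had no key-value delimiter
+//	}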
+func IsErrDelimiterNotFound(err error) bool {
+       _, ok := err.(ErrDelimiterNotFound)
+       return ok
+}
+
+func (err ErrDelimiterNotFound) Error() string {
+       return fmt.Sprintf("key-value delimiter not found: %s", err.Line)
+}
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100644 (file)
index 0000000..77e0dbd
--- /dev/null
@@ -0,0 +1,535 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+       "bytes"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "os"
+       "regexp"
+       "runtime"
+       "strconv"
+       "strings"
+       "sync"
+       "time"
+)
+
+const (
+       // Name for default section. You can use this constant or the string literal.
+	// In most cases, an empty string is all you need to access the section.
+       DEFAULT_SECTION = "DEFAULT"
+
+	// Maximum allowed depth when recursively substituting variable names.
+       _DEPTH_VALUES = 99
+       _VERSION      = "1.23.1"
+)
+
+// Version returns current package version literal.
+func Version() string {
+       return _VERSION
+}
+
+var (
+       // Delimiter to determine or compose a new line.
+       // This variable will be changed to "\r\n" automatically on Windows
+       // at package init time.
+       LineBreak = "\n"
+
+       // Variable regexp pattern: %(variable)s
+       varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+       // Indicate whether to align "=" sign with spaces to produce pretty output
+       // or reduce all possible spaces for compact format.
+       PrettyFormat = true
+
+       // Explicitly write DEFAULT section header
+       DefaultHeader = false
+)
+
+func init() {
+       if runtime.GOOS == "windows" {
+               LineBreak = "\r\n"
+       }
+}
+
+func inSlice(str string, s []string) bool {
+       for _, v := range s {
+               if str == v {
+                       return true
+               }
+       }
+       return false
+}
+
+// dataSource is an interface that returns an object which can be read and closed.
+type dataSource interface {
+       ReadCloser() (io.ReadCloser, error)
+}
+
+// sourceFile represents an object that contains content on the local file system.
+type sourceFile struct {
+       name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+       return os.Open(s.name)
+}
+
+type bytesReadCloser struct {
+       reader io.Reader
+}
+
+func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
+       return rc.reader.Read(p)
+}
+
+func (rc *bytesReadCloser) Close() error {
+       return nil
+}
+
+// sourceData represents an object that contains content in memory.
+type sourceData struct {
+       data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+       return ioutil.NopCloser(bytes.NewReader(s.data)), nil
+}
+
+// sourceReadCloser represents an input stream with Close method.
+type sourceReadCloser struct {
+       reader io.ReadCloser
+}
+
+func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) {
+       return s.reader, nil
+}
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+	// BlockMode indicates whether operations should take the lock below. Safe by default, but sometimes it doesn't matter.
+       BlockMode bool
+       // Make sure data is safe in multiple goroutines.
+       lock sync.RWMutex
+
+       // Allow combination of multiple data sources.
+       dataSources []dataSource
+       // Actual data is stored here.
+       sections map[string]*Section
+
+       // To keep data in order.
+       sectionList []string
+
+       options LoadOptions
+
+       NameMapper
+       ValueMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource, opts LoadOptions) *File {
+       return &File{
+               BlockMode:   true,
+               dataSources: dataSources,
+               sections:    make(map[string]*Section),
+               sectionList: make([]string, 0, 10),
+               options:     opts,
+       }
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+       switch s := source.(type) {
+       case string:
+               return sourceFile{s}, nil
+       case []byte:
+               return &sourceData{s}, nil
+       case io.ReadCloser:
+               return &sourceReadCloser{s}, nil
+       default:
+               return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+       }
+}
+
+type LoadOptions struct {
+	// Loose indicates whether the parser should ignore nonexistent files or return an error.
+	Loose bool
+	// Insensitive indicates whether the parser forces all section and key names to lowercase.
+	Insensitive bool
+	// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
+	IgnoreContinuation bool
+	// AllowBooleanKeys indicates whether to allow keys with no value, treating them as
+	// boolean keys rather than as missing values. Such keys are mostly used in my.cnf.
+	AllowBooleanKeys bool
+	// Some INI formats allow group blocks that store a block of raw content that doesn't
+	// otherwise conform to key/value pairs. Specify the names of those blocks here.
+	UnparseableSections []string
+}
+
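+// LoadSources allows the caller to apply customized options when loading
+// from the data source(s). A usage sketch:
+//
+//	cfg, err := ini.LoadSources(ini.LoadOptions{Loose: true}, "a.ini", "b.ini")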
+func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) {
+       sources := make([]dataSource, len(others)+1)
+       sources[0], err = parseDataSource(source)
+       if err != nil {
+               return nil, err
+       }
+       for i := range others {
+               sources[i+1], err = parseDataSource(others[i])
+               if err != nil {
+                       return nil, err
+               }
+       }
+       f := newFile(sources, opts)
+       if err = f.Reload(); err != nil {
+               return nil, err
+       }
+       return f, nil
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string) and raw data in []byte.
+// It will return an error if the list contains nonexistent files.
+func Load(source interface{}, others ...interface{}) (*File, error) {
+       return LoadSources(LoadOptions{}, source, others...)
+}
+
+// LooseLoad has exactly the same functionality as the Load function,
+// except that it ignores nonexistent files instead of returning an error.
+func LooseLoad(source interface{}, others ...interface{}) (*File, error) {
+       return LoadSources(LoadOptions{Loose: true}, source, others...)
+}
+
+// InsensitiveLoad has exactly the same functionality as the Load function,
+// except that it forces all section and key names to lowercase.
+func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
+       return LoadSources(LoadOptions{Insensitive: true}, source, others...)
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore error here; we're sure our data is good.
+       f, _ := Load([]byte(""))
+       return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+       if len(name) == 0 {
+               return nil, errors.New("error creating new section: empty section name")
+       } else if f.options.Insensitive && name != DEFAULT_SECTION {
+               name = strings.ToLower(name)
+       }
+
+       if f.BlockMode {
+               f.lock.Lock()
+               defer f.lock.Unlock()
+       }
+
+       if inSlice(name, f.sectionList) {
+               return f.sections[name], nil
+       }
+
+       f.sectionList = append(f.sectionList, name)
+       f.sections[name] = newSection(f, name)
+       return f.sections[name], nil
+}
+
+// NewRawSection creates a new section with an unparseable body.
+func (f *File) NewRawSection(name, body string) (*Section, error) {
+       section, err := f.NewSection(name)
+       if err != nil {
+               return nil, err
+       }
+
+       section.isRawSection = true
+       section.rawBody = body
+       return section, nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+       for _, name := range names {
+               if _, err = f.NewSection(name); err != nil {
+                       return err
+               }
+       }
+       return nil
+}
+
+// GetSection returns a section by the given name.
+func (f *File) GetSection(name string) (*Section, error) {
+       if len(name) == 0 {
+               name = DEFAULT_SECTION
+       } else if f.options.Insensitive {
+               name = strings.ToLower(name)
+       }
+
+       if f.BlockMode {
+               f.lock.RLock()
+               defer f.lock.RUnlock()
+       }
+
+       sec := f.sections[name]
+       if sec == nil {
+               return nil, fmt.Errorf("section '%s' does not exist", name)
+       }
+       return sec, nil
+}
+
+// Section assumes the named section exists; when it does not, the section is created automatically.
+func (f *File) Section(name string) *Section {
+       sec, err := f.GetSection(name)
+       if err != nil {
+		// Note: it's OK to ignore the error here because the only possible
+		// error from NewSection is an empty section name, which cannot reach
+		// this code path (GetSection maps empty names to DEFAULT_SECTION).
+               sec, _ = f.NewSection(name)
+               return sec
+       }
+       return sec
+}
+
+// Sections returns a list of all sections.
+func (f *File) Sections() []*Section {
+       sections := make([]*Section, len(f.sectionList))
+       for i := range f.sectionList {
+               sections[i] = f.Section(f.sectionList[i])
+       }
+       return sections
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+       list := make([]string, len(f.sectionList))
+       copy(list, f.sectionList)
+       return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+       if f.BlockMode {
+               f.lock.Lock()
+               defer f.lock.Unlock()
+       }
+
+       if len(name) == 0 {
+               name = DEFAULT_SECTION
+       }
+
+       for i, s := range f.sectionList {
+               if s == name {
+                       f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+                       delete(f.sections, name)
+                       return
+               }
+       }
+}
+
+func (f *File) reload(s dataSource) error {
+       r, err := s.ReadCloser()
+       if err != nil {
+               return err
+       }
+       defer r.Close()
+
+       return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+       for _, s := range f.dataSources {
+               if err = f.reload(s); err != nil {
+                       // In loose mode, we create an empty default section for nonexistent files.
+                       if os.IsNotExist(err) && f.options.Loose {
+                               f.parse(bytes.NewBuffer(nil))
+                               continue
+                       }
+                       return err
+               }
+       }
+       return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+       ds, err := parseDataSource(source)
+       if err != nil {
+               return err
+       }
+       f.dataSources = append(f.dataSources, ds)
+       for _, s := range others {
+               ds, err = parseDataSource(s)
+               if err != nil {
+                       return err
+               }
+               f.dataSources = append(f.dataSources, ds)
+       }
+       return f.Reload()
+}
+
+// WriteToIndent writes content into an io.Writer with the given indentation.
+// If PrettyFormat is set to true,
+// it aligns the "=" signs with spaces within each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+       equalSign := "="
+       if PrettyFormat {
+               equalSign = " = "
+       }
+
+       // Buffer the output so the target writer stays untouched until encoding finishes.
+       buf := bytes.NewBuffer(nil)
+       for i, sname := range f.sectionList {
+               sec := f.Section(sname)
+               if len(sec.Comment) > 0 {
+                       if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+                               sec.Comment = "; " + sec.Comment
+                       }
+                       if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+                               return 0, err
+                       }
+               }
+
+               if i > 0 || DefaultHeader {
+                       if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+                               return 0, err
+                       }
+               } else {
+                       // Write nothing if default section is empty
+                       if len(sec.keyList) == 0 {
+                               continue
+                       }
+               }
+
+               if sec.isRawSection {
+                       if _, err = buf.WriteString(sec.rawBody); err != nil {
+                               return 0, err
+                       }
+                       continue
+               }
+
+               // Compute the alignment length and a buffer of spaces from the
+               // longest key. Keys may be modified (quoted) if they contain certain
+               // characters, so we need to take that into account in our calculation.
+               alignLength := 0
+               if PrettyFormat {
+                       for _, kname := range sec.keyList {
+                               keyLength := len(kname)
+                               // The first case surrounds the key with ` and the second with """
+                               if strings.ContainsAny(kname, "\"=:") {
+                                       keyLength += 2
+                               } else if strings.Contains(kname, "`") {
+                                       keyLength += 6
+                               }
+
+                               if keyLength > alignLength {
+                                       alignLength = keyLength
+                               }
+                       }
+               }
+               alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+
+               for _, kname := range sec.keyList {
+                       key := sec.Key(kname)
+                       if len(key.Comment) > 0 {
+                               if len(indent) > 0 && sname != DEFAULT_SECTION {
+                                       buf.WriteString(indent)
+                               }
+                               if key.Comment[0] != '#' && key.Comment[0] != ';' {
+                                       key.Comment = "; " + key.Comment
+                               }
+                               if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+                                       return 0, err
+                               }
+                       }
+
+                       if len(indent) > 0 && sname != DEFAULT_SECTION {
+                               buf.WriteString(indent)
+                       }
+
+                       switch {
+                       case key.isAutoIncrement:
+                               kname = "-"
+                       case strings.ContainsAny(kname, "\"=:"):
+                               kname = "`" + kname + "`"
+                       case strings.Contains(kname, "`"):
+                               kname = `"""` + kname + `"""`
+                       }
+                       if _, err = buf.WriteString(kname); err != nil {
+                               return 0, err
+                       }
+
+                       if key.isBooleanType {
+                               continue
+                       }
+
+                       // Write out alignment spaces before "=" sign
+                       if PrettyFormat {
+                               buf.Write(alignSpaces[:alignLength-len(kname)])
+                       }
+
+                       val := key.value
+                       // In case key value contains "\n", "`", "\"", "#" or ";"
+                       if strings.ContainsAny(val, "\n`") {
+                               val = `"""` + val + `"""`
+                       } else if strings.ContainsAny(val, "#;") {
+                               val = "`" + val + "`"
+                       }
+                       if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
+                               return 0, err
+                       }
+               }
+
+               // Put a line between sections
+               if _, err = buf.WriteString(LineBreak); err != nil {
+                       return 0, err
+               }
+       }
+
+       return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+       return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indention.
+func (f *File) SaveToIndent(filename, indent string) error {
+       // Note: Because os.Create truncates the target file,
+       //      it's safer to save to a temporary location and rename it after we're done.
+       tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+       defer os.Remove(tmpPath)
+
+       fw, err := os.Create(tmpPath)
+       if err != nil {
+               return err
+       }
+
+       if _, err = f.WriteToIndent(fw, indent); err != nil {
+               fw.Close()
+               return err
+       }
+       fw.Close()
+
+       // Remove old file and rename the new one.
+       os.Remove(filename)
+       return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+       return f.SaveToIndent(filename, "")
+}
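+
+// Serializing back out, as a sketch (the path is hypothetical): WriteTo
+// streams the encoded INI, while SaveToIndent persists it via a temporary
+// file and a rename:
+//
+//      var buf bytes.Buffer
+//      if _, err := cfg.WriteTo(&buf); err != nil {
+//              // handle error
+//      }
+//      err := cfg.SaveToIndent("app.ini", "\t")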
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
new file mode 100644 (file)
index 0000000..9738c55
--- /dev/null
@@ -0,0 +1,633 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+       "fmt"
+       "strconv"
+       "strings"
+       "time"
+)
+
+// Key represents a key under a section.
+type Key struct {
+       s               *Section
+       name            string
+       value           string
+       isAutoIncrement bool
+       isBooleanType   bool
+
+       Comment string
+}
+
+// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
+type ValueMapper func(string) string
+
+// Name returns name of key.
+func (k *Key) Name() string {
+       return k.name
+}
+
+// Value returns the raw value of the key, for performance purposes.
+func (k *Key) Value() string {
+       return k.value
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+       val := k.value
+       if k.s.f.ValueMapper != nil {
+               val = k.s.f.ValueMapper(val)
+       }
+       if strings.Index(val, "%") == -1 {
+               return val
+       }
+
+       for i := 0; i < _DEPTH_VALUES; i++ {
+               vr := varPattern.FindString(val)
+               if len(vr) == 0 {
+                       break
+               }
+
+               // Take off leading '%(' and trailing ')s'.
+               noption := strings.TrimLeft(vr, "%(")
+               noption = strings.TrimRight(noption, ")s")
+
+               // Search in the same section.
+               nk, err := k.s.GetKey(noption)
+               if err != nil {
+                       // Search again in default section.
+                       nk, _ = k.s.f.Section("").GetKey(noption)
+               }
+
+               // Substitute the variable reference with the resolved value.
+               val = strings.Replace(val, vr, nk.value, -1)
+       }
+       return val
+}
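+
+// For example (values illustrative), with a section containing
+//
+//      root = /data
+//      path = %(root)s/logs
+//
+// Key("path").String() resolves the reference and yields "/data/logs";
+// lookups try the same section first, then the default section, up to
+// _DEPTH_VALUES levels deep.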
+
+// Validate accepts a validation function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+       return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+       switch str {
+       case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
+               return true, nil
+       case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
+               return false, nil
+       }
+       return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+       return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+       return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+       return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+       return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+       u, e := strconv.ParseUint(k.String(), 10, 64)
+       return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+       return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+       return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+       return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+       return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+       val := k.String()
+       if len(val) == 0 {
+               k.value = defaultVal
+               return defaultVal
+       }
+       return val
+}
+
+// MustBool always returns a value without error;
+// it returns false if an error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+       val, err := k.Bool()
+       if len(defaultVal) > 0 && err != nil {
+               k.value = strconv.FormatBool(defaultVal[0])
+               return defaultVal[0]
+       }
+       return val
+}
+
+// MustFloat64 always returns a value without error;
+// it returns 0.0 if an error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+       val, err := k.Float64()
+       if len(defaultVal) > 0 && err != nil {
+               k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64)
+               return defaultVal[0]
+       }
+       return val
+}
+
+// MustInt always returns a value without error;
+// it returns 0 if an error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+       val, err := k.Int()
+       if len(defaultVal) > 0 && err != nil {
+               k.value = strconv.FormatInt(int64(defaultVal[0]), 10)
+               return defaultVal[0]
+       }
+       return val
+}
+
+// MustInt64 always returns a value without error;
+// it returns 0 if an error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+       val, err := k.Int64()
+       if len(defaultVal) > 0 && err != nil {
+               k.value = strconv.FormatInt(defaultVal[0], 10)
+               return defaultVal[0]
+       }
+       return val
+}
+
+// MustUint always returns a value without error;
+// it returns 0 if an error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+       val, err := k.Uint()
+       if len(defaultVal) > 0 && err != nil {
+               k.value = strconv.FormatUint(uint64(defaultVal[0]), 10)
+               return defaultVal[0]
+       }
+       return val
+}
+
+// MustUint64 always returns a value without error;
+// it returns 0 if an error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+       val, err := k.Uint64()
+       if len(defaultVal) > 0 && err != nil {
+               k.value = strconv.FormatUint(defaultVal[0], 10)
+               return defaultVal[0]
+       }
+       return val
+}
+
+// MustDuration always returns a value without error;
+// it returns the zero value if an error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+       val, err := k.Duration()
+       if len(defaultVal) > 0 && err != nil {
+               k.value = defaultVal[0].String()
+               return defaultVal[0]
+       }
+       return val
+}
+
+// MustTimeFormat always parses with the given format and returns a value
+// without error; it returns the zero value if an error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+       val, err := k.TimeFormat(format)
+       if len(defaultVal) > 0 && err != nil {
+               k.value = defaultVal[0].Format(format)
+               return defaultVal[0]
+       }
+       return val
+}
+
+// MustTime always parses with the RFC3339 format and returns a value
+// without error; it returns the zero value if an error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+       return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
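+
+// Note that when a Must* accessor falls back to its default, it also writes
+// the default back as the key's new value. A sketch with hypothetical keys:
+//
+//      timeout := cfg.Section("net").Key("timeout").MustDuration(30 * time.Second)
+//      debug := cfg.Section("net").Key("debug").MustBool(false)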
+
+// In always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+       val := k.String()
+       for _, cand := range candidates {
+               if val == cand {
+                       return val
+               }
+       }
+       return defaultVal
+}
+
+// InFloat64 always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+       val := k.MustFloat64()
+       for _, cand := range candidates {
+               if val == cand {
+                       return val
+               }
+       }
+       return defaultVal
+}
+
+// InInt always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+       val := k.MustInt()
+       for _, cand := range candidates {
+               if val == cand {
+                       return val
+               }
+       }
+       return defaultVal
+}
+
+// InInt64 always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+       val := k.MustInt64()
+       for _, cand := range candidates {
+               if val == cand {
+                       return val
+               }
+       }
+       return defaultVal
+}
+
+// InUint always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+       val := k.MustUint()
+       for _, cand := range candidates {
+               if val == cand {
+                       return val
+               }
+       }
+       return defaultVal
+}
+
+// InUint64 always returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+       val := k.MustUint64()
+       for _, cand := range candidates {
+               if val == cand {
+                       return val
+               }
+       }
+       return defaultVal
+}
+
+// InTimeFormat always parses with the given format and returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+       val := k.MustTimeFormat(format)
+       for _, cand := range candidates {
+               if val == cand {
+                       return val
+               }
+       }
+       return defaultVal
+}
+
+// InTime always parses with the RFC3339 format and returns a value without error;
+// it returns the default value if an error occurs or the value is not among the candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+       return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
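+
+// A sketch of candidate filtering (key and values hypothetical):
+//
+//      // level = info
+//      level := cfg.Section("log").Key("level").In("warn", []string{"debug", "info", "warn"})
+//      // yields "info"; any value outside the list falls back to "warn"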
+
+// RangeFloat64 checks if the value is within the given range (inclusive),
+// and returns the default value if it is not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+       val := k.MustFloat64()
+       if val < min || val > max {
+               return defaultVal
+       }
+       return val
+}
+
+// RangeInt checks if the value is within the given range (inclusive),
+// and returns the default value if it is not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+       val := k.MustInt()
+       if val < min || val > max {
+               return defaultVal
+       }
+       return val
+}
+
+// RangeInt64 checks if the value is within the given range (inclusive),
+// and returns the default value if it is not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+       val := k.MustInt64()
+       if val < min || val > max {
+               return defaultVal
+       }
+       return val
+}
+
+// RangeTimeFormat checks if the value, parsed with the given format, is within
+// the given range (inclusive), and returns the default value if it is not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+       val := k.MustTimeFormat(format)
+       if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+               return defaultVal
+       }
+       return val
+}
+
+// RangeTime checks if the value, parsed as RFC3339, is within the given range
+// (inclusive), and returns the default value if it is not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+       return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
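+
+// The Range* helpers reject rather than clamp: an out-of-range value falls
+// back to the default, not to the nearest bound. For instance (key hypothetical):
+//
+//      port := cfg.Section("net").Key("port").RangeInt(8080, 1, 65535)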
+
+// Strings returns list of string divided by given delimiter.
+func (k *Key) Strings(delim string) []string {
+       str := k.String()
+       if len(str) == 0 {
+               return []string{}
+       }
+
+       vals := strings.Split(str, delim)
+       for i := range vals {
+               vals[i] = strings.TrimSpace(vals[i])
+       }
+       return vals
+}
+
+// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Float64s(delim string) []float64 {
+       vals, _ := k.getFloat64s(delim, true, false)
+       return vals
+}
+
+// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Ints(delim string) []int {
+       vals, _ := k.getInts(delim, true, false)
+       return vals
+}
+
+// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Int64s(delim string) []int64 {
+       vals, _ := k.getInt64s(delim, true, false)
+       return vals
+}
+
+// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uints(delim string) []uint {
+       vals, _ := k.getUints(delim, true, false)
+       return vals
+}
+
+// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
+func (k *Key) Uint64s(delim string) []uint64 {
+       vals, _ := k.getUint64s(delim, true, false)
+       return vals
+}
+
+// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+       vals, _ := k.getTimesFormat(format, delim, true, false)
+       return vals
+}
+
+// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
+func (k *Key) Times(delim string) []time.Time {
+       return k.TimesFormat(time.RFC3339, delim)
+}
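+
+// List accessors split on the given delimiter and trim whitespace around
+// each element, so a hypothetical "ports = 80, 443,8080" parses cleanly:
+//
+//      ports := cfg.Section("net").Key("ports").Ints(",")  // []int{80, 443, 8080}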
+
+// ValidFloat64s returns list of float64 divided by given delimiter. If a value
+// is not a valid float, it will not be included in the result list.
+func (k *Key) ValidFloat64s(delim string) []float64 {
+       vals, _ := k.getFloat64s(delim, false, false)
+       return vals
+}
+
+// ValidInts returns list of int divided by given delimiter. If a value is not
+// a valid integer, it will not be included in the result list.
+func (k *Key) ValidInts(delim string) []int {
+       vals, _ := k.getInts(delim, false, false)
+       return vals
+}
+
+// ValidInt64s returns list of int64 divided by given delimiter. If a value is
+// not a valid 64-bit integer, it will not be included in the result list.
+func (k *Key) ValidInt64s(delim string) []int64 {
+       vals, _ := k.getInt64s(delim, false, false)
+       return vals
+}
+
+// ValidUints returns list of uint divided by given delimiter. If a value is
+// not a valid unsigned integer, it will not be included in the result list.
+func (k *Key) ValidUints(delim string) []uint {
+       vals, _ := k.getUints(delim, false, false)
+       return vals
+}
+
+// ValidUint64s returns list of uint64 divided by given delimiter. If a value is
+// not a valid 64-bit unsigned integer, it will not be included in the result list.
+func (k *Key) ValidUint64s(delim string) []uint64 {
+       vals, _ := k.getUint64s(delim, false, false)
+       return vals
+}
+
+// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
+       vals, _ := k.getTimesFormat(format, delim, false, false)
+       return vals
+}
+
+// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter.
+func (k *Key) ValidTimes(delim string) []time.Time {
+       return k.ValidTimesFormat(time.RFC3339, delim)
+}
+
+// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
+       return k.getFloat64s(delim, false, true)
+}
+
+// StrictInts returns list of int divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInts(delim string) ([]int, error) {
+       return k.getInts(delim, false, true)
+}
+
+// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictInt64s(delim string) ([]int64, error) {
+       return k.getInt64s(delim, false, true)
+}
+
+// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUints(delim string) ([]uint, error) {
+       return k.getUints(delim, false, true)
+}
+
+// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
+func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
+       return k.getUint64s(delim, false, true)
+}
+
+// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
+       return k.getTimesFormat(format, delim, false, true)
+}
+
+// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
+// or error on first invalid input.
+func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
+       return k.StrictTimesFormat(time.RFC3339, delim)
+}
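+
+// The three list families differ only in how invalid entries are handled;
+// sketched with a hypothetical "nums = 1,x,3":
+//
+//      k.Ints(",")       // []int{1, 0, 3}: invalid entries become zero values
+//      k.ValidInts(",")  // []int{1, 3}:    invalid entries are dropped
+//      k.StrictInts(",") // nil, error:     first invalid entry aborts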
+
+// getFloat64s returns list of float64 divided by given delimiter.
+func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
+       strs := k.Strings(delim)
+       vals := make([]float64, 0, len(strs))
+       for _, str := range strs {
+               val, err := strconv.ParseFloat(str, 64)
+               if err != nil && returnOnInvalid {
+                       return nil, err
+               }
+               if err == nil || addInvalid {
+                       vals = append(vals, val)
+               }
+       }
+       return vals, nil
+}
+
+// getInts returns list of int divided by given delimiter.
+func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
+       strs := k.Strings(delim)
+       vals := make([]int, 0, len(strs))
+       for _, str := range strs {
+               val, err := strconv.Atoi(str)
+               if err != nil && returnOnInvalid {
+                       return nil, err
+               }
+               if err == nil || addInvalid {
+                       vals = append(vals, val)
+               }
+       }
+       return vals, nil
+}
+
+// getInt64s returns list of int64 divided by given delimiter.
+func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
+       strs := k.Strings(delim)
+       vals := make([]int64, 0, len(strs))
+       for _, str := range strs {
+               val, err := strconv.ParseInt(str, 10, 64)
+               if err != nil && returnOnInvalid {
+                       return nil, err
+               }
+               if err == nil || addInvalid {
+                       vals = append(vals, val)
+               }
+       }
+       return vals, nil
+}
+
+// getUints returns list of uint divided by given delimiter.
+func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
+       strs := k.Strings(delim)
+       vals := make([]uint, 0, len(strs))
+       for _, str := range strs {
+               val, err := strconv.ParseUint(str, 10, 0)
+               if err != nil && returnOnInvalid {
+                       return nil, err
+               }
+               if err == nil || addInvalid {
+                       vals = append(vals, uint(val))
+               }
+       }
+       return vals, nil
+}
+
+// getUint64s returns list of uint64 divided by given delimiter.
+func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
+       strs := k.Strings(delim)
+       vals := make([]uint64, 0, len(strs))
+       for _, str := range strs {
+               val, err := strconv.ParseUint(str, 10, 64)
+               if err != nil && returnOnInvalid {
+                       return nil, err
+               }
+               if err == nil || addInvalid {
+                       vals = append(vals, val)
+               }
+       }
+       return vals, nil
+}
+
+// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
+func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
+       strs := k.Strings(delim)
+       vals := make([]time.Time, 0, len(strs))
+       for _, str := range strs {
+               val, err := time.Parse(format, str)
+               if err != nil && returnOnInvalid {
+                       return nil, err
+               }
+               if err == nil || addInvalid {
+                       vals = append(vals, val)
+               }
+       }
+       return vals, nil
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+       if k.s.f.BlockMode {
+               k.s.f.lock.Lock()
+               defer k.s.f.lock.Unlock()
+       }
+
+       k.value = v
+       k.s.keysHash[k.name] = v
+}
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
new file mode 100644 (file)
index 0000000..b0aabe3
--- /dev/null
@@ -0,0 +1,356 @@
+// Copyright 2015 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+       "bufio"
+       "bytes"
+       "fmt"
+       "io"
+       "strconv"
+       "strings"
+       "unicode"
+)
+
+type tokenType int
+
+const (
+       _TOKEN_INVALID tokenType = iota
+       _TOKEN_COMMENT
+       _TOKEN_SECTION
+       _TOKEN_KEY
+)
+
+type parser struct {
+       buf     *bufio.Reader
+       isEOF   bool
+       count   int
+       comment *bytes.Buffer
+}
+
+func newParser(r io.Reader) *parser {
+       return &parser{
+               buf:     bufio.NewReader(r),
+               count:   1,
+               comment: &bytes.Buffer{},
+       }
+}
+
+// BOM handles the byte order mark at the head of UTF-8, UTF-16 LE and UTF-16 BE input.
+// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+func (p *parser) BOM() error {
+       mask, err := p.buf.Peek(2)
+       if err != nil && err != io.EOF {
+               return err
+       } else if len(mask) < 2 {
+               return nil
+       }
+
+       switch {
+       case mask[0] == 254 && mask[1] == 255:
+               fallthrough
+       case mask[0] == 255 && mask[1] == 254:
+               p.buf.Read(mask)
+       case mask[0] == 239 && mask[1] == 187:
+               mask, err := p.buf.Peek(3)
+               if err != nil && err != io.EOF {
+                       return err
+               } else if len(mask) < 3 {
+                       return nil
+               }
+               if mask[2] == 191 {
+                       p.buf.Read(mask)
+               }
+       }
+       return nil
+}
+
+func (p *parser) readUntil(delim byte) ([]byte, error) {
+       data, err := p.buf.ReadBytes(delim)
+       if err != nil {
+               if err == io.EOF {
+                       p.isEOF = true
+               } else {
+                       return nil, err
+               }
+       }
+       return data, nil
+}
+
+func cleanComment(in []byte) ([]byte, bool) {
+       i := bytes.IndexAny(in, "#;")
+       if i == -1 {
+               return nil, false
+       }
+       return in[i:], true
+}
+
+func readKeyName(in []byte) (string, int, error) {
+       line := string(in)
+
+       // Check if the key name is surrounded by quotes.
+       var keyQuote string
+       if line[0] == '"' {
+               if len(line) > 6 && string(line[0:3]) == `"""` {
+                       keyQuote = `"""`
+               } else {
+                       keyQuote = `"`
+               }
+       } else if line[0] == '`' {
+               keyQuote = "`"
+       }
+
+       // Extract the key name
+       endIdx := -1
+       if len(keyQuote) > 0 {
+               startIdx := len(keyQuote)
+               // FIXME: fail case -> """"""name"""=value
+               pos := strings.Index(line[startIdx:], keyQuote)
+               if pos == -1 {
+                       return "", -1, fmt.Errorf("missing closing key quote: %s", line)
+               }
+               pos += startIdx
+
+               // Find key-value delimiter
+               i := strings.IndexAny(line[pos+startIdx:], "=:")
+               if i < 0 {
+                       return "", -1, ErrDelimiterNotFound{line}
+               }
+               endIdx = pos + i
+               return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
+       }
+
+       endIdx = strings.IndexAny(line, "=:")
+       if endIdx < 0 {
+               return "", -1, ErrDelimiterNotFound{line}
+       }
+       return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
+}
+
+func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
+       for {
+               data, err := p.readUntil('\n')
+               if err != nil {
+                       return "", err
+               }
+               next := string(data)
+
+               pos := strings.LastIndex(next, valQuote)
+               if pos > -1 {
+                       val += next[:pos]
+
+                       comment, has := cleanComment([]byte(next[pos:]))
+                       if has {
+                               p.comment.Write(bytes.TrimSpace(comment))
+                       }
+                       break
+               }
+               val += next
+               if p.isEOF {
+                       return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
+               }
+       }
+       return val, nil
+}
+
+func (p *parser) readContinuationLines(val string) (string, error) {
+       for {
+               data, err := p.readUntil('\n')
+               if err != nil {
+                       return "", err
+               }
+               next := strings.TrimSpace(string(data))
+
+               if len(next) == 0 {
+                       break
+               }
+               val += next
+               if val[len(val)-1] != '\\' {
+                       break
+               }
+               val = val[:len(val)-1]
+       }
+       return val, nil
+}
+
+// hasSurroundedQuote check if and only if the first and last characters
+// are quotes \" or \'.
+// It returns false if any other parts also contain same kind of quotes.
+func hasSurroundedQuote(in string, quote byte) bool {
+       return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
+               strings.IndexByte(in[1:], quote) == len(in)-2
+}
+
+func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
+       line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
+       if len(line) == 0 {
+               return "", nil
+       }
+
+       var valQuote string
+       if len(line) > 3 && string(line[0:3]) == `"""` {
+               valQuote = `"""`
+       } else if line[0] == '`' {
+               valQuote = "`"
+       }
+
+       if len(valQuote) > 0 {
+               startIdx := len(valQuote)
+               pos := strings.LastIndex(line[startIdx:], valQuote)
+               // Check for multi-line value
+               if pos == -1 {
+                       return p.readMultilines(line, line[startIdx:], valQuote)
+               }
+
+               return line[startIdx : pos+startIdx], nil
+       }
+
+       // Won't be able to reach here if value only contains whitespace.
+       line = strings.TrimSpace(line)
+
+       // Check continuation lines when desired.
+       if !ignoreContinuation && line[len(line)-1] == '\\' {
+               return p.readContinuationLines(line[:len(line)-1])
+       }
+
+       i := strings.IndexAny(line, "#;")
+       if i > -1 {
+               p.comment.WriteString(line[i:])
+               line = strings.TrimSpace(line[:i])
+       }
+
+       // Trim single quotes
+       if hasSurroundedQuote(line, '\'') ||
+               hasSurroundedQuote(line, '"') {
+               line = line[1 : len(line)-1]
+       }
+       return line, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) (err error) {
+       p := newParser(reader)
+       if err = p.BOM(); err != nil {
+               return fmt.Errorf("BOM: %v", err)
+       }
+
+       // Ignore the error because the default section name is never an empty string.
+       section, _ := f.NewSection(DEFAULT_SECTION)
+
+       var line []byte
+       var inUnparseableSection bool
+       for !p.isEOF {
+               line, err = p.readUntil('\n')
+               if err != nil {
+                       return err
+               }
+
+               line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+               if len(line) == 0 {
+                       continue
+               }
+
+               // Comments
+               if line[0] == '#' || line[0] == ';' {
+                       // Note: we intentionally keep the trailing line break here,
+                       // since it is needed when appending a second comment line;
+                       // the comment is trimmed once when it is finally assigned.
+                       p.comment.Write(line)
+                       continue
+               }
+
+               // Section
+               if line[0] == '[' {
+                       // Read to the next ']' (TODO: support quoted strings)
+                       // TODO(unknwon): use LastIndexByte when stop supporting Go1.4
+                       closeIdx := bytes.LastIndex(line, []byte("]"))
+                       if closeIdx == -1 {
+                               return fmt.Errorf("unclosed section: %s", line)
+                       }
+
+                       name := string(line[1:closeIdx])
+                       section, err = f.NewSection(name)
+                       if err != nil {
+                               return err
+                       }
+
+                       comment, has := cleanComment(line[closeIdx+1:])
+                       if has {
+                               p.comment.Write(comment)
+                       }
+
+                       section.Comment = strings.TrimSpace(p.comment.String())
+
+                       // Reset auto-counter and comments
+                       p.comment.Reset()
+                       p.count = 1
+
+                       inUnparseableSection = false
+                       for i := range f.options.UnparseableSections {
+                               if f.options.UnparseableSections[i] == name ||
+                                       (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) {
+                                       inUnparseableSection = true
+                                       continue
+                               }
+                       }
+                       continue
+               }
+
+               if inUnparseableSection {
+                       section.isRawSection = true
+                       section.rawBody += string(line)
+                       continue
+               }
+
+               kname, offset, err := readKeyName(line)
+               if err != nil {
+                       // Treat the whole line as a boolean key name when that option is enabled.
+                       if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
+                               key, err := section.NewKey(string(line), "true")
+                               if err != nil {
+                                       return err
+                               }
+                               key.isBooleanType = true
+                               key.Comment = strings.TrimSpace(p.comment.String())
+                               p.comment.Reset()
+                               continue
+                       }
+                       return err
+               }
+
+               // Auto increment.
+               isAutoIncr := false
+               if kname == "-" {
+                       isAutoIncr = true
+                       kname = "#" + strconv.Itoa(p.count)
+                       p.count++
+               }
+
+               key, err := section.NewKey(kname, "")
+               if err != nil {
+                       return err
+               }
+               key.isAutoIncrement = isAutoIncr
+
+               value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
+               if err != nil {
+                       return err
+               }
+               key.SetValue(value)
+               key.Comment = strings.TrimSpace(p.comment.String())
+               p.comment.Reset()
+       }
+       return nil
+}
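+
+// Putting the parser's branches together, input along these lines is
+// accepted (illustrative only; boolean keys require the AllowBooleanKeys
+// option):
+//
+//      ; comment attached to the section below
+//      [core]
+//      verbose              ; boolean key, value "true"
+//      -                    ; auto-increment key, stored as "#1"
+//      text = """multi
+//      line"""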
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
new file mode 100644 (file)
index 0000000..45d2f3b
--- /dev/null
@@ -0,0 +1,221 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+       "errors"
+       "fmt"
+       "strings"
+)
+
+// Section represents a config section.
+type Section struct {
+       f        *File
+       Comment  string
+       name     string
+       keys     map[string]*Key
+       keyList  []string
+       keysHash map[string]string
+
+       isRawSection bool
+       rawBody      string
+}
+
+func newSection(f *File, name string) *Section {
+       return &Section{
+               f:        f,
+               name:     name,
+               keys:     make(map[string]*Key),
+               keyList:  make([]string, 0, 10),
+               keysHash: make(map[string]string),
+       }
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+       return s.name
+}
+
+// Body returns rawBody of Section if the section was marked as unparseable.
+// It still follows the other rules of the INI format surrounding leading/trailing whitespace.
+func (s *Section) Body() string {
+       return strings.TrimSpace(s.rawBody)
+}
+
+// NewKey creates a new key in the given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+       if len(name) == 0 {
+               return nil, errors.New("error creating new key: empty key name")
+       } else if s.f.options.Insensitive {
+               name = strings.ToLower(name)
+       }
+
+       if s.f.BlockMode {
+               s.f.lock.Lock()
+               defer s.f.lock.Unlock()
+       }
+
+       if inSlice(name, s.keyList) {
+               s.keys[name].value = val
+               return s.keys[name], nil
+       }
+
+       s.keyList = append(s.keyList, name)
+       s.keys[name] = &Key{
+               s:     s,
+               name:  name,
+               value: val,
+       }
+       s.keysHash[name] = val
+       return s.keys[name], nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+       // FIXME: change to section level lock?
+       if s.f.BlockMode {
+               s.f.lock.RLock()
+       }
+       if s.f.options.Insensitive {
+               name = strings.ToLower(name)
+       }
+       key := s.keys[name]
+       if s.f.BlockMode {
+               s.f.lock.RUnlock()
+       }
+
+       if key == nil {
+               // Check if it is a child-section.
+               sname := s.name
+               for {
+                       if i := strings.LastIndex(sname, "."); i > -1 {
+                               sname = sname[:i]
+                               sec, err := s.f.GetSection(sname)
+                               if err != nil {
+                                       continue
+                               }
+                               return sec.GetKey(name)
+                       } else {
+                               break
+                       }
+               }
+               return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name)
+       }
+       return key, nil
+}
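+
+// Because of the fallback above, dotted section names behave like children:
+// with hypothetical sections [parent] and [parent.child], a key defined only
+// in [parent] is still reachable from the child:
+//
+//      v := cfg.Section("parent.child").Key("host").String()  // value from [parent]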
+
+// HasKey returns true if section contains a key with given name.
+func (s *Section) HasKey(name string) bool {
+       key, _ := s.GetKey(name)
+       return key != nil
+}
+
+// Haskey is a backwards-compatible name for HasKey.
+func (s *Section) Haskey(name string) bool {
+       return s.HasKey(name)
+}
+
+// HasValue returns true if section contains given raw value.
+func (s *Section) HasValue(value string) bool {
+       if s.f.BlockMode {
+               s.f.lock.RLock()
+               defer s.f.lock.RUnlock()
+       }
+
+       for _, k := range s.keys {
+               if value == k.value {
+                       return true
+               }
+       }
+       return false
+}
+
+// Key assumes the named key exists in the section and returns an empty key when it does not.
+func (s *Section) Key(name string) *Key {
+       key, err := s.GetKey(name)
+       if err != nil {
+               // It's OK here because the only possible error is an empty key name,
+               // but if it were empty, this piece of code would not be executed.
+               key, _ = s.NewKey(name, "")
+               return key
+       }
+       return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+       keys := make([]*Key, len(s.keyList))
+       for i := range s.keyList {
+               keys[i] = s.Key(s.keyList[i])
+       }
+       return keys
+}
+
+// ParentKeys returns list of keys of parent section.
+func (s *Section) ParentKeys() []*Key {
+       var parentKeys []*Key
+       sname := s.name
+       for {
+               if i := strings.LastIndex(sname, "."); i > -1 {
+                       sname = sname[:i]
+                       sec, err := s.f.GetSection(sname)
+                       if err != nil {
+                               continue
+                       }
+                       parentKeys = append(parentKeys, sec.Keys()...)
+               } else {
+                       break
+               }
+
+       }
+       return parentKeys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+       list := make([]string, len(s.keyList))
+       copy(list, s.keyList)
+       return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+       if s.f.BlockMode {
+               s.f.lock.RLock()
+               defer s.f.lock.RUnlock()
+       }
+
+       hash := map[string]string{}
+       for key, value := range s.keysHash {
+               hash[key] = value
+       }
+       return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+       if s.f.BlockMode {
+               s.f.lock.Lock()
+               defer s.f.lock.Unlock()
+       }
+
+       for i, k := range s.keyList {
+               if k == name {
+                       s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+                       delete(s.keys, name)
+                       return
+               }
+       }
+}
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
new file mode 100644 (file)
index 0000000..5ef38d8
--- /dev/null
@@ -0,0 +1,431 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+       "bytes"
+       "errors"
+       "fmt"
+       "reflect"
+       "strings"
+       "time"
+       "unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+       // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+       AllCapsUnderscore NameMapper = func(raw string) string {
+               newstr := make([]rune, 0, len(raw))
+               for i, chr := range raw {
+                       if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+                               if i > 0 {
+                                       newstr = append(newstr, '_')
+                               }
+                       }
+                       newstr = append(newstr, unicode.ToUpper(chr))
+               }
+               return string(newstr)
+       }
+       // TitleUnderscore converts to format title_underscore.
+       TitleUnderscore NameMapper = func(raw string) string {
+               newstr := make([]rune, 0, len(raw))
+               for i, chr := range raw {
+                       if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+                               if i > 0 {
+                                       newstr = append(newstr, '_')
+                               }
+                               chr -= ('A' - 'a')
+                       }
+                       newstr = append(newstr, chr)
+               }
+               return string(newstr)
+       }
+)
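+
+// As a quick illustration of the built-in mappers:
+//
+//      AllCapsUnderscore("MaxIdleConns")  // "MAX_IDLE_CONNS"
+//      TitleUnderscore("MaxIdleConns")    // "max_idle_conns"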
+
+func (s *Section) parseFieldName(raw, actual string) string {
+       if len(actual) > 0 {
+               return actual
+       }
+       if s.f.NameMapper != nil {
+               return s.f.NameMapper(raw)
+       }
+       return raw
+}
+
+func parseDelim(actual string) string {
+       if len(actual) > 0 {
+               return actual
+       }
+       return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setSliceWithProperType sets proper values to slice based on its type.
+func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+       strs := key.Strings(delim)
+       numVals := len(strs)
+       if numVals == 0 {
+               return nil
+       }
+
+       var vals interface{}
+
+       sliceOf := field.Type().Elem().Kind()
+       switch sliceOf {
+       case reflect.String:
+               vals = strs
+       case reflect.Int:
+               vals = key.Ints(delim)
+       case reflect.Int64:
+               vals = key.Int64s(delim)
+       case reflect.Uint:
+               vals = key.Uints(delim)
+       case reflect.Uint64:
+               vals = key.Uint64s(delim)
+       case reflect.Float64:
+               vals = key.Float64s(delim)
+       case reflectTime:
+               vals = key.Times(delim)
+       default:
+               return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+       }
+
+       slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+       for i := 0; i < numVals; i++ {
+               switch sliceOf {
+               case reflect.String:
+                       slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i]))
+               case reflect.Int:
+                       slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i]))
+               case reflect.Int64:
+                       slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i]))
+               case reflect.Uint:
+                       slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i]))
+               case reflect.Uint64:
+                       slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i]))
+               case reflect.Float64:
+                       slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i]))
+               case reflectTime:
+                       slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i]))
+               }
+       }
+       field.Set(slice)
+       return nil
+}
+
+// setWithProperType sets a proper value to the field based on its type,
+// but it does not return an error when parsing fails, because we want to
+// keep the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+       switch t.Kind() {
+       case reflect.String:
+               if len(key.String()) == 0 {
+                       return nil
+               }
+               field.SetString(key.String())
+       case reflect.Bool:
+               boolVal, err := key.Bool()
+               if err != nil {
+                       return nil
+               }
+               field.SetBool(boolVal)
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               durationVal, err := key.Duration()
+               // Skip zero value
+               if err == nil && int(durationVal) > 0 {
+                       field.Set(reflect.ValueOf(durationVal))
+                       return nil
+               }
+
+               intVal, err := key.Int64()
+               if err != nil || intVal == 0 {
+                       return nil
+               }
+               field.SetInt(intVal)
+       //      byte is an alias for uint8, so supporting uint8 breaks support for byte
+       case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+               durationVal, err := key.Duration()
+               // Skip zero value
+               if err == nil && int(durationVal) > 0 {
+                       field.Set(reflect.ValueOf(durationVal))
+                       return nil
+               }
+
+               uintVal, err := key.Uint64()
+               if err != nil {
+                       return nil
+               }
+               field.SetUint(uintVal)
+
+       case reflect.Float32, reflect.Float64:
+               floatVal, err := key.Float64()
+               if err != nil {
+                       return nil
+               }
+               field.SetFloat(floatVal)
+       case reflectTime:
+               timeVal, err := key.Time()
+               if err != nil {
+                       return nil
+               }
+               field.Set(reflect.ValueOf(timeVal))
+       case reflect.Slice:
+               return setSliceWithProperType(key, field, delim)
+       default:
+               return fmt.Errorf("unsupported type '%s'", t)
+       }
+       return nil
+}
+
+func (s *Section) mapTo(val reflect.Value) error {
+       if val.Kind() == reflect.Ptr {
+               val = val.Elem()
+       }
+       typ := val.Type()
+
+       for i := 0; i < typ.NumField(); i++ {
+               field := val.Field(i)
+               tpField := typ.Field(i)
+
+               tag := tpField.Tag.Get("ini")
+               if tag == "-" {
+                       continue
+               }
+
+               opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
+               fieldName := s.parseFieldName(tpField.Name, opts[0])
+               if len(fieldName) == 0 || !field.CanSet() {
+                       continue
+               }
+
+               isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+               isStruct := tpField.Type.Kind() == reflect.Struct
+               if isAnonymous {
+                       field.Set(reflect.New(tpField.Type.Elem()))
+               }
+
+               if isAnonymous || isStruct {
+                       if sec, err := s.f.GetSection(fieldName); err == nil {
+                               if err = sec.mapTo(field); err != nil {
+                                       return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+                               }
+                               continue
+                       }
+               }
+
+               if key, err := s.GetKey(fieldName); err == nil {
+                       if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+                               return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+                       }
+               }
+       }
+       return nil
+}
+
+// MapTo maps the section to the given struct.
+func (s *Section) MapTo(v interface{}) error {
+       typ := reflect.TypeOf(v)
+       val := reflect.ValueOf(v)
+       if typ.Kind() == reflect.Ptr {
+               typ = typ.Elem()
+               val = val.Elem()
+       } else {
+               return errors.New("cannot map to non-pointer struct")
+       }
+
+       return s.mapTo(val)
+}
+
+// MapTo maps the file to the given struct.
+func (f *File) MapTo(v interface{}) error {
+       return f.Section("").MapTo(v)
+}
+
+// MapToWithMapper maps data sources to the given struct with a name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+       cfg, err := Load(source, others...)
+       if err != nil {
+               return err
+       }
+       cfg.NameMapper = mapper
+       return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to the given struct.
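+//
+// A minimal usage sketch (the Config struct and file name are hypothetical,
+// for illustration only):
+//
+//      type Config struct {
+//              Name    string        `ini:"name"`
+//              Timeout time.Duration `ini:"timeout"`
+//      }
+//
+//      var cfg Config
+//      err := ini.MapTo(&cfg, "app.ini") // v must be a pointer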
+func MapTo(v, source interface{}, others ...interface{}) error {
+       return MapToWithMapper(v, nil, source, others...)
+}
+
+// reflectSliceWithProperType does the opposite of setSliceWithProperType.
+func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
+       slice := field.Slice(0, field.Len())
+       if field.Len() == 0 {
+               return nil
+       }
+
+       var buf bytes.Buffer
+       sliceOf := field.Type().Elem().Kind()
+       for i := 0; i < field.Len(); i++ {
+               switch sliceOf {
+               case reflect.String:
+                       buf.WriteString(slice.Index(i).String())
+               case reflect.Int, reflect.Int64:
+                       buf.WriteString(fmt.Sprint(slice.Index(i).Int()))
+               case reflect.Uint, reflect.Uint64:
+                       buf.WriteString(fmt.Sprint(slice.Index(i).Uint()))
+               case reflect.Float64:
+                       buf.WriteString(fmt.Sprint(slice.Index(i).Float()))
+               case reflectTime:
+                       buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339))
+               default:
+                       return fmt.Errorf("unsupported type '[]%s'", sliceOf)
+               }
+               buf.WriteString(delim)
+       }
+       key.SetValue(buf.String()[:buf.Len()-1])
+       return nil
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+       switch t.Kind() {
+       case reflect.String:
+               key.SetValue(field.String())
+       case reflect.Bool:
+               key.SetValue(fmt.Sprint(field.Bool()))
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               key.SetValue(fmt.Sprint(field.Int()))
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+               key.SetValue(fmt.Sprint(field.Uint()))
+       case reflect.Float32, reflect.Float64:
+               key.SetValue(fmt.Sprint(field.Float()))
+       case reflectTime:
+               key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339)))
+       case reflect.Slice:
+               return reflectSliceWithProperType(key, field, delim)
+       default:
+               return fmt.Errorf("unsupported type '%s'", t)
+       }
+       return nil
+}
+
+// CR: copied from encoding/json/encode.go with modifications for time.Time support.
+// TODO: add more test coverage.
+func isEmptyValue(v reflect.Value) bool {
+       switch v.Kind() {
+       case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+               return v.Len() == 0
+       case reflect.Bool:
+               return !v.Bool()
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+               return v.Int() == 0
+       case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+               return v.Uint() == 0
+       case reflect.Float32, reflect.Float64:
+               return v.Float() == 0
+       case reflectTime:
+               return v.Interface().(time.Time).IsZero()
+       case reflect.Interface, reflect.Ptr:
+               return v.IsNil()
+       }
+       return false
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+       if val.Kind() == reflect.Ptr {
+               val = val.Elem()
+       }
+       typ := val.Type()
+
+       for i := 0; i < typ.NumField(); i++ {
+               field := val.Field(i)
+               tpField := typ.Field(i)
+
+               tag := tpField.Tag.Get("ini")
+               if tag == "-" {
+                       continue
+               }
+
+               opts := strings.SplitN(tag, ",", 2)
+               if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) {
+                       continue
+               }
+
+               fieldName := s.parseFieldName(tpField.Name, opts[0])
+               if len(fieldName) == 0 || !field.CanSet() {
+                       continue
+               }
+
+               if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+                       (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") {
+                       // Note: The only possible error here is that the section doesn't exist.
+                       sec, err := s.f.GetSection(fieldName)
+                       if err != nil {
+                               // Note: fieldName can never be empty here, so the error can be ignored.
+                               sec, _ = s.f.NewSection(fieldName)
+                       }
+                       if err = sec.reflectFrom(field); err != nil {
+                               return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+                       }
+                       continue
+               }
+
+               // Note: Same reasoning as for sections above.
+               key, err := s.GetKey(fieldName)
+               if err != nil {
+                       key, _ = s.NewKey(fieldName, "")
+               }
+               if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+                       return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
+               }
+
+       }
+       return nil
+}
+
+// ReflectFrom reflects the section from the given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+       typ := reflect.TypeOf(v)
+       val := reflect.ValueOf(v)
+       if typ.Kind() == reflect.Ptr {
+               typ = typ.Elem()
+               val = val.Elem()
+       } else {
+               return errors.New("cannot reflect from non-pointer struct")
+       }
+
+       return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects the file from the given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+       return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from the given struct with a name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+       cfg.NameMapper = mapper
+       return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from the given struct.
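+//
+// A minimal usage sketch (the struct literal is hypothetical, for
+// illustration only):
+//
+//      cfg := ini.Empty()
+//      err := ini.ReflectFrom(cfg, &struct {
+//              Name string `ini:"name"`
+//      }{Name: "example"})
+//      // cfg now holds `name = example` in its default section.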
+func ReflectFrom(cfg *File, v interface{}) error {
+       return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE
new file mode 100644 (file)
index 0000000..c33dcc7
--- /dev/null
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/errwrap/README.md b/vendor/github.com/hashicorp/errwrap/README.md
new file mode 100644 (file)
index 0000000..1c95f59
--- /dev/null
@@ -0,0 +1,89 @@
+# errwrap
+
+`errwrap` is a package for Go that formalizes the pattern of wrapping errors
+and checking if an error contains another error.
+
+There is a common pattern in Go of taking a returned `error` value and
+then wrapping it (such as with `fmt.Errorf`) before returning it. The problem
+with this pattern is that you completely lose the original `error` structure.
+
+Arguably the _correct_ approach is that you should make a custom structure
+implementing the `error` interface, and have the original error as a field
+on that structure, such [as this example](http://golang.org/pkg/os/#PathError).
+This is a good approach, but it requires you to know the entire chain of
+possible rewrappings when you might only care about one of them.
+
+`errwrap` formalizes this pattern (it doesn't matter what approach you use
+above) by giving a single interface for wrapping errors, checking if a specific
+error is wrapped, and extracting that error.
+
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/errwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/errwrap
+
+## Usage
+
+#### Basic Usage
+
+Below is a very basic example of its usage:
+
+```go
+// A function that always returns an error, but wraps it, like a real
+// function might.
+func tryOpen() error {
+       _, err := os.Open("/i/dont/exist")
+       if err != nil {
+               return errwrap.Wrapf("Doesn't exist: {{err}}", err)
+       }
+
+       return nil
+}
+
+func main() {
+       err := tryOpen()
+
+       // We can use the Contains helpers to check if an error contains
+       // another error. It is safe to do this with a nil error, or with
+       // an error that doesn't even use the errwrap package.
+       if errwrap.Contains(err, "does not exist") {
+               // Do something
+       }
+       if errwrap.ContainsType(err, new(os.PathError)) {
+               // Do something
+       }
+
+       // Or we can use the associated `Get` functions to just extract
+       // a specific error. This would return nil if that specific error doesn't
+       // exist.
+       if perr := errwrap.GetType(err, new(os.PathError)); perr != nil {
+               // Do something with perr
+       }
+}
+```
+
+#### Custom Types
+
+If you're already making custom types that properly wrap errors, then
+you can get all the functionality of `errwrap.Contains` and friends by
+implementing the `Wrapper` interface with just one function. Example:
+
+```go
+type AppError struct {
+  Code ErrorCode
+  Err  error
+}
+
+// Error implements the error interface, so an *AppError can be passed
+// to the errwrap functions directly.
+func (e *AppError) Error() string {
+  return e.Err.Error()
+}
+
+func (e *AppError) WrappedErrors() []error {
+  return []error{e.Err}
+}
+```
+
+Now this works:
+
+```go
+err := &AppError{Err: fmt.Errorf("an error")}
+if errwrap.ContainsType(err, fmt.Errorf("")) {
+       // This will work!
+}
+```
diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go
new file mode 100644 (file)
index 0000000..a733bef
--- /dev/null
@@ -0,0 +1,169 @@
+// Package errwrap implements methods to formalize error wrapping in Go.
+//
+// All of the top-level functions that take an `error` are built to be able
+// to take any error, not just wrapped errors. This allows you to use errwrap
+// without having to type-check and type-cast everywhere.
+package errwrap
+
+import (
+       "errors"
+       "reflect"
+       "strings"
+)
+
+// WalkFunc is the callback called for Walk.
+type WalkFunc func(error)
+
+// Wrapper is an interface that can be implemented by custom types to
+// have all the Contains, Get, etc. functions in errwrap work.
+//
+// When Walk reaches a Wrapper, it will call the callback for every
+// wrapped error in addition to the wrapper itself. Since all the top-level
+// functions in errwrap use Walk, this means that all those functions work
+// with your custom type.
+type Wrapper interface {
+       WrappedErrors() []error
+}
+
+// Wrap defines that outer wraps inner, returning an error type that
+// can be cleanly used with the other methods in this package, such as
+// Contains, GetAll, etc.
+//
+// This function won't modify the error message at all (the outer message
+// will be used).
+func Wrap(outer, inner error) error {
+       return &wrappedError{
+               Outer: outer,
+               Inner: inner,
+       }
+}
+
+// Wrapf wraps an error with a formatting message. This is similar to using
+// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap
+// errors, you should replace it with this.
+//
+// format is the format of the error message. The string '{{err}}' will
+// be replaced with the original error message.
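+//
+// For example (a sketch; the message and error are illustrative):
+//
+//      err := errwrap.Wrapf("failed to open: {{err}}", os.ErrPermission)
+//      err.Error() // "failed to open: permission denied"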
+func Wrapf(format string, err error) error {
+       outerMsg := "<nil>"
+       if err != nil {
+               outerMsg = err.Error()
+       }
+
+       outer := errors.New(strings.Replace(
+               format, "{{err}}", outerMsg, -1))
+
+       return Wrap(outer, err)
+}
+
+// Contains checks if the given error contains an error with the
+// message msg. If err is not a wrapped error, this will always return
+// false unless the error itself happens to match this msg.
+func Contains(err error, msg string) bool {
+       return len(GetAll(err, msg)) > 0
+}
+
+// ContainsType checks if the given error contains an error with
+// the same concrete type as v. If err is not a wrapped error, this will
+// check the err itself.
+func ContainsType(err error, v interface{}) bool {
+       return len(GetAllType(err, v)) > 0
+}
+
+// Get is the same as GetAll but returns the deepest matching error.
+func Get(err error, msg string) error {
+       es := GetAll(err, msg)
+       if len(es) > 0 {
+               return es[len(es)-1]
+       }
+
+       return nil
+}
+
+// GetType is the same as GetAllType but returns the deepest matching error.
+func GetType(err error, v interface{}) error {
+       es := GetAllType(err, v)
+       if len(es) > 0 {
+               return es[len(es)-1]
+       }
+
+       return nil
+}
+
+// GetAll gets all the errors that might be wrapped in err with the
+// given message. The order of the errors is such that the outermost
+// matching error (the most recent wrap) is index zero, and so on.
+func GetAll(err error, msg string) []error {
+       var result []error
+
+       Walk(err, func(err error) {
+               if err.Error() == msg {
+                       result = append(result, err)
+               }
+       })
+
+       return result
+}
+
+// GetAllType gets all the errors that are the same type as v.
+//
+// The order of the return value is the same as described in GetAll.
+func GetAllType(err error, v interface{}) []error {
+       var result []error
+
+       var search string
+       if v != nil {
+               search = reflect.TypeOf(v).String()
+       }
+       Walk(err, func(err error) {
+               var needle string
+               if err != nil {
+                       needle = reflect.TypeOf(err).String()
+               }
+
+               if needle == search {
+                       result = append(result, err)
+               }
+       })
+
+       return result
+}
+
+// Walk walks all the wrapped errors in err and calls the callback. If
+// err isn't a wrapped error, the callback will be called once for err.
+// If err is a wrapped error, the callback will be called for both the
+// wrapper that implements error as well as the wrapped error itself.
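+//
+// For example, to collect every message in a wrapped chain (a sketch):
+//
+//      var msgs []string
+//      errwrap.Walk(err, func(e error) {
+//              msgs = append(msgs, e.Error())
+//      })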
+func Walk(err error, cb WalkFunc) {
+       if err == nil {
+               return
+       }
+
+       switch e := err.(type) {
+       case *wrappedError:
+               cb(e.Outer)
+               Walk(e.Inner, cb)
+       case Wrapper:
+               cb(err)
+
+               for _, err := range e.WrappedErrors() {
+                       Walk(err, cb)
+               }
+       default:
+               cb(err)
+       }
+}
+
+// wrappedError is an implementation of error that has both the
+// outer and inner errors.
+type wrappedError struct {
+       Outer error
+       Inner error
+}
+
+func (w *wrappedError) Error() string {
+       return w.Outer.Error()
+}
+
+func (w *wrappedError) WrappedErrors() []error {
+       return []error{w.Outer, w.Inner}
+}
diff --git a/vendor/github.com/hashicorp/go-getter/LICENSE b/vendor/github.com/hashicorp/go-getter/LICENSE
new file mode 100644 (file)
index 0000000..c33dcc7
--- /dev/null
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md
new file mode 100644 (file)
index 0000000..4a0b6a6
--- /dev/null
@@ -0,0 +1,253 @@
+# go-getter
+
+[![Build Status](http://img.shields.io/travis/hashicorp/go-getter.svg?style=flat-square)][travis]
+[![Build status](https://ci.appveyor.com/api/projects/status/ulq3qr43n62croyq/branch/master?svg=true)][appveyor]
+[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
+
+[travis]: http://travis-ci.org/hashicorp/go-getter
+[godocs]: http://godoc.org/github.com/hashicorp/go-getter
+[appveyor]: https://ci.appveyor.com/project/hashicorp/go-getter/branch/master
+
+go-getter is a library for Go (golang) for downloading files or directories
+from various sources using a URL as the primary form of input.
+
+The power of this library is its flexibility: it can download from a number
+of different sources (file paths, Git, HTTP, Mercurial, etc.) using a single
+string as input. This frees the implementer from having to know how to
+download from each kind of source.
+
+The concept of a _detector_ automatically turns invalid URLs into proper
+URLs. For example: "github.com/hashicorp/go-getter" would turn into a
+Git URL. Or "./foo" would turn into a file URL. These are extensible.
+
+This library is used by [Terraform](https://terraform.io) for
+downloading modules, [Otto](https://ottoproject.io) for dependencies and
+Appfile imports, and [Nomad](https://nomadproject.io) for downloading
+binaries.
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-getter).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-getter
+```
+
+go-getter also has a command you can use to test URL strings:
+
+```
+$ go install github.com/hashicorp/go-getter/cmd/go-getter
+...
+
+$ go-getter github.com/foo/bar ./foo
+...
+```
+
+The command is useful for verifying URL structures.
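+
+When used as a library, the main entry point is the `Get` function. Below is
+a minimal sketch; the destination directory and source URL are placeholders:
+
+```go
+package main
+
+import (
+       "log"
+
+       "github.com/hashicorp/go-getter"
+)
+
+func main() {
+       // Get downloads the directory at the source URL into the destination
+       // path, running detectors and unarchiving as described below.
+       if err := getter.Get("./dst", "github.com/hashicorp/go-getter"); err != nil {
+               log.Fatal(err)
+       }
+}
+```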
+
+## URL Format
+
+go-getter uses a single string URL as input to download from a variety of
+protocols. go-getter has various "tricks" with this URL to do certain things.
+This section documents the URL format.
+
+### Supported Protocols and Detectors
+
+**Protocols** are used to download files/directories using a specific
+mechanism. Example protocols are Git and HTTP.
+
+**Detectors** are used to transform a valid or invalid URL into another
+URL if it matches a certain pattern. Example: "github.com/user/repo" is
+automatically transformed into a fully valid Git URL. This allows go-getter
+to be very user friendly.
+
+go-getter out of the box supports the following protocols. Additional protocols
+can be augmented at runtime by implementing the `Getter` interface.
+
+  * Local files
+  * Git
+  * Mercurial
+  * HTTP
+  * Amazon S3
+
+In addition to the above protocols, go-getter has what are called "detectors."
+These take a URL and attempt to automatically choose the best protocol for
+it, which might involve even changing the protocol. The following detection
+is built-in by default:
+
+  * File paths such as "./foo" are automatically changed to absolute
+    file URLs.
+  * GitHub URLs, such as "github.com/mitchellh/vagrant" are automatically
+    changed to Git protocol over HTTP.
+  * BitBucket URLs, such as "bitbucket.org/mitchellh/vagrant" are automatically
+    changed to a Git or Mercurial protocol using the BitBucket API.
+
+### Forced Protocol
+
+In some cases, the protocol to use is ambiguous depending on the source
+URL. For example, "http://github.com/mitchellh/vagrant.git" could reference
+an HTTP URL or a Git URL. Forced protocol syntax is used to disambiguate this
+URL.
+
+Forced protocol can be done by prefixing the URL with the protocol followed
+by double colons. For example: `git::http://github.com/mitchellh/vagrant.git`
+would download the given HTTP URL using the Git protocol.
+
+Forced protocols will also override any detectors.
+
+In the absence of a forced protocol, detectors may be run on the URL,
+transforming the protocol anyway. The above example would've used the Git
+protocol either way, since the Git detector would've recognized it as a
+GitHub URL.
+
+### Protocol-Specific Options
+
+Each protocol can support protocol-specific options to configure that
+protocol. For example, the `git` protocol supports specifying a `ref`
+query parameter that tells it what ref to checkout for that Git
+repository.
+
+The options are specified as query parameters on the URL (or URL-like string)
+given to go-getter. Using the Git example above, the URL below is a valid
+input to go-getter:
+
+    github.com/hashicorp/go-getter?ref=abcd1234
+
+The protocol-specific options are documented below the URL format
+section. But because they are part of the URL, we point them out here so
+you know they exist.
+
+### Checksumming
+
+For file downloads of any protocol, go-getter can automatically verify
+a checksum for you. Note that checksumming only works for downloading files,
+not directories, but checksumming will work for any protocol.
+
+To checksum a file, append a `checksum` query parameter to the URL.
+The parameter value should be in the format `type:value`, where
+`type` is "md5", "sha1", "sha256", or "sha512". The `value` should be
+the actual checksum value. go-getter will parse out this query parameter
+automatically and use it to verify the checksum. An example URL
+is shown below:
+
+```
+./foo.txt?checksum=md5:b7d96c89d09d9e204f5fedc4d5d55b21
+```
+
+The checksum query parameter is never sent to the backend protocol
+implementation. It is used at a higher level by go-getter itself.
+
+### Unarchiving
+
+go-getter will automatically unarchive files into a file or directory
+based on the extension of the file being requested (over any protocol).
+This works for both file and directory downloads.
+
+go-getter looks for an `archive` query parameter to specify the format of
+the archive. If this isn't specified, go-getter will use the extension of
+the path to see if it appears archived. Unarchiving can be explicitly
+disabled by setting the `archive` query parameter to `false`.
+
+The following archive formats are supported:
+
+  * `tar.gz` and `tgz`
+  * `tar.bz2` and `tbz2`
+  * `zip`
+  * `gz`
+  * `bz2`
+
+For example:
+
+```
+./foo.zip
+```
+
+This will automatically be inferred to be a ZIP file and will be extracted.
+You can also be explicit about the archive type:
+
+```
+./some/other/path?archive=zip
+```
+
+And finally, you can disable archiving completely:
+
+```
+./some/path?archive=false
+```
+
+You can combine unarchiving with the other features of go-getter such
+as checksumming. The special `archive` query parameter will be removed
+from the URL before going to the final protocol downloader.
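+
+For instance, a sketch that fetches and unpacks the explicit-`zip` example
+above (the paths and `Pwd` value are illustrative):
+
+```
+client := &getter.Client{
+	Src:  "./some/other/path?archive=zip",
+	Dst:  "./unpacked",
+	Pwd:  "/path/to/working/dir",
+	Mode: getter.ClientModeDir,
+}
+// The archive parameter is stripped, the file is downloaded to a
+// temporary path, and the zip is unpacked into the destination.
+err := client.Get()
+```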
+
+## Protocol-Specific Options
+
+This section documents the protocol-specific options that can be specified
+for go-getter. These options should be appended to the input as normal query
+parameters. Depending on the usage of go-getter, applications may provide
+alternate ways of inputting options. For example, [Nomad](https://www.nomadproject.io)
+provides a nice options block for specifying options rather than in the URL.
+
+### General (All Protocols)
+
+The options below are available to all protocols:
+
+  * `archive` - The archive format to use to unarchive this file, or "" (empty
+    string) to disable unarchiving. For more details, see the complete section
+    on archive support above.
+
+  * `checksum` - Checksum to verify the downloaded file or archive. See
+    the entire section on checksumming above for format and more details.
+
+### Local Files (`file`)
+
+None
+
+### Git (`git`)
+
+  * `ref` - The Git ref to checkout. This is a ref, so it can point to
+    a commit SHA, a branch name, etc. If it is a named ref such as a branch
+    name, go-getter will update it to the latest on each get.
+
+  * `sshkey` - An SSH private key to use during clones. The provided key must
+    be a base64-encoded string. For example, to generate a suitable `sshkey`
+    from a private key file on disk, you would run `base64 -w0 <file>`.
+
+    **Note**: Git 2.3+ is required to use this feature.
+
+### Mercurial (`hg`)
+
+  * `rev` - The Mercurial revision to checkout.
+
+### HTTP (`http`)
+
+None
+
+### S3 (`s3`)
+
+S3 takes various access configurations in the URL. Note that it will also
+read these from the standard AWS environment variables if they're set. If
+the query parameters are present, they take priority.
+
+  * `aws_access_key_id` - AWS access key.
+  * `aws_access_key_secret` - AWS access key secret.
+  * `aws_access_token` - AWS access token if this is being used.
+
+#### Using IAM Instance Profiles with S3
+
+If you use go-getter and want to use an EC2 IAM Instance Profile to avoid
+using credentials, then just omit the parameters above, and the instance
+profile, if available, will be used automatically.
+
+#### S3 Bucket Examples
+
+S3 has several addressing schemes used to reference your bucket. These are
+listed here: http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro
+
+Some examples for these addressing schemes:
+
+  * s3::https://s3.amazonaws.com/bucket/foo
+  * s3::https://s3-eu-west-1.amazonaws.com/bucket/foo
+  * bucket.s3.amazonaws.com/foo
+  * bucket.s3-eu-west-1.amazonaws.com/foo/bar
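+
+As a sketch, the first addressing scheme above can be fetched with the
+forced `s3::` protocol (the destination is illustrative; credentials come
+from the query parameters or the AWS environment variables):
+
+```
+err := getter.Get("./bucket-foo", "s3::https://s3.amazonaws.com/bucket/foo")
+```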
+
diff --git a/vendor/github.com/hashicorp/go-getter/appveyor.yml b/vendor/github.com/hashicorp/go-getter/appveyor.yml
new file mode 100644 (file)
index 0000000..159dad4
--- /dev/null
@@ -0,0 +1,16 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\github.com\hashicorp\go-getter
+environment:
+  GOPATH: c:\gopath
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -d -v -t ./...
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/go-getter/client.go b/vendor/github.com/hashicorp/go-getter/client.go
new file mode 100644 (file)
index 0000000..876812a
--- /dev/null
@@ -0,0 +1,335 @@
+package getter
+
+import (
+       "bytes"
+       "crypto/md5"
+       "crypto/sha1"
+       "crypto/sha256"
+       "crypto/sha512"
+       "encoding/hex"
+       "fmt"
+       "hash"
+       "io"
+       "io/ioutil"
+       "os"
+       "path/filepath"
+       "strconv"
+       "strings"
+
+       urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// Client is a client for downloading things.
+//
+// Top-level functions such as Get are shortcuts for interacting with a client.
+// Using a client directly allows more fine-grained control over how downloading
+// is done, as well as customizing the protocols supported.
+type Client struct {
+       // Src is the source URL to get.
+       //
+       // Dst is the path to save the downloaded thing as. If Dir is set to
+       // true, then this should be a directory. If the directory doesn't exist,
+       // it will be created for you.
+       //
+       // Pwd is the working directory for detection. If this isn't set, some
+       // detection may fail. Client will not default pwd to the current
+       // working directory for security reasons.
+       Src string
+       Dst string
+       Pwd string
+
+       // Mode is the method of download the client will use. See ClientMode
+       // for documentation.
+       Mode ClientMode
+
+       // Detectors is the list of detectors that are tried on the source.
+       // If this is nil, then the default Detectors will be used.
+       Detectors []Detector
+
+       // Decompressors is the map of decompressors supported by this client.
+       // If this is nil, then the default value is the Decompressors global.
+       Decompressors map[string]Decompressor
+
+       // Getters is the map of protocols supported by this client. If this
+       // is nil, then the default Getters variable will be used.
+       Getters map[string]Getter
+
+       // Dir, if true, tells the Client it is downloading a directory (versus
+       // a single file). This distinction is necessary since file and
+       // directory names follow the same format, so disambiguating them is
+       // impossible without knowing ahead of time.
+       //
+       // WARNING: deprecated. If Mode is set, that will take precedence.
+       Dir bool
+}
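+
+// An illustrative configuration sketch (paths are examples only):
+//
+//	client := &Client{
+//		Src:  "github.com/hashicorp/go-getter?ref=abcd1234",
+//		Dst:  "/tmp/go-getter",
+//		Pwd:  "/tmp",
+//		Mode: ClientModeDir,
+//	}
+//	err := client.Get()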
+
+// Get downloads the configured source to the destination.
+func (c *Client) Get() error {
+       // Store this locally since there are cases we swap this
+       mode := c.Mode
+       if mode == ClientModeInvalid {
+               if c.Dir {
+                       mode = ClientModeDir
+               } else {
+                       mode = ClientModeFile
+               }
+       }
+
+       // Default decompressor value
+       decompressors := c.Decompressors
+       if decompressors == nil {
+               decompressors = Decompressors
+       }
+
+       // Detect the URL. This is safe if it is already detected.
+       detectors := c.Detectors
+       if detectors == nil {
+               detectors = Detectors
+       }
+       src, err := Detect(c.Src, c.Pwd, detectors)
+       if err != nil {
+               return err
+       }
+
+       // Determine if we have a forced protocol, i.e. "git::http://..."
+       force, src := getForcedGetter(src)
+
+       // If there is a subdir component, then we download the root separately
+       // and then copy over the proper subdir.
+       var realDst string
+       dst := c.Dst
+       src, subDir := SourceDirSubdir(src)
+       if subDir != "" {
+               tmpDir, err := ioutil.TempDir("", "tf")
+               if err != nil {
+                       return err
+               }
+               if err := os.RemoveAll(tmpDir); err != nil {
+                       return err
+               }
+               defer os.RemoveAll(tmpDir)
+
+               realDst = dst
+               dst = tmpDir
+       }
+
+       u, err := urlhelper.Parse(src)
+       if err != nil {
+               return err
+       }
+       if force == "" {
+               force = u.Scheme
+       }
+
+       getters := c.Getters
+       if getters == nil {
+               getters = Getters
+       }
+
+       g, ok := getters[force]
+       if !ok {
+               return fmt.Errorf(
+                       "download not supported for scheme '%s'", force)
+       }
+
+       // We have magic query parameters that we use to signal different features
+       q := u.Query()
+
+       // Determine if we have an archive type
+       archiveV := q.Get("archive")
+       if archiveV != "" {
+               // Delete the parameter since it is a magic parameter we don't
+               // want to pass on to the Getter
+               q.Del("archive")
+               u.RawQuery = q.Encode()
+
+               // If we can parse the value as a bool and it is false, then
+               // set the archive to "-" which should never map to a decompressor
+               if b, err := strconv.ParseBool(archiveV); err == nil && !b {
+                       archiveV = "-"
+               }
+       }
+       if archiveV == "" {
+               // No explicit archive type; check whether the filename implies one.
+               matchingLen := 0
+               for k := range decompressors {
+                       if strings.HasSuffix(u.Path, "."+k) && len(k) > matchingLen {
+                               archiveV = k
+                               matchingLen = len(k)
+                       }
+               }
+       }
+
+       // If we have a decompressor, then we need to change the destination
+       // to download to a temporary path. We unarchive this into the final,
+       // real path.
+       var decompressDst string
+       var decompressDir bool
+       decompressor := decompressors[archiveV]
+       if decompressor != nil {
+               // Create a temporary directory to store our archive. We delete
+               // this at the end of everything.
+               td, err := ioutil.TempDir("", "getter")
+               if err != nil {
+                       return fmt.Errorf(
+                               "Error creating temporary directory for archive: %s", err)
+               }
+               defer os.RemoveAll(td)
+
+               // Swap the download directory to be our temporary path and
+               // store the old values.
+               decompressDst = dst
+               decompressDir = mode != ClientModeFile
+               dst = filepath.Join(td, "archive")
+               mode = ClientModeFile
+       }
+
+       // Determine if we have a checksum
+       var checksumHash hash.Hash
+       var checksumValue []byte
+       if v := q.Get("checksum"); v != "" {
+               // Delete the query parameter if we have it.
+               q.Del("checksum")
+               u.RawQuery = q.Encode()
+
+               // Determine the checksum hash type
+               checksumType := ""
+               idx := strings.Index(v, ":")
+               if idx > -1 {
+                       checksumType = v[:idx]
+               }
+               switch checksumType {
+               case "md5":
+                       checksumHash = md5.New()
+               case "sha1":
+                       checksumHash = sha1.New()
+               case "sha256":
+                       checksumHash = sha256.New()
+               case "sha512":
+                       checksumHash = sha512.New()
+               default:
+                       return fmt.Errorf(
+                               "unsupported checksum type: %s", checksumType)
+               }
+
+               // Get the remainder of the value and parse it into bytes
+               b, err := hex.DecodeString(v[idx+1:])
+               if err != nil {
+                       return fmt.Errorf("invalid checksum: %s", err)
+               }
+
+               // Set our value
+               checksumValue = b
+       }
+
+       if mode == ClientModeAny {
+               // Ask the getter which client mode to use
+               mode, err = g.ClientMode(u)
+               if err != nil {
+                       return err
+               }
+
+               // Destination is the base name of the URL path in "any" mode when
+               // a file source is detected.
+               if mode == ClientModeFile {
+                       dst = filepath.Join(dst, filepath.Base(u.Path))
+               }
+       }
+
+       // If we're not downloading a directory, then just download the file
+       // and return.
+       if mode == ClientModeFile {
+               err := g.GetFile(dst, u)
+               if err != nil {
+                       return err
+               }
+
+               if checksumHash != nil {
+                       if err := checksum(dst, checksumHash, checksumValue); err != nil {
+                               return err
+                       }
+               }
+
+               if decompressor != nil {
+                       // We have a decompressor, so decompress the current destination
+                       // into the final destination with the proper mode.
+                       err := decompressor.Decompress(decompressDst, dst, decompressDir)
+                       if err != nil {
+                               return err
+                       }
+
+                       // Swap the information back
+                       dst = decompressDst
+                       if decompressDir {
+                               mode = ClientModeAny
+                       } else {
+                               mode = ClientModeFile
+                       }
+               }
+
+               // We check the mode again because it can be switched back
+               // if we were unarchiving. If we're still only Get-ing a file, then
+               // we're done.
+               if mode == ClientModeFile {
+                       return nil
+               }
+       }
+
+       // If we're at this point, we're either downloading a directory or we've
+       // downloaded and unarchived a directory and we're just handling the
+       // subdir. If we have a decompressor, we don't Get here because the Get
+       // already happened above.
+       if decompressor == nil {
+               // If we're getting a directory, then this is an error. You cannot
+               // checksum a directory. TODO: test
+               if checksumHash != nil {
+                       return fmt.Errorf(
+                               "checksum cannot be specified for directory download")
+               }
+
+               // We're downloading a directory, which might require a bit more work
+               // if we're specifying a subdir.
+               err := g.Get(dst, u)
+               if err != nil {
+                       err = fmt.Errorf("error downloading '%s': %s", src, err)
+                       return err
+               }
+       }
+
+       // If we have a subdir, copy that over
+       if subDir != "" {
+               if err := os.RemoveAll(realDst); err != nil {
+                       return err
+               }
+               if err := os.MkdirAll(realDst, 0755); err != nil {
+                       return err
+               }
+
+               return copyDir(realDst, filepath.Join(dst, subDir), false)
+       }
+
+       return nil
+}
+
+// checksum is a simple method to compute the checksum of a source file
+// and compare it to the given expected value.
+func checksum(source string, h hash.Hash, v []byte) error {
+       f, err := os.Open(source)
+       if err != nil {
+               return fmt.Errorf("Failed to open file for checksum: %s", err)
+       }
+       defer f.Close()
+
+       if _, err := io.Copy(h, f); err != nil {
+               return fmt.Errorf("Failed to hash: %s", err)
+       }
+
+       if actual := h.Sum(nil); !bytes.Equal(actual, v) {
+               return fmt.Errorf(
+                       "Checksums did not match.\nExpected: %s\nGot: %s",
+                       hex.EncodeToString(v),
+                       hex.EncodeToString(actual))
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/client_mode.go b/vendor/github.com/hashicorp/go-getter/client_mode.go
new file mode 100644 (file)
index 0000000..7f02509
--- /dev/null
@@ -0,0 +1,24 @@
+package getter
+
+// ClientMode is the mode that the client operates in.
+type ClientMode uint
+
+const (
+       // ClientModeInvalid is the zero value. Client.Get falls back to the
+       // deprecated Dir field when the mode is left unset.
+       ClientModeInvalid ClientMode = iota
+
+       // ClientModeAny downloads anything it can. In this mode, dst must
+       // be a directory. If src is a file, it is saved into the directory
+       // with the basename of the URL. If src is a directory or archive,
+       // it is unpacked directly into dst.
+       ClientModeAny
+
+       // ClientModeFile downloads a single file. In this mode, dst must
+       // be a file path (doesn't have to exist). src must point to a single
+       // file. It is saved as dst.
+       ClientModeFile
+
+       // ClientModeDir downloads a directory. In this mode, dst must be
+       // a directory path (doesn't have to exist). src must point to an
+       // archive or directory (such as in s3).
+       ClientModeDir
+)
diff --git a/vendor/github.com/hashicorp/go-getter/copy_dir.go b/vendor/github.com/hashicorp/go-getter/copy_dir.go
new file mode 100644 (file)
index 0000000..2f58e8a
--- /dev/null
@@ -0,0 +1,78 @@
+package getter
+
+import (
+       "io"
+       "os"
+       "path/filepath"
+       "strings"
+)
+
+// copyDir copies the src directory contents into dst. Both directories
+// should already exist.
+//
+// If ignoreDot is set to true, then dot-prefixed files/folders are ignored.
+func copyDir(dst string, src string, ignoreDot bool) error {
+       src, err := filepath.EvalSymlinks(src)
+       if err != nil {
+               return err
+       }
+
+       walkFn := func(path string, info os.FileInfo, err error) error {
+               if err != nil {
+                       return err
+               }
+               if path == src {
+                       return nil
+               }
+
+               if ignoreDot && strings.HasPrefix(filepath.Base(path), ".") {
+                       // Skip any dot files
+                       if info.IsDir() {
+                               return filepath.SkipDir
+                       } else {
+                               return nil
+                       }
+               }
+
+               // The "path" has the src prefixed to it. We need to join our
+               // destination with the path without the src on it.
+               dstPath := filepath.Join(dst, path[len(src):])
+
+               // If we have a directory, make that subdirectory, then continue
+               // the walk.
+               if info.IsDir() {
+                       if path == filepath.Join(src, dst) {
+                               // dst is in src; don't walk it.
+                               return nil
+                       }
+
+                       if err := os.MkdirAll(dstPath, 0755); err != nil {
+                               return err
+                       }
+
+                       return nil
+               }
+
+               // If we have a file, copy the contents.
+               srcF, err := os.Open(path)
+               if err != nil {
+                       return err
+               }
+               defer srcF.Close()
+
+               dstF, err := os.Create(dstPath)
+               if err != nil {
+                       return err
+               }
+               defer dstF.Close()
+
+               if _, err := io.Copy(dstF, srcF); err != nil {
+                       return err
+               }
+
+               // Chmod it
+               return os.Chmod(dstPath, info.Mode())
+       }
+
+       return filepath.Walk(src, walkFn)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress.go b/vendor/github.com/hashicorp/go-getter/decompress.go
new file mode 100644 (file)
index 0000000..d18174c
--- /dev/null
@@ -0,0 +1,29 @@
+package getter
+
+// Decompressor defines the interface that must be implemented to add
+// support for decompressing a type.
+type Decompressor interface {
+       // Decompress should decompress src to dst. dir specifies whether dst
+       // is a directory or single file. src is guaranteed to be a single file
+       // that exists. dst is not guaranteed to exist already.
+       Decompress(dst, src string, dir bool) error
+}
+
+// Decompressors is the mapping of extension to the Decompressor implementation
+// that will decompress that extension/type.
+var Decompressors map[string]Decompressor
+
+func init() {
+       tbzDecompressor := new(TarBzip2Decompressor)
+       tgzDecompressor := new(TarGzipDecompressor)
+
+       Decompressors = map[string]Decompressor{
+               "bz2":     new(Bzip2Decompressor),
+               "gz":      new(GzipDecompressor),
+               "tar.bz2": tbzDecompressor,
+               "tar.gz":  tgzDecompressor,
+               "tbz2":    tbzDecompressor,
+               "tgz":     tgzDecompressor,
+               "zip":     new(ZipDecompressor),
+       }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go b/vendor/github.com/hashicorp/go-getter/decompress_bzip2.go
new file mode 100644 (file)
index 0000000..339f4cf
--- /dev/null
@@ -0,0 +1,45 @@
+package getter
+
+import (
+       "compress/bzip2"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+)
+
+// Bzip2Decompressor is an implementation of Decompressor that can
+// decompress bz2 files.
+type Bzip2Decompressor struct{}
+
+func (d *Bzip2Decompressor) Decompress(dst, src string, dir bool) error {
+       // Directory isn't supported at all
+       if dir {
+               return fmt.Errorf("bzip2-compressed files can only unarchive to a single file")
+       }
+
+       // Create the enclosing directories for the destination file
+       if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+               return err
+       }
+
+       // File first
+       f, err := os.Open(src)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+
+       // Bzip2 compression is second
+       bzipR := bzip2.NewReader(f)
+
+       // Copy it out
+       dstF, err := os.Create(dst)
+       if err != nil {
+               return err
+       }
+       defer dstF.Close()
+
+       _, err = io.Copy(dstF, bzipR)
+       return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_gzip.go b/vendor/github.com/hashicorp/go-getter/decompress_gzip.go
new file mode 100644 (file)
index 0000000..2001054
--- /dev/null
@@ -0,0 +1,49 @@
+package getter
+
+import (
+       "compress/gzip"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+)
+
+// GzipDecompressor is an implementation of Decompressor that can
+// decompress gzip files.
+type GzipDecompressor struct{}
+
+func (d *GzipDecompressor) Decompress(dst, src string, dir bool) error {
+       // Directory isn't supported at all
+       if dir {
+               return fmt.Errorf("gzip-compressed files can only unarchive to a single file")
+       }
+
+       // Create the enclosing directories for the destination file
+       if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+               return err
+       }
+
+       // File first
+       f, err := os.Open(src)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+
+       // gzip compression is second
+       gzipR, err := gzip.NewReader(f)
+       if err != nil {
+               return err
+       }
+       defer gzipR.Close()
+
+       // Copy it out
+       dstF, err := os.Create(dst)
+       if err != nil {
+               return err
+       }
+       defer dstF.Close()
+
+       _, err = io.Copy(dstF, gzipR)
+       return err
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go b/vendor/github.com/hashicorp/go-getter/decompress_tbz2.go
new file mode 100644 (file)
index 0000000..c46ed44
--- /dev/null
@@ -0,0 +1,95 @@
+package getter
+
+import (
+       "archive/tar"
+       "compress/bzip2"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+)
+
+// TarBzip2Decompressor is an implementation of Decompressor that can
+// decompress tar.bz2 files.
+type TarBzip2Decompressor struct{}
+
+func (d *TarBzip2Decompressor) Decompress(dst, src string, dir bool) error {
+       // If we're going into a directory we should make that first
+       mkdir := dst
+       if !dir {
+               mkdir = filepath.Dir(dst)
+       }
+       if err := os.MkdirAll(mkdir, 0755); err != nil {
+               return err
+       }
+
+       // File first
+       f, err := os.Open(src)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+
+       // Bzip2 compression is second
+       bzipR := bzip2.NewReader(f)
+
+       // Once bzip2-decompressed, we have a tar stream
+       tarR := tar.NewReader(bzipR)
+       done := false
+       for {
+               hdr, err := tarR.Next()
+               if err == io.EOF {
+                       if !done {
+                               // Empty archive
+                               return fmt.Errorf("empty archive: %s", src)
+                       }
+
+                       return nil
+               }
+               if err != nil {
+                       return err
+               }
+
+               path := dst
+               if dir {
+                       path = filepath.Join(path, hdr.Name)
+               }
+
+               if hdr.FileInfo().IsDir() {
+                       if !dir {
+                               return fmt.Errorf("expected a single file: %s", src)
+                       }
+
+                       // A directory, just make the directory and continue unarchiving...
+                       if err := os.MkdirAll(path, 0755); err != nil {
+                               return err
+                       }
+
+                       continue
+               }
+
+               // We have a file. If we already decoded, then it is an error
+               if !dir && done {
+                       return fmt.Errorf("expected a single file, got multiple: %s", src)
+               }
+
+               // Mark that we've seen a file so any further file in single-file mode is an error
+               done = true
+
+               // Open the file for writing
+               dstF, err := os.Create(path)
+               if err != nil {
+                       return err
+               }
+               _, err = io.Copy(dstF, tarR)
+               dstF.Close()
+               if err != nil {
+                       return err
+               }
+
+               // Chmod the file
+               if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
+                       return err
+               }
+       }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_testing.go b/vendor/github.com/hashicorp/go-getter/decompress_testing.go
new file mode 100644 (file)
index 0000000..686d6c2
--- /dev/null
@@ -0,0 +1,134 @@
+package getter
+
+import (
+       "crypto/md5"
+       "encoding/hex"
+       "io"
+       "io/ioutil"
+       "os"
+       "path/filepath"
+       "reflect"
+       "runtime"
+       "sort"
+       "strings"
+       "testing"
+)
+
+// TestDecompressCase is a single test case for testing decompressors
+type TestDecompressCase struct {
+       Input   string   // Input is the complete path to the input file
+       Dir     bool     // Dir is whether or not we're testing directory mode
+       Err     bool     // Err is whether we expect an error or not
+       DirList []string // DirList is the list of files for Dir mode
+       FileMD5 string   // FileMD5 is the expected MD5 for a single file
+}
+
+// TestDecompressor is a helper function for testing generic decompressors.
+func TestDecompressor(t *testing.T, d Decompressor, cases []TestDecompressCase) {
+       for _, tc := range cases {
+               t.Logf("Testing: %s", tc.Input)
+
+               // Temporary dir to store stuff
+               td, err := ioutil.TempDir("", "getter")
+               if err != nil {
+                       t.Fatalf("err: %s", err)
+               }
+
+               // Destination always joins "result" so that we get a fresh path
+               dst := filepath.Join(td, "subdir", "result")
+
+               // We use a function so defers work
+               func() {
+                       defer os.RemoveAll(td)
+
+                       // Decompress
+                       err := d.Decompress(dst, tc.Input, tc.Dir)
+                       if (err != nil) != tc.Err {
+                               t.Fatalf("err %s: %s", tc.Input, err)
+                       }
+                       if tc.Err {
+                               return
+                       }
+
+                       // If it isn't a directory, then check for a single file
+                       if !tc.Dir {
+                               fi, err := os.Stat(dst)
+                               if err != nil {
+                                       t.Fatalf("err %s: %s", tc.Input, err)
+                               }
+                               if fi.IsDir() {
+                                       t.Fatalf("err %s: expected file, got directory", tc.Input)
+                               }
+                               if tc.FileMD5 != "" {
+                                       actual := testMD5(t, dst)
+                                       expected := tc.FileMD5
+                                       if actual != expected {
+                                               t.Fatalf("err %s: expected MD5 %s, got %s", tc.Input, expected, actual)
+                                       }
+                               }
+
+                               return
+                       }
+
+                       // Convert expected for windows
+                       expected := tc.DirList
+                       if runtime.GOOS == "windows" {
+                               for i, v := range expected {
+                                       expected[i] = strings.Replace(v, "/", "\\", -1)
+                               }
+                       }
+
+                       // Directory, check for the correct contents
+                       actual := testListDir(t, dst)
+                       if !reflect.DeepEqual(actual, expected) {
+                               t.Fatalf("bad %s\n\n%#v\n\n%#v", tc.Input, actual, expected)
+                       }
+               }()
+       }
+}
+
+func testListDir(t *testing.T, path string) []string {
+       var result []string
+       err := filepath.Walk(path, func(sub string, info os.FileInfo, err error) error {
+               if err != nil {
+                       return err
+               }
+
+               sub = strings.TrimPrefix(sub, path)
+               if sub == "" {
+                       return nil
+               }
+               sub = sub[1:] // Trim the leading path sep.
+
+               // If it is a dir, add trailing sep
+               if info.IsDir() {
+                       sub += "/"
+               }
+
+               result = append(result, sub)
+               return nil
+       })
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+
+       sort.Strings(result)
+       return result
+}
+
+func testMD5(t *testing.T, path string) string {
+       f, err := os.Open(path)
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+       defer f.Close()
+
+       h := md5.New()
+       _, err = io.Copy(h, f)
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+
+       result := h.Sum(nil)
+       return hex.EncodeToString(result)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_tgz.go b/vendor/github.com/hashicorp/go-getter/decompress_tgz.go
new file mode 100644 (file)
index 0000000..e8b1c31
--- /dev/null
@@ -0,0 +1,99 @@
+package getter
+
+import (
+       "archive/tar"
+       "compress/gzip"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+)
+
+// TarGzipDecompressor is an implementation of Decompressor that can
+// decompress tar.gz files.
+type TarGzipDecompressor struct{}
+
+func (d *TarGzipDecompressor) Decompress(dst, src string, dir bool) error {
+       // If we're going into a directory we should make that first
+       mkdir := dst
+       if !dir {
+               mkdir = filepath.Dir(dst)
+       }
+       if err := os.MkdirAll(mkdir, 0755); err != nil {
+               return err
+       }
+
+       // File first
+       f, err := os.Open(src)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+
+       // Gzip compression is second
+       gzipR, err := gzip.NewReader(f)
+       if err != nil {
+               return fmt.Errorf("Error opening a gzip reader for %s: %s", src, err)
+       }
+       defer gzipR.Close()
+
+       // Once gzip-decompressed, we have a tar stream
+       tarR := tar.NewReader(gzipR)
+       done := false
+       for {
+               hdr, err := tarR.Next()
+               if err == io.EOF {
+                       if !done {
+                               // Empty archive
+                               return fmt.Errorf("empty archive: %s", src)
+                       }
+
+                       return nil
+               }
+               if err != nil {
+                       return err
+               }
+
+               path := dst
+               if dir {
+                       path = filepath.Join(path, hdr.Name)
+               }
+
+               if hdr.FileInfo().IsDir() {
+                       if !dir {
+                               return fmt.Errorf("expected a single file: %s", src)
+                       }
+
+                       // A directory, just make the directory and continue unarchiving...
+                       if err := os.MkdirAll(path, 0755); err != nil {
+                               return err
+                       }
+
+                       continue
+               }
+
+               // We have a file. If we already decoded, then it is an error
+               if !dir && done {
+                       return fmt.Errorf("expected a single file, got multiple: %s", src)
+               }
+
+               // Mark that we've seen a file so any further file in single-file mode is an error
+               done = true
+
+               // Open the file for writing
+               dstF, err := os.Create(path)
+               if err != nil {
+                       return err
+               }
+               _, err = io.Copy(dstF, tarR)
+               dstF.Close()
+               if err != nil {
+                       return err
+               }
+
+               // Chmod the file
+               if err := os.Chmod(path, hdr.FileInfo().Mode()); err != nil {
+                       return err
+               }
+       }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/decompress_zip.go b/vendor/github.com/hashicorp/go-getter/decompress_zip.go
new file mode 100644 (file)
index 0000000..a065c07
--- /dev/null
@@ -0,0 +1,96 @@
+package getter
+
+import (
+       "archive/zip"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+)
+
+// ZipDecompressor is an implementation of Decompressor that can
+// decompress zip files.
+type ZipDecompressor struct{}
+
+func (d *ZipDecompressor) Decompress(dst, src string, dir bool) error {
+       // If we're going into a directory we should make that first
+       mkdir := dst
+       if !dir {
+               mkdir = filepath.Dir(dst)
+       }
+       if err := os.MkdirAll(mkdir, 0755); err != nil {
+               return err
+       }
+
+       // Open the zip
+       zipR, err := zip.OpenReader(src)
+       if err != nil {
+               return err
+       }
+       defer zipR.Close()
+
+       // Check the zip integrity
+       if len(zipR.File) == 0 {
+               // Empty archive
+               return fmt.Errorf("empty archive: %s", src)
+       }
+       if !dir && len(zipR.File) > 1 {
+               return fmt.Errorf("expected a single file: %s", src)
+       }
+
+       // Go through and unarchive
+       for _, f := range zipR.File {
+               path := dst
+               if dir {
+                       path = filepath.Join(path, f.Name)
+               }
+
+               if f.FileInfo().IsDir() {
+                       if !dir {
+                               return fmt.Errorf("expected a single file: %s", src)
+                       }
+
+                       // A directory, just make the directory and continue unarchiving...
+                       if err := os.MkdirAll(path, 0755); err != nil {
+                               return err
+                       }
+
+                       continue
+               }
+
+               // Create the enclosing directories if we must. ZIP files aren't
+               // required to contain entries for the directories, so this can
+               // happen.
+               if dir {
+                       if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
+                               return err
+                       }
+               }
+
+               // Open the file for reading
+               srcF, err := f.Open()
+               if err != nil {
+                       return err
+               }
+
+               // Open the file for writing
+               dstF, err := os.Create(path)
+               if err != nil {
+                       srcF.Close()
+                       return err
+               }
+               _, err = io.Copy(dstF, srcF)
+               srcF.Close()
+               dstF.Close()
+               if err != nil {
+                       return err
+               }
+
+               // Chmod the file
+               if err := os.Chmod(path, f.Mode()); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect.go b/vendor/github.com/hashicorp/go-getter/detect.go
new file mode 100644 (file)
index 0000000..481b737
--- /dev/null
@@ -0,0 +1,97 @@
+package getter
+
+import (
+       "fmt"
+       "path/filepath"
+
+       "github.com/hashicorp/go-getter/helper/url"
+)
+
+// Detector defines the interface that an invalid URL or a URL with a blank
+// scheme is passed through in order to determine if it's shorthand for
+// something else well-known.
+type Detector interface {
+       // Detect will detect whether the string matches a known pattern to
+       // turn it into a proper URL.
+       Detect(string, string) (string, bool, error)
+}
+
+// Detectors is the list of detectors that are tried on an invalid URL.
+// This is also the order they're tried (index 0 is first).
+var Detectors []Detector
+
+func init() {
+       Detectors = []Detector{
+               new(GitHubDetector),
+               new(BitBucketDetector),
+               new(S3Detector),
+               new(FileDetector),
+       }
+}
+
+// Detect turns a source string into another source string if it is
+// detected to be of a known pattern.
+//
+// The third parameter should be the list of detectors to use in the
+// order to try them. If you don't want to configure this, just use
+// the global Detectors variable.
+//
+// This is safe to be called with an already valid source string: Detect
+// will just return it.
+func Detect(src string, pwd string, ds []Detector) (string, error) {
+       getForce, getSrc := getForcedGetter(src)
+
+       // Separate out the subdir if there is one; we don't pass that to detect
+       getSrc, subDir := SourceDirSubdir(getSrc)
+
+       u, err := url.Parse(getSrc)
+       if err == nil && u.Scheme != "" {
+               // Valid URL
+               return src, nil
+       }
+
+       for _, d := range ds {
+               result, ok, err := d.Detect(getSrc, pwd)
+               if err != nil {
+                       return "", err
+               }
+               if !ok {
+                       continue
+               }
+
+               var detectForce string
+               detectForce, result = getForcedGetter(result)
+               result, detectSubdir := SourceDirSubdir(result)
+
+               // If we have a subdir from the detection, then prepend it to our
+               // requested subdir.
+               if detectSubdir != "" {
+                       if subDir != "" {
+                               subDir = filepath.Join(detectSubdir, subDir)
+                       } else {
+                               subDir = detectSubdir
+                       }
+               }
+               if subDir != "" {
+                       u, err := url.Parse(result)
+                       if err != nil {
+                               return "", fmt.Errorf("Error parsing URL: %s", err)
+                       }
+                       u.Path += "//" + subDir
+                       result = u.String()
+               }
+
+               // Preserve the forced getter if it exists. We try to use the
+               // originally set force first, followed by any force set by the
+               // detector.
+               if getForce != "" {
+                       result = fmt.Sprintf("%s::%s", getForce, result)
+               } else if detectForce != "" {
+                       result = fmt.Sprintf("%s::%s", detectForce, result)
+               }
+
+               return result, nil
+       }
+
+       return "", fmt.Errorf("invalid source string: %s", src)
+}
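+
+// An illustrative sketch of calling Detect with the default detectors
+// (pwd is only needed for relative file paths):
+//
+//	src, err := Detect("github.com/hashicorp/go-getter", pwd, Detectors)
+//	// src is now "git::https://github.com/hashicorp/go-getter.git"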
diff --git a/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go b/vendor/github.com/hashicorp/go-getter/detect_bitbucket.go
new file mode 100644 (file)
index 0000000..a183a17
--- /dev/null
@@ -0,0 +1,66 @@
+package getter
+
+import (
+       "encoding/json"
+       "fmt"
+       "net/http"
+       "net/url"
+       "strings"
+)
+
+// BitBucketDetector implements Detector to detect BitBucket URLs and turn
+// them into URLs that the Git or Hg Getter can understand.
+type BitBucketDetector struct{}
+
+func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) {
+       if len(src) == 0 {
+               return "", false, nil
+       }
+
+       if strings.HasPrefix(src, "bitbucket.org/") {
+               return d.detectHTTP(src)
+       }
+
+       return "", false, nil
+}
+
+func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
+       u, err := url.Parse("https://" + src)
+       if err != nil {
+               return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err)
+       }
+
+       // We need to get info on this BitBucket repository to determine whether
+       // it is Git or Hg.
+       var info struct {
+               SCM string `json:"scm"`
+       }
+       infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path
+       resp, err := http.Get(infoUrl)
+       if err != nil {
+               return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+       }
+       if resp.StatusCode == 403 {
+               // A private repo
+               return "", true, fmt.Errorf(
+                       "shorthand BitBucket URL can't be used for private repos, " +
+                               "please use a full URL")
+       }
+       dec := json.NewDecoder(resp.Body)
+       if err := dec.Decode(&info); err != nil {
+               return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
+       }
+
+       switch info.SCM {
+       case "git":
+               if !strings.HasSuffix(u.Path, ".git") {
+                       u.Path += ".git"
+               }
+
+               return "git::" + u.String(), true, nil
+       case "hg":
+               return "hg::" + u.String(), true, nil
+       default:
+               return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM)
+       }
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_file.go b/vendor/github.com/hashicorp/go-getter/detect_file.go
new file mode 100644 (file)
index 0000000..756ea43
--- /dev/null
@@ -0,0 +1,67 @@
+package getter
+
+import (
+       "fmt"
+       "os"
+       "path/filepath"
+       "runtime"
+)
+
+// FileDetector implements Detector to detect file paths.
+type FileDetector struct{}
+
+func (d *FileDetector) Detect(src, pwd string) (string, bool, error) {
+       if len(src) == 0 {
+               return "", false, nil
+       }
+
+       if !filepath.IsAbs(src) {
+               if pwd == "" {
+                       return "", true, fmt.Errorf(
+                               "relative paths require a module with a pwd")
+               }
+
+               // Stat the pwd to determine if it's a symbolic link. If it is,
+               // then the pwd becomes the original directory. Otherwise,
+               // `filepath.Join` below can produce unexpected results.
+               //
+               // We just ignore if the pwd doesn't exist. That error will be
+               // caught later when we try to use the URL.
+               if fi, err := os.Lstat(pwd); !os.IsNotExist(err) {
+                       if err != nil {
+                               return "", true, err
+                       }
+                       if fi.Mode()&os.ModeSymlink != 0 {
+                               pwd, err = os.Readlink(pwd)
+                               if err != nil {
+                                       return "", true, err
+                               }
+
+                               // The symlink itself might be a relative path, so we have to
+                               // resolve this to have a correctly rooted URL.
+                               pwd, err = filepath.Abs(pwd)
+                               if err != nil {
+                                       return "", true, err
+                               }
+                       }
+               }
+
+               src = filepath.Join(pwd, src)
+       }
+
+       return fmtFileURL(src), true, nil
+}
+
+func fmtFileURL(path string) string {
+       if runtime.GOOS == "windows" {
+               // Make sure we're using "/" on Windows. URLs are "/"-based.
+               path = filepath.ToSlash(path)
+               return fmt.Sprintf("file://%s", path)
+       }
+
+       // Make sure that we don't start with "/" since we add that below.
+       if path[0] == '/' {
+               path = path[1:]
+       }
+       return fmt.Sprintf("file:///%s", path)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_github.go b/vendor/github.com/hashicorp/go-getter/detect_github.go
new file mode 100644 (file)
index 0000000..c084ad9
--- /dev/null
@@ -0,0 +1,73 @@
+package getter
+
+import (
+       "fmt"
+       "net/url"
+       "strings"
+)
+
+// GitHubDetector implements Detector to detect GitHub URLs and turn
+// them into URLs that the Git Getter can understand.
+type GitHubDetector struct{}
+
+func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) {
+       if len(src) == 0 {
+               return "", false, nil
+       }
+
+       if strings.HasPrefix(src, "github.com/") {
+               return d.detectHTTP(src)
+       } else if strings.HasPrefix(src, "git@github.com:") {
+               return d.detectSSH(src)
+       }
+
+       return "", false, nil
+}
+
+func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) {
+       parts := strings.Split(src, "/")
+       if len(parts) < 3 {
+               return "", false, fmt.Errorf(
+                       "GitHub URLs should be github.com/username/repo")
+       }
+
+       urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/"))
+       url, err := url.Parse(urlStr)
+       if err != nil {
+               return "", true, fmt.Errorf("error parsing GitHub URL: %s", err)
+       }
+
+       if !strings.HasSuffix(url.Path, ".git") {
+               url.Path += ".git"
+       }
+
+       if len(parts) > 3 {
+               url.Path += "//" + strings.Join(parts[3:], "/")
+       }
+
+       return "git::" + url.String(), true, nil
+}
+
+func (d *GitHubDetector) detectSSH(src string) (string, bool, error) {
+       idx := strings.Index(src, ":")
+       qidx := strings.Index(src, "?")
+       if qidx == -1 {
+               qidx = len(src)
+       }
+
+       var u url.URL
+       u.Scheme = "ssh"
+       u.User = url.User("git")
+       u.Host = "github.com"
+       u.Path = src[idx+1 : qidx]
+       if qidx < len(src) {
+               q, err := url.ParseQuery(src[qidx+1:])
+               if err != nil {
+                       return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err)
+               }
+
+               u.RawQuery = q.Encode()
+       }
+
+       return "git::" + u.String(), true, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/detect_s3.go b/vendor/github.com/hashicorp/go-getter/detect_s3.go
new file mode 100644 (file)
index 0000000..8e0f4a0
--- /dev/null
@@ -0,0 +1,61 @@
+package getter
+
+import (
+       "fmt"
+       "net/url"
+       "strings"
+)
+
+// S3Detector implements Detector to detect S3 URLs and turn
+// them into URLs that the S3 getter can understand.
+type S3Detector struct{}
+
+func (d *S3Detector) Detect(src, _ string) (string, bool, error) {
+       if len(src) == 0 {
+               return "", false, nil
+       }
+
+       if strings.Contains(src, ".amazonaws.com/") {
+               return d.detectHTTP(src)
+       }
+
+       return "", false, nil
+}
+
+func (d *S3Detector) detectHTTP(src string) (string, bool, error) {
+       parts := strings.Split(src, "/")
+       if len(parts) < 2 {
+               return "", false, fmt.Errorf(
+                       "URL is not a valid S3 URL")
+       }
+
+       hostParts := strings.Split(parts[0], ".")
+       if len(hostParts) == 3 {
+               return d.detectPathStyle(hostParts[0], parts[1:])
+       } else if len(hostParts) == 4 {
+               return d.detectVhostStyle(hostParts[1], hostParts[0], parts[1:])
+       } else {
+               return "", false, fmt.Errorf(
+                       "URL is not a valid S3 URL")
+       }
+}
+
+func (d *S3Detector) detectPathStyle(region string, parts []string) (string, bool, error) {
+       urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s", region, strings.Join(parts, "/"))
+       url, err := url.Parse(urlStr)
+       if err != nil {
+               return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+       }
+
+       return "s3::" + url.String(), true, nil
+}
+
+func (d *S3Detector) detectVhostStyle(region, bucket string, parts []string) (string, bool, error) {
+       urlStr := fmt.Sprintf("https://%s.amazonaws.com/%s/%s", region, bucket, strings.Join(parts, "/"))
+       url, err := url.Parse(urlStr)
+       if err != nil {
+               return "", false, fmt.Errorf("error parsing S3 URL: %s", err)
+       }
+
+       return "s3::" + url.String(), true, nil
+}
diff --git a/vendor/github.com/hashicorp/go-getter/folder_storage.go b/vendor/github.com/hashicorp/go-getter/folder_storage.go
new file mode 100644 (file)
index 0000000..647ccf4
--- /dev/null
@@ -0,0 +1,65 @@
+package getter
+
+import (
+       "crypto/md5"
+       "encoding/hex"
+       "fmt"
+       "os"
+       "path/filepath"
+)
+
+// FolderStorage is an implementation of the Storage interface that manages
+// modules on the disk.
+type FolderStorage struct {
+       // StorageDir is the directory where the modules will be stored.
+       StorageDir string
+}
+
+// Dir implements Storage.Dir
+func (s *FolderStorage) Dir(key string) (d string, e bool, err error) {
+       d = s.dir(key)
+       _, err = os.Stat(d)
+       if err == nil {
+               // Directory exists
+               e = true
+               return
+       }
+       if os.IsNotExist(err) {
+               // Directory doesn't exist
+               d = ""
+               e = false
+               err = nil
+               return
+       }
+
+       // An error
+       d = ""
+       e = false
+       return
+}
+
+// Get implements Storage.Get
+func (s *FolderStorage) Get(key string, source string, update bool) error {
+       dir := s.dir(key)
+       if !update {
+               if _, err := os.Stat(dir); err == nil {
+                       // If the directory already exists, then we're done since
+                       // we're not updating.
+                       return nil
+               } else if !os.IsNotExist(err) {
+                       // If the error we got wasn't a file-not-exist error, then
+                       // something went wrong and we should report it.
+                       return fmt.Errorf("Error reading module directory: %s", err)
+               }
+       }
+
+       // Get the source. This always forces an update.
+       return Get(dir, source)
+}
+
+// dir returns the directory name we'll use internally to map the given
+// key to a path on disk.
+func (s *FolderStorage) dir(key string) string {
+       sum := md5.Sum([]byte(key))
+       return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:]))
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get.go b/vendor/github.com/hashicorp/go-getter/get.go
new file mode 100644 (file)
index 0000000..c3236f5
--- /dev/null
@@ -0,0 +1,139 @@
+// getter is a package for downloading files or directories from a variety of
+// protocols.
+//
+// getter is unique in its ability to download both directories and files.
+// It also detects certain source strings to be protocol-specific URLs. For
+// example, "github.com/hashicorp/go-getter" would turn into a Git URL and
+// use the Git protocol.
+//
+// Protocols and detectors are extensible.
+//
+// To get started, see Client.
+package getter
+
+import (
+       "bytes"
+       "fmt"
+       "net/url"
+       "os/exec"
+       "regexp"
+       "syscall"
+)
+
+// Getter defines the interface that schemes must implement to download
+// things.
+type Getter interface {
+       // Get downloads the given URL into the given directory. This always
+       // assumes that we're updating and gets the latest version that it can.
+       //
+       // The directory may already exist (if we're updating). If it is in a
+       // format that isn't understood, an error should be returned. Get shouldn't
+       // simply nuke the directory.
+       Get(string, *url.URL) error
+
+       // GetFile downloads the given URL into the given path. The URL must
+       // reference a single file. If possible, the Getter should check if
+       // the remote end contains the same file and no-op this operation.
+       GetFile(string, *url.URL) error
+
+       // ClientMode returns the mode based on the given URL. This is used to
+       // allow clients to let the getters decide which mode to use.
+       ClientMode(*url.URL) (ClientMode, error)
+}
+
+// Getters is the mapping of scheme to the Getter implementation that will
+// be used to get a dependency.
+var Getters map[string]Getter
+
+// forcedRegexp is the regular expression that finds forced getters. The
+// syntax is scheme::url, for example: git::https://foo.com
+var forcedRegexp = regexp.MustCompile(`^([A-Za-z0-9]+)::(.+)$`)
+
+func init() {
+       httpGetter := &HttpGetter{Netrc: true}
+
+       Getters = map[string]Getter{
+               "file":  new(FileGetter),
+               "git":   new(GitGetter),
+               "hg":    new(HgGetter),
+               "s3":    new(S3Getter),
+               "http":  httpGetter,
+               "https": httpGetter,
+       }
+}
+
+// Get downloads the directory specified by src into the folder specified by
+// dst. If dst already exists, Get will attempt to update it.
+//
+// src is a URL, whereas dst is always just a file path to a folder. This
+// folder doesn't need to exist; it will be created if necessary.
+func Get(dst, src string) error {
+       return (&Client{
+               Src:     src,
+               Dst:     dst,
+               Dir:     true,
+               Getters: Getters,
+       }).Get()
+}
+
+// GetAny downloads a URL into the given destination. Unlike Get or
+// GetFile, both directories and files are supported.
+//
+// dst must be a directory. If src is a file, it will be downloaded
+// into dst with the basename of the URL. If src is a directory or
+// archive, it will be unpacked directly into dst.
+func GetAny(dst, src string) error {
+       return (&Client{
+               Src:     src,
+               Dst:     dst,
+               Mode:    ClientModeAny,
+               Getters: Getters,
+       }).Get()
+}
+
+// GetFile downloads the file specified by src into the path specified by
+// dst.
+func GetFile(dst, src string) error {
+       return (&Client{
+               Src:     src,
+               Dst:     dst,
+               Dir:     false,
+               Getters: Getters,
+       }).Get()
+}
+
+// getRunCommand is a helper that runs a command and captures the output
+// in case an error happens.
+func getRunCommand(cmd *exec.Cmd) error {
+       var buf bytes.Buffer
+       cmd.Stdout = &buf
+       cmd.Stderr = &buf
+       err := cmd.Run()
+       if err == nil {
+               return nil
+       }
+       if exiterr, ok := err.(*exec.ExitError); ok {
+               // The program has exited with an exit code != 0
+               if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
+                       return fmt.Errorf(
+                               "%s exited with %d: %s",
+                               cmd.Path,
+                               status.ExitStatus(),
+                               buf.String())
+               }
+       }
+
+       return fmt.Errorf("error running %s: %s", cmd.Path, buf.String())
+}
+
+// getForcedGetter takes a source and returns the tuple of the forced
+// getter and the raw URL (without the force syntax).
+func getForcedGetter(src string) (string, string) {
+       var forced string
+       if ms := forcedRegexp.FindStringSubmatch(src); ms != nil {
+               forced = ms[1]
+               src = ms[2]
+       }
+
+       return forced, src
+}
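
For orientation, a minimal sketch of driving these package-level helpers; the
destination paths, the source string, and the use of MockGetter as a stand-in
scheme are placeholder assumptions, not part of this commit:

    package main

    import (
        "log"

        getter "github.com/hashicorp/go-getter"
    )

    func main() {
        // Getters is an exported map, so extra schemes can be registered
        // up front; MockGetter stands in for a real implementation here.
        getter.Getters["mock"] = &getter.MockGetter{}

        // "git::" forces the GitGetter regardless of the URL scheme
        // (the forcedRegexp syntax above); repository and path are
        // placeholders.
        src := "git::https://github.com/hashicorp/go-getter"
        if err := getter.Get("./go-getter-src", src); err != nil {
            log.Fatal(err)
        }
    }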
diff --git a/vendor/github.com/hashicorp/go-getter/get_file.go b/vendor/github.com/hashicorp/go-getter/get_file.go
new file mode 100644 (file)
index 0000000..e5d2d61
--- /dev/null
@@ -0,0 +1,32 @@
+package getter
+
+import (
+       "net/url"
+       "os"
+)
+
+// FileGetter is a Getter implementation that will download a module from
+// a file scheme (file://) URL.
+type FileGetter struct {
+       // Copy, if set to true, will copy data instead of using a symlink
+       Copy bool
+}
+
+func (g *FileGetter) ClientMode(u *url.URL) (ClientMode, error) {
+       path := u.Path
+       if u.RawPath != "" {
+               path = u.RawPath
+       }
+
+       fi, err := os.Stat(path)
+       if err != nil {
+               return 0, err
+       }
+
+       // Check if the source is a directory.
+       if fi.IsDir() {
+               return ClientModeDir, nil
+       }
+
+       return ClientModeFile, nil
+}
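
A sketch of how ClientMode behaves, assuming the placeholder path
/tmp/some-module exists locally:

    package main

    import (
        "fmt"
        "log"
        "net/url"

        getter "github.com/hashicorp/go-getter"
    )

    func main() {
        // Placeholder path; os.Stat must succeed for ClientMode to work.
        u, err := url.Parse("file:///tmp/some-module")
        if err != nil {
            log.Fatal(err)
        }
        mode, err := new(getter.FileGetter).ClientMode(u)
        if err != nil {
            log.Fatal(err)
        }
        // Prints the numeric ClientMode value: the dir mode for a
        // directory, the file mode for a regular file.
        fmt.Println(mode)
    }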
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_unix.go b/vendor/github.com/hashicorp/go-getter/get_file_unix.go
new file mode 100644 (file)
index 0000000..c89a2d5
--- /dev/null
@@ -0,0 +1,103 @@
+// +build !windows
+
+package getter
+
+import (
+       "fmt"
+       "io"
+       "net/url"
+       "os"
+       "path/filepath"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+       path := u.Path
+       if u.RawPath != "" {
+               path = u.RawPath
+       }
+
+       // The source path must exist and be a directory to be usable.
+       if fi, err := os.Stat(path); err != nil {
+               return fmt.Errorf("source path error: %s", err)
+       } else if !fi.IsDir() {
+               return fmt.Errorf("source path must be a directory")
+       }
+
+       fi, err := os.Lstat(dst)
+       if err != nil && !os.IsNotExist(err) {
+               return err
+       }
+
+       // If the destination already exists, it must be a symlink
+       if err == nil {
+               mode := fi.Mode()
+               if mode&os.ModeSymlink == 0 {
+                       return fmt.Errorf("destination exists and is not a symlink")
+               }
+
+               // Remove the destination
+               if err := os.Remove(dst); err != nil {
+                       return err
+               }
+       }
+
+       // Create all the parent directories
+       if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+               return err
+       }
+
+       return os.Symlink(path, dst)
+}
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+       path := u.Path
+       if u.RawPath != "" {
+               path = u.RawPath
+       }
+
+       // The source path must exist and be a file to be usable.
+       if fi, err := os.Stat(path); err != nil {
+               return fmt.Errorf("source path error: %s", err)
+       } else if fi.IsDir() {
+               return fmt.Errorf("source path must be a file")
+       }
+
+       _, err := os.Lstat(dst)
+       if err != nil && !os.IsNotExist(err) {
+               return err
+       }
+
+       // If the destination already exists, it must be a symlink
+       if err == nil {
+               // Remove the destination
+               if err := os.Remove(dst); err != nil {
+                       return err
+               }
+       }
+
+       // Create all the parent directories
+       if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+               return err
+       }
+
+       // If we're not copying, just symlink and we're done
+       if !g.Copy {
+               return os.Symlink(path, dst)
+       }
+
+       // Copy
+       srcF, err := os.Open(path)
+       if err != nil {
+               return err
+       }
+       defer srcF.Close()
+
+       dstF, err := os.Create(dst)
+       if err != nil {
+               return err
+       }
+       defer dstF.Close()
+
+       _, err = io.Copy(dstF, srcF)
+       return err
+}
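
A sketch of the symlink-versus-copy switch above; both paths are placeholders:

    package main

    import (
        "log"
        "net/url"

        getter "github.com/hashicorp/go-getter"
    )

    func main() {
        // Placeholder source; the URL form mirrors what the Client passes in.
        u, err := url.Parse("file:///tmp/source.txt")
        if err != nil {
            log.Fatal(err)
        }

        // Copy: false (the zero value) would symlink dst to the source;
        // Copy: true copies the file contents instead.
        g := &getter.FileGetter{Copy: true}
        if err := g.GetFile("/tmp/dest.txt", u); err != nil {
            log.Fatal(err)
        }
    }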
diff --git a/vendor/github.com/hashicorp/go-getter/get_file_windows.go b/vendor/github.com/hashicorp/go-getter/get_file_windows.go
new file mode 100644 (file)
index 0000000..f87ed0a
--- /dev/null
@@ -0,0 +1,120 @@
+// +build windows
+
+package getter
+
+import (
+       "fmt"
+       "io"
+       "net/url"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "strings"
+)
+
+func (g *FileGetter) Get(dst string, u *url.URL) error {
+       path := u.Path
+       if u.RawPath != "" {
+               path = u.RawPath
+       }
+
+       // The source path must exist and be a directory to be usable.
+       if fi, err := os.Stat(path); err != nil {
+               return fmt.Errorf("source path error: %s", err)
+       } else if !fi.IsDir() {
+               return fmt.Errorf("source path must be a directory")
+       }
+
+       fi, err := os.Lstat(dst)
+       if err != nil && !os.IsNotExist(err) {
+               return err
+       }
+
+       // If the destination already exists, it must be a symlink
+       if err == nil {
+               mode := fi.Mode()
+               if mode&os.ModeSymlink == 0 {
+                       return fmt.Errorf("destination exists and is not a symlink")
+               }
+
+               // Remove the destination
+               if err := os.Remove(dst); err != nil {
+                       return err
+               }
+       }
+
+       // Create all the parent directories
+       if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+               return err
+       }
+
+       sourcePath := toBackslash(path)
+
+       // Use mklink to create a junction point
+       output, err := exec.Command("cmd", "/c", "mklink", "/J", dst, sourcePath).CombinedOutput()
+       if err != nil {
+               return fmt.Errorf("failed to run mklink %v %v: %v %q", dst, sourcePath, err, output)
+       }
+
+       return nil
+}
+
+func (g *FileGetter) GetFile(dst string, u *url.URL) error {
+       path := u.Path
+       if u.RawPath != "" {
+               path = u.RawPath
+       }
+
+       // The source path must exist and be a file to be usable.
+       if fi, err := os.Stat(path); err != nil {
+               return fmt.Errorf("source path error: %s", err)
+       } else if fi.IsDir() {
+               return fmt.Errorf("source path must be a file")
+       }
+
+       _, err := os.Lstat(dst)
+       if err != nil && !os.IsNotExist(err) {
+               return err
+       }
+
+       // If the destination already exists, it must be a symlink
+       if err == nil {
+               // Remove the destination
+               if err := os.Remove(dst); err != nil {
+                       return err
+               }
+       }
+
+       // Create all the parent directories
+       if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+               return err
+       }
+
+       // If we're not copying, just symlink and we're done
+       if !g.Copy {
+               return os.Symlink(path, dst)
+       }
+
+       // Copy
+       srcF, err := os.Open(path)
+       if err != nil {
+               return err
+       }
+       defer srcF.Close()
+
+       dstF, err := os.Create(dst)
+       if err != nil {
+               return err
+       }
+       defer dstF.Close()
+
+       _, err = io.Copy(dstF, srcF)
+       return err
+}
+
+// toBackslash returns the result of replacing each slash character
+// in path with a backslash ('\') character. Multiple separators are
+// replaced by multiple backslashes.
+func toBackslash(path string) string {
+       return strings.Replace(path, "/", "\\", -1)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/get_git.go b/vendor/github.com/hashicorp/go-getter/get_git.go
new file mode 100644 (file)
index 0000000..0728139
--- /dev/null
@@ -0,0 +1,225 @@
+package getter
+
+import (
+       "encoding/base64"
+       "fmt"
+       "io/ioutil"
+       "net/url"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "strings"
+
+       urlhelper "github.com/hashicorp/go-getter/helper/url"
+       "github.com/hashicorp/go-version"
+)
+
+// GitGetter is a Getter implementation that will download a module from
+// a git repository.
+type GitGetter struct{}
+
+func (g *GitGetter) ClientMode(_ *url.URL) (ClientMode, error) {
+       return ClientModeDir, nil
+}
+
+func (g *GitGetter) Get(dst string, u *url.URL) error {
+       if _, err := exec.LookPath("git"); err != nil {
+               return fmt.Errorf("git must be available and on the PATH")
+       }
+
+       // Extract some query parameters we use
+       var ref, sshKey string
+       q := u.Query()
+       if len(q) > 0 {
+               ref = q.Get("ref")
+               q.Del("ref")
+
+               sshKey = q.Get("sshkey")
+               q.Del("sshkey")
+
+               // Copy the URL
+               var newU url.URL = *u
+               u = &newU
+               u.RawQuery = q.Encode()
+       }
+
+       var sshKeyFile string
+       if sshKey != "" {
+               // Check that the git version is sufficiently new.
+               if err := checkGitVersion("2.3"); err != nil {
+                       return fmt.Errorf("Error using ssh key: %v", err)
+               }
+
+               // We have an SSH key - decode it.
+               raw, err := base64.StdEncoding.DecodeString(sshKey)
+               if err != nil {
+                       return err
+               }
+
+               // Create a temp file for the key and ensure it is removed.
+               fh, err := ioutil.TempFile("", "go-getter")
+               if err != nil {
+                       return err
+               }
+               sshKeyFile = fh.Name()
+               defer os.Remove(sshKeyFile)
+
+               // Set the permissions prior to writing the key material.
+               if err := os.Chmod(sshKeyFile, 0600); err != nil {
+                       return err
+               }
+
+               // Write the raw key into the temp file.
+               _, err = fh.Write(raw)
+               fh.Close()
+               if err != nil {
+                       return err
+               }
+       }
+
+       // Clone or update the repository
+       _, err := os.Stat(dst)
+       if err != nil && !os.IsNotExist(err) {
+               return err
+       }
+       if err == nil {
+               err = g.update(dst, sshKeyFile, ref)
+       } else {
+               err = g.clone(dst, sshKeyFile, u)
+       }
+       if err != nil {
+               return err
+       }
+
+       // Next, check out the proper tag/branch if one was specified.
+       if ref != "" {
+               if err := g.checkout(dst, ref); err != nil {
+                       return err
+               }
+       }
+
+       // Lastly, download any/all submodules.
+       return g.fetchSubmodules(dst, sshKeyFile)
+}
+
+// GetFile for Git doesn't support updating at this time. It will download
+// the file every time.
+func (g *GitGetter) GetFile(dst string, u *url.URL) error {
+       td, err := ioutil.TempDir("", "getter-git")
+       if err != nil {
+               return err
+       }
+       if err := os.RemoveAll(td); err != nil {
+               return err
+       }
+
+       // Get the filename, and strip it from the URL so we can
+       // get the repository directly.
+       filename := filepath.Base(u.Path)
+       u.Path = filepath.Dir(u.Path)
+
+       // Get the full repository
+       if err := g.Get(td, u); err != nil {
+               return err
+       }
+
+       // Copy the single file
+       u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename)))
+       if err != nil {
+               return err
+       }
+
+       fg := &FileGetter{Copy: true}
+       return fg.GetFile(dst, u)
+}
+
+func (g *GitGetter) checkout(dst string, ref string) error {
+       cmd := exec.Command("git", "checkout", ref)
+       cmd.Dir = dst
+       return getRunCommand(cmd)
+}
+
+func (g *GitGetter) clone(dst, sshKeyFile string, u *url.URL) error {
+       cmd := exec.Command("git", "clone", u.String(), dst)
+       setupGitEnv(cmd, sshKeyFile)
+       return getRunCommand(cmd)
+}
+
+func (g *GitGetter) update(dst, sshKeyFile, ref string) error {
+       // Determine if the given ref is a branch. If it is NOT a branch,
+       // switch to master first and check out the ref afterwards.
+       cmd := exec.Command("git", "show-ref", "-q", "--verify", "refs/heads/"+ref)
+       cmd.Dir = dst
+
+       if getRunCommand(cmd) != nil {
+               // Not a branch, switch to master. This will also catch non-existent
+               // branches, in which case we want to switch to master and then
+               // checkout the proper branch later.
+               ref = "master"
+       }
+
+       // We have to be on a branch to pull
+       if err := g.checkout(dst, ref); err != nil {
+               return err
+       }
+
+       cmd = exec.Command("git", "pull", "--ff-only")
+       cmd.Dir = dst
+       setupGitEnv(cmd, sshKeyFile)
+       return getRunCommand(cmd)
+}
+
+// fetchSubmodules downloads any configured submodules recursively.
+func (g *GitGetter) fetchSubmodules(dst, sshKeyFile string) error {
+       cmd := exec.Command("git", "submodule", "update", "--init", "--recursive")
+       cmd.Dir = dst
+       setupGitEnv(cmd, sshKeyFile)
+       return getRunCommand(cmd)
+}
+
+// setupGitEnv sets up the environment for the given command. This is used to
+// pass configuration data to git and ssh and enables advanced cloning methods.
+func setupGitEnv(cmd *exec.Cmd, sshKeyFile string) {
+       var sshOpts []string
+
+       if sshKeyFile != "" {
+               // We have an SSH key temp file configured, tell ssh about this.
+               sshOpts = append(sshOpts, "-i", sshKeyFile)
+       }
+
+       cmd.Env = append(os.Environ(),
+               // Set the ssh command to use for clones.
+               "GIT_SSH_COMMAND=ssh "+strings.Join(sshOpts, " "),
+       )
+}
+
+// checkGitVersion is used to check the version of git installed on the system
+// against a known minimum version. Returns an error if the installed version
+// is older than the given minimum.
+func checkGitVersion(min string) error {
+       want, err := version.NewVersion(min)
+       if err != nil {
+               return err
+       }
+
+       out, err := exec.Command("git", "version").Output()
+       if err != nil {
+               return err
+       }
+
+       fields := strings.Fields(string(out))
+       if len(fields) != 3 {
+               return fmt.Errorf("Unexpected 'git version' output: %q", string(out))
+       }
+
+       have, err := version.NewVersion(fields[2])
+       if err != nil {
+               return err
+       }
+
+       if have.LessThan(want) {
+               return fmt.Errorf("Required git version = %s, have %s", want, have)
+       }
+
+       return nil
+}
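
The query handling above implies source strings like this sketch; the
repository and tag are placeholders:

    package main

    import (
        "log"

        getter "github.com/hashicorp/go-getter"
    )

    func main() {
        // "ref" is stripped from the query string before cloning and
        // checked out afterwards. An SSH key could likewise be passed
        // base64-encoded via "sshkey" (git >= 2.3, per checkGitVersion).
        src := "git::https://github.com/hashicorp/terraform.git?ref=v0.9.0"
        if err := getter.Get("./terraform-src", src); err != nil {
            log.Fatal(err)
        }
    }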
diff --git a/vendor/github.com/hashicorp/go-getter/get_hg.go b/vendor/github.com/hashicorp/go-getter/get_hg.go
new file mode 100644 (file)
index 0000000..820bdd4
--- /dev/null
@@ -0,0 +1,131 @@
+package getter
+
+import (
+       "fmt"
+       "io/ioutil"
+       "net/url"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "runtime"
+
+       urlhelper "github.com/hashicorp/go-getter/helper/url"
+)
+
+// HgGetter is a Getter implementation that will download a module from
+// a Mercurial repository.
+type HgGetter struct{}
+
+func (g *HgGetter) ClientMode(_ *url.URL) (ClientMode, error) {
+       return ClientModeDir, nil
+}
+
+func (g *HgGetter) Get(dst string, u *url.URL) error {
+       if _, err := exec.LookPath("hg"); err != nil {
+               return fmt.Errorf("hg must be available and on the PATH")
+       }
+
+       newURL, err := urlhelper.Parse(u.String())
+       if err != nil {
+               return err
+       }
+       if fixWindowsDrivePath(newURL) {
+               // See valid file path form on http://www.selenic.com/hg/help/urls
+               newURL.Path = fmt.Sprintf("/%s", newURL.Path)
+       }
+
+       // Extract some query parameters we use
+       var rev string
+       q := newURL.Query()
+       if len(q) > 0 {
+               rev = q.Get("rev")
+               q.Del("rev")
+
+               newURL.RawQuery = q.Encode()
+       }
+
+       _, err = os.Stat(dst)
+       if err != nil && !os.IsNotExist(err) {
+               return err
+       }
+       if err != nil {
+               if err := g.clone(dst, newURL); err != nil {
+                       return err
+               }
+       }
+
+       if err := g.pull(dst, newURL); err != nil {
+               return err
+       }
+
+       return g.update(dst, newURL, rev)
+}
+
+// GetFile for Hg doesn't support updating at this time. It will download
+// the file every time.
+func (g *HgGetter) GetFile(dst string, u *url.URL) error {
+       td, err := ioutil.TempDir("", "getter-hg")
+       if err != nil {
+               return err
+       }
+       if err := os.RemoveAll(td); err != nil {
+               return err
+       }
+
+       // Get the filename, and strip it from the URL so we can
+       // get the repository directly.
+       filename := filepath.Base(u.Path)
+       u.Path = filepath.ToSlash(filepath.Dir(u.Path))
+
+       // If we're on Windows, we need to set the host to "localhost" for hg
+       if runtime.GOOS == "windows" {
+               u.Host = "localhost"
+       }
+
+       // Get the full repository
+       if err := g.Get(td, u); err != nil {
+               return err
+       }
+
+       // Copy the single file
+       u, err = urlhelper.Parse(fmtFileURL(filepath.Join(td, filename)))
+       if err != nil {
+               return err
+       }
+
+       fg := &FileGetter{Copy: true}
+       return fg.GetFile(dst, u)
+}
+
+func (g *HgGetter) clone(dst string, u *url.URL) error {
+       cmd := exec.Command("hg", "clone", "-U", u.String(), dst)
+       return getRunCommand(cmd)
+}
+
+func (g *HgGetter) pull(dst string, u *url.URL) error {
+       cmd := exec.Command("hg", "pull")
+       cmd.Dir = dst
+       return getRunCommand(cmd)
+}
+
+func (g *HgGetter) update(dst string, u *url.URL, rev string) error {
+       args := []string{"update"}
+       if rev != "" {
+               args = append(args, rev)
+       }
+
+       cmd := exec.Command("hg", args...)
+       cmd.Dir = dst
+       return getRunCommand(cmd)
+}
+
+func fixWindowsDrivePath(u *url.URL) bool {
+       // hg assumes a file:/// prefix for Windows drive letter file paths.
+       // (e.g. file:///c:/foo/bar)
+       // If the URL Path does not begin with a '/' character, the resulting URL
+       // path will have a file:// prefix. (e.g. file://c:/foo/bar)
+       // See http://www.selenic.com/hg/help/urls and the examples listed in
+       // http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936
+       return runtime.GOOS == "windows" && u.Scheme == "file" &&
+               len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':'
+}
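
Analogously for Mercurial, a sketch with a placeholder repository; "rev"
selects the revision handed to hg update:

    package main

    import (
        "log"

        getter "github.com/hashicorp/go-getter"
    )

    func main() {
        // Placeholder repository; requires the hg CLI on the PATH.
        src := "hg::https://example.com/hg/repo?rev=default"
        if err := getter.Get("./hg-src", src); err != nil {
            log.Fatal(err)
        }
    }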
diff --git a/vendor/github.com/hashicorp/go-getter/get_http.go b/vendor/github.com/hashicorp/go-getter/get_http.go
new file mode 100644 (file)
index 0000000..3c02034
--- /dev/null
@@ -0,0 +1,219 @@
+package getter
+
+import (
+       "encoding/xml"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "net/http"
+       "net/url"
+       "os"
+       "path/filepath"
+       "strings"
+)
+
+// HttpGetter is a Getter implementation that will download from an HTTP
+// endpoint.
+//
+// For file downloads, HTTP is used directly.
+//
+// The protocol for downloading a directory from an HTTP endpoint is as follows:
+//
+// An HTTP GET request is made to the URL with the additional GET parameter
+// "terraform-get=1". This lets you handle that scenario specially if you
+// wish. The response must be a 2xx.
+//
+// First, the response is checked for an "X-Terraform-Get" header, which
+// should contain a source URL to download.
+//
+// If the header is not present, the body is searched for a meta tag
+// named "terraform-get" whose content should be a source URL.
+//
+// The source URL, whether from the header or meta tag, must be a fully
+// formed URL. Shorthand syntax such as "github.com/foo/bar" and relative
+// paths are not allowed.
+type HttpGetter struct {
+       // Netrc, if true, will look up and use auth information found
+       // in the user's netrc file if available.
+       Netrc bool
+}
+
+func (g *HttpGetter) ClientMode(u *url.URL) (ClientMode, error) {
+       if strings.HasSuffix(u.Path, "/") {
+               return ClientModeDir, nil
+       }
+       return ClientModeFile, nil
+}
+
+func (g *HttpGetter) Get(dst string, u *url.URL) error {
+       // Copy the URL so we can modify it
+       var newU url.URL = *u
+       u = &newU
+
+       if g.Netrc {
+               // Add auth from netrc if we can
+               if err := addAuthFromNetrc(u); err != nil {
+                       return err
+               }
+       }
+
+       // Add terraform-get to the parameter.
+       q := u.Query()
+       q.Add("terraform-get", "1")
+       u.RawQuery = q.Encode()
+
+       // Get the URL
+       resp, err := http.Get(u.String())
+       if err != nil {
+               return err
+       }
+       defer resp.Body.Close()
+       if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+               return fmt.Errorf("bad response code: %d", resp.StatusCode)
+       }
+
+       // Extract the source URL
+       var source string
+       if v := resp.Header.Get("X-Terraform-Get"); v != "" {
+               source = v
+       } else {
+               source, err = g.parseMeta(resp.Body)
+               if err != nil {
+                       return err
+               }
+       }
+       if source == "" {
+               return fmt.Errorf("no source URL was returned")
+       }
+
+       // If there is a subdir component, then we download the root separately
+       // into a temporary directory, then copy over the proper subdir.
+       source, subDir := SourceDirSubdir(source)
+       if subDir == "" {
+               return Get(dst, source)
+       }
+
+       // We have a subdir, time to jump some hoops
+       return g.getSubdir(dst, source, subDir)
+}
+
+func (g *HttpGetter) GetFile(dst string, u *url.URL) error {
+       resp, err := http.Get(u.String())
+       if err != nil {
+               return err
+       }
+       defer resp.Body.Close()
+       if resp.StatusCode != 200 {
+               return fmt.Errorf("bad response code: %d", resp.StatusCode)
+       }
+
+       // Create all the parent directories
+       if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+               return err
+       }
+
+       f, err := os.Create(dst)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+
+       _, err = io.Copy(f, resp.Body)
+       return err
+}
+
+// getSubdir downloads the source into the destination, but with
+// the proper subdir.
+func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
+       // Create a temporary directory to store the full source
+       td, err := ioutil.TempDir("", "tf")
+       if err != nil {
+               return err
+       }
+       defer os.RemoveAll(td)
+
+       // Download that into the given directory
+       if err := Get(td, source); err != nil {
+               return err
+       }
+
+       // Make sure the subdir path actually exists
+       sourcePath := filepath.Join(td, subDir)
+       if _, err := os.Stat(sourcePath); err != nil {
+               return fmt.Errorf(
+                       "Error downloading %s: %s", source, err)
+       }
+
+       // Copy the subdirectory into our actual destination.
+       if err := os.RemoveAll(dst); err != nil {
+               return err
+       }
+
+       // Make the final destination
+       if err := os.MkdirAll(dst, 0755); err != nil {
+               return err
+       }
+
+       return copyDir(dst, sourcePath, false)
+}
+
+// parseMeta looks for the first meta tag in the given reader that
+// will give us the source URL.
+func (g *HttpGetter) parseMeta(r io.Reader) (string, error) {
+       d := xml.NewDecoder(r)
+       d.CharsetReader = charsetReader
+       d.Strict = false
+       var err error
+       var t xml.Token
+       for {
+               t, err = d.Token()
+               if err != nil {
+                       if err == io.EOF {
+                               err = nil
+                       }
+                       return "", err
+               }
+               if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
+                       return "", nil
+               }
+               if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
+                       return "", nil
+               }
+               e, ok := t.(xml.StartElement)
+               if !ok || !strings.EqualFold(e.Name.Local, "meta") {
+                       continue
+               }
+               if attrValue(e.Attr, "name") != "terraform-get" {
+                       continue
+               }
+               if f := attrValue(e.Attr, "content"); f != "" {
+                       return f, nil
+               }
+       }
+}
+
+// attrValue returns the attribute value for the case-insensitive key
+// "name", or the empty string if nothing is found.
+func attrValue(attrs []xml.Attr, name string) string {
+       for _, a := range attrs {
+               if strings.EqualFold(a.Name.Local, name) {
+                       return a.Value
+               }
+       }
+       return ""
+}
+
+// charsetReader returns a reader for the given charset. Currently
+// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
+// error so the caller can see why the document could not be decoded
+// when the encoding is not supported. Note that, in order to reduce
+// potential errors, ASCII is treated as UTF-8 (i.e. characters greater
+// than 0x7f are not rejected).
+func charsetReader(charset string, input io.Reader) (io.Reader, error) {
+       switch strings.ToLower(charset) {
+       case "ascii":
+               return input, nil
+       default:
+               return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
+       }
+}
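
A minimal sketch of the server side of the directory protocol described
above; the listen address and redirect target are placeholders:

    package main

    import (
        "log"
        "net/http"
    )

    func main() {
        http.HandleFunc("/module", func(w http.ResponseWriter, r *http.Request) {
            // HttpGetter.Get adds ?terraform-get=1 to its request.
            if r.URL.Query().Get("terraform-get") == "1" {
                // Must be a fully formed source URL (placeholder here).
                w.Header().Set("X-Terraform-Get",
                    "git::https://github.com/hashicorp/example.git")
            }
            w.WriteHeader(http.StatusOK)
        })
        log.Fatal(http.ListenAndServe(":8080", nil))
    }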
diff --git a/vendor/github.com/hashicorp/go-getter/get_mock.go b/vendor/github.com/hashicorp/go-getter/get_mock.go
new file mode 100644 (file)
index 0000000..882e694
--- /dev/null
@@ -0,0 +1,52 @@
+package getter
+
+import (
+       "net/url"
+)
+
+// MockGetter is an implementation of Getter that can be used for tests.
+type MockGetter struct {
+       // Proxy, if set, will be called after recording the calls below.
+       // If it isn't set, then the *Err values will be returned.
+       Proxy Getter
+
+       GetCalled bool
+       GetDst    string
+       GetURL    *url.URL
+       GetErr    error
+
+       GetFileCalled bool
+       GetFileDst    string
+       GetFileURL    *url.URL
+       GetFileErr    error
+}
+
+func (g *MockGetter) Get(dst string, u *url.URL) error {
+       g.GetCalled = true
+       g.GetDst = dst
+       g.GetURL = u
+
+       if g.Proxy != nil {
+               return g.Proxy.Get(dst, u)
+       }
+
+       return g.GetErr
+}
+
+func (g *MockGetter) GetFile(dst string, u *url.URL) error {
+       g.GetFileCalled = true
+       g.GetFileDst = dst
+       g.GetFileURL = u
+
+       if g.Proxy != nil {
+               return g.Proxy.GetFile(dst, u)
+       }
+       return g.GetFileErr
+}
+
+func (g *MockGetter) ClientMode(u *url.URL) (ClientMode, error) {
+       if l := len(u.Path); l > 0 && u.Path[l-1:] == "/" {
+               return ClientModeDir, nil
+       }
+       return ClientModeFile, nil
+}
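
A sketch of using the recording fields in a test; the test name and paths
are placeholders:

    package getter_test

    import (
        "net/url"
        "testing"

        getter "github.com/hashicorp/go-getter"
    )

    func TestRecordsGet(t *testing.T) {
        mock := &getter.MockGetter{}
        u, err := url.Parse("mock://example.com/foo")
        if err != nil {
            t.Fatal(err)
        }

        if err := mock.Get("/tmp/dst", u); err != nil {
            t.Fatal(err)
        }
        if !mock.GetCalled || mock.GetDst != "/tmp/dst" {
            t.Fatalf("unexpected recording: %+v", mock)
        }
    }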
diff --git a/vendor/github.com/hashicorp/go-getter/get_s3.go b/vendor/github.com/hashicorp/go-getter/get_s3.go
new file mode 100644 (file)
index 0000000..d3bffeb
--- /dev/null
@@ -0,0 +1,243 @@
+package getter
+
+import (
+       "fmt"
+       "io"
+       "net/url"
+       "os"
+       "path/filepath"
+       "strings"
+
+       "github.com/aws/aws-sdk-go/aws"
+       "github.com/aws/aws-sdk-go/aws/credentials"
+       "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+       "github.com/aws/aws-sdk-go/aws/ec2metadata"
+       "github.com/aws/aws-sdk-go/aws/session"
+       "github.com/aws/aws-sdk-go/service/s3"
+)
+
+// S3Getter is a Getter implementation that will download a module from
+// an S3 bucket.
+type S3Getter struct{}
+
+func (g *S3Getter) ClientMode(u *url.URL) (ClientMode, error) {
+       // Parse URL
+       region, bucket, path, _, creds, err := g.parseUrl(u)
+       if err != nil {
+               return 0, err
+       }
+
+       // Create client config
+       config := g.getAWSConfig(region, creds)
+       sess := session.New(config)
+       client := s3.New(sess)
+
+       // List the object(s) at the given prefix
+       req := &s3.ListObjectsInput{
+               Bucket: aws.String(bucket),
+               Prefix: aws.String(path),
+       }
+       resp, err := client.ListObjects(req)
+       if err != nil {
+               return 0, err
+       }
+
+       for _, o := range resp.Contents {
+               // Use file mode on exact match.
+               if *o.Key == path {
+                       return ClientModeFile, nil
+               }
+
+               // Use dir mode if child keys are found.
+               if strings.HasPrefix(*o.Key, path+"/") {
+                       return ClientModeDir, nil
+               }
+       }
+
+       // There was no match, so just return file mode. The download is going
+       // to fail but we will let S3 return the proper error later.
+       return ClientModeFile, nil
+}
+
+func (g *S3Getter) Get(dst string, u *url.URL) error {
+       // Parse URL
+       region, bucket, path, _, creds, err := g.parseUrl(u)
+       if err != nil {
+               return err
+       }
+
+       // Remove destination if it already exists
+       _, err = os.Stat(dst)
+       if err != nil && !os.IsNotExist(err) {
+               return err
+       }
+
+       if err == nil {
+               // Remove the destination
+               if err := os.RemoveAll(dst); err != nil {
+                       return err
+               }
+       }
+
+       // Create all the parent directories
+       if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+               return err
+       }
+
+       config := g.getAWSConfig(region, creds)
+       sess := session.New(config)
+       client := s3.New(sess)
+
+       // List files in path, keep listing until no more objects are found
+       lastMarker := ""
+       hasMore := true
+       for hasMore {
+               req := &s3.ListObjectsInput{
+                       Bucket: aws.String(bucket),
+                       Prefix: aws.String(path),
+               }
+               if lastMarker != "" {
+                       req.Marker = aws.String(lastMarker)
+               }
+
+               resp, err := client.ListObjects(req)
+               if err != nil {
+                       return err
+               }
+
+               hasMore = aws.BoolValue(resp.IsTruncated)
+
+               // Get each object storing each file relative to the destination path
+               for _, object := range resp.Contents {
+                       lastMarker = aws.StringValue(object.Key)
+                       objPath := aws.StringValue(object.Key)
+
+                       // If the key ends with a slash, assume it is a directory and ignore it
+                       if strings.HasSuffix(objPath, "/") {
+                               continue
+                       }
+
+                       // Get the object destination path
+                       objDst, err := filepath.Rel(path, objPath)
+                       if err != nil {
+                               return err
+                       }
+                       objDst = filepath.Join(dst, objDst)
+
+                       if err := g.getObject(client, objDst, bucket, objPath, ""); err != nil {
+                               return err
+                       }
+               }
+       }
+
+       return nil
+}
+
+func (g *S3Getter) GetFile(dst string, u *url.URL) error {
+       region, bucket, path, version, creds, err := g.parseUrl(u)
+       if err != nil {
+               return err
+       }
+
+       config := g.getAWSConfig(region, creds)
+       sess := session.New(config)
+       client := s3.New(sess)
+       return g.getObject(client, dst, bucket, path, version)
+}
+
+func (g *S3Getter) getObject(client *s3.S3, dst, bucket, key, version string) error {
+       req := &s3.GetObjectInput{
+               Bucket: aws.String(bucket),
+               Key:    aws.String(key),
+       }
+       if version != "" {
+               req.VersionId = aws.String(version)
+       }
+
+       resp, err := client.GetObject(req)
+       if err != nil {
+               return err
+       }
+
+       // Create all the parent directories
+       if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
+               return err
+       }
+
+       f, err := os.Create(dst)
+       if err != nil {
+               return err
+       }
+       defer f.Close()
+
+       _, err = io.Copy(f, resp.Body)
+       return err
+}
+
+func (g *S3Getter) getAWSConfig(region string, creds *credentials.Credentials) *aws.Config {
+       conf := &aws.Config{}
+       if creds == nil {
+               // Grab the metadata URL
+               metadataURL := os.Getenv("AWS_METADATA_URL")
+               if metadataURL == "" {
+                       metadataURL = "http://169.254.169.254:80/latest"
+               }
+
+               creds = credentials.NewChainCredentials(
+                       []credentials.Provider{
+                               &credentials.EnvProvider{},
+                               &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+                               &ec2rolecreds.EC2RoleProvider{
+                                       Client: ec2metadata.New(session.New(&aws.Config{
+                                               Endpoint: aws.String(metadataURL),
+                                       })),
+                               },
+                       })
+       }
+
+       conf.Credentials = creds
+       if region != "" {
+               conf.Region = aws.String(region)
+       }
+
+       return conf
+}
+
+func (g *S3Getter) parseUrl(u *url.URL) (region, bucket, path, version string, creds *credentials.Credentials, err error) {
+       // Expected host style: s3.amazonaws.com. Such hosts always have 3 parts,
+       // although the first may differ if we're accessing a specific region.
+       hostParts := strings.Split(u.Host, ".")
+       if len(hostParts) != 3 {
+               err = fmt.Errorf("URL is not a valid S3 URL")
+               return
+       }
+
+       // Parse the region out of the first part of the host
+       region = strings.TrimPrefix(strings.TrimPrefix(hostParts[0], "s3-"), "s3")
+       if region == "" {
+               region = "us-east-1"
+       }
+
+       pathParts := strings.SplitN(u.Path, "/", 3)
+       if len(pathParts) != 3 {
+               err = fmt.Errorf("URL is not a valid S3 URL")
+               return
+       }
+
+       bucket = pathParts[1]
+       path = pathParts[2]
+       version = u.Query().Get("version")
+
+       _, hasAwsId := u.Query()["aws_access_key_id"]
+       _, hasAwsSecret := u.Query()["aws_access_key_secret"]
+       _, hasAwsToken := u.Query()["aws_access_token"]
+       if hasAwsId || hasAwsSecret || hasAwsToken {
+               creds = credentials.NewStaticCredentials(
+                       u.Query().Get("aws_access_key_id"),
+                       u.Query().Get("aws_access_key_secret"),
+                       u.Query().Get("aws_access_token"),
+               )
+       }
+
+       return
+}
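
Putting parseUrl together, a sketch with placeholder bucket, key, and
version:

    package main

    import (
        "log"

        getter "github.com/hashicorp/go-getter"
    )

    func main() {
        // Region comes from the host ("s3-eu-west-1" -> "eu-west-1"; bare
        // "s3" defaults to us-east-1). Credentials may be passed via the
        // aws_access_key_id / aws_access_key_secret / aws_access_token
        // query parameters, otherwise the env/shared/EC2-role chain above
        // is consulted.
        src := "s3::https://s3-eu-west-1.amazonaws.com/my-bucket/dir/file.zip?version=1"
        if err := getter.GetFile("./file.zip", src); err != nil {
            log.Fatal(err)
        }
    }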
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url.go b/vendor/github.com/hashicorp/go-getter/helper/url/url.go
new file mode 100644 (file)
index 0000000..02497c2
--- /dev/null
@@ -0,0 +1,14 @@
+package url
+
+import (
+       "net/url"
+)
+
+// Parse parses rawURL into a URL structure.
+// The rawURL may be relative or absolute.
+//
+// Parse is a wrapper for the Go stdlib net/url Parse function, but returns
+// Windows "safe" URLs on Windows platforms.
+func Parse(rawURL string) (*url.URL, error) {
+       return parse(rawURL)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_unix.go
new file mode 100644 (file)
index 0000000..ed1352a
--- /dev/null
@@ -0,0 +1,11 @@
+// +build !windows
+
+package url
+
+import (
+       "net/url"
+)
+
+func parse(rawURL string) (*url.URL, error) {
+       return url.Parse(rawURL)
+}
diff --git a/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go b/vendor/github.com/hashicorp/go-getter/helper/url/url_windows.go
new file mode 100644 (file)
index 0000000..4655226
--- /dev/null
@@ -0,0 +1,40 @@
+package url
+
+import (
+       "fmt"
+       "net/url"
+       "path/filepath"
+       "strings"
+)
+
+func parse(rawURL string) (*url.URL, error) {
+       // Make sure we're using "/" since URLs are "/"-based.
+       rawURL = filepath.ToSlash(rawURL)
+
+       u, err := url.Parse(rawURL)
+       if err != nil {
+               return nil, err
+       }
+
+       if len(rawURL) > 1 && rawURL[1] == ':' {
+               // Assume we're dealing with a drive letter file path where the drive
+               // letter has been parsed into the URL Scheme, and the rest of the path
+               // has been parsed into the URL Path without the leading ':' character.
+               u.Path = fmt.Sprintf("%s:%s", string(rawURL[0]), u.Path)
+               u.Scheme = ""
+       }
+
+       if len(u.Host) > 1 && u.Host[1] == ':' && strings.HasPrefix(rawURL, "file://") {
+               // Assume we're dealing with a drive letter file path where the drive
+               // letter has been parsed into the URL Host.
+               u.Path = fmt.Sprintf("%s%s", u.Host, u.Path)
+               u.Host = ""
+       }
+
+       // Remove leading slash for absolute file paths.
+       if len(u.Path) > 2 && u.Path[0] == '/' && u.Path[2] == ':' {
+               u.Path = u.Path[1:]
+       }
+
+       return u, err
+}
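
As a sketch, the fix-ups above map placeholder inputs like these (each ends
up with Path "C:/Users/foo"; the file:// forms keep Scheme "file"):

    package main

    import (
        "fmt"
        "log"

        urlhelper "github.com/hashicorp/go-getter/helper/url"
    )

    func main() {
        for _, raw := range []string{
            `C:\Users\foo`,
            "file:///C:/Users/foo",
            "file://C:/Users/foo",
        } {
            u, err := urlhelper.Parse(raw)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%q -> scheme=%q host=%q path=%q\n",
                raw, u.Scheme, u.Host, u.Path)
        }
    }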
diff --git a/vendor/github.com/hashicorp/go-getter/netrc.go b/vendor/github.com/hashicorp/go-getter/netrc.go
new file mode 100644 (file)
index 0000000..c7f6a3f
--- /dev/null
@@ -0,0 +1,67 @@
+package getter
+
+import (
+       "fmt"
+       "net/url"
+       "os"
+       "runtime"
+
+       "github.com/bgentry/go-netrc/netrc"
+       "github.com/mitchellh/go-homedir"
+)
+
+// addAuthFromNetrc adds auth information to the URL from the user's
+// netrc file if it can be found. This will only add the auth info
+// if the URL doesn't already have auth info specified, i.e. the
+// username is blank.
+func addAuthFromNetrc(u *url.URL) error {
+       // If the URL already has auth information, do nothing
+       if u.User != nil && u.User.Username() != "" {
+               return nil
+       }
+
+       // Get the netrc file path
+       path := os.Getenv("NETRC")
+       if path == "" {
+               filename := ".netrc"
+               if runtime.GOOS == "windows" {
+                       filename = "_netrc"
+               }
+
+               var err error
+               path, err = homedir.Expand("~/" + filename)
+               if err != nil {
+                       return err
+               }
+       }
+
+       // If the path is not a regular file, then do nothing
+       if fi, err := os.Stat(path); err != nil {
+               // File doesn't exist, do nothing
+               if os.IsNotExist(err) {
+                       return nil
+               }
+
+               // Some other error!
+               return err
+       } else if fi.IsDir() {
+               // File is directory, ignore
+               return nil
+       }
+
+       // Load up the netrc file
+       net, err := netrc.ParseFile(path)
+       if err != nil {
+               return fmt.Errorf("Error parsing netrc file at %q: %s", path, err)
+       }
+
+       machine := net.FindMachine(u.Host)
+       if machine == nil {
+               // Machine not found, no problem
+               return nil
+       }
+
+       // Set the user info
+       u.User = url.UserPassword(machine.Login, machine.Password)
+       return nil
+}
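
For reference, a netrc entry that FindMachine would match looks like this
sketch; the host and credentials are placeholders (the file is ~/.netrc,
or ~/_netrc on Windows, unless NETRC points elsewhere):

    machine example.com
    login placeholder-user
    password placeholder-pass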
diff --git a/vendor/github.com/hashicorp/go-getter/source.go b/vendor/github.com/hashicorp/go-getter/source.go
new file mode 100644 (file)
index 0000000..4d5ee3c
--- /dev/null
@@ -0,0 +1,36 @@
+package getter
+
+import (
+       "strings"
+)
+
+// SourceDirSubdir takes a source and returns a tuple of the URL without
+// the subdir and the URL with the subdir.
+func SourceDirSubdir(src string) (string, string) {
+       // Calculate an offset to avoid accidentally marking the scheme
+       // as the dir.
+       var offset int
+       if idx := strings.Index(src, "://"); idx > -1 {
+               offset = idx + 3
+       }
+
+       // First see if we even have an explicit subdir
+       idx := strings.Index(src[offset:], "//")
+       if idx == -1 {
+               return src, ""
+       }
+
+       idx += offset
+       subdir := src[idx+2:]
+       src = src[:idx]
+
+       // Next, check if we have query parameters and push them onto the
+       // URL.
+       if idx = strings.Index(subdir, "?"); idx > -1 {
+               query := subdir[idx:]
+               subdir = subdir[:idx]
+               src += query
+       }
+
+       return src, subdir
+}
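
A sketch of the split with a placeholder URL:

    package main

    import (
        "fmt"

        getter "github.com/hashicorp/go-getter"
    )

    func main() {
        // "//" marks the subdirectory; query parameters are pushed back
        // onto the URL portion.
        src, subdir := getter.SourceDirSubdir("https://example.com/repo//modules/vpc?ref=v1")
        fmt.Println(src)    // https://example.com/repo?ref=v1
        fmt.Println(subdir) // modules/vpc
    }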
diff --git a/vendor/github.com/hashicorp/go-getter/storage.go b/vendor/github.com/hashicorp/go-getter/storage.go
new file mode 100644 (file)
index 0000000..2bc6b9e
--- /dev/null
@@ -0,0 +1,13 @@
+package getter
+
+// Storage is an interface that knows how to look up downloaded directories
+// as well as download and update directories from their sources into the
+// proper location.
+type Storage interface {
+       // Dir returns the directory on local disk where the directory source
+       // can be loaded from.
+       Dir(string) (string, bool, error)
+
+       // Get will download and optionally update the given directory.
+       Get(string, string, bool) error
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE
new file mode 100644 (file)
index 0000000..82b4de9
--- /dev/null
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-multierror/README.md b/vendor/github.com/hashicorp/go-multierror/README.md
new file mode 100644 (file)
index 0000000..e81be50
--- /dev/null
@@ -0,0 +1,91 @@
+# go-multierror
+
+`go-multierror` is a package for Go that provides a mechanism for
+representing a list of `error` values as a single `error`.
+
+This allows a function in Go to return an `error` that might actually
+be a list of errors. If the caller knows this, they can unwrap the
+list and access the errors. If the caller doesn't know, the error
+formats into a readable, human-friendly message.
+
+`go-multierror` implements the
+[errwrap](https://github.com/hashicorp/errwrap) interface so that it can
+be used with that library, as well.
+
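+For instance (a minimal sketch, not taken from this package), errwrap's
+query helpers can inspect a multierror because it exposes its wrapped list:
+
+```go
+result := multierror.Append(nil, errors.New("permission denied"))
+
+// errwrap walks the wrapped errors via WrappedErrors.
+if errwrap.Contains(result, "permission denied") {
+       // handle the specific failure
+}
+```
+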
+## Installation and Docs
+
+Install using `go get github.com/hashicorp/go-multierror`.
+
+Full documentation is available at
+http://godoc.org/github.com/hashicorp/go-multierror
+
+## Usage
+
+go-multierror is easy to use and purposely built to be unobtrusive in
+existing Go applications/libraries that may not be aware of it.
+
+**Building a list of errors**
+
+The `Append` function is used to create a list of errors. This function
+behaves a lot like the Go built-in `append` function: whether the first
+argument is nil, a `multierror.Error`, or any other `error` value, the
+function behaves as you would expect.
+
+```go
+var result error
+
+if err := step1(); err != nil {
+       result = multierror.Append(result, err)
+}
+if err := step2(); err != nil {
+       result = multierror.Append(result, err)
+}
+
+return result
+```
+
+**Customizing the formatting of the errors**
+
+By specifying a custom `ErrorFormat`, you can customize the format
+of the `Error() string` function:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here, maybe using Append
+
+if result != nil {
+       result.ErrorFormat = func([]error) string {
+               return "errors!"
+       }
+}
+```
+
+**Accessing the list of errors**
+
+`multierror.Error` implements `error`, so if the caller doesn't know about
+multierror, it will work just fine. But if you're aware a multierror might
+be returned, you can use a type assertion to access the list of errors:
+
+```go
+if err := something(); err != nil {
+       if merr, ok := err.(*multierror.Error); ok {
+               // Use merr.Errors
+       }
+}
+```
+
+**Returning a multierror only if there are errors**
+
+If you build a `multierror.Error`, you can use the `ErrorOrNil` method
+to return an `error` implementation only if there are errors to return:
+
+```go
+var result *multierror.Error
+
+// ... accumulate errors here
+
+// Return the `error` only if errors were added to the multierror, otherwise
+// return nil since there are no errors.
+return result.ErrorOrNil()
+```
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644 (file)
index 0000000..00afa9b
--- /dev/null
@@ -0,0 +1,37 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
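+//
+// A quick sketch (err1, err2, err3 are arbitrary error values):
+//
+//     err := multierror.Append(nil, err1)
+//     err = multierror.Append(err, err2, err3) // err now wraps err1, err2, err3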
+func Append(err error, errs ...error) *Error {
+       switch err := err.(type) {
+       case *Error:
+               // Typed nils can reach here, so initialize if we are nil
+               if err == nil {
+                       err = new(Error)
+               }
+
+               // Go through each error and flatten
+               for _, e := range errs {
+                       switch e := e.(type) {
+                       case *Error:
+                               err.Errors = append(err.Errors, e.Errors...)
+                       default:
+                               err.Errors = append(err.Errors, e)
+                       }
+               }
+
+               return err
+       default:
+               newErrs := make([]error, 0, len(errs)+1)
+               if err != nil {
+                       newErrs = append(newErrs, err)
+               }
+               newErrs = append(newErrs, errs...)
+
+               return Append(&Error{}, newErrs...)
+       }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644 (file)
index 0000000..aab8e9a
--- /dev/null
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
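+//
+// For example (sketch): if err wraps [e1, *Error{e2, e3}], then
+// Flatten(err) returns a single *Error wrapping [e1, e2, e3].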
+func Flatten(err error) error {
+       // If it isn't an *Error, just return the error as-is
+       if _, ok := err.(*Error); !ok {
+               return err
+       }
+
+       // Otherwise, make the result and flatten away!
+       flatErr := new(Error)
+       flatten(err, flatErr)
+       return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+       switch err := err.(type) {
+       case *Error:
+               for _, e := range err.Errors {
+                       flatten(e, flatErr)
+               }
+       default:
+               flatErr.Errors = append(flatErr.Errors, err)
+       }
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644 (file)
index 0000000..bb65a12
--- /dev/null
@@ -0,0 +1,23 @@
+package multierror
+
+import (
+       "fmt"
+       "strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
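+//
+// For example, two errors render as:
+//
+//     2 error(s) occurred:
+//
+//     * first
+//     * second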
+func ListFormatFunc(es []error) string {
+       points := make([]string, len(es))
+       for i, err := range es {
+               points[i] = fmt.Sprintf("* %s", err)
+       }
+
+       return fmt.Sprintf(
+               "%d error(s) occurred:\n\n%s",
+               len(es), strings.Join(points, "\n"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644 (file)
index 0000000..2ea0827
--- /dev/null
@@ -0,0 +1,51 @@
+package multierror
+
+import (
+       "fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate multiple errors and return them as a single "error" value.
+type Error struct {
+       Errors      []error
+       ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+       fn := e.ErrorFormat
+       if fn == nil {
+               fn = ListFormatFunc
+       }
+
+       return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
+func (e *Error) ErrorOrNil() error {
+       if e == nil {
+               return nil
+       }
+       if len(e.Errors) == 0 {
+               return nil
+       }
+
+       return e
+}
+
+func (e *Error) GoString() string {
+       return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping.
+// It is an implementation of the errwrap.Wrapper interface so that
+// multierror.Error can be used with that library.
+//
+// This method is not safe to be called concurrently and is no different
+// from accessing the Errors field directly. It is implemented only to
+// satisfy the errwrap.Wrapper interface.
+func (e *Error) WrappedErrors() []error {
+       return e.Errors
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go
new file mode 100644 (file)
index 0000000..5c477ab
--- /dev/null
@@ -0,0 +1,37 @@
+package multierror
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/errwrap"
+)
+
+// Prefix is a helper function that will prefix some text
+// to the given error. If the error is a multierror.Error, then
+// it will be prefixed to each wrapped error.
+//
+// This is useful when appending multiple multierrors
+// together, since it gives better scoping to each error.
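+//
+// A quick sketch:
+//
+//     err := multierror.Append(nil, errors.New("bad port"))
+//     wrapped := multierror.Prefix(err, "listener:")
+//     // each wrapped error now formats as "listener: bad port"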
+func Prefix(err error, prefix string) error {
+       if err == nil {
+               return nil
+       }
+
+       format := fmt.Sprintf("%s {{err}}", prefix)
+       switch err := err.(type) {
+       case *Error:
+               // Typed nils can reach here, so initialize if we are nil
+               if err == nil {
+                       err = new(Error)
+               }
+
+               // Wrap each of the errors
+               for i, e := range err.Errors {
+                       err.Errors[i] = errwrap.Wrapf(format, e)
+               }
+
+               return err
+       default:
+               return errwrap.Wrapf(format, err)
+       }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/LICENSE b/vendor/github.com/hashicorp/go-plugin/LICENSE
new file mode 100644 (file)
index 0000000..82b4de9
--- /dev/null
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-plugin/README.md b/vendor/github.com/hashicorp/go-plugin/README.md
new file mode 100644 (file)
index 0000000..2058cfb
--- /dev/null
@@ -0,0 +1,161 @@
+# Go Plugin System over RPC
+
+`go-plugin` is a Go (golang) plugin system over RPC. It is the plugin system
+that has been in use by HashiCorp tooling for over 3 years. While initially
+created for [Packer](https://www.packer.io), it has since been used by
+[Terraform](https://www.terraform.io) and [Otto](https://www.ottoproject.io),
+with plans to also use it for [Nomad](https://www.nomadproject.io) and
+[Vault](https://www.vaultproject.io).
+
+While the plugin system is over RPC, it is currently only designed to work
+over a local [reliable] network. Plugins over a real network are not supported
+and will lead to unexpected behavior.
+
+This plugin system has been used on millions of machines across many different
+projects and has proven to be battle-hardened and ready for production use.
+
+## Features
+
+The HashiCorp plugin system supports a number of features:
+
+**Plugins are Go interface implementations.** This makes writing and consuming
+plugins feel very natural. As a plugin author, you just implement an
+interface as if it were going to run in the same process. As a plugin user,
+you just call functions on an interface as if it were in the same
+process. This plugin system handles the communication in between.
+
+**Complex arguments and return values are supported.** This library
+provides APIs for handling complex arguments and return values such
+as interfaces, `io.Reader/Writer`, etc. We do this by giving you a library
+(`MuxBroker`) for creating new connections between the client/server to
+serve additional interfaces or transfer raw data.
+
+**Bidirectional communication.** Because the plugin system supports
+complex arguments, the host process can send it interface implementations
+and the plugin can call back into the host process.
+
+**Built-in Logging.** Any plugins that use the `log` standard library
+will have log data automatically sent to the host process. The host
+process will mirror this output prefixed with the path to the plugin
+binary. This makes debugging with plugins simple.
+
+**Protocol Versioning.** A very basic "protocol version" is supported that
+can be incremented to invalidate any previous plugins. This is useful when
+interface signatures are changing, protocol level changes are necessary,
+etc. When a protocol version is incompatible, a human friendly error
+message is shown to the end user.
+
+**Stdout/Stderr Syncing.** While plugins are subprocesses, they can continue
+to use stdout/stderr as usual and the output will get mirrored back to
+the host process. The host process can control which `io.Writer` these
+streams are sent to in order to prevent this mirroring.
+
+**TTY Preservation.** Plugin subprocesses are connected to the same
+stdin file descriptor as the host process, allowing software that requires
+a TTY to work. For example, a plugin can execute `ssh`, and even though there
+are multiple subprocesses and RPC happening, it will look and act like a
+single process to the end user.
+
+**Host upgrade while a plugin is running.** Plugins can be "reattached"
+so that the host process can be upgraded while the plugin is still running.
+This requires the host/plugin to know this is possible and daemonize
+properly. `NewClient` takes a `ReattachConfig` to determine if and how to
+reattach.
+
+## Architecture
+
+The HashiCorp plugin system works by launching subprocesses and communicating
+over RPC (using standard `net/rpc`). A single connection is made between
+any plugin and the host process, and we use a
+[connection multiplexing](https://github.com/hashicorp/yamux)
+library to multiplex any other connections on top.
+
+This architecture has a number of benefits:
+
+  * Plugins can't crash your host process: A panic in a plugin doesn't
+    panic the plugin user.
+
+  * Plugins are very easy to write: just write a Go application and `go build`.
+    Theoretically you could also use another language, as long as it can
+    speak the Go `net/rpc` protocol, but this hasn't yet been tried.
+
+  * Plugins are very easy to install: just put the binary in a location where
+    the host will find it (depends on the host but this library also provides
+    helpers), and the plugin host handles the rest.
+
+  * Plugins can be relatively secure: The plugin only has access to the
+    interfaces and args given to it, not to the entire memory space of the
+    process. More security features are planned (see the coming soon section
+    below).
+
+## Usage
+
+To use the plugin system, you must take the following high-level steps.
+Examples are available in the `examples/` directory.
+
+  1. Choose the interface(s) you want to expose for plugins.
+
+  2. For each interface, implement a client for that interface
+     that communicates over an `*rpc.Client` (from the standard `net/rpc`
+     package) for every function call. Likewise, implement the RPC server
+     struct that it communicates with, which in turn delegates to a real,
+     concrete implementation.
+
+  3. Create a `Plugin` implementation that knows how to create the RPC
+     client/server for a given plugin type.
+
+  4. Plugin authors call `plugin.Serve` to serve a plugin from the
+     `main` function.
+
+  5. Plugin users use `plugin.Client` to launch a subprocess and request
+     an interface implementation over RPC.
+
+That's it! In practice, step 2 is the most tedious and time-consuming.
+Even so, it isn't very difficult, and you can see examples in the `examples/`
+directory as well as throughout our various open source projects.
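+
+To make these steps concrete, here is a condensed sketch of the host side,
+modeled on the pattern above. The `Greeter` names are illustrative rather
+than part of this library, and error handling is elided:
+
+```go
+package main
+
+import (
+       "fmt"
+       "net/rpc"
+       "os/exec"
+
+       "github.com/hashicorp/go-plugin"
+)
+
+// Step 1: the interface exposed to plugins (a hypothetical example).
+type Greeter interface {
+       Greet() string
+}
+
+// Step 2a: the client side, forwarding calls over net/rpc.
+type GreeterRPC struct{ client *rpc.Client }
+
+func (g *GreeterRPC) Greet() string {
+       var resp string
+       g.client.Call("Plugin.Greet", new(interface{}), &resp)
+       return resp
+}
+
+// Step 2b: the server side, delegating to a concrete implementation.
+type GreeterRPCServer struct{ Impl Greeter }
+
+func (s *GreeterRPCServer) Greet(args interface{}, resp *string) error {
+       *resp = s.Impl.Greet()
+       return nil
+}
+
+// Step 3: a Plugin implementation that knows how to build both halves.
+type GreeterPlugin struct{ Impl Greeter }
+
+func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
+       return &GreeterRPCServer{Impl: p.Impl}, nil
+}
+
+func (*GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+       return &GreeterRPC{client: c}, nil
+}
+
+// Step 5: the host launches the plugin binary and dispenses the interface.
+// (Step 4, in the plugin binary's own main, calls plugin.Serve with the
+// same handshake and plugin map.)
+func main() {
+       client := plugin.NewClient(&plugin.ClientConfig{
+               HandshakeConfig: plugin.HandshakeConfig{
+                       ProtocolVersion:  1,
+                       MagicCookieKey:   "BASIC_PLUGIN",
+                       MagicCookieValue: "hello",
+               },
+               Plugins: map[string]plugin.Plugin{"greeter": &GreeterPlugin{}},
+               Cmd:     exec.Command("./greeter-plugin"),
+       })
+       defer client.Kill()
+
+       rpcClient, _ := client.Client()
+       raw, _ := rpcClient.Dispense("greeter")
+       fmt.Println(raw.(Greeter).Greet())
+}
+```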
+
+For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-plugin).
+
+## Roadmap
+
+Our plugin system is constantly evolving. As we use the plugin system for
+new projects or for new features in existing projects, we constantly find
+improvements we can make.
+
+At this point in time, the roadmap for the plugin system is:
+
+**Cryptographically Secure Plugins.** We'll implement plugin signing and
+the loading of signed plugins in order to allow Vault to make use of
+multi-process plugins in a secure way.
+
+**Semantic Versioning.** Plugins will be able to implement a semantic version.
+This plugin system will give host processes a system for constraining
+versions. This is in addition to the protocol versioning already present
+which is more for larger underlying changes.
+
+**Plugin fetching.** We will integrate with [go-getter](https://github.com/hashicorp/go-getter)
+to support automatic download + install of plugins. Paired with cryptographically
+secure plugins (above), we can make this a safe operation for an amazing
+user experience.
+
+## What About Shared Libraries?
+
+When we started using plugins (late 2012, early 2013), plugins over RPC
+were the only option since Go didn't support dynamic library loading. Today,
+Go still doesn't support dynamic library loading, but the Go team intends to
+add it. Since 2012, our plugin system has been stabilized by millions of
+users, and has many benefits we've come to value greatly.
+
+For example, we intend to use this plugin system in
+[Vault](https://www.vaultproject.io), and dynamic library loading will
+simply never be acceptable in Vault for security reasons. That is an extreme
+example, but we believe our plugin system has more upsides than downsides
+compared to dynamic library loading, and since we've had it built and tested
+for years, we'll likely continue to use it.
+
+Shared libraries have one major advantage over our system: much
+higher performance. In real-world scenarios across our various tools,
+we've never needed more performance out of our plugin system, and it
+has seen very high throughput, so this isn't a concern for us at the moment.
+
diff --git a/vendor/github.com/hashicorp/go-plugin/client.go b/vendor/github.com/hashicorp/go-plugin/client.go
new file mode 100644 (file)
index 0000000..9f8a0f2
--- /dev/null
@@ -0,0 +1,581 @@
+package plugin
+
+import (
+       "bufio"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "net"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "strconv"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "time"
+       "unicode"
+)
+
+// If this is 1, then we've called CleanupClients. This can be used
+// by plugin RPC implementations to change error behavior, since you
+// can expect network connection errors at this point. This should be
+// read using sync/atomic.
+var Killed uint32 = 0
+
+// This is a slice of the "managed" clients which are cleaned up when
+// calling CleanupClients.
+var managedClients = make([]*Client, 0, 5)
+var managedClientsLock sync.Mutex
+
+// Error types
+var (
+       // ErrProcessNotFound is returned when a client is instantiated to
+       // reattach to an existing process and it isn't found.
+       ErrProcessNotFound = errors.New("Reattachment process not found")
+)
+
+// Client handles the lifecycle of a plugin application. It launches
+// plugins, connects to them, dispenses interface implementations, and handles
+// killing the process.
+//
+// Plugin hosts should use one Client for each plugin executable. To
+// dispense a plugin type, use the `Client.Client` function, and then
+// call `Dispense`. This awkward API is mostly historical but is used to split
+// the client that deals with subprocess management and the client that
+// does RPC management.
+//
+// See NewClient and ClientConfig for using a Client.
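+//
+// A minimal lifecycle sketch (error handling elided; "plugin-name" is
+// whatever key the host registered in ClientConfig.Plugins):
+//
+//     client := plugin.NewClient(&plugin.ClientConfig{ /* ... */ })
+//     defer client.Kill()
+//
+//     rpcClient, _ := client.Client()
+//     raw, _ := rpcClient.Dispense("plugin-name")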
+type Client struct {
+       config      *ClientConfig
+       exited      bool
+       doneLogging chan struct{}
+       l           sync.Mutex
+       address     net.Addr
+       process     *os.Process
+       client      *RPCClient
+}
+
+// ClientConfig is the configuration used to initialize a new
+// plugin client. After being used to initialize a plugin client,
+// that configuration must not be modified again.
+type ClientConfig struct {
+       // HandshakeConfig is the configuration that must match servers.
+       HandshakeConfig
+
+       // Plugins are the plugins that can be consumed.
+       Plugins map[string]Plugin
+
+       // One of the following must be set, but not both.
+       //
+       // Cmd is the unstarted subprocess for starting the plugin. If this is
+       // set, then the Client starts the plugin process on its own and connects
+       // to it.
+       //
+       // Reattach is configuration for reattaching to an existing plugin process
+       // that is already running. This isn't common.
+       Cmd      *exec.Cmd
+       Reattach *ReattachConfig
+
+       // Managed indicates whether the client should be managed by the
+       // plugin package. If true, then by calling CleanupClients,
+       // it will automatically be cleaned up. Otherwise, the client
+       // user is fully responsible for making sure to Kill all plugin
+       // clients. By default the client is _not_ managed.
+       Managed bool
+
+       // The minimum and maximum port to use for communicating with
+       // the subprocess. If not set, this defaults to 10,000 and 25,000
+       // respectively.
+       MinPort, MaxPort uint
+
+       // StartTimeout is the timeout to wait for the plugin to say it
+       // has started successfully.
+       StartTimeout time.Duration
+
+       // If non-nil, then the stderr of the client will be written to here
+       // (as well as the log). This is the original os.Stderr of the subprocess.
+       // This isn't the output of synced stderr.
+       Stderr io.Writer
+
+       // SyncStdout, SyncStderr can be set to override the
+       // respective os.Std* values in the plugin. Care should be taken to
+       // avoid races here.
+       //
+       // If these are left nil (the default), this package will not
+       // sync any of these streams; NewClient discards that output instead.
+       SyncStdout io.Writer
+       SyncStderr io.Writer
+}
+
+// ReattachConfig is used to configure a client to reattach to an
+// already-running plugin process. You can retrieve this information by
+// calling ReattachConfig on Client.
+type ReattachConfig struct {
+       Addr net.Addr
+       Pid  int
+}
+
+// CleanupClients makes sure all the managed subprocesses are killed and
+// properly logged. This should be called before the parent process running the
+// plugins exits.
+//
+// This must only be called _once_.
+func CleanupClients() {
+       // Set the killed to true so that we don't get unexpected panics
+       atomic.StoreUint32(&Killed, 1)
+
+       // Kill all the managed clients in parallel and use a WaitGroup
+       // to wait for them all to finish up.
+       var wg sync.WaitGroup
+       managedClientsLock.Lock()
+       for _, client := range managedClients {
+               wg.Add(1)
+
+               go func(client *Client) {
+                       client.Kill()
+                       wg.Done()
+               }(client)
+       }
+       managedClientsLock.Unlock()
+
+       log.Println("[DEBUG] plugin: waiting for all plugin processes to complete...")
+       wg.Wait()
+}
+
+// NewClient creates a new plugin client which manages the lifecycle of an
+// external plugin and gets the address for the RPC connection.
+//
+// The client must be cleaned up at some point by calling Kill(). If
+// the client is a managed client (created with ClientConfig.Managed set to
+// true) you can just call CleanupClients at the end of your program and they
+// will be properly cleaned.
+func NewClient(config *ClientConfig) (c *Client) {
+       if config.MinPort == 0 && config.MaxPort == 0 {
+               config.MinPort = 10000
+               config.MaxPort = 25000
+       }
+
+       if config.StartTimeout == 0 {
+               config.StartTimeout = 1 * time.Minute
+       }
+
+       if config.Stderr == nil {
+               config.Stderr = ioutil.Discard
+       }
+
+       if config.SyncStdout == nil {
+               config.SyncStdout = ioutil.Discard
+       }
+       if config.SyncStderr == nil {
+               config.SyncStderr = ioutil.Discard
+       }
+
+       c = &Client{config: config}
+       if config.Managed {
+               managedClientsLock.Lock()
+               managedClients = append(managedClients, c)
+               managedClientsLock.Unlock()
+       }
+
+       return
+}
+
+// Client returns an RPC client for the plugin.
+//
+// Subsequent calls to this will return the same RPC client.
+func (c *Client) Client() (*RPCClient, error) {
+       addr, err := c.Start()
+       if err != nil {
+               return nil, err
+       }
+
+       c.l.Lock()
+       defer c.l.Unlock()
+
+       if c.client != nil {
+               return c.client, nil
+       }
+
+       // Connect to the client
+       conn, err := net.Dial(addr.Network(), addr.String())
+       if err != nil {
+               return nil, err
+       }
+       if tcpConn, ok := conn.(*net.TCPConn); ok {
+               // Make sure to set keep alive so that the connection doesn't die
+               tcpConn.SetKeepAlive(true)
+       }
+
+       // Create the actual RPC client
+       c.client, err = NewRPCClient(conn, c.config.Plugins)
+       if err != nil {
+               conn.Close()
+               return nil, err
+       }
+
+       // Begin the stream syncing so that stdin, out, err work properly
+       err = c.client.SyncStreams(
+               c.config.SyncStdout,
+               c.config.SyncStderr)
+       if err != nil {
+               c.client.Close()
+               c.client = nil
+               return nil, err
+       }
+
+       return c.client, nil
+}
+
+// Exited reports whether the underlying process has exited.
+func (c *Client) Exited() bool {
+       c.l.Lock()
+       defer c.l.Unlock()
+       return c.exited
+}
+
+// Kill ends the executing subprocess (if it is running) and performs any
+// cleanup tasks necessary, such as capturing any remaining logs and so on.
+//
+// This method blocks until the process successfully exits.
+//
+// This method can safely be called multiple times.
+func (c *Client) Kill() {
+       // Grab a lock to read some private fields.
+       c.l.Lock()
+       process := c.process
+       addr := c.address
+       doneCh := c.doneLogging
+       c.l.Unlock()
+
+       // If there is no process, we never started anything. Nothing to kill.
+       if process == nil {
+               return
+       }
+
+       // We need to check for address here. It is possible that the plugin
+       // started (process != nil) but has no address (addr == nil) if the
+       // plugin failed at startup. If we do have an address, we need to close
+       // the plugin net connections.
+       graceful := false
+       if addr != nil {
+               // Close the client to cleanly exit the process.
+               client, err := c.Client()
+               if err == nil {
+                       err = client.Close()
+
+                       // If there is no error, then we attempt to wait for a graceful
+                       // exit. If there was an error, we assume that graceful cleanup
+                       // won't happen and just force kill.
+                       graceful = err == nil
+                       if err != nil {
+                               // If there was an error just log it. We're going to force
+                               // kill in a moment anyways.
+                               log.Printf(
+                                       "[WARN] plugin: error closing client during Kill: %s", err)
+                       }
+               }
+       }
+
+       // If we're attempting a graceful exit, then we wait for a short period
+       // of time to allow that to happen. To wait for this we just wait on the
+       // doneCh which would be closed if the process exits.
+       if graceful {
+               select {
+               case <-doneCh:
+                       return
+               case <-time.After(250 * time.Millisecond):
+               }
+       }
+
+       // If graceful exiting failed, just kill it
+       process.Kill()
+
+       // Wait for the client to finish logging so we have a complete log
+       <-doneCh
+}
+
+// Start starts the underlying subprocess, communicating with it to negotiate
+// a port for RPC connections, and returns the address to connect to via RPC.
+//
+// This method is safe to call multiple times. Subsequent calls have no effect.
+// Once a client has been started once, it cannot be started again, even if
+// it was killed.
+func (c *Client) Start() (addr net.Addr, err error) {
+       c.l.Lock()
+       defer c.l.Unlock()
+
+       if c.address != nil {
+               return c.address, nil
+       }
+
+       // Exactly one of Cmd or Reattach must be set; anything else is an
+       // error. We wrap this in a {} for scoping reasons, hoping that
+       // escape analysis will keep these variables on the stack.
+       {
+               cmdSet := c.config.Cmd != nil
+               attachSet := c.config.Reattach != nil
+               if cmdSet == attachSet {
+                       return nil, fmt.Errorf("Only one of Cmd or Reattach must be set")
+               }
+       }
+
+       // Create the logging channel for when we kill
+       c.doneLogging = make(chan struct{})
+
+       if c.config.Reattach != nil {
+               // Verify the process still exists. If not, then it is an error
+               p, err := os.FindProcess(c.config.Reattach.Pid)
+               if err != nil {
+                       return nil, err
+               }
+
+               // Attempt to connect to the addr since on Unix systems FindProcess
+               // doesn't actually return an error if it can't find the process.
+               conn, err := net.Dial(
+                       c.config.Reattach.Addr.Network(),
+                       c.config.Reattach.Addr.String())
+               if err != nil {
+                       p.Kill()
+                       return nil, ErrProcessNotFound
+               }
+               conn.Close()
+
+               // Goroutine to mark exit status
+               go func(pid int) {
+                       // Wait for the process to die
+                       pidWait(pid)
+
+                       // Log so we can see it
+                       log.Printf("[DEBUG] plugin: reattached plugin process exited\n")
+
+                       // Mark it
+                       c.l.Lock()
+                       defer c.l.Unlock()
+                       c.exited = true
+
+                       // Close the logging channel since that doesn't work on reattach
+                       close(c.doneLogging)
+               }(p.Pid)
+
+               // Set the address and process
+               c.address = c.config.Reattach.Addr
+               c.process = p
+
+               return c.address, nil
+       }
+
+       env := []string{
+               fmt.Sprintf("%s=%s", c.config.MagicCookieKey, c.config.MagicCookieValue),
+               fmt.Sprintf("PLUGIN_MIN_PORT=%d", c.config.MinPort),
+               fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort),
+       }
+
+       stdout_r, stdout_w := io.Pipe()
+       stderr_r, stderr_w := io.Pipe()
+
+       cmd := c.config.Cmd
+       cmd.Env = append(cmd.Env, os.Environ()...)
+       cmd.Env = append(cmd.Env, env...)
+       cmd.Stdin = os.Stdin
+       cmd.Stderr = stderr_w
+       cmd.Stdout = stdout_w
+
+       log.Printf("[DEBUG] plugin: starting plugin: %s %#v", cmd.Path, cmd.Args)
+       err = cmd.Start()
+       if err != nil {
+               return
+       }
+
+       // Set the process
+       c.process = cmd.Process
+
+       // Make sure the command is properly cleaned up if there is an error
+       defer func() {
+               r := recover()
+
+               if err != nil || r != nil {
+                       cmd.Process.Kill()
+               }
+
+               if r != nil {
+                       panic(r)
+               }
+       }()
+
+       // Start goroutine to wait for process to exit
+       exitCh := make(chan struct{})
+       go func() {
+               // Make sure we close the write end of our stderr/stdout so
+               // that the readers send EOF properly.
+               defer stderr_w.Close()
+               defer stdout_w.Close()
+
+               // Wait for the command to end.
+               cmd.Wait()
+
+               // Log and make sure to flush the logs right away
+               log.Printf("[DEBUG] plugin: %s: plugin process exited\n", cmd.Path)
+               os.Stderr.Sync()
+
+               // Mark that we exited
+               close(exitCh)
+
+               // Set that we exited, which takes a lock
+               c.l.Lock()
+               defer c.l.Unlock()
+               c.exited = true
+       }()
+
+       // Start goroutine that logs the stderr
+       go c.logStderr(stderr_r)
+
+       // Start a goroutine that is going to be reading the lines
+       // out of stdout
+       linesCh := make(chan []byte)
+       go func() {
+               defer close(linesCh)
+
+               buf := bufio.NewReader(stdout_r)
+               for {
+                       line, err := buf.ReadBytes('\n')
+                       if line != nil {
+                               linesCh <- line
+                       }
+
+                       if err == io.EOF {
+                               return
+                       }
+               }
+       }()
+
+       // Make sure that, after we exit, we keep draining lines from stdout
+       // forever so writes don't block, since it is an io.Pipe
+       defer func() {
+               go func() {
+                       for range linesCh {
+                       }
+               }()
+       }()
+
+       // Some channels for the next step
+       timeout := time.After(c.config.StartTimeout)
+
+       // Start looking for the address
+       log.Printf("[DEBUG] plugin: waiting for RPC address for: %s", cmd.Path)
+       select {
+       case <-timeout:
+               err = errors.New("timeout while waiting for plugin to start")
+       case <-exitCh:
+               err = errors.New("plugin exited before we could connect")
+       case lineBytes := <-linesCh:
+               // Trim the line and split by "|" in order to get the parts of
+               // the output.
+               line := strings.TrimSpace(string(lineBytes))
+               parts := strings.SplitN(line, "|", 4)
+               if len(parts) < 4 {
+                       err = fmt.Errorf(
+                               "Unrecognized remote plugin message: %s\n\n"+
+                                       "This usually means that the plugin is either invalid or simply\n"+
+                                       "needs to be recompiled to support the latest protocol.", line)
+                       return
+               }
+
+               // Check the core protocol. Wrapped in a {} for scoping.
+               {
+                       var coreProtocol int64
+                       coreProtocol, err = strconv.ParseInt(parts[0], 10, 0)
+                       if err != nil {
+                               err = fmt.Errorf("Error parsing core protocol version: %s", err)
+                               return
+                       }
+
+                       if int(coreProtocol) != CoreProtocolVersion {
+                               err = fmt.Errorf("Incompatible core API version with plugin. "+
+                                       "Plugin version: %s, Ours: %d\n\n"+
+                                       "To fix this, the plugin usually only needs to be recompiled.\n"+
+                                       "Please report this to the plugin author.", parts[0], CoreProtocolVersion)
+                               return
+                       }
+               }
+
+               // Parse the protocol version
+               var protocol int64
+               protocol, err = strconv.ParseInt(parts[1], 10, 0)
+               if err != nil {
+                       err = fmt.Errorf("Error parsing protocol version: %s", err)
+                       return
+               }
+
+               // Test the API version
+               if uint(protocol) != c.config.ProtocolVersion {
+                       err = fmt.Errorf("Incompatible API version with plugin. "+
+                               "Plugin version: %s, Ours: %d", parts[1], c.config.ProtocolVersion)
+                       return
+               }
+
+               switch parts[2] {
+               case "tcp":
+                       addr, err = net.ResolveTCPAddr("tcp", parts[3])
+               case "unix":
+                       addr, err = net.ResolveUnixAddr("unix", parts[3])
+               default:
+                       err = fmt.Errorf("Unknown address type: %s", parts[3])
+               }
+       }
+
+       c.address = addr
+       return
+}
+
+// ReattachConfig returns the information that must be provided to NewClient
+// to reattach to the plugin process that this client started. This is
+// useful for plugins that detach from their parent process.
+//
+// If this returns nil then the process hasn't been started yet. Please
+// call Start or Client before calling this.
+func (c *Client) ReattachConfig() *ReattachConfig {
+       c.l.Lock()
+       defer c.l.Unlock()
+
+       if c.address == nil {
+               return nil
+       }
+
+       if c.config.Cmd != nil && c.config.Cmd.Process == nil {
+               return nil
+       }
+
+       // If we connected via reattach, just return the information as-is
+       if c.config.Reattach != nil {
+               return c.config.Reattach
+       }
+
+       return &ReattachConfig{
+               Addr: c.address,
+               Pid:  c.config.Cmd.Process.Pid,
+       }
+}
+
+func (c *Client) logStderr(r io.Reader) {
+       bufR := bufio.NewReader(r)
+       for {
+               line, err := bufR.ReadString('\n')
+               if line != "" {
+                       c.config.Stderr.Write([]byte(line))
+
+                       line = strings.TrimRightFunc(line, unicode.IsSpace)
+                       log.Printf("[DEBUG] plugin: %s: %s", filepath.Base(c.config.Cmd.Path), line)
+               }
+
+               if err == io.EOF {
+                       break
+               }
+       }
+
+       // Flag that we've completed logging for others
+       close(c.doneLogging)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/discover.go b/vendor/github.com/hashicorp/go-plugin/discover.go
new file mode 100644 (file)
index 0000000..d22c566
--- /dev/null
@@ -0,0 +1,28 @@
+package plugin
+
+import (
+       "path/filepath"
+)
+
+// Discover discovers plugins that are in a given directory.
+//
+// The directory doesn't need to be absolute. For example, "." will work fine.
+//
+// This currently assumes any file matching the glob is a plugin.
+// In the future this may be smarter about checking that a file is
+// executable and so on.
+//
+// TODO: test
+func Discover(glob, dir string) ([]string, error) {
+       var err error
+
+       // Make the directory absolute if it isn't already
+       if !filepath.IsAbs(dir) {
+               dir, err = filepath.Abs(dir)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return filepath.Glob(filepath.Join(dir, glob))
+}
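
Discover is a thin wrapper over filepath.Glob. As a minimal usage sketch
(the glob pattern and directory below are illustrative, not part of this
commit):

    package main

    import (
    	"fmt"
    	"log"

    	plugin "github.com/hashicorp/go-plugin"
    )

    func main() {
    	// Look for provider-style binaries in the current directory.
    	paths, err := plugin.Discover("terraform-provider-*", ".")
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, p := range paths {
    		fmt.Println("found plugin:", p)
    	}
    }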
diff --git a/vendor/github.com/hashicorp/go-plugin/error.go b/vendor/github.com/hashicorp/go-plugin/error.go
new file mode 100644 (file)
index 0000000..22a7baa
--- /dev/null
@@ -0,0 +1,24 @@
+package plugin
+
+// This is a type that wraps error types so that they can be messaged
+// across RPC channels. Since "error" is an interface, we can't always
+// gob-encode the underlying structure. This is a valid error interface
+// implementer that we will push across.
+type BasicError struct {
+       Message string
+}
+
+// NewBasicError is used to create a BasicError.
+//
+// err is allowed to be nil.
+func NewBasicError(err error) *BasicError {
+       if err == nil {
+               return nil
+       }
+
+       return &BasicError{err.Error()}
+}
+
+func (e *BasicError) Error() string {
+       return e.Message
+}
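
Because a bare error interface value can't always be gob-encoded, RPC
response structs carry a *BasicError instead. A minimal sketch, with
hypothetical names:

    package main

    import (
    	"errors"
    	"fmt"

    	plugin "github.com/hashicorp/go-plugin"
    )

    // Response is a hypothetical RPC response; *BasicError is a concrete,
    // gob-encodable type that still implements the error interface.
    type Response struct {
    	Err *plugin.BasicError
    }

    func main() {
    	resp := Response{Err: plugin.NewBasicError(errors.New("disk full"))}
    	if resp.Err != nil {
    		fmt.Println("remote error:", resp.Err)
    	}
    }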
diff --git a/vendor/github.com/hashicorp/go-plugin/mux_broker.go b/vendor/github.com/hashicorp/go-plugin/mux_broker.go
new file mode 100644 (file)
index 0000000..01c45ad
--- /dev/null
@@ -0,0 +1,204 @@
+package plugin
+
+import (
+       "encoding/binary"
+       "fmt"
+       "log"
+       "net"
+       "sync"
+       "sync/atomic"
+       "time"
+
+       "github.com/hashicorp/yamux"
+)
+
+// MuxBroker is responsible for brokering multiplexed connections by unique ID.
+//
+// It is used by plugins to multiplex multiple RPC connections and data
+// streams on top of a single connection between the plugin process and the
+// host process.
+//
+// This allows a plugin to request a channel with a specific ID to connect to
+// or accept a connection from, and the broker handles the details of
+// holding these channels open while they're being negotiated.
+//
+// The Plugin interface has access to these for both Server and Client.
+// The broker can be used by either (optionally) to reserve and connect to
+// new multiplexed streams. This is useful for complex args and return values,
+// or anything else you might need a data stream for.
+type MuxBroker struct {
+       nextId  uint32
+       session *yamux.Session
+       streams map[uint32]*muxBrokerPending
+
+       sync.Mutex
+}
+
+type muxBrokerPending struct {
+       ch     chan net.Conn
+       doneCh chan struct{}
+}
+
+func newMuxBroker(s *yamux.Session) *MuxBroker {
+       return &MuxBroker{
+               session: s,
+               streams: make(map[uint32]*muxBrokerPending),
+       }
+}
+
+// Accept accepts a connection by ID.
+//
+// This should not be called multiple times with the same ID at one time.
+func (m *MuxBroker) Accept(id uint32) (net.Conn, error) {
+       var c net.Conn
+       p := m.getStream(id)
+       select {
+       case c = <-p.ch:
+               close(p.doneCh)
+       case <-time.After(5 * time.Second):
+               m.Lock()
+               defer m.Unlock()
+               delete(m.streams, id)
+
+               return nil, fmt.Errorf("timeout waiting for accept")
+       }
+
+       // Ack our connection
+       if err := binary.Write(c, binary.LittleEndian, id); err != nil {
+               c.Close()
+               return nil, err
+       }
+
+       return c, nil
+}
+
+// AcceptAndServe is used to accept a specific stream ID and immediately
+// serve an RPC server on that stream ID. This is used to easily serve
+// complex arguments.
+//
+// The served interface is always registered to the "Plugin" name.
+func (m *MuxBroker) AcceptAndServe(id uint32, v interface{}) {
+       conn, err := m.Accept(id)
+       if err != nil {
+               log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
+               return
+       }
+
+       serve(conn, "Plugin", v)
+}
+
+// Close closes the connection and all sub-connections.
+func (m *MuxBroker) Close() error {
+       return m.session.Close()
+}
+
+// Dial opens a connection by ID.
+func (m *MuxBroker) Dial(id uint32) (net.Conn, error) {
+       // Open the stream
+       stream, err := m.session.OpenStream()
+       if err != nil {
+               return nil, err
+       }
+
+       // Write the stream ID onto the wire.
+       if err := binary.Write(stream, binary.LittleEndian, id); err != nil {
+               stream.Close()
+               return nil, err
+       }
+
+       // Read the ack that we connected. Then we're off!
+       var ack uint32
+       if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil {
+               stream.Close()
+               return nil, err
+       }
+       if ack != id {
+               stream.Close()
+               return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id)
+       }
+
+       return stream, nil
+}
+
+// NextId returns a unique ID to use next.
+//
+// It is possible for very long-running plugin hosts to wrap this value,
+// though it would require a very large number of RPC calls. In practice
+// we've never seen it happen.
+func (m *MuxBroker) NextId() uint32 {
+       return atomic.AddUint32(&m.nextId, 1)
+}
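+
+// For illustration, a minimal sketch of the reserve/connect pattern
+// described in the MuxBroker doc comment (SecondaryService is a
+// hypothetical RPC receiver):
+//
+//     // Server side: reserve an ID and serve an extra service on it,
+//     // then send the ID to the other side over the main RPC channel.
+//     id := broker.NextId()
+//     go broker.AcceptAndServe(id, &SecondaryService{})
+//
+//     // Client side: dial the reserved stream by ID and wrap it.
+//     conn, err := broker.Dial(id)
+//     if err != nil {
+//             return err
+//     }
+//     client := rpc.NewClient(conn)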
+
+// Run starts the brokering and should be executed in a goroutine, since it
+// blocks forever, or until the session closes.
+//
+// Users of MuxBroker never need to call this. It is called internally by
+// the plugin host/client.
+func (m *MuxBroker) Run() {
+       for {
+               stream, err := m.session.AcceptStream()
+               if err != nil {
+                       // Once we receive an error, just exit
+                       break
+               }
+
+               // Read the stream ID from the stream
+               var id uint32
+               if err := binary.Read(stream, binary.LittleEndian, &id); err != nil {
+                       stream.Close()
+                       continue
+               }
+
+               // Initialize the waiter
+               p := m.getStream(id)
+               select {
+               case p.ch <- stream:
+               default:
+               }
+
+               // Wait for a timeout
+               go m.timeoutWait(id, p)
+       }
+}
+
+func (m *MuxBroker) getStream(id uint32) *muxBrokerPending {
+       m.Lock()
+       defer m.Unlock()
+
+       p, ok := m.streams[id]
+       if ok {
+               return p
+       }
+
+       m.streams[id] = &muxBrokerPending{
+               ch:     make(chan net.Conn, 1),
+               doneCh: make(chan struct{}),
+       }
+       return m.streams[id]
+}
+
+func (m *MuxBroker) timeoutWait(id uint32, p *muxBrokerPending) {
+       // Wait for the stream to either be picked up and connected, or
+       // for a timeout.
+       timeout := false
+       select {
+       case <-p.doneCh:
+       case <-time.After(5 * time.Second):
+               timeout = true
+       }
+
+       m.Lock()
+       defer m.Unlock()
+
+       // Delete the stream so no one else can grab it
+       delete(m.streams, id)
+
+       // If we timed out, then check if we have a channel in the buffer,
+       // and if so, close it.
+       if timeout {
+               select {
+               case s := <-p.ch:
+                       s.Close()
+               default:
+                       // Nothing buffered; don't block while holding the lock.
+               }
+       }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/plugin.go b/vendor/github.com/hashicorp/go-plugin/plugin.go
new file mode 100644 (file)
index 0000000..37c8fd6
--- /dev/null
@@ -0,0 +1,25 @@
+// The plugin package exposes functions and helpers for communicating with
+// plugins that are implemented as standalone binary applications.
+//
+// plugin.Client fully manages the lifecycle of executing the application,
+// connecting to it, and returning the RPC client for dispensing plugins.
+//
+// plugin.Serve fully manages listeners to expose an RPC server from a binary
+// that plugin.Client can connect to.
+package plugin
+
+import (
+       "net/rpc"
+)
+
+// Plugin is the interface that is implemented to serve/connect to an
+// interface implementation.
+type Plugin interface {
+       // Server should return the RPC server compatible struct to serve
+       // the methods that the Client calls over net/rpc.
+       Server(*MuxBroker) (interface{}, error)
+
+       // Client returns an interface implementation for the plugin you're
+       // serving that communicates to the server end of the plugin.
+       Client(*MuxBroker, *rpc.Client) (interface{}, error)
+}
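
To make the interface concrete, here is a minimal implementation sketch.
Greeter and its RPC wrappers are hypothetical, not part of this commit;
the "Plugin" service name matches what this package registers when
serving:

    package example

    import (
    	"net/rpc"

    	plugin "github.com/hashicorp/go-plugin"
    )

    // Greeter is the hypothetical interface exposed across processes.
    type Greeter interface {
    	Greet(name string) string
    }

    // GreeterRPC is the client half: it forwards calls over net/rpc.
    type GreeterRPC struct{ client *rpc.Client }

    func (g *GreeterRPC) Greet(name string) string {
    	var resp string
    	if err := g.client.Call("Plugin.Greet", name, &resp); err != nil {
    		return ""
    	}
    	return resp
    }

    // GreeterRPCServer is the server half: a net/rpc receiver that
    // wraps a real implementation.
    type GreeterRPCServer struct{ Impl Greeter }

    func (s *GreeterRPCServer) Greet(name string, resp *string) error {
    	*resp = s.Impl.Greet(name)
    	return nil
    }

    // GreeterPlugin ties both halves to the Plugin interface above.
    type GreeterPlugin struct{ Impl Greeter }

    func (p *GreeterPlugin) Server(*plugin.MuxBroker) (interface{}, error) {
    	return &GreeterRPCServer{Impl: p.Impl}, nil
    }

    func (p *GreeterPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
    	return &GreeterRPC{client: c}, nil
    }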
diff --git a/vendor/github.com/hashicorp/go-plugin/process.go b/vendor/github.com/hashicorp/go-plugin/process.go
new file mode 100644 (file)
index 0000000..88c999a
--- /dev/null
@@ -0,0 +1,24 @@
+package plugin
+
+import (
+       "time"
+)
+
+// pidAlive checks whether a pid is alive.
+func pidAlive(pid int) bool {
+       return _pidAlive(pid)
+}
+
+// pidWait blocks for a process to exit.
+func pidWait(pid int) error {
+       ticker := time.NewTicker(1 * time.Second)
+       defer ticker.Stop()
+
+       for range ticker.C {
+               if !pidAlive(pid) {
+                       break
+               }
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/process_posix.go b/vendor/github.com/hashicorp/go-plugin/process_posix.go
new file mode 100644 (file)
index 0000000..70ba546
--- /dev/null
@@ -0,0 +1,19 @@
+// +build !windows
+
+package plugin
+
+import (
+       "os"
+       "syscall"
+)
+
+// _pidAlive tests whether a process is alive or not by sending it Signal 0,
+// since Go otherwise has no way to test this.
+func _pidAlive(pid int) bool {
+       proc, err := os.FindProcess(pid)
+       if err == nil {
+               err = proc.Signal(syscall.Signal(0))
+       }
+
+       return err == nil
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/process_windows.go b/vendor/github.com/hashicorp/go-plugin/process_windows.go
new file mode 100644 (file)
index 0000000..9f7b018
--- /dev/null
@@ -0,0 +1,29 @@
+package plugin
+
+import (
+       "syscall"
+)
+
+const (
+       // Weird name but matches the MSDN docs
+       exit_STILL_ACTIVE = 259
+
+       processDesiredAccess = syscall.STANDARD_RIGHTS_READ |
+               syscall.PROCESS_QUERY_INFORMATION |
+               syscall.SYNCHRONIZE
+)
+
+// _pidAlive tests whether a process is alive or not
+func _pidAlive(pid int) bool {
+       h, err := syscall.OpenProcess(processDesiredAccess, false, uint32(pid))
+       if err != nil {
+               return false
+       }
+
+       var ec uint32
+       if e := syscall.GetExitCodeProcess(h, &ec); e != nil {
+               return false
+       }
+
+       return ec == exit_STILL_ACTIVE
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_client.go b/vendor/github.com/hashicorp/go-plugin/rpc_client.go
new file mode 100644 (file)
index 0000000..29f9bf0
--- /dev/null
@@ -0,0 +1,123 @@
+package plugin
+
+import (
+       "fmt"
+       "io"
+       "net"
+       "net/rpc"
+
+       "github.com/hashicorp/yamux"
+)
+
+// RPCClient connects to an RPCServer over net/rpc to dispense plugin types.
+type RPCClient struct {
+       broker  *MuxBroker
+       control *rpc.Client
+       plugins map[string]Plugin
+
+       // These are the streams used for the various stdout/err overrides
+       stdout, stderr net.Conn
+}
+
+// NewRPCClient creates a client from an already-open connection-like value.
+// Dial is typically used instead.
+func NewRPCClient(conn io.ReadWriteCloser, plugins map[string]Plugin) (*RPCClient, error) {
+       // Create the yamux client so we can multiplex
+       mux, err := yamux.Client(conn, nil)
+       if err != nil {
+               conn.Close()
+               return nil, err
+       }
+
+       // Connect to the control stream.
+       control, err := mux.Open()
+       if err != nil {
+               mux.Close()
+               return nil, err
+       }
+
+       // Connect stdout, stderr streams
+       stdstream := make([]net.Conn, 2)
+       for i := range stdstream {
+               stdstream[i], err = mux.Open()
+               if err != nil {
+                       mux.Close()
+                       return nil, err
+               }
+       }
+
+       // Create the broker and start it up
+       broker := newMuxBroker(mux)
+       go broker.Run()
+
+       // Build the client using our broker and control channel.
+       return &RPCClient{
+               broker:  broker,
+               control: rpc.NewClient(control),
+               plugins: plugins,
+               stdout:  stdstream[0],
+               stderr:  stdstream[1],
+       }, nil
+}
+
+// SyncStreams should be called to enable syncing of stdout,
+// stderr with the plugin.
+//
+// This will return immediately and the syncing will continue to happen
+// in the background. You do not need to launch this in a goroutine itself.
+//
+// This should never be called multiple times.
+func (c *RPCClient) SyncStreams(stdout io.Writer, stderr io.Writer) error {
+       go copyStream("stdout", stdout, c.stdout)
+       go copyStream("stderr", stderr, c.stderr)
+       return nil
+}
+
+// Close closes the connection. The client is no longer usable after this
+// is called.
+func (c *RPCClient) Close() error {
+       // Call the control channel and ask it to gracefully exit. If this
+       // errors, then we save it so that we always return an error but we
+       // want to try to close the other channels anyways.
+       var empty struct{}
+       returnErr := c.control.Call("Control.Quit", true, &empty)
+
+       // Close the other streams we have
+       if err := c.control.Close(); err != nil {
+               return err
+       }
+       if err := c.stdout.Close(); err != nil {
+               return err
+       }
+       if err := c.stderr.Close(); err != nil {
+               return err
+       }
+       if err := c.broker.Close(); err != nil {
+               return err
+       }
+
+       // Return the error we got from Control.Quit. This is very important,
+       // since we MUST return a non-nil error if this fails so that Client.Kill
+       // will properly try a process.Kill.
+       return returnErr
+}
+
+func (c *RPCClient) Dispense(name string) (interface{}, error) {
+       p, ok := c.plugins[name]
+       if !ok {
+               return nil, fmt.Errorf("unknown plugin type: %s", name)
+       }
+
+       var id uint32
+       if err := c.control.Call(
+               "Dispenser.Dispense", name, &id); err != nil {
+               return nil, err
+       }
+
+       conn, err := c.broker.Dial(id)
+       if err != nil {
+               return nil, err
+       }
+
+       return p.Client(c.broker, rpc.NewClient(conn))
+}
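
A minimal sketch of dispensing over an already-open connection, reusing
the hypothetical Greeter types from the Plugin interface sketch earlier
in this diff (in practice the higher-level Client in this package
establishes conn for you):

    import (
    	"fmt"
    	"io"

    	plugin "github.com/hashicorp/go-plugin"
    )

    func dispenseGreeter(conn io.ReadWriteCloser) error {
    	client, err := plugin.NewRPCClient(conn, map[string]plugin.Plugin{
    		"greeter": &GreeterPlugin{},
    	})
    	if err != nil {
    		return err
    	}
    	raw, err := client.Dispense("greeter")
    	if err != nil {
    		return err
    	}
    	fmt.Println(raw.(Greeter).Greet("plugin"))
    	return client.Close()
    }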
diff --git a/vendor/github.com/hashicorp/go-plugin/rpc_server.go b/vendor/github.com/hashicorp/go-plugin/rpc_server.go
new file mode 100644 (file)
index 0000000..3984dc8
--- /dev/null
@@ -0,0 +1,185 @@
+package plugin
+
+import (
+       "errors"
+       "fmt"
+       "io"
+       "log"
+       "net"
+       "net/rpc"
+       "sync"
+
+       "github.com/hashicorp/yamux"
+)
+
+// RPCServer listens for network connections and then dispenses interface
+// implementations over net/rpc.
+//
+// After the fields below are set, they shouldn't be read again directly
+// from the structure, since the server may be reading/writing them
+// concurrently.
+type RPCServer struct {
+       Plugins map[string]Plugin
+
+       // Stdout, Stderr are what this server will use instead of the
+       // normal stdin/out/err. Due to the multi-process nature of our
+       // plugin system, we can't use the normal process values, so we
+       // make our own custom ones that we pipe across.
+       Stdout io.Reader
+       Stderr io.Reader
+
+       // DoneCh should be set to a non-nil channel that will be closed
+       // when the control requests the RPC server to end.
+       DoneCh chan<- struct{}
+
+       lock sync.Mutex
+}
+
+// Accept accepts connections on a listener and serves requests for
+// each incoming connection. Accept blocks; the caller typically invokes
+// it in a go statement.
+func (s *RPCServer) Accept(lis net.Listener) {
+       for {
+               conn, err := lis.Accept()
+               if err != nil {
+                       log.Printf("[ERR] plugin: plugin server: %s", err)
+                       return
+               }
+
+               go s.ServeConn(conn)
+       }
+}
+
+// ServeConn runs a single connection.
+//
+// ServeConn blocks, serving the connection until the client hangs up.
+func (s *RPCServer) ServeConn(conn io.ReadWriteCloser) {
+       // First create the yamux server to wrap this connection
+       mux, err := yamux.Server(conn, nil)
+       if err != nil {
+               conn.Close()
+               log.Printf("[ERR] plugin: error creating yamux server: %s", err)
+               return
+       }
+
+       // Accept the control connection
+       control, err := mux.Accept()
+       if err != nil {
+               mux.Close()
+               if err != io.EOF {
+                       log.Printf("[ERR] plugin: error accepting control connection: %s", err)
+               }
+
+               return
+       }
+
+       // Connect the stdstreams (in, out, err)
+       stdstream := make([]net.Conn, 2)
+       for i := range stdstream {
+               stdstream[i], err = mux.Accept()
+               if err != nil {
+                       mux.Close()
+                       log.Printf("[ERR] plugin: accepting stream %d: %s", i, err)
+                       return
+               }
+       }
+
+       // Copy std streams out to the proper place
+       go copyStream("stdout", stdstream[0], s.Stdout)
+       go copyStream("stderr", stdstream[1], s.Stderr)
+
+       // Create the broker and start it up
+       broker := newMuxBroker(mux)
+       go broker.Run()
+
+       // Use the control connection to build the dispenser and serve the
+       // connection.
+       server := rpc.NewServer()
+       server.RegisterName("Control", &controlServer{
+               server: s,
+       })
+       server.RegisterName("Dispenser", &dispenseServer{
+               broker:  broker,
+               plugins: s.Plugins,
+       })
+       server.ServeConn(control)
+}
+
+// done is called internally by the control server to trigger the
+// doneCh to close which is listened to by the main process to cleanly
+// exit.
+func (s *RPCServer) done() {
+       s.lock.Lock()
+       defer s.lock.Unlock()
+
+       if s.DoneCh != nil {
+               close(s.DoneCh)
+               s.DoneCh = nil
+       }
+}
+
+// controlServer serves the control RPC service, handling lifecycle
+// requests (such as Quit) from the client.
+type controlServer struct {
+       server *RPCServer
+}
+
+func (c *controlServer) Quit(
+       null bool, response *struct{}) error {
+       // End the server
+       c.server.done()
+
+       // Always return true
+       *response = struct{}{}
+
+       return nil
+}
+
+// dispenseServer dispenses various interface implementations for Terraform.
+type dispenseServer struct {
+       broker  *MuxBroker
+       plugins map[string]Plugin
+}
+
+func (d *dispenseServer) Dispense(
+       name string, response *uint32) error {
+       // Find the function to create this implementation
+       p, ok := d.plugins[name]
+       if !ok {
+               return fmt.Errorf("unknown plugin type: %s", name)
+       }
+
+       // Create the implementation first so we know if there is an error.
+       impl, err := p.Server(d.broker)
+       if err != nil {
+               // We turn the error into an errors error so that it works across RPC
+               return errors.New(err.Error())
+       }
+
+       // Reserve an ID for our implementation
+       id := d.broker.NextId()
+       *response = id
+
+       // Run the rest in a goroutine since it can only happen once this RPC
+       // call returns. We wait for a connection for the plugin implementation
+       // and serve it.
+       go func() {
+               conn, err := d.broker.Accept(id)
+               if err != nil {
+                       log.Printf("[ERR] go-plugin: plugin dispense error: %s: %s", name, err)
+                       return
+               }
+
+               serve(conn, "Plugin", impl)
+       }()
+
+       return nil
+}
+
+func serve(conn io.ReadWriteCloser, name string, v interface{}) {
+       server := rpc.NewServer()
+       if err := server.RegisterName(name, v); err != nil {
+               log.Printf("[ERR] go-plugin: plugin dispense error: %s", err)
+               return
+       }
+
+       server.ServeConn(conn)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go
new file mode 100644 (file)
index 0000000..b5c5270
--- /dev/null
@@ -0,0 +1,222 @@
+package plugin
+
+import (
+       "errors"
+       "fmt"
+       "io/ioutil"
+       "log"
+       "net"
+       "os"
+       "os/signal"
+       "runtime"
+       "strconv"
+       "sync/atomic"
+)
+
+// CoreProtocolVersion is the ProtocolVersion of the plugin system itself.
+// We will increment this whenever we change any protocol behavior. This
+// will invalidate any prior plugins but will at least allow us to iterate
+// on the core in a safe way. We will do our best to do this very
+// infrequently.
+const CoreProtocolVersion = 1
+
+// HandshakeConfig is the configuration used by client and servers to
+// handshake before starting a plugin connection. This is embedded by
+// both ServeConfig and ClientConfig.
+//
+// In practice, the plugin host creates a HandshakeConfig that is exported,
+// and plugins can then easily consume it.
+type HandshakeConfig struct {
+       // ProtocolVersion is the version that clients must match on to
+       // agree they can communicate. This should match the ProtocolVersion
+       // set on ClientConfig when using a plugin.
+       ProtocolVersion uint
+
+       // MagicCookieKey and value are used as a very basic verification
+       // that a plugin is intended to be launched. This is not a security
+       // measure, just a UX feature. If the magic cookie doesn't match,
+       // we show human-friendly output.
+       MagicCookieKey   string
+       MagicCookieValue string
+}
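+
+// For illustration, a minimal sketch of a shared handshake that a host
+// might export for its plugins (names and values are hypothetical):
+//
+//     var Handshake = plugin.HandshakeConfig{
+//             ProtocolVersion:  1,
+//             MagicCookieKey:   "EXAMPLE_PLUGIN_MAGIC_COOKIE",
+//             MagicCookieValue: "hello",
+//     }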
+
+// ServeConfig configures what sorts of plugins are served.
+type ServeConfig struct {
+       // HandshakeConfig is the configuration that must match clients.
+       HandshakeConfig
+
+       // Plugins are the plugins that are served.
+       Plugins map[string]Plugin
+}
+
+// Serve serves the plugins given by ServeConfig.
+//
+// Serve doesn't return until the plugin is done being executed. Any
+// errors will be written to the log.
+//
+// This is the method that plugins should call in their main() functions.
+func Serve(opts *ServeConfig) {
+       // Validate the handshake config
+       if opts.MagicCookieKey == "" || opts.MagicCookieValue == "" {
+               fmt.Fprintf(os.Stderr,
+                       "Misconfigured ServeConfig given to serve this plugin: no magic cookie\n"+
+                               "key or value was set. Please notify the plugin author and report\n"+
+                               "this as a bug.\n")
+               os.Exit(1)
+       }
+
+       // First check the cookie
+       if os.Getenv(opts.MagicCookieKey) != opts.MagicCookieValue {
+               fmt.Fprintf(os.Stderr,
+                       "This binary is a plugin. These are not meant to be executed directly.\n"+
+                               "Please execute the program that consumes these plugins, which will\n"+
+                               "load any plugins automatically\n")
+               os.Exit(1)
+       }
+
+       // Logging goes to the original stderr
+       log.SetOutput(os.Stderr)
+
+       // Create our new stdout, stderr files. These will override our built-in
+       // stdout/stderr so that it works across the stream boundary.
+       stdout_r, stdout_w, err := os.Pipe()
+       if err != nil {
+               fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
+               os.Exit(1)
+       }
+       stderr_r, stderr_w, err := os.Pipe()
+       if err != nil {
+               fmt.Fprintf(os.Stderr, "Error preparing plugin: %s\n", err)
+               os.Exit(1)
+       }
+
+       // Register a listener so we can accept a connection
+       listener, err := serverListener()
+       if err != nil {
+               log.Printf("[ERR] plugin: plugin init: %s", err)
+               return
+       }
+       defer listener.Close()
+
+       // Create the channel to tell us when we're done
+       doneCh := make(chan struct{})
+
+       // Create the RPC server to dispense
+       server := &RPCServer{
+               Plugins: opts.Plugins,
+               Stdout:  stdout_r,
+               Stderr:  stderr_r,
+               DoneCh:  doneCh,
+       }
+
+       // Output the address and service name to stdout so that the host process can pick it up.
+       log.Printf("[DEBUG] plugin: plugin address: %s %s\n",
+               listener.Addr().Network(), listener.Addr().String())
+       fmt.Printf("%d|%d|%s|%s\n",
+               CoreProtocolVersion,
+               opts.ProtocolVersion,
+               listener.Addr().Network(),
+               listener.Addr().String())
+       os.Stdout.Sync()
+
+       // Eat the interrupts
+       ch := make(chan os.Signal, 1)
+       signal.Notify(ch, os.Interrupt)
+       go func() {
+               var count int32
+               for {
+                       <-ch
+                       newCount := atomic.AddInt32(&count, 1)
+                       log.Printf(
+                               "[DEBUG] plugin: received interrupt signal (count: %d). Ignoring.",
+                               newCount)
+               }
+       }()
+
+       // Set our new out, err
+       os.Stdout = stdout_w
+       os.Stderr = stderr_w
+
+       // Serve
+       go server.Accept(listener)
+
+       // Wait for the graceful exit
+       <-doneCh
+}
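+
+// For illustration, a minimal sketch of a plugin main() built on Serve,
+// assuming the hypothetical Handshake and GreeterPlugin sketched earlier
+// in this diff (realGreeter stands in for a concrete implementation):
+//
+//     func main() {
+//             plugin.Serve(&plugin.ServeConfig{
+//                     HandshakeConfig: Handshake,
+//                     Plugins: map[string]plugin.Plugin{
+//                             "greeter": &GreeterPlugin{Impl: &realGreeter{}},
+//                     },
+//             })
+//     }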
+
+func serverListener() (net.Listener, error) {
+       if runtime.GOOS == "windows" {
+               return serverListener_tcp()
+       }
+
+       return serverListener_unix()
+}
+
+func serverListener_tcp() (net.Listener, error) {
+       minPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MIN_PORT"), 10, 32)
+       if err != nil {
+               return nil, err
+       }
+
+       maxPort, err := strconv.ParseInt(os.Getenv("PLUGIN_MAX_PORT"), 10, 32)
+       if err != nil {
+               return nil, err
+       }
+
+       for port := minPort; port <= maxPort; port++ {
+               address := fmt.Sprintf("127.0.0.1:%d", port)
+               listener, err := net.Listen("tcp", address)
+               if err == nil {
+                       return listener, nil
+               }
+       }
+
+       return nil, errors.New("Couldn't bind plugin TCP listener")
+}
+
+func serverListener_unix() (net.Listener, error) {
+       tf, err := ioutil.TempFile("", "plugin")
+       if err != nil {
+               return nil, err
+       }
+       path := tf.Name()
+
+       // Close the file and remove it, because the path must not exist
+       // when we create the domain socket.
+       if err := tf.Close(); err != nil {
+               return nil, err
+       }
+       if err := os.Remove(path); err != nil {
+               return nil, err
+       }
+
+       l, err := net.Listen("unix", path)
+       if err != nil {
+               return nil, err
+       }
+
+       // Wrap the listener in rmListener so that the Unix domain socket file
+       // is removed on close.
+       return &rmListener{
+               Listener: l,
+               Path:     path,
+       }, nil
+}
+
+// rmListener is an implementation of net.Listener that forwards most
+// calls to the listener but also removes a file as part of the close. We
+// use this to clean up the Unix domain socket file on close.
+type rmListener struct {
+       net.Listener
+       Path string
+}
+
+func (l *rmListener) Close() error {
+       // Close the listener itself
+       if err := l.Listener.Close(); err != nil {
+               return err
+       }
+
+       // Remove the file
+       return os.Remove(l.Path)
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/server_mux.go b/vendor/github.com/hashicorp/go-plugin/server_mux.go
new file mode 100644 (file)
index 0000000..033079e
--- /dev/null
@@ -0,0 +1,31 @@
+package plugin
+
+import (
+       "fmt"
+       "os"
+)
+
+// ServeMuxMap is the type that is used to configure ServeMux
+type ServeMuxMap map[string]*ServeConfig
+
+// ServeMux is like Serve, but serves multiple types of plugins determined
+// by the argument given on the command-line.
+//
+// This function doesn't return until the plugin is done being executed. Any
+// errors are logged or output to stderr.
+func ServeMux(m ServeMuxMap) {
+       if len(os.Args) != 2 {
+               fmt.Fprintf(os.Stderr,
+                       "Invoked improperly. This is an internal command that shouldn't\n"+
+                               "be manually invoked.\n")
+               os.Exit(1)
+       }
+
+       opts, ok := m[os.Args[1]]
+       if !ok {
+               fmt.Fprintf(os.Stderr, "Unknown plugin: %s\n", os.Args[1])
+               os.Exit(1)
+       }
+
+       Serve(opts)
+}
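
A minimal sketch of a multi-plugin binary built on ServeMux; the host
invokes it as "<binary> <plugin-name>", and the plugin names and configs
below are illustrative:

    package main

    import (
    	plugin "github.com/hashicorp/go-plugin"
    )

    func main() {
    	plugin.ServeMux(plugin.ServeMuxMap{
    		// Each entry is a full ServeConfig, as would be passed to Serve.
    		"greeter": &plugin.ServeConfig{ /* handshake + plugins */ },
    		"counter": &plugin.ServeConfig{ /* handshake + plugins */ },
    	})
    }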
diff --git a/vendor/github.com/hashicorp/go-plugin/stream.go b/vendor/github.com/hashicorp/go-plugin/stream.go
new file mode 100644 (file)
index 0000000..1d547aa
--- /dev/null
@@ -0,0 +1,18 @@
+package plugin
+
+import (
+       "io"
+       "log"
+)
+
+func copyStream(name string, dst io.Writer, src io.Reader) {
+       if src == nil {
+               panic(name + ": src is nil")
+       }
+       if dst == nil {
+               panic(name + ": dst is nil")
+       }
+       if _, err := io.Copy(dst, src); err != nil && err != io.EOF {
+               log.Printf("[ERR] plugin: stream copy '%s' error: %s", name, err)
+       }
+}
diff --git a/vendor/github.com/hashicorp/go-plugin/testing.go b/vendor/github.com/hashicorp/go-plugin/testing.go
new file mode 100644 (file)
index 0000000..9086a1b
--- /dev/null
@@ -0,0 +1,76 @@
+package plugin
+
+import (
+       "bytes"
+       "net"
+       "net/rpc"
+       "testing"
+)
+
+// This file contains test helpers that you can use outside of
+// this package to make it easier to test plugins themselves.
+
+// TestConn is a helper function for returning a client and server
+// net.Conn connected to each other.
+func TestConn(t *testing.T) (net.Conn, net.Conn) {
+       // Listen on any local port. This listener will be closed
+       // after a single connection is established.
+       l, err := net.Listen("tcp", "127.0.0.1:0")
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+
+       // Start a goroutine to accept our client connection
+       var serverConn net.Conn
+       doneCh := make(chan struct{})
+       go func() {
+               defer close(doneCh)
+               defer l.Close()
+               var err error
+               serverConn, err = l.Accept()
+               if err != nil {
+                       t.Fatalf("err: %s", err)
+               }
+       }()
+
+       // Connect to the server
+       clientConn, err := net.Dial("tcp", l.Addr().String())
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+
+       // Wait for the server side to acknowledge it has connected
+       <-doneCh
+
+       return clientConn, serverConn
+}
+
+// TestRPCConn returns an rpc client and server connected to each other.
+func TestRPCConn(t *testing.T) (*rpc.Client, *rpc.Server) {
+       clientConn, serverConn := TestConn(t)
+
+       server := rpc.NewServer()
+       go server.ServeConn(serverConn)
+
+       client := rpc.NewClient(clientConn)
+       return client, server
+}
+
+// TestPluginRPCConn returns a plugin RPC client and server that are connected
+// together and configured.
+func TestPluginRPCConn(t *testing.T, ps map[string]Plugin) (*RPCClient, *RPCServer) {
+       // Create two net.Conns we can use to shuttle our control connection
+       clientConn, serverConn := TestConn(t)
+
+       // Start up the server
+       server := &RPCServer{Plugins: ps, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer)}
+       go server.ServeConn(serverConn)
+
+       // Connect the client to the server
+       client, err := NewRPCClient(clientConn, ps)
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+
+       return client, server
+}
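
A minimal sketch of a plugin test built on these helpers, again assuming
the hypothetical Greeter types from the Plugin interface sketch (with
realGreeter as a stand-in concrete implementation):

    func TestGreeterPlugin(t *testing.T) {
    	client, _ := plugin.TestPluginRPCConn(t, map[string]plugin.Plugin{
    		"greeter": &GreeterPlugin{Impl: &realGreeter{}},
    	})
    	defer client.Close()

    	raw, err := client.Dispense("greeter")
    	if err != nil {
    		t.Fatalf("err: %s", err)
    	}
    	if got := raw.(Greeter).Greet("world"); got == "" {
    		t.Fatalf("expected a greeting, got %q", got)
    	}
    }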
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
new file mode 100644 (file)
index 0000000..e87a115
--- /dev/null
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+      means any patent claim(s), including without limitation, method,
+      process, and apparatus claims, in any patent Licensable by such
+      Contributor that would be infringed, but for the grant of the License,
+      by the making, using, selling, offering for sale, having made, import,
+      or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+      means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, "You" includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, "control" means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or
+        as part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its
+        Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution
+     become effective for each Contribution on the date the Contributor first
+     distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under
+     this License. No additional rights or licenses will be implied from the
+     distribution or licensing of Covered Software under this License.
+     Notwithstanding Section 2.1(b) above, no patent license is granted by a
+     Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party's
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of
+        its Contributions.
+
+     This License does not grant any rights in the trademarks, service marks,
+     or logos of any Contributor (except as may be necessary to comply with
+     the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this
+     License (see Section 10.2) or under the terms of a Secondary License (if
+     permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its
+     Contributions are its original creation(s) or it has sufficient rights to
+     grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under
+     applicable copyright doctrines of fair use, fair dealing, or other
+     equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under
+     the terms of this License. You must inform recipients that the Source
+     Code Form of the Covered Software is governed by the terms of this
+     License, and how they can obtain a copy of this License. You may not
+     attempt to alter or restrict the recipients' rights in the Source Code
+     Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this
+        License, or sublicense it under different terms, provided that the
+        license for the Executable Form does not attempt to limit or alter the
+        recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for
+     the Covered Software. If the Larger Work is a combination of Covered
+     Software with a work governed by one or more Secondary Licenses, and the
+     Covered Software is not Incompatible With Secondary Licenses, this
+     License permits You to additionally distribute such Covered Software
+     under the terms of such Secondary License(s), so that the recipient of
+     the Larger Work may, at their option, further distribute the Covered
+     Software under the terms of either this License or such Secondary
+     License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices
+     (including copyright notices, patent notices, disclaimers of warranty, or
+     limitations of liability) contained within the Source Code Form of the
+     Covered Software, except that You may alter any license notices to the
+     extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on
+     behalf of any Contributor. You must make it absolutely clear that any
+     such warranty, support, indemnity, or liability obligation is offered by
+     You alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute,
+   judicial order, or regulation then You must: (a) comply with the terms of
+   this License to the maximum extent possible; and (b) describe the
+   limitations and the code they affect. Such description must be placed in a
+   text file included with all distributions of the Covered Software under
+   this License. Except to the extent prohibited by statute or regulation,
+   such description must be sufficiently detailed for a recipient of ordinary
+   skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing
+     basis, if such Contributor fails to notify You of the non-compliance by
+     some reasonable means prior to 60 days after You have come back into
+     compliance. Moreover, Your grants from a particular Contributor are
+     reinstated on an ongoing basis if such Contributor notifies You of the
+     non-compliance by some reasonable means, this is the first time You have
+     received notice of non-compliance with this License from such
+     Contributor, and You become compliant prior to 30 days after Your receipt
+     of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions,
+     counter-claims, and cross-claims) alleging that a Contributor Version
+     directly or indirectly infringes any patent, then the rights granted to
+     You by any and all Contributors for the Covered Software under Section
+     2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis,
+   without warranty of any kind, either expressed, implied, or statutory,
+   including, without limitation, warranties that the Covered Software is free
+   of defects, merchantable, fit for a particular purpose or non-infringing.
+   The entire risk as to the quality and performance of the Covered Software
+   is with You. Should any Covered Software prove defective in any respect,
+   You (not any Contributor) assume the cost of any necessary servicing,
+   repair, or correction. This disclaimer of warranty constitutes an essential
+   part of this License. No use of  any Covered Software is authorized under
+   this License except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from
+   such party's negligence to the extent applicable law prohibits such
+   limitation. Some jurisdictions do not allow the exclusion or limitation of
+   incidental or consequential damages, so this exclusion and limitation may
+   not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts
+   of a jurisdiction where the defendant maintains its principal place of
+   business and such litigation shall be governed by laws of that
+   jurisdiction, without reference to its conflict-of-law provisions. Nothing
+   in this Section shall prevent a party's ability to bring cross-claims or
+   counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-uuid/README.md b/vendor/github.com/hashicorp/go-uuid/README.md
new file mode 100644 (file)
index 0000000..21fdda4
--- /dev/null
@@ -0,0 +1,8 @@
+# uuid
+
+Generates UUID-format strings using purely high-quality random bytes.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-uuid).
diff --git a/vendor/github.com/hashicorp/go-uuid/uuid.go b/vendor/github.com/hashicorp/go-uuid/uuid.go
new file mode 100644 (file)
index 0000000..322b522
--- /dev/null
@@ -0,0 +1,57 @@
+package uuid
+
+import (
+       "crypto/rand"
+       "encoding/hex"
+       "fmt"
+)
+
+// GenerateUUID is used to generate a random UUID
+func GenerateUUID() (string, error) {
+       buf := make([]byte, 16)
+       if _, err := rand.Read(buf); err != nil {
+               return "", fmt.Errorf("failed to read random bytes: %v", err)
+       }
+
+       return FormatUUID(buf)
+}
+
+func FormatUUID(buf []byte) (string, error) {
+       if len(buf) != 16 {
+               return "", fmt.Errorf("wrong length byte slice (%d)", len(buf))
+       }
+
+       return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
+               buf[0:4],
+               buf[4:6],
+               buf[6:8],
+               buf[8:10],
+               buf[10:16]), nil
+}
+
+func ParseUUID(uuid string) ([]byte, error) {
+       if len(uuid) != 36 {
+               return nil, fmt.Errorf("uuid string is wrong length")
+       }
+
+       hyph := []byte("-")
+
+       if uuid[8] != hyph[0] ||
+               uuid[13] != hyph[0] ||
+               uuid[18] != hyph[0] ||
+               uuid[23] != hyph[0] {
+               return nil, fmt.Errorf("uuid is improperly formatted")
+       }
+
+       hexStr := uuid[0:8] + uuid[9:13] + uuid[14:18] + uuid[19:23] + uuid[24:36]
+
+       ret, err := hex.DecodeString(hexStr)
+       if err != nil {
+               return nil, err
+       }
+       if len(ret) != 16 {
+               return nil, fmt.Errorf("decoded hex is the wrong length")
+       }
+
+       return ret, nil
+}
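
A minimal round-trip sketch of this API:

    package main

    import (
    	"fmt"
    	"log"

    	uuid "github.com/hashicorp/go-uuid"
    )

    func main() {
    	id, err := uuid.GenerateUUID()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(id) // 36 characters: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx

    	raw, err := uuid.ParseUUID(id)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// FormatUUID turns the 16 raw bytes back into the string form.
    	s, _ := uuid.FormatUUID(raw)
    	fmt.Println(s == id) // true
    }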
diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE
new file mode 100644 (file)
index 0000000..c33dcc7
--- /dev/null
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md
new file mode 100644 (file)
index 0000000..6f3a15c
--- /dev/null
@@ -0,0 +1,65 @@
+# Versioning Library for Go
+[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version)
+
+go-version is a library for parsing versions and version constraints,
+and for verifying versions against a set of constraints. go-version
+can sort a collection of versions properly, handle prerelease/beta
+versions, increment versions, and more.
+
+Versions used with go-version must follow [SemVer](http://semver.org/).
+
+## Installation and Usage
+
+Package documentation can be found on
+[GoDoc](http://godoc.org/github.com/hashicorp/go-version).
+
+Installation can be done with a normal `go get`:
+
+```
+$ go get github.com/hashicorp/go-version
+```
+
+#### Version Parsing and Comparison
+
+```go
+v1, err := version.NewVersion("1.2")
+v2, err := version.NewVersion("1.5+metadata")
+
+// Comparison example. There is also GreaterThan, Equal, and just
+// a simple Compare that returns an int allowing easy >=, <=, etc.
+if v1.LessThan(v2) {
+    fmt.Printf("%s is less than %s", v1, v2)
+}
+```
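+
+There is also a `Must` helper that wraps `NewVersion` and panics on a
+parse error, which is convenient for version literals; a small sketch:
+
+```go
+v := version.Must(version.NewVersion("1.2.3"))
+```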
+
+#### Version Constraints
+
+```go
+v1, err := version.NewVersion("1.2")
+
+// Constraints example.
+constraints, err := version.NewConstraint(">= 1.0, < 1.4")
+if constraints.Check(v1) {
+       fmt.Printf("%s satisfies constraints %s", v1, constraints)
+}
+```
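+
+The supported operators also include the pessimistic operator `~>`
+(see `constraint.go`); a brief sketch of its behavior:
+
+```go
+pessimistic, _ := version.NewConstraint("~> 1.2")
+
+// "~> 1.2" allows any 1.x release at or above 1.2, but not 2.0.
+fmt.Println(pessimistic.Check(version.Must(version.NewVersion("1.9")))) // true
+fmt.Println(pessimistic.Check(version.Must(version.NewVersion("2.0")))) // false
+```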
+
+#### Version Sorting
+
+```go
+versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"}
+versions := make([]*version.Version, len(versionsRaw))
+for i, raw := range versionsRaw {
+    v, _ := version.NewVersion(raw)
+    versions[i] = v
+}
+
+// After this, the versions are properly sorted
+sort.Sort(version.Collection(versions))
+```
+
+## Issues and Contributing
+
+If you find a problem with this library, please report an issue. We also
+welcome contributions: fork this library and submit a pull request.
diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go
new file mode 100644 (file)
index 0000000..8c73df0
--- /dev/null
@@ -0,0 +1,178 @@
+package version
+
+import (
+       "fmt"
+       "regexp"
+       "strings"
+)
+
+// Constraint represents a single constraint for a version, such as
+// ">= 1.0".
+type Constraint struct {
+       f        constraintFunc
+       check    *Version
+       original string
+}
+
+// Constraints is a slice of constraints. We make a custom type so that
+// we can add methods to it.
+type Constraints []*Constraint
+
+type constraintFunc func(v, c *Version) bool
+
+var constraintOperators map[string]constraintFunc
+
+var constraintRegexp *regexp.Regexp
+
+func init() {
+       constraintOperators = map[string]constraintFunc{
+               "":   constraintEqual,
+               "=":  constraintEqual,
+               "!=": constraintNotEqual,
+               ">":  constraintGreaterThan,
+               "<":  constraintLessThan,
+               ">=": constraintGreaterThanEqual,
+               "<=": constraintLessThanEqual,
+               "~>": constraintPessimistic,
+       }
+
+       ops := make([]string, 0, len(constraintOperators))
+       for k := range constraintOperators {
+               ops = append(ops, regexp.QuoteMeta(k))
+       }
+
+       constraintRegexp = regexp.MustCompile(fmt.Sprintf(
+               `^\s*(%s)\s*(%s)\s*$`,
+               strings.Join(ops, "|"),
+               VersionRegexpRaw))
+}
+
+// NewConstraint will parse one or more constraints from the given
+// constraint string. The string must be a comma-separated list of
+// constraints.
+func NewConstraint(v string) (Constraints, error) {
+       vs := strings.Split(v, ",")
+       result := make([]*Constraint, len(vs))
+       for i, single := range vs {
+               c, err := parseSingle(single)
+               if err != nil {
+                       return nil, err
+               }
+
+               result[i] = c
+       }
+
+       return Constraints(result), nil
+}
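+
+// Illustrative usage (an editor's sketch, not part of the original source):
+//
+//     cs, err := NewConstraint(">= 1.0, < 1.4")
+//     if err != nil {
+//             // handle the malformed constraint string
+//     }
+//     ok := cs.Check(Must(NewVersion("1.2")))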
+
+// Check tests if a version satisfies all the constraints.
+func (cs Constraints) Check(v *Version) bool {
+       for _, c := range cs {
+               if !c.Check(v) {
+                       return false
+               }
+       }
+
+       return true
+}
+
+// String returns the string format of the constraints.
+func (cs Constraints) String() string {
+       csStr := make([]string, len(cs))
+       for i, c := range cs {
+               csStr[i] = c.String()
+       }
+
+       return strings.Join(csStr, ",")
+}
+
+// Check tests if a constraint is validated by the given version.
+func (c *Constraint) Check(v *Version) bool {
+       return c.f(v, c.check)
+}
+
+func (c *Constraint) String() string {
+       return c.original
+}
+
+func parseSingle(v string) (*Constraint, error) {
+       matches := constraintRegexp.FindStringSubmatch(v)
+       if matches == nil {
+               return nil, fmt.Errorf("Malformed constraint: %s", v)
+       }
+
+       check, err := NewVersion(matches[2])
+       if err != nil {
+               return nil, err
+       }
+
+       return &Constraint{
+               f:        constraintOperators[matches[1]],
+               check:    check,
+               original: v,
+       }, nil
+}
+
+//-------------------------------------------------------------------
+// Constraint functions
+//-------------------------------------------------------------------
+
+func constraintEqual(v, c *Version) bool {
+       return v.Equal(c)
+}
+
+func constraintNotEqual(v, c *Version) bool {
+       return !v.Equal(c)
+}
+
+func constraintGreaterThan(v, c *Version) bool {
+       return v.Compare(c) == 1
+}
+
+func constraintLessThan(v, c *Version) bool {
+       return v.Compare(c) == -1
+}
+
+func constraintGreaterThanEqual(v, c *Version) bool {
+       return v.Compare(c) >= 0
+}
+
+func constraintLessThanEqual(v, c *Version) bool {
+       return v.Compare(c) <= 0
+}
+
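+// constraintPessimistic implements the "~>" operator: for example (an
+// editor's note derived from the checks below), "~> 1.2.3" matches 1.2.3
+// through 1.2.x but not 1.3.0, and "~> 1.2" matches 1.2 through 1.x but
+// not 2.0.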
+func constraintPessimistic(v, c *Version) bool {
+       // If the version being checked is naturally less than the constraint, then there
+       // is no way for the version to be valid against the constraint
+       if v.LessThan(c) {
+               return false
+       }
+       // We'll use this more than once, so grab the length now so it's a little cleaner
+       // to write the later checks
+       cs := len(c.segments)
+
+       // If the version being checked has less specificity than the constraint, then there
+       // is no way for the version to be valid against the constraint
+       if cs > len(v.segments) {
+               return false
+       }
+
+       // Check the segments in the constraint against those in the version. If the version
+       // being checked, at any point, does not have the same values in each index of the
+       // constraints segments, then it cannot be valid against the constraint.
+       for i := 0; i < c.si-1; i++ {
+               if v.segments[i] != c.segments[i] {
+                       return false
+               }
+       }
+
+       // Check the last part of the segment in the constraint. If the version segment at
+       // this index is less than the constraints segment at this index, then it cannot
+       // be valid against the constraint
+       if c.segments[cs-1] > v.segments[cs-1] {
+               return false
+       }
+
+       // If nothing has rejected the version by now, it's valid
+       return true
+}
diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go
new file mode 100644 (file)
index 0000000..ae2f6b6
--- /dev/null
@@ -0,0 +1,308 @@
+package version
+
+import (
+       "bytes"
+       "fmt"
+       "reflect"
+       "regexp"
+       "strconv"
+       "strings"
+)
+
+// The compiled regular expression used to test the validity of a version.
+var versionRegexp *regexp.Regexp
+
+// The raw regular expression string used for testing the validity
+// of a version.
+const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` +
+       `(-?([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+       `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
+       `?`
+
+// Version represents a single version.
+type Version struct {
+       metadata string
+       pre      string
+       segments []int64
+       si       int
+}
+
+func init() {
+       versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$")
+}
+
+// NewVersion parses the given version and returns a new
+// Version.
+func NewVersion(v string) (*Version, error) {
+       matches := versionRegexp.FindStringSubmatch(v)
+       if matches == nil {
+               return nil, fmt.Errorf("Malformed version: %s", v)
+       }
+       segmentsStr := strings.Split(matches[1], ".")
+       segments := make([]int64, len(segmentsStr))
+       si := 0
+       for i, str := range segmentsStr {
+               val, err := strconv.ParseInt(str, 10, 64)
+               if err != nil {
+                       return nil, fmt.Errorf(
+                               "Error parsing version: %s", err)
+               }
+
+               segments[i] = int64(val)
+               si++
+       }
+
+       // Even though we could support more than three segments, if we
+       // got fewer than three, pad with 0s. This covers the basic default
+       // use case of semver, which is MAJOR.MINOR.PATCH at a minimum
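+       // (e.g. NewVersion("1.2") stores segments [1, 2, 0] while si stays 2)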
+       for i := len(segments); i < 3; i++ {
+               segments = append(segments, 0)
+       }
+
+       return &Version{
+               metadata: matches[7],
+               pre:      matches[4],
+               segments: segments,
+               si:       si,
+       }, nil
+}
+
+// Must is a helper that wraps a call to a function returning (*Version, error)
+// and panics if error is non-nil.
+func Must(v *Version, err error) *Version {
+       if err != nil {
+               panic(err)
+       }
+
+       return v
+}
+
+// Compare compares this version to another version. This
+// returns -1, 0, or 1 if this version is smaller, equal,
+// or larger than the other version, respectively.
+//
+// If you want boolean results, use the LessThan, Equal,
+// or GreaterThan methods.
+func (v *Version) Compare(other *Version) int {
+       // A quick, efficient equality check
+       if v.String() == other.String() {
+               return 0
+       }
+
+       segmentsSelf := v.Segments64()
+       segmentsOther := other.Segments64()
+
+       // If the segments are the same, we must compare on prerelease info
+       if reflect.DeepEqual(segmentsSelf, segmentsOther) {
+               preSelf := v.Prerelease()
+               preOther := other.Prerelease()
+               if preSelf == "" && preOther == "" {
+                       return 0
+               }
+               if preSelf == "" {
+                       return 1
+               }
+               if preOther == "" {
+                       return -1
+               }
+
+               return comparePrereleases(preSelf, preOther)
+       }
+
+       // Get the highest specificity (hS), or if they're equal, just use segmentsSelf's length
+       lenSelf := len(segmentsSelf)
+       lenOther := len(segmentsOther)
+       hS := lenSelf
+       if lenSelf < lenOther {
+               hS = lenOther
+       }
+       // Compare the segments
+       // Because a constraint could have more/less specificity than the version it's
+       // checking, we need to account for a lopsided or jagged comparison
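+       // (for example, "1.2" vs "1.2.0.1": once the shared segments compare
+       // equal, the extra non-zero segment makes "1.2.0.1" the greater one)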
+       for i := 0; i < hS; i++ {
+               if i > lenSelf-1 {
+                       // This means Self had the lower specificity
+                       // Check to see if the remaining segments in Other are all zeros
+                       if !allZero(segmentsOther[i:]) {
+                               // if not, it means that Other has to be greater than Self
+                               return -1
+                       }
+                       break
+               } else if i > lenOther-1 {
+                       // this means Other had the lower specificity
+                       // Check to see if the remaining segments in Self are all zeros
+                       if !allZero(segmentsSelf[i:]) {
+                               // if not, it means that Self has to be greater than Other
+                               return 1
+                       }
+                       break
+               }
+               lhs := segmentsSelf[i]
+               rhs := segmentsOther[i]
+               if lhs == rhs {
+                       continue
+               } else if lhs < rhs {
+                       return -1
+               }
+               // Otherwise lhs was > rhs; they're not equal
+               return 1
+       }
+
+       // if we got this far, they're equal
+       return 0
+}
+
+func allZero(segs []int64) bool {
+       for _, s := range segs {
+               if s != 0 {
+                       return false
+               }
+       }
+       return true
+}
+
+func comparePart(preSelf string, preOther string) int {
+       if preSelf == preOther {
+               return 0
+       }
+
+       // If a part is empty, we use the other part to decide: an empty part
+       // sorts below a numeric part but above an alphanumeric one (a nil
+       // error from ParseInt means the non-empty part is numeric)
+       if preSelf == "" {
+               _, notIsNumeric := strconv.ParseInt(preOther, 10, 64)
+               if notIsNumeric == nil {
+                       return -1
+               }
+               return 1
+       }
+
+       if preOther == "" {
+               _, notIsNumeric := strconv.ParseInt(preSelf, 10, 64)
+               if notIsNumeric == nil {
+                       return 1
+               }
+               return -1
+       }
+
+       if preSelf > preOther {
+               return 1
+       }
+
+       return -1
+}
+
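+// comparePrereleases compares two prerelease strings part by part; for
+// example, comparePrereleases("beta", "beta.1") returns -1, because the
+// missing part on the left is compared against the numeric "1".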
+func comparePrereleases(v string, other string) int {
+       // the same pre release!
+       if v == other {
+               return 0
+       }
+
+       // split both prereleases to analyse their parts
+       selfPreReleaseMeta := strings.Split(v, ".")
+       otherPreReleaseMeta := strings.Split(other, ".")
+
+       selfPreReleaseLen := len(selfPreReleaseMeta)
+       otherPreReleaseLen := len(otherPreReleaseMeta)
+
+       biggestLen := otherPreReleaseLen
+       if selfPreReleaseLen > otherPreReleaseLen {
+               biggestLen = selfPreReleaseLen
+       }
+
+       // loop over the parts to find the first difference
+       for i := 0; i < biggestLen; i++ {
+               partSelfPre := ""
+               if i < selfPreReleaseLen {
+                       partSelfPre = selfPreReleaseMeta[i]
+               }
+
+               partOtherPre := ""
+               if i < otherPreReleaseLen {
+                       partOtherPre = otherPreReleaseMeta[i]
+               }
+
+               compare := comparePart(partSelfPre, partOtherPre)
+               // if the parts are equal, continue the loop
+               if compare != 0 {
+                       return compare
+               }
+       }
+
+       return 0
+}
+
+// Equal tests if two versions are equal.
+func (v *Version) Equal(o *Version) bool {
+       return v.Compare(o) == 0
+}
+
+// GreaterThan tests if this version is greater than another version.
+func (v *Version) GreaterThan(o *Version) bool {
+       return v.Compare(o) > 0
+}
+
+// LessThan tests if this version is less than another version.
+func (v *Version) LessThan(o *Version) bool {
+       return v.Compare(o) < 0
+}
+
+// Metadata returns any metadata that was part of the version
+// string.
+//
+// Metadata is anything that comes after the "+" in the version.
+// For example, with "1.2.3+beta", the metadata is "beta".
+func (v *Version) Metadata() string {
+       return v.metadata
+}
+
+// Prerelease returns any prerelease data that is part of the version,
+// or blank if there is no prerelease data.
+//
+// Prerelease information is anything that comes after the "-" in the
+// version (but before any metadata). For example, with "1.2.3-beta",
+// the prerelease information is "beta".
+func (v *Version) Prerelease() string {
+       return v.pre
+}
+
+// Segments returns the numeric segments of the version as a slice of ints.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments() []int {
+       segmentSlice := make([]int, len(v.segments))
+       for i, v := range v.segments {
+               segmentSlice[i] = int(v)
+       }
+       return segmentSlice
+}
+
+// Segments64 returns the numeric segments of the version as a slice of int64s.
+//
+// This excludes any metadata or pre-release information. For example,
+// for a version "1.2.3-beta", segments will return a slice of
+// 1, 2, 3.
+func (v *Version) Segments64() []int64 {
+       return v.segments
+}
+
+// String returns the full version string including pre-release
+// and metadata information.
+func (v *Version) String() string {
+       var buf bytes.Buffer
+       fmtParts := make([]string, len(v.segments))
+       for i, s := range v.segments {
+               // FormatInt cannot fail here, since the segments were parsed as integers
+               str := strconv.FormatInt(s, 10)
+               fmtParts[i] = str
+       }
+       fmt.Fprint(&buf, strings.Join(fmtParts, "."))
+       if v.pre != "" {
+               fmt.Fprintf(&buf, "-%s", v.pre)
+       }
+       if v.metadata != "" {
+               fmt.Fprintf(&buf, "+%s", v.metadata)
+       }
+
+       return buf.String()
+}
diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go
new file mode 100644 (file)
index 0000000..cc888d4
--- /dev/null
@@ -0,0 +1,17 @@
+package version
+
+// Collection is a type that implements sort.Interface so that
+// versions can be sorted.
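+// Typical usage (from the README): sort.Sort(version.Collection(versions)).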
+type Collection []*Version
+
+func (v Collection) Len() int {
+       return len(v)
+}
+
+func (v Collection) Less(i, j int) bool {
+       return v[i].LessThan(v[j])
+}
+
+func (v Collection) Swap(i, j int) {
+       v[i], v[j] = v[j], v[i]
+}
diff --git a/vendor/github.com/hashicorp/hcl/LICENSE b/vendor/github.com/hashicorp/hcl/LICENSE
new file mode 100644 (file)
index 0000000..c33dcc7
--- /dev/null
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/hcl/Makefile b/vendor/github.com/hashicorp/hcl/Makefile
new file mode 100644 (file)
index 0000000..84fd743
--- /dev/null
@@ -0,0 +1,18 @@
+TEST?=./...
+
+default: test
+
+fmt: generate
+       go fmt ./...
+
+test: generate
+       go get -t ./...
+       go test $(TEST) $(TESTARGS)
+
+generate:
+       go generate ./...
+
+updatedeps:
+       go get -u golang.org/x/tools/cmd/stringer
+
+.PHONY: default generate test updatedeps
diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md
new file mode 100644 (file)
index 0000000..c822332
--- /dev/null
@@ -0,0 +1,125 @@
+# HCL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
+
+HCL (HashiCorp Configuration Language) is a configuration language built
+by HashiCorp. The goal of HCL is to build a structured configuration language
+that is both human and machine friendly for use with command-line tools, but
+specifically targeted towards DevOps tools, servers, etc.
+
+HCL is also fully JSON compatible. That is, JSON can be used as completely
+valid input to a system expecting HCL. This helps make HCL-based systems
+interoperable with other systems.
+
+HCL is heavily inspired by
+[libucl](https://github.com/vstakhov/libucl),
+nginx configuration, and other similar languages.
+
+## Why?
+
+A common question about HCL is: why not
+JSON, YAML, etc.?
+
+Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
+used a variety of configuration languages from full programming languages
+such as Ruby to complete data structure languages such as JSON. What we
+learned is that some people wanted human-friendly configuration languages
+and some people wanted machine-friendly languages.
+
+JSON strikes a nice balance between the two, but is fairly verbose and, most
+importantly, doesn't support comments. With YAML, we found that beginners
+had a really hard time determining what the actual structure was, and
+ended up guessing more often than not whether to use a hyphen, colon, etc.
+in order to represent some configuration key.
+
+Full programming languages such as Ruby enable complex behavior
+that a configuration language shouldn't usually allow, and they also
+force people to learn some subset of Ruby.
+
+Because of this, we decided to create our own configuration language
+that is JSON-compatible. Our configuration language (HCL) is designed
+to be written and modified by humans. The API for HCL allows JSON
+as an input so that it is also machine-friendly (machines can generate
+JSON instead of trying to generate HCL).
+
+Our goal with HCL is not to alienate other configuration languages.
+It is instead to provide HCL as a specialized language for our tools,
+and JSON as the interoperability layer.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is listed here.
+
+  * Single line comments start with `#` or `//`
+
+  * Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
+    are not allowed. A multi-line comment (also known as a block comment)
+    terminates at the first `*/` found.
+
+  * Values are assigned with the syntax `key = value` (whitespace doesn't
+    matter). The value can be any primitive: a string, number, boolean,
+    object, or list.
+
+  * Strings are double-quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Multi-line strings start with `<<EOF` at the end of a line, and end
+    with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
+    Any text may be used in place of `EOF`. Example:
+```
+<<FOO
+hello
+world
+FOO
+```
+
+  * Numbers are assumed to be base 10. If you prefix a number with `0x`,
+    it is treated as hexadecimal. If it is prefixed with `0`, it is
+    treated as octal. Numbers can be in scientific notation: "1e10".
+
+  * Boolean values: `true`, `false`
+
+  * Arrays can be made by wrapping values in `[]`. Example:
+    `["foo", "bar", 42]`. Arrays can contain primitives,
+    other arrays, and objects. As an alternative, lists
+    of objects can be created with repeated blocks, using
+    this structure:
+
+    ```hcl
+    service {
+        key = "value"
+    }
+
+    service {
+        key = "value"
+    }
+    ```
+
+Objects and nested objects are created using the structure shown below:
+
+```
+variable "ami" {
+    description = "the AMI to use"
+}
+```
+This would be equivalent to the following JSON:
+```json
+{
+  "variable": {
+      "ami": {
+          "description": "the AMI to use"
+        }
+    }
+}
+```
+
+## Thanks
+
+Thanks to:
+
+  * [@vstakhov](https://github.com/vstakhov) - The original libucl parser
+    and syntax that HCL was based off of.
+
+  * [@fatih](https://github.com/fatih) - The rewritten HCL parser
+    in pure Go (no goyacc) and support for a printer.
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
new file mode 100644 (file)
index 0000000..4db0b71
--- /dev/null
@@ -0,0 +1,19 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\src\github.com\hashicorp\hcl
+environment:
+  GOPATH: c:\gopath
+init:
+  - git config --global core.autocrlf false
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -t ./...
+
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
new file mode 100644 (file)
index 0000000..0b39c1b
--- /dev/null
@@ -0,0 +1,724 @@
+package hcl
+
+import (
+       "errors"
+       "fmt"
+       "reflect"
+       "sort"
+       "strconv"
+       "strings"
+
+       "github.com/hashicorp/hcl/hcl/ast"
+       "github.com/hashicorp/hcl/hcl/parser"
+       "github.com/hashicorp/hcl/hcl/token"
+)
+
+// tagName is the struct tag name used to carry HCL decoding settings
+const tagName = "hcl"
+
+var (
+       // nodeType holds a reference to the type of ast.Node
+       nodeType reflect.Type = findNodeType()
+)
+
+// Unmarshal accepts a byte slice as input and writes the
+// data to the value pointed to by v.
+func Unmarshal(bs []byte, v interface{}) error {
+       root, err := parse(bs)
+       if err != nil {
+               return err
+       }
+
+       return DecodeObject(v, root)
+}
+
+// Decode reads the given input and decodes it into the structure
+// given by `out`.
+func Decode(out interface{}, in string) error {
+       obj, err := Parse(in)
+       if err != nil {
+               return err
+       }
+
+       return DecodeObject(out, obj)
+}
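+
+// Illustrative usage (an editor's sketch, not part of the original source):
+//
+//     var config struct {
+//             Name string `hcl:"name"`
+//     }
+//     err := Decode(&config, `name = "example"`)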
+
+// DecodeObject is a lower-level version of Decode. It decodes a
+// raw Object into the given output.
+func DecodeObject(out interface{}, n ast.Node) error {
+       val := reflect.ValueOf(out)
+       if val.Kind() != reflect.Ptr {
+               return errors.New("result must be a pointer")
+       }
+
+       // If we were given a whole file, decode its root node
+       if f, ok := n.(*ast.File); ok {
+               n = f.Node
+       }
+
+       var d decoder
+       return d.decode("root", n, val.Elem())
+}
+
+type decoder struct {
+       stack []reflect.Kind
+}
+
+func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
+       k := result
+
+       // If we have an interface with a valid value, we use that
+       // for the check.
+       if result.Kind() == reflect.Interface {
+               elem := result.Elem()
+               if elem.IsValid() {
+                       k = elem
+               }
+       }
+
+       // Push current onto stack unless it is an interface.
+       if k.Kind() != reflect.Interface {
+               d.stack = append(d.stack, k.Kind())
+
+               // Schedule a pop
+               defer func() {
+                       d.stack = d.stack[:len(d.stack)-1]
+               }()
+       }
+
+       switch k.Kind() {
+       case reflect.Bool:
+               return d.decodeBool(name, node, result)
+       case reflect.Float64:
+               return d.decodeFloat(name, node, result)
+       case reflect.Int, reflect.Int32, reflect.Int64:
+               return d.decodeInt(name, node, result)
+       case reflect.Interface:
+               // When we see an interface, we choose the concrete value ourselves
+               return d.decodeInterface(name, node, result)
+       case reflect.Map:
+               return d.decodeMap(name, node, result)
+       case reflect.Ptr:
+               return d.decodePtr(name, node, result)
+       case reflect.Slice:
+               return d.decodeSlice(name, node, result)
+       case reflect.String:
+               return d.decodeString(name, node, result)
+       case reflect.Struct:
+               return d.decodeStruct(name, node, result)
+       default:
+               return &parser.PosError{
+                       Pos: node.Pos(),
+                       Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
+               }
+       }
+}
+
+func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
+       switch n := node.(type) {
+       case *ast.LiteralType:
+               if n.Token.Type == token.BOOL {
+                       v, err := strconv.ParseBool(n.Token.Text)
+                       if err != nil {
+                               return err
+                       }
+
+                       result.Set(reflect.ValueOf(v))
+                       return nil
+               }
+       }
+
+       return &parser.PosError{
+               Pos: node.Pos(),
+               Err: fmt.Errorf("%s: unknown type %T", name, node),
+       }
+}
+
+func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
+       switch n := node.(type) {
+       case *ast.LiteralType:
+               if n.Token.Type == token.FLOAT {
+                       v, err := strconv.ParseFloat(n.Token.Text, 64)
+                       if err != nil {
+                               return err
+                       }
+
+                       result.Set(reflect.ValueOf(v))
+                       return nil
+               }
+       }
+
+       return &parser.PosError{
+               Pos: node.Pos(),
+               Err: fmt.Errorf("%s: unknown type %T", name, node),
+       }
+}
+
+func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
+       switch n := node.(type) {
+       case *ast.LiteralType:
+               switch n.Token.Type {
+               case token.NUMBER:
+                       v, err := strconv.ParseInt(n.Token.Text, 0, 0)
+                       if err != nil {
+                               return err
+                       }
+
+                       if result.Kind() == reflect.Interface {
+                               result.Set(reflect.ValueOf(int(v)))
+                       } else {
+                               result.SetInt(v)
+                       }
+                       return nil
+               case token.STRING:
+                       v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
+                       if err != nil {
+                               return err
+                       }
+
+                       if result.Kind() == reflect.Interface {
+                               result.Set(reflect.ValueOf(int(v)))
+                       } else {
+                               result.SetInt(v)
+                       }
+                       return nil
+               }
+       }
+
+       return &parser.PosError{
+               Pos: node.Pos(),
+               Err: fmt.Errorf("%s: unknown type %T", name, node),
+       }
+}
+
+func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
+       // When we see an ast.Node, we retain the value to enable deferred
+       // decoding. This is very useful in situations where we want to
+       // preserve ast.Node information, like Pos.
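+       // (Illustrative, assumed usage: a struct field declared as
+       //     Raw ast.Node `hcl:"raw"`
+       // would capture the raw node here for decoding later.)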
+       if result.Type() == nodeType && result.CanSet() {
+               result.Set(reflect.ValueOf(node))
+               return nil
+       }
+
+       var set reflect.Value
+       redecode := true
+
+       // For testing types, ObjectType should just be treated as a list. We
+       // set this to a temporary var because we want to pass in the real node.
+       testNode := node
+       if ot, ok := node.(*ast.ObjectType); ok {
+               testNode = ot.List
+       }
+
+       switch n := testNode.(type) {
+       case *ast.ObjectList:
+               // If we're at the root or we're directly within a slice, then we
+               // decode objects into map[string]interface{}, otherwise we decode
+               // them into lists.
+               if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+                       var temp map[string]interface{}
+                       tempVal := reflect.ValueOf(temp)
+                       result := reflect.MakeMap(
+                               reflect.MapOf(
+                                       reflect.TypeOf(""),
+                                       tempVal.Type().Elem()))
+
+                       set = result
+               } else {
+                       var temp []map[string]interface{}
+                       tempVal := reflect.ValueOf(temp)
+                       result := reflect.MakeSlice(
+                               reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
+                       set = result
+               }
+       case *ast.ObjectType:
+               // If we're at the root or we're directly within a slice, then we
+               // decode objects into map[string]interface{}, otherwise we decode
+               // them into lists.
+               if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
+                       var temp map[string]interface{}
+                       tempVal := reflect.ValueOf(temp)
+                       result := reflect.MakeMap(
+                               reflect.MapOf(
+                                       reflect.TypeOf(""),
+                                       tempVal.Type().Elem()))
+
+                       set = result
+               } else {
+                       var temp []map[string]interface{}
+                       tempVal := reflect.ValueOf(temp)
+                       result := reflect.MakeSlice(
+                               reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
+                       set = result
+               }
+       case *ast.ListType:
+               var temp []interface{}
+               tempVal := reflect.ValueOf(temp)
+               result := reflect.MakeSlice(
+                       reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
+               set = result
+       case *ast.LiteralType:
+               switch n.Token.Type {
+               case token.BOOL:
+                       var result bool
+                       set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+               case token.FLOAT:
+                       var result float64
+                       set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+               case token.NUMBER:
+                       var result int
+                       set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
+               case token.STRING, token.HEREDOC:
+                       set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
+               default:
+                       return &parser.PosError{
+                               Pos: node.Pos(),
+                               Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
+                       }
+               }
+       default:
+               return fmt.Errorf(
+                       "%s: cannot decode into interface: %T",
+                       name, node)
+       }
+
+       // Set the result to what it's supposed to be, then reset
+       // result so we don't reflect into this method anymore.
+       result.Set(set)
+
+       if redecode {
+               // Revisit the node so that we can use the newly instantiated
+               // thing and populate it.
+               if err := d.decode(name, node, result); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
+       if item, ok := node.(*ast.ObjectItem); ok {
+               node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+       }
+
+       if ot, ok := node.(*ast.ObjectType); ok {
+               node = ot.List
+       }
+
+       n, ok := node.(*ast.ObjectList)
+       if !ok {
+               return &parser.PosError{
+                       Pos: node.Pos(),
+                       Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
+               }
+       }
+
+       // If we have an interface, then we can address the interface,
+       // but not the map itself, so get the element but set the interface
+       set := result
+       if result.Kind() == reflect.Interface {
+               result = result.Elem()
+       }
+
+       resultType := result.Type()
+       resultElemType := resultType.Elem()
+       resultKeyType := resultType.Key()
+       if resultKeyType.Kind() != reflect.String {
+               return &parser.PosError{
+                       Pos: node.Pos(),
+                       Err: fmt.Errorf("%s: map must have string keys", name),
+               }
+       }
+
+       // Make a map if it is nil
+       resultMap := result
+       if result.IsNil() {
+               resultMap = reflect.MakeMap(
+                       reflect.MapOf(resultKeyType, resultElemType))
+       }
+
+       // Go through each element and decode it.
+       done := make(map[string]struct{})
+       for _, item := range n.Items {
+               if item.Val == nil {
+                       continue
+               }
+
+               // github.com/hashicorp/terraform/issue/5740
+               if len(item.Keys) == 0 {
+                       return &parser.PosError{
+                               Pos: node.Pos(),
+                               Err: fmt.Errorf("%s: map must have string keys", name),
+                       }
+               }
+
+               // Get the key we're dealing with, which is the first item
+               keyStr := item.Keys[0].Token.Value().(string)
+
+               // If we've already processed this key, then ignore it
+               if _, ok := done[keyStr]; ok {
+                       continue
+               }
+
+               // Determine the value. If we have more than one key, then we
+               // get the ObjectList of only those keys.
+               itemVal := item.Val
+               if len(item.Keys) > 1 {
+                       itemVal = n.Filter(keyStr)
+                       done[keyStr] = struct{}{}
+               }
+
+               // Make the field name
+               fieldName := fmt.Sprintf("%s.%s", name, keyStr)
+
+               // Get the key/value as reflection values
+               key := reflect.ValueOf(keyStr)
+               val := reflect.Indirect(reflect.New(resultElemType))
+
+               // If we have a pre-existing value in the map, use that
+               oldVal := resultMap.MapIndex(key)
+               if oldVal.IsValid() {
+                       val.Set(oldVal)
+               }
+
+               // Decode!
+               if err := d.decode(fieldName, itemVal, val); err != nil {
+                       return err
+               }
+
+               // Set the value on the map
+               resultMap.SetMapIndex(key, val)
+       }
+
+       // Set the final map if we can
+       set.Set(resultMap)
+       return nil
+}
+
+func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
+       // Create an element of the concrete (non pointer) type and decode
+       // into that. Then set the value of the pointer to this type.
+       resultType := result.Type()
+       resultElemType := resultType.Elem()
+       val := reflect.New(resultElemType)
+       if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
+               return err
+       }
+
+       result.Set(val)
+       return nil
+}
+
+func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
+       // If we have an interface, then we can address the interface,
+       // but not the slice itself, so get the element but set the interface
+       set := result
+       if result.Kind() == reflect.Interface {
+               result = result.Elem()
+       }
+       // Create the slice if it is nil
+       resultType := result.Type()
+       resultElemType := resultType.Elem()
+       if result.IsNil() {
+               resultSliceType := reflect.SliceOf(resultElemType)
+               result = reflect.MakeSlice(
+                       resultSliceType, 0, 0)
+       }
+
+       // Figure out the items we'll be copying into the slice
+       var items []ast.Node
+       switch n := node.(type) {
+       case *ast.ObjectList:
+               items = make([]ast.Node, len(n.Items))
+               for i, item := range n.Items {
+                       items[i] = item
+               }
+       case *ast.ObjectType:
+               items = []ast.Node{n}
+       case *ast.ListType:
+               items = n.List
+       default:
+               return &parser.PosError{
+                       Pos: node.Pos(),
+                       Err: fmt.Errorf("unknown slice type: %T", node),
+               }
+       }
+
+       for i, item := range items {
+               fieldName := fmt.Sprintf("%s[%d]", name, i)
+
+               // Decode
+               val := reflect.Indirect(reflect.New(resultElemType))
+
+               // If the item is an object that was decoded from ambiguous
+               // JSON and flattened, make sure it's expanded if it needs to
+               // decode into a defined structure.
+               item := expandObject(item, val)
+
+               if err := d.decode(fieldName, item, val); err != nil {
+                       return err
+               }
+
+               // Append it onto the slice
+               result = reflect.Append(result, val)
+       }
+
+       set.Set(result)
+       return nil
+}
+
+// expandObject detects if an ambiguous JSON object was flattened to a List which
+// should be decoded into a struct, and expands the AST so it decodes properly.
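+//
+// For example (an assumed shape, for illustration): a flattened item with
+// Keys ["service", "name"] and a literal Val "web" is rewritten into an item
+// with the single key "service" whose Val is an ObjectType wrapping the
+// remaining item {Keys: ["name"], Val: "web"}, so it can decode into a struct.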
+func expandObject(node ast.Node, result reflect.Value) ast.Node {
+       item, ok := node.(*ast.ObjectItem)
+       if !ok {
+               return node
+       }
+
+       elemType := result.Type()
+
+       // our target type must be a struct
+       switch elemType.Kind() {
+       case reflect.Ptr:
+               switch elemType.Elem().Kind() {
+               case reflect.Struct:
+                       // OK
+               default:
+                       return node
+               }
+       case reflect.Struct:
+               // OK
+       default:
+               return node
+       }
+
+       // A list value will have a key and field name. If it had more fields,
+       // it wouldn't have been flattened.
+       if len(item.Keys) != 2 {
+               return node
+       }
+
+       keyToken := item.Keys[0].Token
+       item.Keys = item.Keys[1:]
+
+       // we need to un-flatten the ast enough to decode
+       newNode := &ast.ObjectItem{
+               Keys: []*ast.ObjectKey{
+                       &ast.ObjectKey{
+                               Token: keyToken,
+                       },
+               },
+               Val: &ast.ObjectType{
+                       List: &ast.ObjectList{
+                               Items: []*ast.ObjectItem{item},
+                       },
+               },
+       }
+
+       return newNode
+}
+
+func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
+       switch n := node.(type) {
+       case *ast.LiteralType:
+               switch n.Token.Type {
+               case token.NUMBER:
+                       result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
+                       return nil
+               case token.STRING, token.HEREDOC:
+                       result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
+                       return nil
+               }
+       }
+
+       return &parser.PosError{
+               Pos: node.Pos(),
+               Err: fmt.Errorf("%s: unknown type for string %T", name, node),
+       }
+}
+
+func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
+       var item *ast.ObjectItem
+       if it, ok := node.(*ast.ObjectItem); ok {
+               item = it
+               node = it.Val
+       }
+
+       if ot, ok := node.(*ast.ObjectType); ok {
+               node = ot.List
+       }
+
+       // Handle the special case where the object itself is a literal. Previously
+       // the yacc parser would always ensure top-level elements were arrays. The new
+       // parser does not make the same guarantees, thus we need to convert any
+       // top-level literal elements into a list.
+       if _, ok := node.(*ast.LiteralType); ok && item != nil {
+               node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
+       }
+
+       list, ok := node.(*ast.ObjectList)
+       if !ok {
+               return &parser.PosError{
+                       Pos: node.Pos(),
+                       Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
+               }
+       }
+
+       // This slice will keep track of all the structs we'll be decoding.
+       // There can be more than one struct if there are embedded structs
+       // that are squashed.
+       structs := make([]reflect.Value, 1, 5)
+       structs[0] = result
+
+       // Compile the list of all the fields that we're going to be decoding
+       // from all the structs.
+       fields := make(map[*reflect.StructField]reflect.Value)
+       for len(structs) > 0 {
+               structVal := structs[0]
+               structs = structs[1:]
+
+               structType := structVal.Type()
+               for i := 0; i < structType.NumField(); i++ {
+                       fieldType := structType.Field(i)
+                       tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
+
+                       // Ignore fields with tag name "-"
+                       if tagParts[0] == "-" {
+                               continue
+                       }
+
+                       if fieldType.Anonymous {
+                               fieldKind := fieldType.Type.Kind()
+                               if fieldKind != reflect.Struct {
+                                       return &parser.PosError{
+                                               Pos: node.Pos(),
+                                               Err: fmt.Errorf("%s: unsupported type to struct: %s",
+                                                       fieldType.Name, fieldKind),
+                                       }
+                               }
+
+                               // We have an embedded field. We "squash" the fields down
+                               // if specified in the tag.
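+                               // (Illustrative: an embedded field declared as
+                               //     Base `hcl:",squash"`
+                               // has its fields decoded directly into this
+                               // struct.)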
+                               squash := false
+                               for _, tag := range tagParts[1:] {
+                                       if tag == "squash" {
+                                               squash = true
+                                               break
+                                       }
+                               }
+
+                               if squash {
+                                       structs = append(
+                                               structs, result.FieldByName(fieldType.Name))
+                                       continue
+                               }
+                       }
+
+                       // Normal struct field, store it away
+                       fields[&fieldType] = structVal.Field(i)
+               }
+       }
+
+       usedKeys := make(map[string]struct{})
+       decodedFields := make([]string, 0, len(fields))
+       decodedFieldsVal := make([]reflect.Value, 0)
+       unusedKeysVal := make([]reflect.Value, 0)
+       for fieldType, field := range fields {
+               if !field.IsValid() {
+                       // This should never happen
+                       panic("field is not valid")
+               }
+
+               // If we can't set the field, then it is unexported or something,
+               // and we just continue onwards.
+               if !field.CanSet() {
+                       continue
+               }
+
+               fieldName := fieldType.Name
+
+               tagValue := fieldType.Tag.Get(tagName)
+               tagParts := strings.SplitN(tagValue, ",", 2)
+               if len(tagParts) >= 2 {
+                       switch tagParts[1] {
+                       case "decodedFields":
+                               decodedFieldsVal = append(decodedFieldsVal, field)
+                               continue
+                       case "key":
+                               if item == nil {
+                                       return &parser.PosError{
+                                               Pos: node.Pos(),
+                                               Err: fmt.Errorf("%s: %s asked for 'key', impossible",
+                                                       name, fieldName),
+                                       }
+                               }
+
+                               field.SetString(item.Keys[0].Token.Value().(string))
+                               continue
+                       case "unusedKeys":
+                               unusedKeysVal = append(unusedKeysVal, field)
+                               continue
+                       }
+               }
+
+               if tagParts[0] != "" {
+                       fieldName = tagParts[0]
+               }
+
+               // Determine the element we'll use to decode. If it is a single
+               // match (the only object with the field), then we decode it
+               // exactly. If it is a prefix match, then we decode the matches.
+               filter := list.Filter(fieldName)
+
+               prefixMatches := filter.Children()
+               matches := filter.Elem()
+               if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
+                       continue
+               }
+
+               // Track the used key
+               usedKeys[fieldName] = struct{}{}
+
+               // Create the field name and decode. We range over the elements
+               // because we actually want the value.
+               fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+               if len(prefixMatches.Items) > 0 {
+                       if err := d.decode(fieldName, prefixMatches, field); err != nil {
+                               return err
+                       }
+               }
+               for _, match := range matches.Items {
+                       var decodeNode ast.Node = match.Val
+                       if ot, ok := decodeNode.(*ast.ObjectType); ok {
+                               decodeNode = &ast.ObjectList{Items: ot.List.Items}
+                       }
+
+                       if err := d.decode(fieldName, decodeNode, field); err != nil {
+                               return err
+                       }
+               }
+
+               decodedFields = append(decodedFields, fieldType.Name)
+       }
+
+       if len(decodedFieldsVal) > 0 {
+               // Sort it so that it is deterministic
+               sort.Strings(decodedFields)
+
+               for _, v := range decodedFieldsVal {
+                       v.Set(reflect.ValueOf(decodedFields))
+               }
+       }
+
+       return nil
+}
+
+// findNodeType returns the type of ast.Node
+func findNodeType() reflect.Type {
+       var nodeContainer struct {
+               Node ast.Node
+       }
+       value := reflect.ValueOf(nodeContainer).FieldByName("Node")
+       return value.Type()
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl.go b/vendor/github.com/hashicorp/hcl/hcl.go
new file mode 100644 (file)
index 0000000..575a20b
--- /dev/null
@@ -0,0 +1,11 @@
+// Package hcl decodes HCL into usable Go structures.
+//
+// hcl input can come in either pure HCL format or JSON format.
+// It can be parsed into an AST, and then decoded into a structure,
+// or it can be decoded directly from a string into a structure.
+//
+// If you choose to parse HCL into a raw AST, the benefit is that you
+// can write custom visitor implementations to implement custom
+// semantic checks. By default, HCL does not perform any semantic
+// checks.
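+//
+// A minimal usage sketch (the struct, tag, and input below are illustrative,
+// assuming the package-level Decode helper):
+//
+//     type Config struct {
+//             Address string `hcl:"address"`
+//     }
+//
+//     var c Config
+//     err := hcl.Decode(&c, `address = "example.com"`)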
+package hcl
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go b/vendor/github.com/hashicorp/hcl/hcl/ast/ast.go
new file mode 100644 (file)
index 0000000..6e5ef65
--- /dev/null
@@ -0,0 +1,219 @@
+// Package ast declares the types used to represent syntax trees for HCL
+// (HashiCorp Configuration Language).
+package ast
+
+import (
+       "fmt"
+       "strings"
+
+       "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Node is an element in the abstract syntax tree.
+type Node interface {
+       node()
+       Pos() token.Pos
+}
+
+func (File) node()         {}
+func (ObjectList) node()   {}
+func (ObjectKey) node()    {}
+func (ObjectItem) node()   {}
+func (Comment) node()      {}
+func (CommentGroup) node() {}
+func (ObjectType) node()   {}
+func (LiteralType) node()  {}
+func (ListType) node()     {}
+
+// File represents a single HCL file
+type File struct {
+       Node     Node            // usually a *ObjectList
+       Comments []*CommentGroup // list of all comments in the source
+}
+
+func (f *File) Pos() token.Pos {
+       return f.Node.Pos()
+}
+
+// ObjectList represents a list of ObjectItems. An HCL file itself is an
+// ObjectList.
+type ObjectList struct {
+       Items []*ObjectItem
+}
+
+func (o *ObjectList) Add(item *ObjectItem) {
+       o.Items = append(o.Items, item)
+}
+
+// Filter filters out the objects with the given key list as a prefix.
+//
+// The returned list of objects contain ObjectItems where the keys have
+// this prefix already stripped off. This might result in objects with
+// zero-length key lists if they have no children.
+//
+// If no matches are found, an empty ObjectList (non-nil) is returned.
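+//
+// For example (illustrative): given items with keys ["foo", "bar"] and
+// ["foo", "baz"], Filter("foo") returns items with keys ["bar"] and ["baz"].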
+func (o *ObjectList) Filter(keys ...string) *ObjectList {
+       var result ObjectList
+       for _, item := range o.Items {
+               // If there aren't enough keys, then ignore this
+               if len(item.Keys) < len(keys) {
+                       continue
+               }
+
+               match := true
+               for i, key := range item.Keys[:len(keys)] {
+                       key := key.Token.Value().(string)
+                       if key != keys[i] && !strings.EqualFold(key, keys[i]) {
+                               match = false
+                               break
+                       }
+               }
+               if !match {
+                       continue
+               }
+
+               // Strip off the prefix from the children
+               newItem := *item
+               newItem.Keys = newItem.Keys[len(keys):]
+               result.Add(&newItem)
+       }
+
+       return &result
+}
+
+// Children returns further nested objects (key length > 0) within this
+// ObjectList. This should be used with Filter to get at child items.
+func (o *ObjectList) Children() *ObjectList {
+       var result ObjectList
+       for _, item := range o.Items {
+               if len(item.Keys) > 0 {
+                       result.Add(item)
+               }
+       }
+
+       return &result
+}
+
+// Elem returns items in the list that are direct element assignments
+// (key length == 0). This should be used with Filter to get at elements.
+func (o *ObjectList) Elem() *ObjectList {
+       var result ObjectList
+       for _, item := range o.Items {
+               if len(item.Keys) == 0 {
+                       result.Add(item)
+               }
+       }
+
+       return &result
+}
+
+func (o *ObjectList) Pos() token.Pos {
+       // returns the position of the first item
+       return o.Items[0].Pos()
+}
+
+// ObjectItem represents an HCL Object Item. An item is represented with a key
+// (or keys). It can be an assignment or an object (both normal and nested).
+type ObjectItem struct {
+       // Keys is only one element long if it's of type assignment. If it's a
+       // nested object it can be longer than one. In that case "assign" is
+       // invalid, as there are no assignments for a nested object.
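+       //
+       // For example (illustrative), `foo "bar" { ... }` yields two keys,
+       // while `foo = "bar"` yields a single key.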
+       Keys []*ObjectKey
+
+       // assign contains the position of "=", if any
+       Assign token.Pos
+
+       // Val is the item itself. It can be an object, list, number, bool or a
+       // string. If the key length is larger than one, Val can only be of
+       // type Object.
+       Val Node
+
+       LeadComment *CommentGroup // associated lead comment
+       LineComment *CommentGroup // associated line comment
+}
+
+func (o *ObjectItem) Pos() token.Pos {
+       // I'm not entirely sure what causes this, but removing this causes
+       // a test failure. We should investigate at some point.
+       if len(o.Keys) == 0 {
+               return token.Pos{}
+       }
+
+       return o.Keys[0].Pos()
+}
+
+// ObjectKey represents an object key; it is either an identifier or a string.
+type ObjectKey struct {
+       Token token.Token
+}
+
+func (o *ObjectKey) Pos() token.Pos {
+       return o.Token.Pos
+}
+
+// LiteralType represents a literal of basic type. Valid types are:
+// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
+type LiteralType struct {
+       Token token.Token
+
+       // comment types, only used when in a list
+       LeadComment *CommentGroup
+       LineComment *CommentGroup
+}
+
+func (l *LiteralType) Pos() token.Pos {
+       return l.Token.Pos
+}
+
+// ListType represents an HCL List type
+type ListType struct {
+       Lbrack token.Pos // position of "["
+       Rbrack token.Pos // position of "]"
+       List   []Node    // the elements in lexical order
+}
+
+func (l *ListType) Pos() token.Pos {
+       return l.Lbrack
+}
+
+func (l *ListType) Add(node Node) {
+       l.List = append(l.List, node)
+}
+
+// ObjectType represents an HCL Object Type
+type ObjectType struct {
+       Lbrace token.Pos   // position of "{"
+       Rbrace token.Pos   // position of "}"
+       List   *ObjectList // the nodes in lexical order
+}
+
+func (o *ObjectType) Pos() token.Pos {
+       return o.Lbrace
+}
+
+// Comment node represents a single //-style, #-style or /*-style comment
+type Comment struct {
+       Start token.Pos // position of / or #
+       Text  string
+}
+
+func (c *Comment) Pos() token.Pos {
+       return c.Start
+}
+
+// CommentGroup node represents a sequence of comments with no other tokens and
+// no empty lines between.
+type CommentGroup struct {
+       List []*Comment // len(List) > 0
+}
+
+func (c *CommentGroup) Pos() token.Pos {
+       return c.List[0].Pos()
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (o *ObjectKey) GoString() string  { return fmt.Sprintf("*%#v", *o) }
+func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }
diff --git a/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go b/vendor/github.com/hashicorp/hcl/hcl/ast/walk.go
new file mode 100644 (file)
index 0000000..ba07ad4
--- /dev/null
@@ -0,0 +1,52 @@
+package ast
+
+import "fmt"
+
+// WalkFunc describes a function to be called for each node during a Walk. The
+// returned node can be used to rewrite the AST. Walking stops if the returned
+// bool is false.
+type WalkFunc func(Node) (Node, bool)
+
+// Walk traverses an AST in depth-first order: It starts by calling fn(node);
+// node must not be nil. If fn returns true, Walk invokes fn recursively for
+// each of the non-nil children of node, followed by a call of fn(nil). The
+// returned node of fn can be used to rewrite the passed node to fn.
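+//
+// A minimal sketch (illustrative) that counts literal nodes without rewriting
+// anything; note that fn is also called with nil after a node's children are
+// walked:
+//
+//     count := 0
+//     Walk(node, func(n Node) (Node, bool) {
+//             if _, ok := n.(*LiteralType); ok {
+//                     count++
+//             }
+//             return n, true
+//     })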
+func Walk(node Node, fn WalkFunc) Node {
+       rewritten, ok := fn(node)
+       if !ok {
+               return rewritten
+       }
+
+       switch n := node.(type) {
+       case *File:
+               n.Node = Walk(n.Node, fn)
+       case *ObjectList:
+               for i, item := range n.Items {
+                       n.Items[i] = Walk(item, fn).(*ObjectItem)
+               }
+       case *ObjectKey:
+               // nothing to do
+       case *ObjectItem:
+               for i, k := range n.Keys {
+                       n.Keys[i] = Walk(k, fn).(*ObjectKey)
+               }
+
+               if n.Val != nil {
+                       n.Val = Walk(n.Val, fn)
+               }
+       case *LiteralType:
+               // nothing to do
+       case *ListType:
+               for i, l := range n.List {
+                       n.List[i] = Walk(l, fn)
+               }
+       case *ObjectType:
+               n.List = Walk(n.List, fn).(*ObjectList)
+       default:
+               // should we panic here?
+               fmt.Printf("unknown type: %T\n", n)
+       }
+
+       fn(nil)
+       return rewritten
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/error.go b/vendor/github.com/hashicorp/hcl/hcl/parser/error.go
new file mode 100644 (file)
index 0000000..5c99381
--- /dev/null
@@ -0,0 +1,17 @@
+package parser
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/hcl/hcl/token"
+)
+
+// PosError is a parse error that contains a position.
+type PosError struct {
+       Pos token.Pos
+       Err error
+}
+
+func (e *PosError) Error() string {
+       return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
new file mode 100644 (file)
index 0000000..b488180
--- /dev/null
@@ -0,0 +1,520 @@
+// Package parser implements a parser for HCL (HashiCorp Configuration
+// Language).
+package parser
+
+import (
+       "bytes"
+       "errors"
+       "fmt"
+       "strings"
+
+       "github.com/hashicorp/hcl/hcl/ast"
+       "github.com/hashicorp/hcl/hcl/scanner"
+       "github.com/hashicorp/hcl/hcl/token"
+)
+
+type Parser struct {
+       sc *scanner.Scanner
+
+       // Last read token
+       tok       token.Token
+       commaPrev token.Token
+
+       comments    []*ast.CommentGroup
+       leadComment *ast.CommentGroup // last lead comment
+       lineComment *ast.CommentGroup // last line comment
+
+       enableTrace bool
+       indent      int
+       n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+       return &Parser{
+               sc: scanner.New(src),
+       }
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
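+//
+// Sketch (illustrative):
+//
+//     f, err := Parse([]byte(`foo = "bar"`))
+//     if err == nil {
+//             list := f.Node.(*ast.ObjectList) // the top level is an ObjectList
+//             _ = list
+//     }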
+func Parse(src []byte) (*ast.File, error) {
+       // normalize all line endings, since the scanner and output only work
+       // with "\n" line endings; otherwise we may end up with dangling "\r"
+       // characters in the parsed data.
+       src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
+       p := newParser(src)
+       return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+       f := &ast.File{}
+       var err, scerr error
+       p.sc.Error = func(pos token.Pos, msg string) {
+               scerr = &PosError{Pos: pos, Err: errors.New(msg)}
+       }
+
+       f.Node, err = p.objectList(false)
+       if scerr != nil {
+               return nil, scerr
+       }
+       if err != nil {
+               return nil, err
+       }
+
+       f.Comments = p.comments
+       return f, nil
+}
+
+// objectList parses a list of items within an object (generally k/v pairs).
+// The parameter "obj" tells us whether we are within an object (braces:
+// '{', '}') or just at the top level. If we're within an object, we end
+// at an RBRACE.
+func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
+       defer un(trace(p, "ParseObjectList"))
+       node := &ast.ObjectList{}
+
+       for {
+               if obj {
+                       tok := p.scan()
+                       p.unscan()
+                       if tok.Type == token.RBRACE {
+                               break
+                       }
+               }
+
+               n, err := p.objectItem()
+               if err == errEofToken {
+                       break // we are finished
+               }
+
+               // we don't return a nil node, because we might want to use the
+               // already collected items.
+               if err != nil {
+                       return node, err
+               }
+
+               node.Add(n)
+
+               // object lists can be optionally comma-delimited e.g. when a list of maps
+               // is being expressed, so a comma is allowed here - it's simply consumed
+               tok := p.scan()
+               if tok.Type != token.COMMA {
+                       p.unscan()
+               }
+       }
+       return node, nil
+}
+
+func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
+       endline = p.tok.Pos.Line
+
+       // count the endline if it's a multiline comment, i.e. starting with /*
+       if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
+               // don't use range here - no need to decode Unicode code points
+               for i := 0; i < len(p.tok.Text); i++ {
+                       if p.tok.Text[i] == '\n' {
+                               endline++
+                       }
+               }
+       }
+
+       comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
+       p.tok = p.sc.Scan()
+       return
+}
+
+func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
+       var list []*ast.Comment
+       endline = p.tok.Pos.Line
+
+       for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
+               var comment *ast.Comment
+               comment, endline = p.consumeComment()
+               list = append(list, comment)
+       }
+
+       // add comment group to the comments list
+       comments = &ast.CommentGroup{List: list}
+       p.comments = append(p.comments, comments)
+
+       return
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+       defer un(trace(p, "ParseObjectItem"))
+
+       keys, err := p.objectKey()
+       if len(keys) > 0 && err == errEofToken {
+               // We ignore eof token here since it is an error if we didn't
+               // receive a value (but we did receive a key) for the item.
+               err = nil
+       }
+       if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
+               // This is a strange boolean statement, but what it means is:
+               // We have keys with no value, and we're likely in an object
+               // (since RBrace ends an object). For this, we set err to nil so
+               // we continue and get the error below of having the wrong value
+               // type.
+               err = nil
+
+               // Reset the token type so we don't think it completed fine. See
+               // objectType which uses p.tok.Type to check if we're done with
+               // the object.
+               p.tok.Type = token.EOF
+       }
+       if err != nil {
+               return nil, err
+       }
+
+       o := &ast.ObjectItem{
+               Keys: keys,
+       }
+
+       if p.leadComment != nil {
+               o.LeadComment = p.leadComment
+               p.leadComment = nil
+       }
+
+       switch p.tok.Type {
+       case token.ASSIGN:
+               o.Assign = p.tok.Pos
+               o.Val, err = p.object()
+               if err != nil {
+                       return nil, err
+               }
+       case token.LBRACE:
+               o.Val, err = p.objectType()
+               if err != nil {
+                       return nil, err
+               }
+       default:
+               keyStr := make([]string, 0, len(keys))
+               for _, k := range keys {
+                       keyStr = append(keyStr, k.Token.Text)
+               }
+
+               return nil, fmt.Errorf(
+                       "key '%s' expected start of object ('{') or assignment ('=')",
+                       strings.Join(keyStr, " "))
+       }
+
+       // do a look-ahead for line comment
+       p.scan()
+       if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
+               o.LineComment = p.lineComment
+               p.lineComment = nil
+       }
+       p.unscan()
+       return o, nil
+}
+
+// objectKey parses an object key and returns a ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+       keyCount := 0
+       keys := make([]*ast.ObjectKey, 0)
+
+       for {
+               tok := p.scan()
+               switch tok.Type {
+               case token.EOF:
+                       // It is very important to also return the keys here as well as
+                       // the error. This is because we need to be able to tell if we
+                       // did parse keys prior to finding the EOF, or if we just found
+                       // a bare EOF.
+                       return keys, errEofToken
+               case token.ASSIGN:
+                       // Assignment or object only, but not nested objects.
+                       // This is not allowed: `foo bar = {}`
+                       if keyCount > 1 {
+                               return nil, &PosError{
+                                       Pos: p.tok.Pos,
+                                       Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
+                               }
+                       }
+
+                       if keyCount == 0 {
+                               return nil, &PosError{
+                                       Pos: p.tok.Pos,
+                                       Err: errors.New("no object keys found!"),
+                               }
+                       }
+
+                       return keys, nil
+               case token.LBRACE:
+                       var err error
+
+                       // If we have no keys, then it is a syntax error, i.e.
+                       // {{}} is not allowed.
+                       if len(keys) == 0 {
+                               err = &PosError{
+                                       Pos: p.tok.Pos,
+                                       Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
+                               }
+                       }
+
+                       // object
+                       return keys, err
+               case token.IDENT, token.STRING:
+                       keyCount++
+                       keys = append(keys, &ast.ObjectKey{Token: p.tok})
+               case token.ILLEGAL:
+                       return keys, &PosError{
+                               Pos: p.tok.Pos,
+                               Err: fmt.Errorf("illegal character"),
+                       }
+               default:
+                       return keys, &PosError{
+                               Pos: p.tok.Pos,
+                               Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
+                       }
+               }
+       }
+}
+
+// object parses any type of object, such as number, bool, string, object or
+// list.
+func (p *Parser) object() (ast.Node, error) {
+       defer un(trace(p, "ParseType"))
+       tok := p.scan()
+
+       switch tok.Type {
+       case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
+               return p.literalType()
+       case token.LBRACE:
+               return p.objectType()
+       case token.LBRACK:
+               return p.listType()
+       case token.COMMENT:
+               // implement comment
+       case token.EOF:
+               return nil, errEofToken
+       }
+
+       return nil, &PosError{
+               Pos: tok.Pos,
+               Err: fmt.Errorf("Unknown token: %+v", tok),
+       }
+}
+
+// objectType parses an object type and returns a ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+       defer un(trace(p, "ParseObjectType"))
+
+       // we assume that the currently scanned token is a LBRACE
+       o := &ast.ObjectType{
+               Lbrace: p.tok.Pos,
+       }
+
+       l, err := p.objectList(true)
+
+       // if we hit an RBRACE, we are good to go (it means we parsed all
+       // items); if it's not an RBRACE, it's a syntax error and we just
+       // return it.
+       if err != nil && p.tok.Type != token.RBRACE {
+               return nil, err
+       }
+
+       // No error, scan and expect the ending to be a brace
+       if tok := p.scan(); tok.Type != token.RBRACE {
+               return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
+       }
+
+       o.List = l
+       o.Rbrace = p.tok.Pos // advanced via parseObjectList
+       return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+       defer un(trace(p, "ParseListType"))
+
+       // we assume that the currently scanned token is a LBRACK
+       l := &ast.ListType{
+               Lbrack: p.tok.Pos,
+       }
+
+       needComma := false
+       for {
+               tok := p.scan()
+               if needComma {
+                       switch tok.Type {
+                       case token.COMMA, token.RBRACK:
+                       default:
+                               return nil, &PosError{
+                                       Pos: tok.Pos,
+                                       Err: fmt.Errorf(
+                                               "error parsing list, expected comma or list end, got: %s",
+                                               tok.Type),
+                               }
+                       }
+               }
+               switch tok.Type {
+               case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
+                       node, err := p.literalType()
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       // If there is a lead comment, apply it
+                       if p.leadComment != nil {
+                               node.LeadComment = p.leadComment
+                               p.leadComment = nil
+                       }
+
+                       l.Add(node)
+                       needComma = true
+               case token.COMMA:
+                       // get next list item or we are at the end
+                       // do a look-ahead for line comment
+                       p.scan()
+                       if p.lineComment != nil && len(l.List) > 0 {
+                               lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
+                               if ok {
+                                       lit.LineComment = p.lineComment
+                                       l.List[len(l.List)-1] = lit
+                                       p.lineComment = nil
+                               }
+                       }
+                       p.unscan()
+
+                       needComma = false
+                       continue
+               case token.LBRACE:
+                       // Looks like a nested object, so parse it out
+                       node, err := p.objectType()
+                       if err != nil {
+                               return nil, &PosError{
+                                       Pos: tok.Pos,
+                                       Err: fmt.Errorf(
+                                               "error while trying to parse object within list: %s", err),
+                               }
+                       }
+                       l.Add(node)
+                       needComma = true
+               case token.LBRACK:
+                       node, err := p.listType()
+                       if err != nil {
+                               return nil, &PosError{
+                                       Pos: tok.Pos,
+                                       Err: fmt.Errorf(
+                                               "error while trying to parse list within list: %s", err),
+                               }
+                       }
+                       l.Add(node)
+               case token.RBRACK:
+                       // finished
+                       l.Rbrack = p.tok.Pos
+                       return l, nil
+               default:
+                       return nil, &PosError{
+                               Pos: tok.Pos,
+                               Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
+                       }
+               }
+       }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+       defer un(trace(p, "ParseLiteral"))
+
+       return &ast.LiteralType{
+               Token: p.tok,
+       }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead. In the process, it collects any
+// comment groups encountered, and remembers the last lead and line comments.
+func (p *Parser) scan() token.Token {
+       // If we have a token on the buffer, then return it.
+       if p.n != 0 {
+               p.n = 0
+               return p.tok
+       }
+
+       // Otherwise read the next token from the scanner and save it to the
+       // buffer in case we unscan later.
+       prev := p.tok
+       p.tok = p.sc.Scan()
+
+       if p.tok.Type == token.COMMENT {
+               var comment *ast.CommentGroup
+               var endline int
+
+               // fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
+               // p.tok.Pos.Line, prev.Pos.Line, endline)
+               if p.tok.Pos.Line == prev.Pos.Line {
+                       // The comment is on same line as the previous token; it
+                       // cannot be a lead comment but may be a line comment.
+                       comment, endline = p.consumeCommentGroup(0)
+                       if p.tok.Pos.Line != endline {
+                               // The next token is on a different line, thus
+                               // the last comment group is a line comment.
+                               p.lineComment = comment
+                       }
+               }
+
+               // consume successor comments, if any
+               endline = -1
+               for p.tok.Type == token.COMMENT {
+                       comment, endline = p.consumeCommentGroup(1)
+               }
+
+               if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
+                       switch p.tok.Type {
+                       case token.RBRACE, token.RBRACK:
+                               // Do not count for these cases
+                       default:
+                               // The next token is following on the line immediately after the
+                               // comment group, thus the last comment group is a lead comment.
+                               p.leadComment = comment
+                       }
+               }
+
+       }
+
+       return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+       p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+       if !p.enableTrace {
+               return
+       }
+
+       const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+       const n = len(dots)
+       fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+       i := 2 * p.indent
+       for i > n {
+               fmt.Print(dots)
+               i -= n
+       }
+       // i <= n
+       fmt.Print(dots[0:i])
+       fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+       p.printTrace(msg, "(")
+       p.indent++
+       return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+       p.indent--
+       p.printTrace(")")
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
new file mode 100644 (file)
index 0000000..6966236
--- /dev/null
@@ -0,0 +1,651 @@
+// Package scanner implements a scanner for HCL (HashiCorp Configuration
+// Language) source text.
+package scanner
+
+import (
+       "bytes"
+       "fmt"
+       "os"
+       "regexp"
+       "unicode"
+       "unicode/utf8"
+
+       "github.com/hashicorp/hcl/hcl/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+       buf *bytes.Buffer // Source buffer for advancing and scanning
+       src []byte        // Source buffer for immutable access
+
+       // Source Position
+       srcPos  token.Pos // current position
+       prevPos token.Pos // previous position, used for peek() method
+
+       lastCharLen int // length of last character in bytes
+       lastLineLen int // length of last line in characters (for correct column reporting)
+
+       tokStart int // token text start position
+       tokEnd   int // token text end position
+
+       // Error is called for each error encountered. If no Error
+       // function is set, the error is reported to os.Stderr.
+       Error func(pos token.Pos, msg string)
+
+       // ErrorCount is incremented by one for each error encountered.
+       ErrorCount int
+
+       // tokPos is the start position of most recently scanned token; set by
+       // Scan. The Filename field is always left untouched by the Scanner.  If
+       // an error is reported (via Error) and Position is invalid, the scanner is
+       // not inside a token.
+       tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
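+//
+// Typical scan loop (illustrative):
+//
+//     s := New([]byte(`foo = "bar"`))
+//     for tok := s.Scan(); tok.Type != token.EOF; tok = s.Scan() {
+//             // inspect tok.Type, tok.Text and tok.Pos
+//     }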
+func New(src []byte) *Scanner {
+       // even though we accept a src, we read from an io.Reader compatible
+       // type (*bytes.Buffer), so in the future we could easily change it to
+       // a streaming read.
+       b := bytes.NewBuffer(src)
+       s := &Scanner{
+               buf: b,
+               src: src,
+       }
+
+       // srcPos.Line always starts at 1
+       s.srcPos.Line = 1
+       return s
+}
+
+// next reads the next rune from the buffered reader. Returns rune(0) if
+// an error occurs (or io.EOF is returned).
+func (s *Scanner) next() rune {
+       ch, size, err := s.buf.ReadRune()
+       if err != nil {
+               // advance for error reporting
+               s.srcPos.Column++
+               s.srcPos.Offset += size
+               s.lastCharLen = size
+               return eof
+       }
+
+       if ch == utf8.RuneError && size == 1 {
+               s.srcPos.Column++
+               s.srcPos.Offset += size
+               s.lastCharLen = size
+               s.err("illegal UTF-8 encoding")
+               return ch
+       }
+
+       // remember last position
+       s.prevPos = s.srcPos
+
+       s.srcPos.Column++
+       s.lastCharLen = size
+       s.srcPos.Offset += size
+
+       if ch == '\n' {
+               s.srcPos.Line++
+               s.lastLineLen = s.srcPos.Column
+               s.srcPos.Column = 0
+       }
+
+       // If we see a null character with data left, then that is an error
+       if ch == '\x00' && s.buf.Len() > 0 {
+               s.err("unexpected null character (0x00)")
+               return eof
+       }
+
+       // debug
+       // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+       return ch
+}
+
+// unread unreads the previous read Rune and updates the source position
+func (s *Scanner) unread() {
+       if err := s.buf.UnreadRune(); err != nil {
+               panic(err) // this is user fault, we should catch it
+       }
+       s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+       peek, _, err := s.buf.ReadRune()
+       if err != nil {
+               return eof
+       }
+
+       s.buf.UnreadRune()
+       return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+       ch := s.next()
+
+       // skip white space
+       for isWhitespace(ch) {
+               ch = s.next()
+       }
+
+       var tok token.Type
+
+       // token text markings
+       s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+       // token position: the initial next() moved the offset by one (the size
+       // of the rune, actually), but we are interested in the starting point
+       s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+       if s.srcPos.Column > 0 {
+               // common case: last character was not a '\n'
+               s.tokPos.Line = s.srcPos.Line
+               s.tokPos.Column = s.srcPos.Column
+       } else {
+               // last character was a '\n'
+               // (we cannot be at the beginning of the source
+               // since we have called next() at least once)
+               s.tokPos.Line = s.srcPos.Line - 1
+               s.tokPos.Column = s.lastLineLen
+       }
+
+       switch {
+       case isLetter(ch):
+               tok = token.IDENT
+               lit := s.scanIdentifier()
+               if lit == "true" || lit == "false" {
+                       tok = token.BOOL
+               }
+       case isDecimal(ch):
+               tok = s.scanNumber(ch)
+       default:
+               switch ch {
+               case eof:
+                       tok = token.EOF
+               case '"':
+                       tok = token.STRING
+                       s.scanString()
+               case '#', '/':
+                       tok = token.COMMENT
+                       s.scanComment(ch)
+               case '.':
+                       tok = token.PERIOD
+                       ch = s.peek()
+                       if isDecimal(ch) {
+                               tok = token.FLOAT
+                               ch = s.scanMantissa(ch)
+                               ch = s.scanExponent(ch)
+                       }
+               case '<':
+                       tok = token.HEREDOC
+                       s.scanHeredoc()
+               case '[':
+                       tok = token.LBRACK
+               case ']':
+                       tok = token.RBRACK
+               case '{':
+                       tok = token.LBRACE
+               case '}':
+                       tok = token.RBRACE
+               case ',':
+                       tok = token.COMMA
+               case '=':
+                       tok = token.ASSIGN
+               case '+':
+                       tok = token.ADD
+               case '-':
+                       if isDecimal(s.peek()) {
+                               ch := s.next()
+                               tok = s.scanNumber(ch)
+                       } else {
+                               tok = token.SUB
+                       }
+               default:
+                       s.err("illegal char")
+               }
+       }
+
+       // finish token ending
+       s.tokEnd = s.srcPos.Offset
+
+       // create token literal
+       var tokenText string
+       if s.tokStart >= 0 {
+               tokenText = string(s.src[s.tokStart:s.tokEnd])
+       }
+       s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+       return token.Token{
+               Type: tok,
+               Pos:  s.tokPos,
+               Text: tokenText,
+       }
+}
+
+func (s *Scanner) scanComment(ch rune) {
+       // single line comments
+       if ch == '#' || (ch == '/' && s.peek() != '*') {
+               if ch == '/' && s.peek() != '/' {
+                       s.err("expected '/' for comment")
+                       return
+               }
+
+               ch = s.next()
+               for ch != '\n' && ch >= 0 && ch != eof {
+                       ch = s.next()
+               }
+               if ch != eof && ch >= 0 {
+                       s.unread()
+               }
+               return
+       }
+
+       // be sure we get the character after /*. This allows us to find comments
+       // that are not terminated
+       if ch == '/' {
+               s.next()
+               ch = s.next() // read character after "/*"
+       }
+
+       // look for /* - style comments
+       for {
+               if ch < 0 || ch == eof {
+                       s.err("comment not terminated")
+                       break
+               }
+
+               ch0 := ch
+               ch = s.next()
+               if ch0 == '*' && ch == '/' {
+                       break
+               }
+       }
+}
+
+// scanNumber scans an HCL number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+       if ch == '0' {
+               // check for hexadecimal, octal or float
+               ch = s.next()
+               if ch == 'x' || ch == 'X' {
+                       // hexadecimal
+                       ch = s.next()
+                       found := false
+                       for isHexadecimal(ch) {
+                               ch = s.next()
+                               found = true
+                       }
+
+                       if !found {
+                               s.err("illegal hexadecimal number")
+                       }
+
+                       if ch != eof {
+                               s.unread()
+                       }
+
+                       return token.NUMBER
+               }
+
+               // now it's either something like: 0421 (octal) or 0.1231 (float)
+               illegalOctal := false
+               for isDecimal(ch) {
+                       ch = s.next()
+                       if ch == '8' || ch == '9' {
+                               // this is just a possibility. For example 0159 is illegal, but
+                               // 0159.23 is valid. So we mark a possible illegal octal. If
+                               // the next character is not a period, we'll print the error.
+                               illegalOctal = true
+                       }
+               }
+
+               if ch == 'e' || ch == 'E' {
+                       ch = s.scanExponent(ch)
+                       return token.FLOAT
+               }
+
+               if ch == '.' {
+                       ch = s.scanFraction(ch)
+
+                       if ch == 'e' || ch == 'E' {
+                               ch = s.next()
+                               ch = s.scanExponent(ch)
+                       }
+                       return token.FLOAT
+               }
+
+               if illegalOctal {
+                       s.err("illegal octal number")
+               }
+
+               if ch != eof {
+                       s.unread()
+               }
+               return token.NUMBER
+       }
+
+       s.scanMantissa(ch)
+       ch = s.next() // seek forward
+       if ch == 'e' || ch == 'E' {
+               ch = s.scanExponent(ch)
+               return token.FLOAT
+       }
+
+       if ch == '.' {
+               ch = s.scanFraction(ch)
+               if ch == 'e' || ch == 'E' {
+                       ch = s.next()
+                       ch = s.scanExponent(ch)
+               }
+               return token.FLOAT
+       }
+
+       if ch != eof {
+               s.unread()
+       }
+       return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+       scanned := false
+       for isDecimal(ch) {
+               ch = s.next()
+               scanned = true
+       }
+
+       if scanned && ch != eof {
+               s.unread()
+       }
+       return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+       if ch == '.' {
+               ch = s.peek() // we peek just to see if we can move forward
+               ch = s.scanMantissa(ch)
+       }
+       return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+       if ch == 'e' || ch == 'E' {
+               ch = s.next()
+               if ch == '-' || ch == '+' {
+                       ch = s.next()
+               }
+               ch = s.scanMantissa(ch)
+       }
+       return ch
+}
+
+// scanHeredoc scans a heredoc string
+func (s *Scanner) scanHeredoc() {
+       // Scan the second '<' in example: '<<EOF'
+       if s.next() != '<' {
+               s.err("heredoc expected second '<', didn't see it")
+               return
+       }
+
+       // Get the original offset so we can read just the heredoc ident
+       offs := s.srcPos.Offset
+
+       // Scan the identifier
+       ch := s.next()
+
+       // Indented heredoc syntax
+       if ch == '-' {
+               ch = s.next()
+       }
+
+       for isLetter(ch) || isDigit(ch) {
+               ch = s.next()
+       }
+
+       // If we reached an EOF then that is not good
+       if ch == eof {
+               s.err("heredoc not terminated")
+               return
+       }
+
+       // Ignore the '\r' in Windows line endings
+       if ch == '\r' {
+               if s.peek() == '\n' {
+                       ch = s.next()
+               }
+       }
+
+       // If we didn't reach a newline then that is also not good
+       if ch != '\n' {
+               s.err("invalid characters in heredoc anchor")
+               return
+       }
+
+       // Read the identifier
+       identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
+       if len(identBytes) == 0 {
+               s.err("zero-length heredoc anchor")
+               return
+       }
+
+       var identRegexp *regexp.Regexp
+       if identBytes[0] == '-' {
+               identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes[1:]))
+       } else {
+               identRegexp = regexp.MustCompile(fmt.Sprintf(`[[:space:]]*%s\z`, identBytes))
+       }
+
+       // Read the actual string value
+       lineStart := s.srcPos.Offset
+       for {
+               ch := s.next()
+
+               // Special newline handling.
+               if ch == '\n' {
+                       // Math is fast, so we first compare the byte counts to see if we have a chance
+                       // of seeing the same identifier - if the length is less than the number of bytes
+                       // in the identifier, this cannot be a valid terminator.
+                       lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
+                       if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
+                               break
+                       }
+
+                       // Not an anchor match, record the start of a new line
+                       lineStart = s.srcPos.Offset
+               }
+
+               if ch == eof {
+                       s.err("heredoc not terminated")
+                       return
+               }
+       }
+
+       return
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+       braces := 0
+       for {
+               // '"' opening already consumed
+               // read character after quote
+               ch := s.next()
+
+               if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
+                       s.err("literal not terminated")
+                       return
+               }
+
+               if ch == '"' && braces == 0 {
+                       break
+               }
+
+               // If we're going into a ${} then we can ignore quotes for a while
+               if braces == 0 && ch == '$' && s.peek() == '{' {
+                       braces++
+                       s.next()
+               } else if braces > 0 && ch == '{' {
+                       braces++
+               }
+               if braces > 0 && ch == '}' {
+                       braces--
+               }
+
+               if ch == '\\' {
+                       s.scanEscape()
+               }
+       }
+
+       return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+       // http://en.cppreference.com/w/cpp/language/escape
+       ch := s.next() // read character after '/'
+       switch ch {
+       case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+               // nothing to do
+       case '0', '1', '2', '3', '4', '5', '6', '7':
+               // octal notation
+               ch = s.scanDigits(ch, 8, 3)
+       case 'x':
+               // hexadecimal notation
+               ch = s.scanDigits(s.next(), 16, 2)
+       case 'u':
+               // universal character name
+               ch = s.scanDigits(s.next(), 16, 4)
+       case 'U':
+               // universal character name
+               ch = s.scanDigits(s.next(), 16, 8)
+       default:
+               s.err("illegal char escape")
+       }
+       return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example, an
+// octal escape such as \123 results in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+       start := n
+       for n > 0 && digitVal(ch) < base {
+               ch = s.next()
+               if ch == eof {
+                       // If we see an EOF, we halt any more scanning of digits
+                       // immediately.
+                       break
+               }
+
+               n--
+       }
+       if n > 0 {
+               s.err("illegal char escape")
+       }
+
+       if n != start {
+               // we scanned all digits, put the last non digit char back,
+               // only if we read anything at all
+               s.unread()
+       }
+
+       return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+       offs := s.srcPos.Offset - s.lastCharLen
+       ch := s.next()
+       for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
+               ch = s.next()
+       }
+
+       if ch != eof {
+               s.unread() // we got identifier, put back latest char
+       }
+
+       return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+       pos.Offset = s.srcPos.Offset - s.lastCharLen
+       switch {
+       case s.srcPos.Column > 0:
+               // common case: last character was not a '\n'
+               pos.Line = s.srcPos.Line
+               pos.Column = s.srcPos.Column
+       case s.lastLineLen > 0:
+               // last character was a '\n'
+               // (we cannot be at the beginning of the source
+               // since we have called next() at least once)
+               pos.Line = s.srcPos.Line - 1
+               pos.Column = s.lastLineLen
+       default:
+               // at the beginning of the source
+               pos.Line = 1
+               pos.Column = 1
+       }
+       return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is set, the error is printed to os.Stderr by default
+func (s *Scanner) err(msg string) {
+       s.ErrorCount++
+       pos := s.recentPosition()
+
+       if s.Error != nil {
+               s.Error(pos, msg)
+               return
+       }
+
+       fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+       return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+       return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+       return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+       return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+       return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+       switch {
+       case '0' <= ch && ch <= '9':
+               return int(ch - '0')
+       case 'a' <= ch && ch <= 'f':
+               return int(ch - 'a' + 10)
+       case 'A' <= ch && ch <= 'F':
+               return int(ch - 'A' + 10)
+       }
+       return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go
new file mode 100644 (file)
index 0000000..5f981ea
--- /dev/null
@@ -0,0 +1,241 @@
+package strconv
+
+import (
+       "errors"
+       "unicode/utf8"
+)
+
+// ErrSyntax indicates that a value does not have the right syntax for the target type.
+var ErrSyntax = errors.New("invalid syntax")
+
+// Unquote interprets s as a double-quoted HCL string literal,
+// returning the string value that s quotes. Unlike Go's
+// strconv.Unquote, the contents of any ${ } interpolation
+// sequence are passed through verbatim rather than unescaped.
+func Unquote(s string) (t string, err error) {
+       n := len(s)
+       if n < 2 {
+               return "", ErrSyntax
+       }
+       quote := s[0]
+       if quote != s[n-1] {
+               return "", ErrSyntax
+       }
+       s = s[1 : n-1]
+
+       if quote != '"' {
+               return "", ErrSyntax
+       }
+       if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
+               return "", ErrSyntax
+       }
+
+       // Is it trivial?  Avoid allocation.
+       if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
+               switch quote {
+               case '"':
+                       return s, nil
+               case '\'':
+                       r, size := utf8.DecodeRuneInString(s)
+                       if size == len(s) && (r != utf8.RuneError || size != 1) {
+                               return s, nil
+                       }
+               }
+       }
+
+       var runeTmp [utf8.UTFMax]byte
+       buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
+       for len(s) > 0 {
+               // If we're starting a '${}' then let it through untouched.
+               // Specifically: we don't unquote any characters within the `${}`
+               // section.
+               if s[0] == '$' && len(s) > 1 && s[1] == '{' {
+                       buf = append(buf, '$', '{')
+                       s = s[2:]
+
+                       // Continue reading until we find the closing brace, copying as-is
+                       braces := 1
+                       for len(s) > 0 && braces > 0 {
+                               r, size := utf8.DecodeRuneInString(s)
+                               if r == utf8.RuneError {
+                                       return "", ErrSyntax
+                               }
+
+                               s = s[size:]
+
+                               n := utf8.EncodeRune(runeTmp[:], r)
+                               buf = append(buf, runeTmp[:n]...)
+
+                               switch r {
+                               case '{':
+                                       braces++
+                               case '}':
+                                       braces--
+                               }
+                       }
+                       if braces != 0 {
+                               return "", ErrSyntax
+                       }
+                       if len(s) == 0 {
+                               // If there's no string left, we're done!
+                               break
+                       } else {
+                               // If there's more left, we need to pop back up to the top of the loop
+                               // in case there's another interpolation in this string.
+                               continue
+                       }
+               }
+
+               if s[0] == '\n' {
+                       return "", ErrSyntax
+               }
+
+               c, multibyte, ss, err := unquoteChar(s, quote)
+               if err != nil {
+                       return "", err
+               }
+               s = ss
+               if c < utf8.RuneSelf || !multibyte {
+                       buf = append(buf, byte(c))
+               } else {
+                       n := utf8.EncodeRune(runeTmp[:], c)
+                       buf = append(buf, runeTmp[:n]...)
+               }
+               if quote == '\'' && len(s) != 0 {
+                       // single-quoted must be single character
+                       return "", ErrSyntax
+               }
+       }
+       return string(buf), nil
+}
+
+// contains reports whether the string contains the byte c.
+func contains(s string, c byte) bool {
+       for i := 0; i < len(s); i++ {
+               if s[i] == c {
+                       return true
+               }
+       }
+       return false
+}
+
+func unhex(b byte) (v rune, ok bool) {
+       c := rune(b)
+       switch {
+       case '0' <= c && c <= '9':
+               return c - '0', true
+       case 'a' <= c && c <= 'f':
+               return c - 'a' + 10, true
+       case 'A' <= c && c <= 'F':
+               return c - 'A' + 10, true
+       }
+       return
+}
+
+func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
+       // easy cases
+       switch c := s[0]; {
+       case c == quote && (quote == '\'' || quote == '"'):
+               err = ErrSyntax
+               return
+       case c >= utf8.RuneSelf:
+               r, size := utf8.DecodeRuneInString(s)
+               return r, true, s[size:], nil
+       case c != '\\':
+               return rune(s[0]), false, s[1:], nil
+       }
+
+       // hard case: c is backslash
+       if len(s) <= 1 {
+               err = ErrSyntax
+               return
+       }
+       c := s[1]
+       s = s[2:]
+
+       switch c {
+       case 'a':
+               value = '\a'
+       case 'b':
+               value = '\b'
+       case 'f':
+               value = '\f'
+       case 'n':
+               value = '\n'
+       case 'r':
+               value = '\r'
+       case 't':
+               value = '\t'
+       case 'v':
+               value = '\v'
+       case 'x', 'u', 'U':
+               n := 0
+               switch c {
+               case 'x':
+                       n = 2
+               case 'u':
+                       n = 4
+               case 'U':
+                       n = 8
+               }
+               var v rune
+               if len(s) < n {
+                       err = ErrSyntax
+                       return
+               }
+               for j := 0; j < n; j++ {
+                       x, ok := unhex(s[j])
+                       if !ok {
+                               err = ErrSyntax
+                               return
+                       }
+                       v = v<<4 | x
+               }
+               s = s[n:]
+               if c == 'x' {
+                       // single-byte string, possibly not UTF-8
+                       value = v
+                       break
+               }
+               if v > utf8.MaxRune {
+                       err = ErrSyntax
+                       return
+               }
+               value = v
+               multibyte = true
+       case '0', '1', '2', '3', '4', '5', '6', '7':
+               v := rune(c) - '0'
+               if len(s) < 2 {
+                       err = ErrSyntax
+                       return
+               }
+               for j := 0; j < 2; j++ { // one digit already; two more
+                       x := rune(s[j]) - '0'
+                       if x < 0 || x > 7 {
+                               err = ErrSyntax
+                               return
+                       }
+                       v = (v << 3) | x
+               }
+               s = s[2:]
+               if v > 255 {
+                       err = ErrSyntax
+                       return
+               }
+               value = v
+       case '\\':
+               value = '\\'
+       case '\'', '"':
+               if c != quote {
+                       err = ErrSyntax
+                       return
+               }
+               value = rune(c)
+       default:
+               err = ErrSyntax
+               return
+       }
+       tail = s
+       return
+}
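A quick sketch of the behavior above: ordinary escape sequences are decoded, while everything between ${ and its matching } is copied through verbatim so the interpolation language can parse it later.

package main

import (
	"fmt"

	hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
	// Ordinary escapes are decoded as usual.
	s, _ := hclstrconv.Unquote(`"a\tb"`)
	fmt.Printf("%q\n", s) // "a\tb"

	// Quotes and backslashes inside ${ } survive untouched.
	s, _ = hclstrconv.Unquote(`"${file("x\n.txt")}"`)
	fmt.Printf("%q\n", s) // "${file(\"x\\n.txt\")}"
}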
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/position.go b/vendor/github.com/hashicorp/hcl/hcl/token/position.go
new file mode 100644 (file)
index 0000000..59c1bb7
--- /dev/null
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Pos is valid if the line number is > 0.
+type Pos struct {
+       Filename string // filename, if any
+       Offset   int    // offset, starting at 0
+       Line     int    // line number, starting at 1
+       Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//     file:line:column    valid position with file name
+//     line:column         valid position without file name
+//     file                invalid position with file name
+//     -                   invalid position without file name
+func (p Pos) String() string {
+       s := p.Filename
+       if p.IsValid() {
+               if s != "" {
+                       s += ":"
+               }
+               s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+       }
+       if s == "" {
+               s = "-"
+       }
+       return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+       return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+       return u.Offset < p.Offset || u.Line < p.Line
+}
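The four String forms listed above, exercised directly in a small sketch:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	fmt.Println(token.Pos{Filename: "main.tf", Line: 3, Column: 7}) // main.tf:3:7
	fmt.Println(token.Pos{Line: 3, Column: 7})                      // 3:7
	fmt.Println(token.Pos{Filename: "main.tf"})                     // main.tf
	fmt.Println(token.Pos{})                                        // -
}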
diff --git a/vendor/github.com/hashicorp/hcl/hcl/token/token.go b/vendor/github.com/hashicorp/hcl/hcl/token/token.go
new file mode 100644 (file)
index 0000000..e37c066
--- /dev/null
@@ -0,0 +1,219 @@
+// Package token defines constants representing the lexical tokens for HCL
+// (HashiCorp Configuration Language)
+package token
+
+import (
+       "fmt"
+       "strconv"
+       "strings"
+
+       hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+       Type Type
+       Pos  Pos
+       Text string
+       JSON bool
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+       // Special tokens
+       ILLEGAL Type = iota
+       EOF
+       COMMENT
+
+       identifier_beg
+       IDENT // literals
+       literal_beg
+       NUMBER  // 12345
+       FLOAT   // 123.45
+       BOOL    // true,false
+       STRING  // "abc"
+       HEREDOC // <<FOO\nbar\nFOO
+       literal_end
+       identifier_end
+
+       operator_beg
+       LBRACK // [
+       LBRACE // {
+       COMMA  // ,
+       PERIOD // .
+
+       RBRACK // ]
+       RBRACE // }
+
+       ASSIGN // =
+       ADD    // +
+       SUB    // -
+       operator_end
+)
+
+var tokens = [...]string{
+       ILLEGAL: "ILLEGAL",
+
+       EOF:     "EOF",
+       COMMENT: "COMMENT",
+
+       IDENT:  "IDENT",
+       NUMBER: "NUMBER",
+       FLOAT:  "FLOAT",
+       BOOL:   "BOOL",
+       STRING: "STRING",
+
+       LBRACK:  "LBRACK",
+       LBRACE:  "LBRACE",
+       COMMA:   "COMMA",
+       PERIOD:  "PERIOD",
+       HEREDOC: "HEREDOC",
+
+       RBRACK: "RBRACK",
+       RBRACE: "RBRACE",
+
+       ASSIGN: "ASSIGN",
+       ADD:    "ADD",
+       SUB:    "SUB",
+}
+
+// String returns the string corresponding to the token type t.
+func (t Type) String() string {
+       s := ""
+       if 0 <= t && t < Type(len(tokens)) {
+               s = tokens[t]
+       }
+       if s == "" {
+               s = "token(" + strconv.Itoa(int(t)) + ")"
+       }
+       return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable representation of the token: its
+// position, type, and literal text.
+func (t Token) String() string {
+       return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// Value returns the properly typed value for this token. The type of
+// the returned interface{} is guaranteed based on the Type field.
+//
+// This can only be called for literal types. If it is called for any other
+// type, this will panic.
+func (t Token) Value() interface{} {
+       switch t.Type {
+       case BOOL:
+               if t.Text == "true" {
+                       return true
+               } else if t.Text == "false" {
+                       return false
+               }
+
+               panic("unknown bool value: " + t.Text)
+       case FLOAT:
+               v, err := strconv.ParseFloat(t.Text, 64)
+               if err != nil {
+                       panic(err)
+               }
+
+               return float64(v)
+       case NUMBER:
+               v, err := strconv.ParseInt(t.Text, 0, 64)
+               if err != nil {
+                       panic(err)
+               }
+
+               return int64(v)
+       case IDENT:
+               return t.Text
+       case HEREDOC:
+               return unindentHeredoc(t.Text)
+       case STRING:
+               // Determine the Unquote method to use. If it came from JSON,
+               // then we need to use the built-in unquote since we have to
+               // escape interpolations there.
+               f := hclstrconv.Unquote
+               if t.JSON {
+                       f = strconv.Unquote
+               }
+
+               // This case occurs if json null is used
+               if t.Text == "" {
+                       return ""
+               }
+
+               v, err := f(t.Text)
+               if err != nil {
+                       panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
+               }
+
+               return v
+       default:
+               panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
+       }
+}
+
+// unindentHeredoc returns the string content of a HEREDOC as-is when it was
+// started with <<. When it was started with <<-, the hanging indent is
+// removed, provided every line is at least as indented as the terminating
+// marker.
+func unindentHeredoc(heredoc string) string {
+       // We need to find the end of the marker
+       idx := strings.IndexByte(heredoc, '\n')
+       if idx == -1 {
+               panic("heredoc doesn't contain newline")
+       }
+
+       unindent := heredoc[2] == '-'
+
+       // We can optimize if the heredoc isn't marked for indentation
+       if !unindent {
+               return string(heredoc[idx+1 : len(heredoc)-idx+1])
+       }
+
+       // We need to unindent each line based on the indentation level of the marker
+       lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
+       whitespacePrefix := lines[len(lines)-1]
+
+       isIndented := true
+       for _, v := range lines {
+               if strings.HasPrefix(v, whitespacePrefix) {
+                       continue
+               }
+
+               isIndented = false
+               break
+       }
+
+       // If all lines are not at least as indented as the terminating mark, return the
+       // heredoc as is, but trim the leading space from the marker on the final line.
+       if !isIndented {
+               return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
+       }
+
+       unindentedLines := make([]string, len(lines))
+       for k, v := range lines {
+               if k == len(lines)-1 {
+                       unindentedLines[k] = ""
+                       break
+               }
+
+               unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
+       }
+
+       return strings.Join(unindentedLines, "\n")
+}
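A short sketch of Token.Value on literal tokens; note that NUMBER goes through ParseInt with base 0, so 0x and leading-0 prefixes are honored:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/token"
)

func main() {
	n := token.Token{Type: token.NUMBER, Text: "0x1F"}
	fmt.Println(n.Value()) // 31

	b := token.Token{Type: token.BOOL, Text: "true"}
	fmt.Println(b.Value()) // true

	s := token.Token{Type: token.STRING, Text: `"a\tb"`}
	fmt.Printf("%q\n", s.Value()) // "a\tb"
}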
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/flatten.go b/vendor/github.com/hashicorp/hcl/json/parser/flatten.go
new file mode 100644 (file)
index 0000000..f652d6f
--- /dev/null
@@ -0,0 +1,117 @@
+package parser
+
+import "github.com/hashicorp/hcl/hcl/ast"
+
+// flattenObjects takes an AST node, walks it, and flattens nested objects
+// into HCL-style multi-key object items where possible
+func flattenObjects(node ast.Node) {
+       ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
+               // We only care about lists, because this is what we modify
+               list, ok := n.(*ast.ObjectList)
+               if !ok {
+                       return n, true
+               }
+
+               // Rebuild the item list
+               items := make([]*ast.ObjectItem, 0, len(list.Items))
+               frontier := make([]*ast.ObjectItem, len(list.Items))
+               copy(frontier, list.Items)
+               for len(frontier) > 0 {
+                       // Pop the current item
+                       n := len(frontier)
+                       item := frontier[n-1]
+                       frontier = frontier[:n-1]
+
+                       switch v := item.Val.(type) {
+                       case *ast.ObjectType:
+                               items, frontier = flattenObjectType(v, item, items, frontier)
+                       case *ast.ListType:
+                               items, frontier = flattenListType(v, item, items, frontier)
+                       default:
+                               items = append(items, item)
+                       }
+               }
+
+               // Reverse the list since the frontier model runs things backwards
+               for i := len(items)/2 - 1; i >= 0; i-- {
+                       opp := len(items) - 1 - i
+                       items[i], items[opp] = items[opp], items[i]
+               }
+
+               // Done! Set the original items
+               list.Items = items
+               return n, true
+       })
+}
+
+func flattenListType(
+       ot *ast.ListType,
+       item *ast.ObjectItem,
+       items []*ast.ObjectItem,
+       frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+       // If the list is empty, keep the original list
+       if len(ot.List) == 0 {
+               items = append(items, item)
+               return items, frontier
+       }
+
+       // All the elements of this object must also be objects!
+       for _, subitem := range ot.List {
+               if _, ok := subitem.(*ast.ObjectType); !ok {
+                       items = append(items, item)
+                       return items, frontier
+               }
+       }
+
+       // Great! We have a match. Go through all the items and flatten
+       for _, elem := range ot.List {
+               // Add it to the frontier so that we can recurse
+               frontier = append(frontier, &ast.ObjectItem{
+                       Keys:        item.Keys,
+                       Assign:      item.Assign,
+                       Val:         elem,
+                       LeadComment: item.LeadComment,
+                       LineComment: item.LineComment,
+               })
+       }
+
+       return items, frontier
+}
+
+func flattenObjectType(
+       ot *ast.ObjectType,
+       item *ast.ObjectItem,
+       items []*ast.ObjectItem,
+       frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
+       // If the list has no items we do not have to flatten anything
+       if ot.List.Items == nil {
+               items = append(items, item)
+               return items, frontier
+       }
+
+       // All the elements of this object must also be objects!
+       for _, subitem := range ot.List.Items {
+               if _, ok := subitem.Val.(*ast.ObjectType); !ok {
+                       items = append(items, item)
+                       return items, frontier
+               }
+       }
+
+       // Great! We have a match. Go through all the items and flatten
+       for _, subitem := range ot.List.Items {
+               // Copy the new key
+               keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
+               copy(keys, item.Keys)
+               copy(keys[len(item.Keys):], subitem.Keys)
+
+               // Add it to the frontier so that we can recurse
+               frontier = append(frontier, &ast.ObjectItem{
+                       Keys:        keys,
+                       Assign:      item.Assign,
+                       Val:         subitem.Val,
+                       LeadComment: item.LeadComment,
+                       LineComment: item.LineComment,
+               })
+       }
+
+       return items, frontier
+}
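A sketch of the effect, using the Parse entry point defined in the parser file that follows: deeply nested JSON objects collapse into a single item whose key list matches what the native HCL block syntax would produce.

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	src := `{"resource": {"aws_instance": {"web": {"ami": "abc"}}}}`
	f, err := parser.Parse([]byte(src))
	if err != nil {
		panic(err)
	}

	// Prints: "resource" "aws_instance" "web"
	for _, item := range f.Node.(*ast.ObjectList).Items {
		for _, k := range item.Keys {
			fmt.Printf("%s ", k.Token.Text)
		}
		fmt.Println()
	}
}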
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
new file mode 100644 (file)
index 0000000..125a5f0
--- /dev/null
@@ -0,0 +1,313 @@
+package parser
+
+import (
+       "errors"
+       "fmt"
+
+       "github.com/hashicorp/hcl/hcl/ast"
+       hcltoken "github.com/hashicorp/hcl/hcl/token"
+       "github.com/hashicorp/hcl/json/scanner"
+       "github.com/hashicorp/hcl/json/token"
+)
+
+type Parser struct {
+       sc *scanner.Scanner
+
+       // Last read token
+       tok       token.Token
+       commaPrev token.Token
+
+       enableTrace bool
+       indent      int
+       n           int // buffer size (max = 1)
+}
+
+func newParser(src []byte) *Parser {
+       return &Parser{
+               sc: scanner.New(src),
+       }
+}
+
+// Parse parses the given source and returns the abstract syntax tree.
+func Parse(src []byte) (*ast.File, error) {
+       p := newParser(src)
+       return p.Parse()
+}
+
+var errEofToken = errors.New("EOF token found")
+
+// Parse parses the given source and returns the abstract syntax tree.
+func (p *Parser) Parse() (*ast.File, error) {
+       f := &ast.File{}
+       var err, scerr error
+       p.sc.Error = func(pos token.Pos, msg string) {
+               scerr = fmt.Errorf("%s: %s", pos, msg)
+       }
+
+       // The root must be an object in JSON
+       object, err := p.object()
+       if scerr != nil {
+               return nil, scerr
+       }
+       if err != nil {
+               return nil, err
+       }
+
+       // We make our final node an object list so it is more HCL compatible
+       f.Node = object.List
+
+       // Flatten it, which finds patterns and turns them into more HCL-like
+       // AST trees.
+       flattenObjects(f.Node)
+
+       return f, nil
+}
+
+func (p *Parser) objectList() (*ast.ObjectList, error) {
+       defer un(trace(p, "ParseObjectList"))
+       node := &ast.ObjectList{}
+
+       for {
+               n, err := p.objectItem()
+               if err == errEofToken {
+                       break // we are finished
+               }
+
+               // we don't return a nil node, because callers might want to use the
+               // already collected items.
+               if err != nil {
+                       return node, err
+               }
+
+               node.Add(n)
+
+               // Check for a followup comma. If it isn't a comma, then we're done
+               if tok := p.scan(); tok.Type != token.COMMA {
+                       break
+               }
+       }
+
+       return node, nil
+}
+
+// objectItem parses a single object item
+func (p *Parser) objectItem() (*ast.ObjectItem, error) {
+       defer un(trace(p, "ParseObjectItem"))
+
+       keys, err := p.objectKey()
+       if err != nil {
+               return nil, err
+       }
+
+       o := &ast.ObjectItem{
+               Keys: keys,
+       }
+
+       switch p.tok.Type {
+       case token.COLON:
+               pos := p.tok.Pos
+               o.Assign = hcltoken.Pos{
+                       Filename: pos.Filename,
+                       Offset:   pos.Offset,
+                       Line:     pos.Line,
+                       Column:   pos.Column,
+               }
+
+               o.Val, err = p.objectValue()
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return o, nil
+}
+
+// objectKey parses an object key and returns an ObjectKey AST
+func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
+       keyCount := 0
+       keys := make([]*ast.ObjectKey, 0)
+
+       for {
+               tok := p.scan()
+               switch tok.Type {
+               case token.EOF:
+                       return nil, errEofToken
+               case token.STRING:
+                       keyCount++
+                       keys = append(keys, &ast.ObjectKey{
+                               Token: p.tok.HCLToken(),
+                       })
+               case token.COLON:
+                       // If the key count is zero it means that we never got
+                       // an object key, i.e. `{ :`. This is a syntax error.
+                       if keyCount == 0 {
+                               return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+                       }
+
+                       // Done
+                       return keys, nil
+               case token.ILLEGAL:
+                       return nil, errors.New("illegal")
+               default:
+                       return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
+               }
+       }
+}
+
+// objectValue parses any type of value, such as number, bool, string, object,
+// or list.
+func (p *Parser) objectValue() (ast.Node, error) {
+       defer un(trace(p, "ParseObjectValue"))
+       tok := p.scan()
+
+       switch tok.Type {
+       case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
+               return p.literalType()
+       case token.LBRACE:
+               return p.objectType()
+       case token.LBRACK:
+               return p.listType()
+       case token.EOF:
+               return nil, errEofToken
+       }
+
+       return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
+}
+
+// object parses a JSON object, which must begin with a LBRACE.
+func (p *Parser) object() (*ast.ObjectType, error) {
+       defer un(trace(p, "ParseType"))
+       tok := p.scan()
+
+       switch tok.Type {
+       case token.LBRACE:
+               return p.objectType()
+       case token.EOF:
+               return nil, errEofToken
+       }
+
+       return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
+}
+
+// objectType parses an object type and returns an ObjectType AST
+func (p *Parser) objectType() (*ast.ObjectType, error) {
+       defer un(trace(p, "ParseObjectType"))
+
+       // we assume that the currently scanned token is a LBRACE
+       o := &ast.ObjectType{}
+
+       l, err := p.objectList()
+
+       // if we hit RBRACE, we are good to go (it means we parsed all items); if
+       // it's not a RBRACE, it's a syntax error and we just return it.
+       if err != nil && p.tok.Type != token.RBRACE {
+               return nil, err
+       }
+
+       o.List = l
+       return o, nil
+}
+
+// listType parses a list type and returns a ListType AST
+func (p *Parser) listType() (*ast.ListType, error) {
+       defer un(trace(p, "ParseListType"))
+
+       // we assume that the currently scanned token is a LBRACK
+       l := &ast.ListType{}
+
+       for {
+               tok := p.scan()
+               switch tok.Type {
+               case token.NUMBER, token.FLOAT, token.STRING:
+                       node, err := p.literalType()
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       l.Add(node)
+               case token.COMMA:
+                       continue
+               case token.LBRACE:
+                       node, err := p.objectType()
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       l.Add(node)
+               case token.BOOL:
+                       // TODO(arslan) should we support? not supported by HCL yet
+               case token.LBRACK:
+                       // TODO(arslan) should we support nested lists? Even though it's
+                       // written in README of HCL, it's not a part of the grammar
+                       // (not defined in parse.y)
+               case token.RBRACK:
+                       // finished
+                       return l, nil
+               default:
+                       return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
+               }
+
+       }
+}
+
+// literalType parses a literal type and returns a LiteralType AST
+func (p *Parser) literalType() (*ast.LiteralType, error) {
+       defer un(trace(p, "ParseLiteral"))
+
+       return &ast.LiteralType{
+               Token: p.tok.HCLToken(),
+       }, nil
+}
+
+// scan returns the next token from the underlying scanner. If a token has
+// been unscanned then read that instead.
+func (p *Parser) scan() token.Token {
+       // If we have a token on the buffer, then return it.
+       if p.n != 0 {
+               p.n = 0
+               return p.tok
+       }
+
+       p.tok = p.sc.Scan()
+       return p.tok
+}
+
+// unscan pushes the previously read token back onto the buffer.
+func (p *Parser) unscan() {
+       p.n = 1
+}
+
+// ----------------------------------------------------------------------------
+// Parsing support
+
+func (p *Parser) printTrace(a ...interface{}) {
+       if !p.enableTrace {
+               return
+       }
+
+       const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
+       const n = len(dots)
+       fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
+
+       i := 2 * p.indent
+       for i > n {
+               fmt.Print(dots)
+               i -= n
+       }
+       // i <= n
+       fmt.Print(dots[0:i])
+       fmt.Println(a...)
+}
+
+func trace(p *Parser, msg string) *Parser {
+       p.printTrace(msg, "(")
+       p.indent++
+       return p
+}
+
+// Usage pattern: defer un(trace(p, "..."))
+func un(p *Parser) {
+       p.indent--
+       p.printTrace(")")
+}
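A usage sketch for this parser; note that a scanner error reported through the Error callback wired up in Parse takes precedence over any parser error:

package main

import (
	"fmt"

	"github.com/hashicorp/hcl/json/parser"
)

func main() {
	if f, err := parser.Parse([]byte(`{"port": 8080, "tags": ["a", "b"]}`)); err == nil {
		fmt.Printf("root node: %T\n", f.Node) // *ast.ObjectList
	}

	// '!' is not legal JSON, so the scanner's Error callback fires and
	// Parse returns the scanner error rather than a parser error.
	if _, err := parser.Parse([]byte(`{"port": 8080!}`)); err != nil {
		fmt.Println("parse failed:", err)
	}
}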
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
new file mode 100644 (file)
index 0000000..dd5c72b
--- /dev/null
@@ -0,0 +1,451 @@
+package scanner
+
+import (
+       "bytes"
+       "fmt"
+       "os"
+       "unicode"
+       "unicode/utf8"
+
+       "github.com/hashicorp/hcl/json/token"
+)
+
+// eof represents a marker rune for the end of the reader.
+const eof = rune(0)
+
+// Scanner defines a lexical scanner
+type Scanner struct {
+       buf *bytes.Buffer // Source buffer for advancing and scanning
+       src []byte        // Source buffer for immutable access
+
+       // Source Position
+       srcPos  token.Pos // current position
+       prevPos token.Pos // previous position, used for peek() method
+
+       lastCharLen int // length of last character in bytes
+       lastLineLen int // length of last line in characters (for correct column reporting)
+
+       tokStart int // token text start position
+       tokEnd   int // token text end  position
+
+       // Error is called for each error encountered. If no Error
+       // function is set, the error is reported to os.Stderr.
+       Error func(pos token.Pos, msg string)
+
+       // ErrorCount is incremented by one for each error encountered.
+       ErrorCount int
+
+       // tokPos is the start position of most recently scanned token; set by
+       // Scan. The Filename field is always left untouched by the Scanner.  If
+       // an error is reported (via Error) and Position is invalid, the scanner is
+       // not inside a token.
+       tokPos token.Pos
+}
+
+// New creates and initializes a new instance of Scanner using src as
+// its source content.
+func New(src []byte) *Scanner {
+       // even though we accept a src, we read from an io.Reader compatible type
+       // (*bytes.Buffer), so in the future we could easily change to a streaming
+       // read.
+       b := bytes.NewBuffer(src)
+       s := &Scanner{
+               buf: b,
+               src: src,
+       }
+
+       // srcPosition always starts with 1
+       s.srcPos.Line = 1
+       return s
+}
+
+// next reads the next rune from the buffered reader. It returns rune(0), the
+// eof marker, if an error occurs (including io.EOF).
+func (s *Scanner) next() rune {
+       ch, size, err := s.buf.ReadRune()
+       if err != nil {
+               // advance for error reporting
+               s.srcPos.Column++
+               s.srcPos.Offset += size
+               s.lastCharLen = size
+               return eof
+       }
+
+       if ch == utf8.RuneError && size == 1 {
+               s.srcPos.Column++
+               s.srcPos.Offset += size
+               s.lastCharLen = size
+               s.err("illegal UTF-8 encoding")
+               return ch
+       }
+
+       // remember last position
+       s.prevPos = s.srcPos
+
+       s.srcPos.Column++
+       s.lastCharLen = size
+       s.srcPos.Offset += size
+
+       if ch == '\n' {
+               s.srcPos.Line++
+               s.lastLineLen = s.srcPos.Column
+               s.srcPos.Column = 0
+       }
+
+       // debug
+       // fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
+       return ch
+}
+
+// unread unreads the previously read rune and restores the source position
+func (s *Scanner) unread() {
+       if err := s.buf.UnreadRune(); err != nil {
+               panic(err) // this is a user fault; calling unread without a prior read is a bug
+       }
+       s.srcPos = s.prevPos // put back last position
+}
+
+// peek returns the next rune without advancing the reader.
+func (s *Scanner) peek() rune {
+       peek, _, err := s.buf.ReadRune()
+       if err != nil {
+               return eof
+       }
+
+       s.buf.UnreadRune()
+       return peek
+}
+
+// Scan scans the next token and returns the token.
+func (s *Scanner) Scan() token.Token {
+       ch := s.next()
+
+       // skip white space
+       for isWhitespace(ch) {
+               ch = s.next()
+       }
+
+       var tok token.Type
+
+       // token text markings
+       s.tokStart = s.srcPos.Offset - s.lastCharLen
+
+       // token position: the initial next() already moved the offset forward by
+       // one rune (by its byte size, to be exact), but we want the starting point
+       s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
+       if s.srcPos.Column > 0 {
+               // common case: last character was not a '\n'
+               s.tokPos.Line = s.srcPos.Line
+               s.tokPos.Column = s.srcPos.Column
+       } else {
+               // last character was a '\n'
+               // (we cannot be at the beginning of the source
+               // since we have called next() at least once)
+               s.tokPos.Line = s.srcPos.Line - 1
+               s.tokPos.Column = s.lastLineLen
+       }
+
+       switch {
+       case isLetter(ch):
+               lit := s.scanIdentifier()
+               if lit == "true" || lit == "false" {
+                       tok = token.BOOL
+               } else if lit == "null" {
+                       tok = token.NULL
+               } else {
+                       s.err("illegal char")
+               }
+       case isDecimal(ch):
+               tok = s.scanNumber(ch)
+       default:
+               switch ch {
+               case eof:
+                       tok = token.EOF
+               case '"':
+                       tok = token.STRING
+                       s.scanString()
+               case '.':
+                       tok = token.PERIOD
+                       ch = s.peek()
+                       if isDecimal(ch) {
+                               tok = token.FLOAT
+                               ch = s.scanMantissa(ch)
+                               ch = s.scanExponent(ch)
+                       }
+               case '[':
+                       tok = token.LBRACK
+               case ']':
+                       tok = token.RBRACK
+               case '{':
+                       tok = token.LBRACE
+               case '}':
+                       tok = token.RBRACE
+               case ',':
+                       tok = token.COMMA
+               case ':':
+                       tok = token.COLON
+               case '-':
+                       if isDecimal(s.peek()) {
+                               ch := s.next()
+                               tok = s.scanNumber(ch)
+                       } else {
+                               s.err("illegal char")
+                       }
+               default:
+                       s.err("illegal char: " + string(ch))
+               }
+       }
+
+       // finish token ending
+       s.tokEnd = s.srcPos.Offset
+
+       // create token literal
+       var tokenText string
+       if s.tokStart >= 0 {
+               tokenText = string(s.src[s.tokStart:s.tokEnd])
+       }
+       s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
+
+       return token.Token{
+               Type: tok,
+               Pos:  s.tokPos,
+               Text: tokenText,
+       }
+}
+
+// scanNumber scans a JSON number definition starting with the given rune
+func (s *Scanner) scanNumber(ch rune) token.Type {
+       zero := ch == '0'
+       pos := s.srcPos
+
+       s.scanMantissa(ch)
+       ch = s.next() // seek forward
+       if ch == 'e' || ch == 'E' {
+               ch = s.scanExponent(ch)
+               return token.FLOAT
+       }
+
+       if ch == '.' {
+               ch = s.scanFraction(ch)
+               if ch == 'e' || ch == 'E' {
+                       ch = s.next()
+                       ch = s.scanExponent(ch)
+               }
+               return token.FLOAT
+       }
+
+       if ch != eof {
+               s.unread()
+       }
+
+       // If we have a larger number and this is zero, error
+       if zero && pos != s.srcPos {
+               s.err("numbers cannot start with 0")
+       }
+
+       return token.NUMBER
+}
+
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
+// non-decimal rune. It's used to determine whether it's a fraction or exponent.
+func (s *Scanner) scanMantissa(ch rune) rune {
+       scanned := false
+       for isDecimal(ch) {
+               ch = s.next()
+               scanned = true
+       }
+
+       if scanned && ch != eof {
+               s.unread()
+       }
+       return ch
+}
+
+// scanFraction scans the fraction after the '.' rune
+func (s *Scanner) scanFraction(ch rune) rune {
+       if ch == '.' {
+               ch = s.peek() // we peek just to see if we can move forward
+               ch = s.scanMantissa(ch)
+       }
+       return ch
+}
+
+// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
+// rune.
+func (s *Scanner) scanExponent(ch rune) rune {
+       if ch == 'e' || ch == 'E' {
+               ch = s.next()
+               if ch == '-' || ch == '+' {
+                       ch = s.next()
+               }
+               ch = s.scanMantissa(ch)
+       }
+       return ch
+}
+
+// scanString scans a quoted string
+func (s *Scanner) scanString() {
+       braces := 0
+       for {
+               // '"' opening already consumed
+               // read character after quote
+               ch := s.next()
+
+               if ch == '\n' || ch < 0 || ch == eof {
+                       s.err("literal not terminated")
+                       return
+               }
+
+               if ch == '"' {
+                       break
+               }
+
+               // If we're going into a ${} then we can ignore quotes for a while
+               if braces == 0 && ch == '$' && s.peek() == '{' {
+                       braces++
+                       s.next()
+               } else if braces > 0 && ch == '{' {
+                       braces++
+               }
+               if braces > 0 && ch == '}' {
+                       braces--
+               }
+
+               if ch == '\\' {
+                       s.scanEscape()
+               }
+       }
+
+       return
+}
+
+// scanEscape scans an escape sequence
+func (s *Scanner) scanEscape() rune {
+       // http://en.cppreference.com/w/cpp/language/escape
+       ch := s.next() // read character after '/'
+       switch ch {
+       case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
+               // nothing to do
+       case '0', '1', '2', '3', '4', '5', '6', '7':
+               // octal notation
+               ch = s.scanDigits(ch, 8, 3)
+       case 'x':
+               // hexadecimal notation
+               ch = s.scanDigits(s.next(), 16, 2)
+       case 'u':
+               // universal character name
+               ch = s.scanDigits(s.next(), 16, 4)
+       case 'U':
+               // universal character name
+               ch = s.scanDigits(s.next(), 16, 8)
+       default:
+               s.err("illegal char escape")
+       }
+       return ch
+}
+
+// scanDigits scans a rune with the given base up to n times. For example, an
+// octal escape such as \123 results in scanDigits(ch, 8, 3)
+func (s *Scanner) scanDigits(ch rune, base, n int) rune {
+       for n > 0 && digitVal(ch) < base {
+               ch = s.next()
+               n--
+       }
+       if n > 0 {
+               s.err("illegal char escape")
+       }
+
+       // we scanned all digits, put the last non digit char back
+       s.unread()
+       return ch
+}
+
+// scanIdentifier scans an identifier and returns the literal string
+func (s *Scanner) scanIdentifier() string {
+       offs := s.srcPos.Offset - s.lastCharLen
+       ch := s.next()
+       for isLetter(ch) || isDigit(ch) || ch == '-' {
+               ch = s.next()
+       }
+
+       if ch != eof {
+               s.unread() // we got identifier, put back latest char
+       }
+
+       return string(s.src[offs:s.srcPos.Offset])
+}
+
+// recentPosition returns the position of the character immediately after the
+// character or token returned by the last call to Scan.
+func (s *Scanner) recentPosition() (pos token.Pos) {
+       pos.Offset = s.srcPos.Offset - s.lastCharLen
+       switch {
+       case s.srcPos.Column > 0:
+               // common case: last character was not a '\n'
+               pos.Line = s.srcPos.Line
+               pos.Column = s.srcPos.Column
+       case s.lastLineLen > 0:
+               // last character was a '\n'
+               // (we cannot be at the beginning of the source
+               // since we have called next() at least once)
+               pos.Line = s.srcPos.Line - 1
+               pos.Column = s.lastLineLen
+       default:
+               // at the beginning of the source
+               pos.Line = 1
+               pos.Column = 1
+       }
+       return
+}
+
+// err reports a scanning error to the s.Error function. If no Error function
+// is defined, the message is printed to os.Stderr by default.
+func (s *Scanner) err(msg string) {
+       s.ErrorCount++
+       pos := s.recentPosition()
+
+       if s.Error != nil {
+               s.Error(pos, msg)
+               return
+       }
+
+       fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
+}
+
+// isLetter returns true if the given rune is a letter
+func isLetter(ch rune) bool {
+       return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
+}
+
+// isDigit returns true if the given rune is a decimal digit
+func isDigit(ch rune) bool {
+       return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
+}
+
+// isDecimal returns true if the given rune is a decimal digit
+func isDecimal(ch rune) bool {
+       return '0' <= ch && ch <= '9'
+}
+
+// isHexadecimal returns true if the given rune is a hexadecimal digit
+func isHexadecimal(ch rune) bool {
+       return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
+}
+
+// isWhitespace returns true if the rune is a space, tab, newline or carriage return
+func isWhitespace(ch rune) bool {
+       return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
+}
+
+// digitVal returns the integer value of a given octal, decimal, or hexadecimal rune
+func digitVal(ch rune) int {
+       switch {
+       case '0' <= ch && ch <= '9':
+               return int(ch - '0')
+       case 'a' <= ch && ch <= 'f':
+               return int(ch - 'a' + 10)
+       case 'A' <= ch && ch <= 'F':
+               return int(ch - 'A' + 10)
+       }
+       return 16 // larger than any legal digit val
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/position.go b/vendor/github.com/hashicorp/hcl/json/token/position.go
new file mode 100644 (file)
index 0000000..59c1bb7
--- /dev/null
@@ -0,0 +1,46 @@
+package token
+
+import "fmt"
+
+// Pos describes an arbitrary source position
+// including the file, line, and column location.
+// A Position is valid if the line number is > 0.
+type Pos struct {
+       Filename string // filename, if any
+       Offset   int    // offset, starting at 0
+       Line     int    // line number, starting at 1
+       Column   int    // column number, starting at 1 (character count)
+}
+
+// IsValid returns true if the position is valid.
+func (p *Pos) IsValid() bool { return p.Line > 0 }
+
+// String returns a string in one of several forms:
+//
+//     file:line:column    valid position with file name
+//     line:column         valid position without file name
+//     file                invalid position with file name
+//     -                   invalid position without file name
+func (p Pos) String() string {
+       s := p.Filename
+       if p.IsValid() {
+               if s != "" {
+                       s += ":"
+               }
+               s += fmt.Sprintf("%d:%d", p.Line, p.Column)
+       }
+       if s == "" {
+               s = "-"
+       }
+       return s
+}
+
+// Before reports whether the position p is before u.
+func (p Pos) Before(u Pos) bool {
+       return u.Offset > p.Offset || u.Line > p.Line
+}
+
+// After reports whether the position p is after u.
+func (p Pos) After(u Pos) bool {
+       return u.Offset < p.Offset || u.Line < p.Line
+}
diff --git a/vendor/github.com/hashicorp/hcl/json/token/token.go b/vendor/github.com/hashicorp/hcl/json/token/token.go
new file mode 100644 (file)
index 0000000..95a0c3e
--- /dev/null
@@ -0,0 +1,118 @@
+package token
+
+import (
+       "fmt"
+       "strconv"
+
+       hcltoken "github.com/hashicorp/hcl/hcl/token"
+)
+
+// Token defines a single HCL token which can be obtained via the Scanner
+type Token struct {
+       Type Type
+       Pos  Pos
+       Text string
+}
+
+// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
+type Type int
+
+const (
+       // Special tokens
+       ILLEGAL Type = iota
+       EOF
+
+       identifier_beg
+       literal_beg
+       NUMBER // 12345
+       FLOAT  // 123.45
+       BOOL   // true,false
+       STRING // "abc"
+       NULL   // null
+       literal_end
+       identifier_end
+
+       operator_beg
+       LBRACK // [
+       LBRACE // {
+       COMMA  // ,
+       PERIOD // .
+       COLON  // :
+
+       RBRACK // ]
+       RBRACE // }
+
+       operator_end
+)
+
+var tokens = [...]string{
+       ILLEGAL: "ILLEGAL",
+
+       EOF: "EOF",
+
+       NUMBER: "NUMBER",
+       FLOAT:  "FLOAT",
+       BOOL:   "BOOL",
+       STRING: "STRING",
+       NULL:   "NULL",
+
+       LBRACK: "LBRACK",
+       LBRACE: "LBRACE",
+       COMMA:  "COMMA",
+       PERIOD: "PERIOD",
+       COLON:  "COLON",
+
+       RBRACK: "RBRACK",
+       RBRACE: "RBRACE",
+}
+
+// String returns the string corresponding to the token type t.
+func (t Type) String() string {
+       s := ""
+       if 0 <= t && t < Type(len(tokens)) {
+               s = tokens[t]
+       }
+       if s == "" {
+               s = "token(" + strconv.Itoa(int(t)) + ")"
+       }
+       return s
+}
+
+// IsIdentifier returns true for tokens corresponding to identifiers and basic
+// type literals; it returns false otherwise.
+func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
+
+// IsLiteral returns true for tokens corresponding to basic type literals; it
+// returns false otherwise.
+func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
+
+// IsOperator returns true for tokens corresponding to operators and
+// delimiters; it returns false otherwise.
+func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
+
+// String returns a human-readable representation of the token, combining
+// its position, type, and literal text.
+func (t Token) String() string {
+       return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
+}
+
+// HCLToken converts this token to an HCL token.
+//
+// The token type must be a literal type or this will panic.
+func (t Token) HCLToken() hcltoken.Token {
+       switch t.Type {
+       case BOOL:
+               return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
+       case FLOAT:
+               return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
+       case NULL:
+               return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
+       case NUMBER:
+               return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
+       case STRING:
+               return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
+       default:
+               panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
+       }
+}
diff --git a/vendor/github.com/hashicorp/hcl/lex.go b/vendor/github.com/hashicorp/hcl/lex.go
new file mode 100644 (file)
index 0000000..d9993c2
--- /dev/null
@@ -0,0 +1,38 @@
+package hcl
+
+import (
+       "unicode"
+       "unicode/utf8"
+)
+
+type lexModeValue byte
+
+const (
+       lexModeUnknown lexModeValue = iota
+       lexModeHcl
+       lexModeJson
+)
+
+// lexMode inspects the first non-whitespace rune to decide whether the
+// input should be parsed as JSON or as HCL.
+func lexMode(v []byte) lexModeValue {
+       var (
+               r      rune
+               w      int
+               offset int
+       )
+
+       for {
+               r, w = utf8.DecodeRune(v[offset:])
+               offset += w
+               if unicode.IsSpace(r) {
+                       continue
+               }
+               if r == '{' {
+                       return lexModeJson
+               }
+               break
+       }
+
+       return lexModeHcl
+}
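+
+// For example (a small sketch of the dispatch above):
+//
+//     lexMode([]byte(`{"foo": "bar"}`)) // lexModeJson
+//     lexMode([]byte(`foo = "bar"`))    // lexModeHcl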
diff --git a/vendor/github.com/hashicorp/hcl/parse.go b/vendor/github.com/hashicorp/hcl/parse.go
new file mode 100644 (file)
index 0000000..1fca53c
--- /dev/null
@@ -0,0 +1,39 @@
+package hcl
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/hcl/hcl/ast"
+       hclParser "github.com/hashicorp/hcl/hcl/parser"
+       jsonParser "github.com/hashicorp/hcl/json/parser"
+)
+
+// ParseBytes accepts a byte slice as input and returns the AST tree.
+//
+// Input can be either JSON or HCL
+func ParseBytes(in []byte) (*ast.File, error) {
+       return parse(in)
+}
+
+// ParseString accepts a string as input and returns the AST tree.
+func ParseString(input string) (*ast.File, error) {
+       return parse([]byte(input))
+}
+
+func parse(in []byte) (*ast.File, error) {
+       switch lexMode(in) {
+       case lexModeHcl:
+               return hclParser.Parse(in)
+       case lexModeJson:
+               return jsonParser.Parse(in)
+       }
+
+       return nil, fmt.Errorf("unknown config format")
+}
+
+// Parse parses the given input and returns the root object.
+//
+// The input format can be either HCL or JSON.
+func Parse(input string) (*ast.File, error) {
+       return parse([]byte(input))
+}
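+
+// A minimal usage sketch, assuming this package is imported as "hcl":
+//
+//     f, err := hcl.Parse(`foo = "bar"`)
+//     if err != nil {
+//             // handle the parse error
+//     }
+//     _ = f // f is an *ast.File for either HCL or JSON input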
diff --git a/vendor/github.com/hashicorp/hil/LICENSE b/vendor/github.com/hashicorp/hil/LICENSE
new file mode 100644 (file)
index 0000000..82b4de9
--- /dev/null
@@ -0,0 +1,353 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/hil/README.md b/vendor/github.com/hashicorp/hil/README.md
new file mode 100644 (file)
index 0000000..186ed25
--- /dev/null
@@ -0,0 +1,102 @@
+# HIL
+
+[![GoDoc](https://godoc.org/github.com/hashicorp/hil?status.png)](https://godoc.org/github.com/hashicorp/hil) [![Build Status](https://travis-ci.org/hashicorp/hil.svg?branch=master)](https://travis-ci.org/hashicorp/hil)
+
+HIL (HashiCorp Interpolation Language) is a lightweight embedded language used
+primarily for configuration interpolation. The goal of HIL is to make a simple
+language for interpolations in the various configurations of HashiCorp tools.
+
+HIL is built to interpolate any string, but is in use by HashiCorp primarily
+with [HCL](https://github.com/hashicorp/hcl). HCL is _not required_ in any
+way for use with HIL.
+
+HIL isn't meant to be a general purpose language. It was built for basic
+configuration interpolations. Therefore, you can't currently write functions,
+have conditionals, set intermediary variables, etc. within HIL itself. It is
+possible some of these may be added later but the right use case must exist.
+
+## Why?
+
+Many of our tools have support for something similar to templates, but
+within the configuration itself. The most prominent requirement was in
+[Terraform](https://github.com/hashicorp/terraform) where we wanted the
+configuration to be able to reference values from elsewhere in the
+configuration. Example:
+
+    foo = "hi ${var.world}"
+
+We originally used a full templating language for this, but found it
+was too heavyweight. Additionally, many full languages required bindings
+to C (and thus the usage of cgo) which we try to avoid to make cross-compilation
+easier. We then moved to very basic regular expression based
+string replacement, but found the need for basic arithmetic and function
+calls resulting in overly complex regular expressions.
+
+Ultimately, we wrote our own mini-language within Terraform itself. As
+we built other projects such as [Nomad](https://nomadproject.io) and
+[Otto](https://ottoproject.io), the need for basic interpolations arose
+again.
+
+Thus HIL was born. It is extracted from Terraform, cleaned up, and
+better tested for general purpose use.
+
+## Syntax
+
+For a complete grammar, please see the parser itself. A high-level overview
+of the syntax and grammar is given here.
+
+Code begins within `${` and `}`. Outside of this, text is treated
+literally. For example, `foo` is a valid HIL program that is just the
+string "foo", but `foo ${bar}` is an HIL program that is the string "foo "
+concatenated with the value of `bar`. For the remainder of the syntax
+docs, we'll assume you're within `${}`.
+
+  * Identifiers are any text in the format of `[a-zA-Z0-9-.]`. Example
+    identifiers: `foo`, `var.foo`, `foo-bar`.
+
+  * Strings are double quoted and can contain any UTF-8 characters.
+    Example: `"Hello, World"`
+
+  * Numbers are assumed to be base 10. If you prefix a number with 0x,
+    it is treated as a hexadecimal. If it is prefixed with 0, it is
+    treated as an octal. Numbers can be in scientific notation: "1e10".
+
+  * Unary `-` can be used for negative numbers. Example: `-10` or `-0.2`
+
+  * Boolean values: `true`, `false`
+  
+  * The following arithmetic operations are allowed: +, -, *, /, %. 
+
+  * Function calls are in the form of `name(arg1, arg2, ...)`. Example:
+    `add(1, 5)`. Arguments can be any valid HIL expression, example:
+    `add(1, var.foo)` or even nested function calls:
+    `add(1, get("some value"))`. 
+
+  * Within strings, further interpolations can be opened with `${}`.
+    Example: `"Hello ${nested}"`. A full example including the 
+    original `${}` (remember this list assumes were inside of one
+    already) could be: `foo ${func("hello ${var.foo}")}`. 
+
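+To tie the syntax above together, here is a minimal sketch of evaluating an
+interpolation from Go, assuming this package's `Parse` and `Eval` entry points
+and the `ast.BasicScope` scope implementation (imports omitted for brevity):
+
+    input := "hi ${var.world}"
+
+    tree, err := hil.Parse(input)
+    if err != nil {
+        log.Fatal(err)
+    }
+
+    config := &hil.EvalConfig{
+        GlobalScope: &ast.BasicScope{
+            VarMap: map[string]ast.Variable{
+                "var.world": {Type: ast.TypeString, Value: "world"},
+            },
+        },
+    }
+
+    result, err := hil.Eval(tree, config)
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Println(result.Value) // "hi world"
+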
+## Language Changes
+
+We've used this mini-language in Terraform for years. For backwards compatibility
+reasons, we're unlikely to make an incompatible change to the language but
+we're not currently making that promise, either.
+
+The internal API of this project may very well change as we evolve it
+to work with more of our projects. We recommend using some sort of dependency
+management solution with this package.
+
+## Future Changes
+
+The following changes are already planned to be made at some point:
+
+  * Richer types: lists, maps, etc.
+
+  * Convert to a more standard Go parser structure similar to HCL. This
+    will improve our error messaging as well as allow us to have automatic
+    formatting.
+
+  * Allow interpolations to result in more types than just a string. While
+    within the interpolation basic types are honored, the result is always
+    a string.
diff --git a/vendor/github.com/hashicorp/hil/appveyor.yml b/vendor/github.com/hashicorp/hil/appveyor.yml
new file mode 100644 (file)
index 0000000..feaf7a3
--- /dev/null
@@ -0,0 +1,18 @@
+version: "build-{branch}-{build}"
+image: Visual Studio 2015
+clone_folder: c:\gopath\src\github.com\hashicorp\hil
+environment:
+  GOPATH: c:\gopath
+init:
+  - git config --global core.autocrlf true
+install:
+- cmd: >-
+    echo %Path%
+
+    go version
+
+    go env
+
+    go get -d -v -t ./...
+build_script:
+- cmd: go test -v ./...
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic.go b/vendor/github.com/hashicorp/hil/ast/arithmetic.go
new file mode 100644 (file)
index 0000000..94dc24f
--- /dev/null
@@ -0,0 +1,43 @@
+package ast
+
+import (
+       "bytes"
+       "fmt"
+)
+
+// Arithmetic represents a node whose result is computed by applying the
+// operation Op to two or more operands in the order given.
+type Arithmetic struct {
+       Op    ArithmeticOp
+       Exprs []Node
+       Posx  Pos
+}
+
+func (n *Arithmetic) Accept(v Visitor) Node {
+       for i, expr := range n.Exprs {
+               n.Exprs[i] = expr.Accept(v)
+       }
+
+       return v(n)
+}
+
+func (n *Arithmetic) Pos() Pos {
+       return n.Posx
+}
+
+func (n *Arithmetic) GoString() string {
+       return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *Arithmetic) String() string {
+       var b bytes.Buffer
+       for _, expr := range n.Exprs {
+               b.WriteString(fmt.Sprintf("%s", expr))
+       }
+
+       return b.String()
+}
+
+func (n *Arithmetic) Type(Scope) (Type, error) {
+       return TypeInt, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go b/vendor/github.com/hashicorp/hil/ast/arithmetic_op.go
new file mode 100644 (file)
index 0000000..18880c6
--- /dev/null
@@ -0,0 +1,24 @@
+package ast
+
+// ArithmeticOp is the operation to use for the math.
+type ArithmeticOp int
+
+const (
+       ArithmeticOpInvalid ArithmeticOp = 0
+
+       ArithmeticOpAdd ArithmeticOp = iota
+       ArithmeticOpSub
+       ArithmeticOpMul
+       ArithmeticOpDiv
+       ArithmeticOpMod
+
+       ArithmeticOpLogicalAnd
+       ArithmeticOpLogicalOr
+
+       ArithmeticOpEqual
+       ArithmeticOpNotEqual
+       ArithmeticOpLessThan
+       ArithmeticOpLessThanOrEqual
+       ArithmeticOpGreaterThan
+       ArithmeticOpGreaterThanOrEqual
+)
diff --git a/vendor/github.com/hashicorp/hil/ast/ast.go b/vendor/github.com/hashicorp/hil/ast/ast.go
new file mode 100644 (file)
index 0000000..c6350f8
--- /dev/null
@@ -0,0 +1,99 @@
+package ast
+
+import (
+       "fmt"
+)
+
+// Node is the interface that all AST nodes must implement.
+type Node interface {
+       // Accept is called to dispatch to the visitors. It must return the
+       // resulting Node (which might be different in an AST transform).
+       Accept(Visitor) Node
+
+       // Pos returns the position of this node in some source.
+       Pos() Pos
+
+       // Type returns the type of this node for the given context.
+       Type(Scope) (Type, error)
+}
+
+// Pos is the starting position of an AST node
+type Pos struct {
+       Column, Line int    // Column/Line number, starting at 1
+       Filename     string // Optional source filename, if known
+}
+
+func (p Pos) String() string {
+       if p.Filename == "" {
+               return fmt.Sprintf("%d:%d", p.Line, p.Column)
+       } else {
+               return fmt.Sprintf("%s:%d:%d", p.Filename, p.Line, p.Column)
+       }
+}
+
+// InitPos is an initial position value. This should be used as
+// the starting position (presets the column and line to 1).
+var InitPos = Pos{Column: 1, Line: 1}
+
+// Visitors are just implementations of this function.
+//
+// The function must return the Node to replace this node with. "nil" is
+// _not_ a valid return value. If there is no replacement, the original node
+// should be returned. We build this replacement directly into the visitor
+// pattern since AST transformations are a common and useful tool and
+// building it into the AST itself makes it required for future Node
+// implementations and very easy to do.
+//
+// Note that this isn't a true implementation of the visitor pattern, which
+// generally requires proper type dispatch on the function. However,
+// implementing this basic visitor pattern style is still very useful even
+// if you have to type switch.
+type Visitor func(Node) Node
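+
+// As a sketch of that pattern (using node types declared elsewhere in this
+// package), a visitor that rewrites every access to the variable "foo" into
+// a string literal could look like:
+//
+//     var rewriteFoo Visitor = func(n Node) Node {
+//             if va, ok := n.(*VariableAccess); ok && va.Name == "foo" {
+//                     return &LiteralNode{Value: "bar", Typex: TypeString, Posx: va.Pos()}
+//             }
+//             return n
+//     }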
+
+//go:generate stringer -type=Type
+
+// Type is the type of any value.
+type Type uint32
+
+const (
+       TypeInvalid Type = 0
+       TypeAny     Type = 1 << iota
+       TypeBool
+       TypeString
+       TypeInt
+       TypeFloat
+       TypeList
+       TypeMap
+
+       // This is a special type used by Terraform to mark "unknown" values.
+       // It is impossible for this type to be introduced into your HIL programs
+       // unless you explicitly set a variable to this value. In that case,
+       // any operation including the variable will return "TypeUnknown" as the
+       // type.
+       TypeUnknown
+)
+
+func (t Type) Printable() string {
+       switch t {
+       case TypeInvalid:
+               return "invalid type"
+       case TypeAny:
+               return "any type"
+       case TypeBool:
+               return "type bool"
+       case TypeString:
+               return "type string"
+       case TypeInt:
+               return "type int"
+       case TypeFloat:
+               return "type float"
+       case TypeList:
+               return "type list"
+       case TypeMap:
+               return "type map"
+       case TypeUnknown:
+               return "type unknown"
+       default:
+               return "unknown type"
+       }
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/call.go b/vendor/github.com/hashicorp/hil/ast/call.go
new file mode 100644 (file)
index 0000000..0557011
--- /dev/null
@@ -0,0 +1,47 @@
+package ast
+
+import (
+       "fmt"
+       "strings"
+)
+
+// Call represents a function call.
+type Call struct {
+       Func string
+       Args []Node
+       Posx Pos
+}
+
+func (n *Call) Accept(v Visitor) Node {
+       for i, a := range n.Args {
+               n.Args[i] = a.Accept(v)
+       }
+
+       return v(n)
+}
+
+func (n *Call) Pos() Pos {
+       return n.Posx
+}
+
+func (n *Call) String() string {
+       args := make([]string, len(n.Args))
+       for i, arg := range n.Args {
+               args[i] = fmt.Sprintf("%s", arg)
+       }
+
+       return fmt.Sprintf("Call(%s, %s)", n.Func, strings.Join(args, ", "))
+}
+
+func (n *Call) Type(s Scope) (Type, error) {
+       f, ok := s.LookupFunc(n.Func)
+       if !ok {
+               return TypeInvalid, fmt.Errorf("unknown function: %s", n.Func)
+       }
+
+       return f.ReturnType, nil
+}
+
+func (n *Call) GoString() string {
+       return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/conditional.go b/vendor/github.com/hashicorp/hil/ast/conditional.go
new file mode 100644 (file)
index 0000000..be48f89
--- /dev/null
@@ -0,0 +1,36 @@
+package ast
+
+import (
+       "fmt"
+)
+
+type Conditional struct {
+       CondExpr  Node
+       TrueExpr  Node
+       FalseExpr Node
+       Posx      Pos
+}
+
+// Accept passes the given visitor to the child nodes in this order:
+// CondExpr, TrueExpr, FalseExpr. It then finally passes itself to the visitor.
+func (n *Conditional) Accept(v Visitor) Node {
+       n.CondExpr = n.CondExpr.Accept(v)
+       n.TrueExpr = n.TrueExpr.Accept(v)
+       n.FalseExpr = n.FalseExpr.Accept(v)
+
+       return v(n)
+}
+
+func (n *Conditional) Pos() Pos {
+       return n.Posx
+}
+
+func (n *Conditional) Type(Scope) (Type, error) {
+       // This is not actually a useful value; the type checker ignores
+       // this function when analyzing conditionals, just as with Arithmetic.
+       return TypeInt, nil
+}
+
+func (n *Conditional) GoString() string {
+       return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/index.go b/vendor/github.com/hashicorp/hil/ast/index.go
new file mode 100644 (file)
index 0000000..860c25f
--- /dev/null
@@ -0,0 +1,76 @@
+package ast
+
+import (
+       "fmt"
+       "strings"
+)
+
+// Index represents an indexing operation into another data structure
+type Index struct {
+       Target Node
+       Key    Node
+       Posx   Pos
+}
+
+func (n *Index) Accept(v Visitor) Node {
+       n.Target = n.Target.Accept(v)
+       n.Key = n.Key.Accept(v)
+       return v(n)
+}
+
+func (n *Index) Pos() Pos {
+       return n.Posx
+}
+
+func (n *Index) String() string {
+       return fmt.Sprintf("Index(%s, %s)", n.Target, n.Key)
+}
+
+func (n *Index) Type(s Scope) (Type, error) {
+       variableAccess, ok := n.Target.(*VariableAccess)
+       if !ok {
+               return TypeInvalid, fmt.Errorf("target is not a variable")
+       }
+
+       variable, ok := s.LookupVar(variableAccess.Name)
+       if !ok {
+               return TypeInvalid, fmt.Errorf("unknown variable accessed: %s", variableAccess.Name)
+       }
+
+       switch variable.Type {
+       case TypeList:
+               return n.typeList(variable, variableAccess.Name)
+       case TypeMap:
+               return n.typeMap(variable, variableAccess.Name)
+       default:
+               return TypeInvalid, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
+       }
+}
+
+func (n *Index) typeList(variable Variable, variableName string) (Type, error) {
+       // We assume type checking has already determined that this is a list
+       list := variable.Value.([]Variable)
+
+       return VariableListElementTypesAreHomogenous(variableName, list)
+}
+
+func (n *Index) typeMap(variable Variable, variableName string) (Type, error) {
+       // We assume type checking has already determined that this is a map
+       vmap := variable.Value.(map[string]Variable)
+
+       return VariableMapValueTypesAreHomogenous(variableName, vmap)
+}
+
+// reportTypes renders the set of types found as a comma-separated list.
+func reportTypes(typesFound map[Type]struct{}) string {
+       stringTypes := make([]string, len(typesFound))
+       i := 0
+       for k := range typesFound {
+               stringTypes[i] = k.String()
+               i++
+       }
+       return strings.Join(stringTypes, ", ")
+}
+
+func (n *Index) GoString() string {
+       return fmt.Sprintf("*%#v", *n)
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/literal.go b/vendor/github.com/hashicorp/hil/ast/literal.go
new file mode 100644 (file)
index 0000000..da6014f
--- /dev/null
@@ -0,0 +1,88 @@
+package ast
+
+import (
+       "fmt"
+       "reflect"
+)
+
+// LiteralNode represents a single literal value, such as "foo" or
+// 42 or 3.14159. Based on the Type, the Value can be safely cast.
+type LiteralNode struct {
+       Value interface{}
+       Typex Type
+       Posx  Pos
+}
+
+// NewLiteralNode returns a new literal node representing the given
+// literal Go value, which must correspond to one of the primitive types
+// supported by HIL. Lists and maps cannot currently be constructed via
+// this function.
+//
+// If an inappropriately-typed value is provided, this function will
+// return an error. The main intended use of this function is to produce
+// "synthetic" literals from constants in code, where the value type is
+// well known at compile time. To easily store these in global variables,
+// see also MustNewLiteralNode.
+func NewLiteralNode(value interface{}, pos Pos) (*LiteralNode, error) {
+       goType := reflect.TypeOf(value)
+       var hilType Type
+
+       switch goType.Kind() {
+       case reflect.Bool:
+               hilType = TypeBool
+       case reflect.Int:
+               hilType = TypeInt
+       case reflect.Float64:
+               hilType = TypeFloat
+       case reflect.String:
+               hilType = TypeString
+       default:
+               return nil, fmt.Errorf("unsupported literal node type: %T", value)
+       }
+
+       return &LiteralNode{
+               Value: value,
+               Typex: hilType,
+               Posx:  pos,
+       }, nil
+}
+
+// MustNewLiteralNode wraps NewLiteralNode and panics if an error is
+// returned, thus allowing valid literal nodes to be easily assigned to
+// global variables.
+func MustNewLiteralNode(value interface{}, pos Pos) *LiteralNode {
+       node, err := NewLiteralNode(value, pos)
+       if err != nil {
+               panic(err)
+       }
+       return node
+}
+
+func (n *LiteralNode) Accept(v Visitor) Node {
+       return v(n)
+}
+
+func (n *LiteralNode) Pos() Pos {
+       return n.Posx
+}
+
+func (n *LiteralNode) GoString() string {
+       return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *LiteralNode) String() string {
+       return fmt.Sprintf("Literal(%s, %v)", n.Typex, n.Value)
+}
+
+func (n *LiteralNode) Type(Scope) (Type, error) {
+       return n.Typex, nil
+}
+
+// IsUnknown returns true either if the node's value is itself unknown
+// or if it is a collection containing any unknown elements, deeply.
+func (n *LiteralNode) IsUnknown() bool {
+       return IsUnknown(Variable{
+               Type:  n.Typex,
+               Value: n.Value,
+       })
+}
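+
+// A short sketch of building a synthetic literal with the helpers above:
+//
+//     n := MustNewLiteralNode("hello", InitPos)
+//     // n.Typex == TypeString, n.Value == "hello", n.IsUnknown() == false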
diff --git a/vendor/github.com/hashicorp/hil/ast/output.go b/vendor/github.com/hashicorp/hil/ast/output.go
new file mode 100644 (file)
index 0000000..1e27f97
--- /dev/null
@@ -0,0 +1,78 @@
+package ast
+
+import (
+       "bytes"
+       "fmt"
+)
+
+// Output represents the root node of all interpolation evaluations. If the
+// output only has one expression which is either a TypeList or TypeMap, the
+// Output can be type-asserted to []interface{} or map[string]interface{}
+// respectively. Otherwise the Output evaluates as a string, and concatenates
+// the evaluation of each expression.
+type Output struct {
+       Exprs []Node
+       Posx  Pos
+}
+
+func (n *Output) Accept(v Visitor) Node {
+       for i, expr := range n.Exprs {
+               n.Exprs[i] = expr.Accept(v)
+       }
+
+       return v(n)
+}
+
+func (n *Output) Pos() Pos {
+       return n.Posx
+}
+
+func (n *Output) GoString() string {
+       return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *Output) String() string {
+       var b bytes.Buffer
+       for _, expr := range n.Exprs {
+               b.WriteString(fmt.Sprintf("%s", expr))
+       }
+
+       return b.String()
+}
+
+func (n *Output) Type(s Scope) (Type, error) {
+       // Special case no expressions for backward compatibility
+       if len(n.Exprs) == 0 {
+               return TypeString, nil
+       }
+
+       // Special case a single expression of types list or map
+       if len(n.Exprs) == 1 {
+               exprType, err := n.Exprs[0].Type(s)
+               if err != nil {
+                       return TypeInvalid, err
+               }
+               switch exprType {
+               case TypeList:
+                       return TypeList, nil
+               case TypeMap:
+                       return TypeMap, nil
+               }
+       }
+
+       // Otherwise ensure all our expressions are strings
+       for index, expr := range n.Exprs {
+               exprType, err := expr.Type(s)
+               if err != nil {
+                       return TypeInvalid, err
+               }
+               // We only look for things we know we can't coerce with an implicit conversion func
+               if exprType == TypeList || exprType == TypeMap {
+                       return TypeInvalid, fmt.Errorf(
+                               "multi-expression HIL outputs may only have string inputs: %d is type %s",
+                               index, exprType)
+               }
+       }
+
+       return TypeString, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/scope.go b/vendor/github.com/hashicorp/hil/ast/scope.go
new file mode 100644 (file)
index 0000000..7a975d9
--- /dev/null
@@ -0,0 +1,90 @@
+package ast
+
+import (
+       "fmt"
+       "reflect"
+)
+
+// Scope is the interface used to look up variables and functions while
+// evaluating. How these functions/variables are defined are up to the caller.
+type Scope interface {
+       LookupFunc(string) (Function, bool)
+       LookupVar(string) (Variable, bool)
+}
+
+// Variable is a variable value for execution given as input to the engine.
+// It records the value of a variable along with its type.
+type Variable struct {
+       Value interface{}
+       Type  Type
+}
+
+// NewVariable creates a new Variable for the given value. This will
+// attempt to infer the correct type. If it can't, an error will be returned.
+func NewVariable(v interface{}) (result Variable, err error) {
+       switch v := reflect.ValueOf(v); v.Kind() {
+       case reflect.String:
+               result.Type = TypeString
+       default:
+               err = fmt.Errorf("Unknown type: %s", v.Kind())
+       }
+
+       result.Value = v
+       return
+}
+
+// String implements Stringer on Variable, displaying the type and value
+// of the Variable.
+func (v Variable) String() string {
+       return fmt.Sprintf("{Variable (%s): %+v}", v.Type, v.Value)
+}
+
+// Function defines a function that can be executed by the engine.
+// The type checker will validate that the proper types will be called
+// to the callback.
+type Function struct {
+       // ArgTypes is the list of types in argument order. These are the
+       // required arguments.
+       //
+       // ReturnType is the type of the returned value. The Callback MUST
+       // return this type.
+       ArgTypes   []Type
+       ReturnType Type
+
+       // Variadic, if true, says that this function is variadic, meaning
+       // it takes a variable number of arguments. In this case, the
+       // VariadicType must be set.
+       Variadic     bool
+       VariadicType Type
+
+       // Callback is the function invoked when this Function is called. The
+       // argument types are guaranteed by the type checker to match the spec
+       // above. The length of args is strictly == len(ArgTypes) unless
+       // Variadic is true, in which case it is >= len(ArgTypes).
+       Callback func([]interface{}) (interface{}, error)
+}
+
+// BasicScope is a simple scope that looks up variables and functions
+// using a map.
+type BasicScope struct {
+       FuncMap map[string]Function
+       VarMap  map[string]Variable
+}
+
+func (s *BasicScope) LookupFunc(n string) (Function, bool) {
+       if s == nil {
+               return Function{}, false
+       }
+
+       v, ok := s.FuncMap[n]
+       return v, ok
+}
+
+func (s *BasicScope) LookupVar(n string) (Variable, bool) {
+       if s == nil {
+               return Variable{}, false
+       }
+
+       v, ok := s.VarMap[n]
+       return v, ok
+}
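+
+// A small sketch of wiring up a BasicScope with one variable and one
+// (hypothetical) "lower" function, using the types above:
+//
+//     scope := &BasicScope{
+//             VarMap: map[string]Variable{
+//                     "var.name": {Type: TypeString, Value: "World"},
+//             },
+//             FuncMap: map[string]Function{
+//                     "lower": {
+//                             ArgTypes:   []Type{TypeString},
+//                             ReturnType: TypeString,
+//                             Callback: func(args []interface{}) (interface{}, error) {
+//                                     return strings.ToLower(args[0].(string)), nil
+//                             },
+//                     },
+//             },
+//     }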
diff --git a/vendor/github.com/hashicorp/hil/ast/stack.go b/vendor/github.com/hashicorp/hil/ast/stack.go
new file mode 100644 (file)
index 0000000..bd2bc15
--- /dev/null
@@ -0,0 +1,25 @@
+package ast
+
+// Stack is a stack of Node.
+type Stack struct {
+       stack []Node
+}
+
+func (s *Stack) Len() int {
+       return len(s.stack)
+}
+
+func (s *Stack) Push(n Node) {
+       s.stack = append(s.stack, n)
+}
+
+func (s *Stack) Pop() Node {
+       x := s.stack[len(s.stack)-1]
+       s.stack[len(s.stack)-1] = nil
+       s.stack = s.stack[:len(s.stack)-1]
+       return x
+}
+
+func (s *Stack) Reset() {
+       s.stack = nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/type_string.go b/vendor/github.com/hashicorp/hil/ast/type_string.go
new file mode 100644 (file)
index 0000000..1f51a98
--- /dev/null
@@ -0,0 +1,54 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT
+
+package ast
+
+import "fmt"
+
+const (
+       _Type_name_0 = "TypeInvalid"
+       _Type_name_1 = "TypeAny"
+       _Type_name_2 = "TypeBool"
+       _Type_name_3 = "TypeString"
+       _Type_name_4 = "TypeInt"
+       _Type_name_5 = "TypeFloat"
+       _Type_name_6 = "TypeList"
+       _Type_name_7 = "TypeMap"
+       _Type_name_8 = "TypeUnknown"
+)
+
+var (
+       _Type_index_0 = [...]uint8{0, 11}
+       _Type_index_1 = [...]uint8{0, 7}
+       _Type_index_2 = [...]uint8{0, 8}
+       _Type_index_3 = [...]uint8{0, 10}
+       _Type_index_4 = [...]uint8{0, 7}
+       _Type_index_5 = [...]uint8{0, 9}
+       _Type_index_6 = [...]uint8{0, 8}
+       _Type_index_7 = [...]uint8{0, 7}
+       _Type_index_8 = [...]uint8{0, 11}
+)
+
+func (i Type) String() string {
+       switch {
+       case i == 0:
+               return _Type_name_0
+       case i == 2:
+               return _Type_name_1
+       case i == 4:
+               return _Type_name_2
+       case i == 8:
+               return _Type_name_3
+       case i == 16:
+               return _Type_name_4
+       case i == 32:
+               return _Type_name_5
+       case i == 64:
+               return _Type_name_6
+       case i == 128:
+               return _Type_name_7
+       case i == 256:
+               return _Type_name_8
+       default:
+               return fmt.Sprintf("Type(%d)", i)
+       }
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/unknown.go b/vendor/github.com/hashicorp/hil/ast/unknown.go
new file mode 100644 (file)
index 0000000..d6ddaec
--- /dev/null
@@ -0,0 +1,30 @@
+package ast
+
+// IsUnknown reports whether a variable is unknown or contains any value
+// that is unknown. This will recurse into lists and maps and so on.
+func IsUnknown(v Variable) bool {
+       // If it is unknown itself, return true
+       if v.Type == TypeUnknown {
+               return true
+       }
+
+       // If it is a container type, check the values
+       switch v.Type {
+       case TypeList:
+               for _, el := range v.Value.([]Variable) {
+                       if IsUnknown(el) {
+                               return true
+                       }
+               }
+       case TypeMap:
+               for _, el := range v.Value.(map[string]Variable) {
+                       if IsUnknown(el) {
+                               return true
+                       }
+               }
+       default:
+       }
+
+       // Not a container type, or it survived the above checks
+       return false
+}
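+
+// For example (sketch): a list containing an unknown element is itself
+// considered unknown:
+//
+//     v := Variable{Type: TypeList, Value: []Variable{{Type: TypeUnknown}}}
+//     IsUnknown(v) // true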
diff --git a/vendor/github.com/hashicorp/hil/ast/variable_access.go b/vendor/github.com/hashicorp/hil/ast/variable_access.go
new file mode 100644 (file)
index 0000000..4c1362d
--- /dev/null
@@ -0,0 +1,36 @@
+package ast
+
+import (
+       "fmt"
+)
+
+// VariableAccess represents a variable access.
+type VariableAccess struct {
+       Name string
+       Posx Pos
+}
+
+func (n *VariableAccess) Accept(v Visitor) Node {
+       return v(n)
+}
+
+func (n *VariableAccess) Pos() Pos {
+       return n.Posx
+}
+
+func (n *VariableAccess) GoString() string {
+       return fmt.Sprintf("*%#v", *n)
+}
+
+func (n *VariableAccess) String() string {
+       return fmt.Sprintf("Variable(%s)", n.Name)
+}
+
+func (n *VariableAccess) Type(s Scope) (Type, error) {
+       v, ok := s.LookupVar(n.Name)
+       if !ok {
+               return TypeInvalid, fmt.Errorf("unknown variable: %s", n.Name)
+       }
+
+       return v.Type, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/ast/variables_helper.go b/vendor/github.com/hashicorp/hil/ast/variables_helper.go
new file mode 100644 (file)
index 0000000..06bd18d
--- /dev/null
@@ -0,0 +1,63 @@
+package ast
+
+import "fmt"
+
+func VariableListElementTypesAreHomogenous(variableName string, list []Variable) (Type, error) {
+       if len(list) == 0 {
+               return TypeInvalid, fmt.Errorf("list %q does not have any elements so cannot determine type.", variableName)
+       }
+
+       elemType := TypeUnknown
+       for _, v := range list {
+               if v.Type == TypeUnknown {
+                       continue
+               }
+
+               if elemType == TypeUnknown {
+                       elemType = v.Type
+                       continue
+               }
+
+               if v.Type != elemType {
+                       return TypeInvalid, fmt.Errorf(
+                               "list %q does not have homogenous types. found %s and then %s",
+                               variableName,
+                               elemType, v.Type,
+                       )
+               }
+
+               elemType = v.Type
+       }
+
+       return elemType, nil
+}
+
+func VariableMapValueTypesAreHomogenous(variableName string, vmap map[string]Variable) (Type, error) {
+       if len(vmap) == 0 {
+               return TypeInvalid, fmt.Errorf("map %q does not have any elements so cannot determine type.", variableName)
+       }
+
+       elemType := TypeUnknown
+       for _, v := range vmap {
+               if v.Type == TypeUnknown {
+                       continue
+               }
+
+               if elemType == TypeUnknown {
+                       elemType = v.Type
+                       continue
+               }
+
+               if v.Type != elemType {
+                       return TypeInvalid, fmt.Errorf(
+                               "map %q does not have homogenous types. found %s and then %s",
+                               variableName,
+                               elemType, v.Type,
+                       )
+               }
+
+               elemType = v.Type
+       }
+
+       return elemType, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/builtins.go b/vendor/github.com/hashicorp/hil/builtins.go
new file mode 100644 (file)
index 0000000..909c788
--- /dev/null
@@ -0,0 +1,331 @@
+package hil
+
+import (
+       "errors"
+       "strconv"
+
+       "github.com/hashicorp/hil/ast"
+)
+
+// NOTE: All builtins are tested in engine_test.go
+
+func registerBuiltins(scope *ast.BasicScope) *ast.BasicScope {
+       if scope == nil {
+               scope = new(ast.BasicScope)
+       }
+       if scope.FuncMap == nil {
+               scope.FuncMap = make(map[string]ast.Function)
+       }
+
+       // Implicit conversions
+       scope.FuncMap["__builtin_BoolToString"] = builtinBoolToString()
+       scope.FuncMap["__builtin_FloatToInt"] = builtinFloatToInt()
+       scope.FuncMap["__builtin_FloatToString"] = builtinFloatToString()
+       scope.FuncMap["__builtin_IntToFloat"] = builtinIntToFloat()
+       scope.FuncMap["__builtin_IntToString"] = builtinIntToString()
+       scope.FuncMap["__builtin_StringToInt"] = builtinStringToInt()
+       scope.FuncMap["__builtin_StringToFloat"] = builtinStringToFloat()
+       scope.FuncMap["__builtin_StringToBool"] = builtinStringToBool()
+
+       // Math operations
+       scope.FuncMap["__builtin_IntMath"] = builtinIntMath()
+       scope.FuncMap["__builtin_FloatMath"] = builtinFloatMath()
+       scope.FuncMap["__builtin_BoolCompare"] = builtinBoolCompare()
+       scope.FuncMap["__builtin_FloatCompare"] = builtinFloatCompare()
+       scope.FuncMap["__builtin_IntCompare"] = builtinIntCompare()
+       scope.FuncMap["__builtin_StringCompare"] = builtinStringCompare()
+       scope.FuncMap["__builtin_Logical"] = builtinLogical()
+       return scope
+}
+
+func builtinFloatMath() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeInt},
+               Variadic:     true,
+               VariadicType: ast.TypeFloat,
+               ReturnType:   ast.TypeFloat,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       op := args[0].(ast.ArithmeticOp)
+                       result := args[1].(float64)
+                       for _, raw := range args[2:] {
+                               arg := raw.(float64)
+                               switch op {
+                               case ast.ArithmeticOpAdd:
+                                       result += arg
+                               case ast.ArithmeticOpSub:
+                                       result -= arg
+                               case ast.ArithmeticOpMul:
+                                       result *= arg
+                               case ast.ArithmeticOpDiv:
+                                       result /= arg
+                               }
+                       }
+
+                       return result, nil
+               },
+       }
+}
+
+func builtinIntMath() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeInt},
+               Variadic:     true,
+               VariadicType: ast.TypeInt,
+               ReturnType:   ast.TypeInt,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       op := args[0].(ast.ArithmeticOp)
+                       result := args[1].(int)
+                       for _, raw := range args[2:] {
+                               arg := raw.(int)
+                               switch op {
+                               case ast.ArithmeticOpAdd:
+                                       result += arg
+                               case ast.ArithmeticOpSub:
+                                       result -= arg
+                               case ast.ArithmeticOpMul:
+                                       result *= arg
+                               case ast.ArithmeticOpDiv:
+                                       if arg == 0 {
+                                               return nil, errors.New("divide by zero")
+                                       }
+
+                                       result /= arg
+                               case ast.ArithmeticOpMod:
+                                       if arg == 0 {
+                                               return nil, errors.New("divide by zero")
+                                       }
+
+                                       result = result % arg
+                               }
+                       }
+
+                       return result, nil
+               },
+       }
+}
+
+func builtinBoolCompare() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeInt, ast.TypeBool, ast.TypeBool},
+               Variadic:   false,
+               ReturnType: ast.TypeBool,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       op := args[0].(ast.ArithmeticOp)
+                       lhs := args[1].(bool)
+                       rhs := args[2].(bool)
+
+                       switch op {
+                       case ast.ArithmeticOpEqual:
+                               return lhs == rhs, nil
+                       case ast.ArithmeticOpNotEqual:
+                               return lhs != rhs, nil
+                       default:
+                               return nil, errors.New("invalid comparison operation")
+                       }
+               },
+       }
+}
+
+func builtinFloatCompare() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeInt, ast.TypeFloat, ast.TypeFloat},
+               Variadic:   false,
+               ReturnType: ast.TypeBool,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       op := args[0].(ast.ArithmeticOp)
+                       lhs := args[1].(float64)
+                       rhs := args[2].(float64)
+
+                       switch op {
+                       case ast.ArithmeticOpEqual:
+                               return lhs == rhs, nil
+                       case ast.ArithmeticOpNotEqual:
+                               return lhs != rhs, nil
+                       case ast.ArithmeticOpLessThan:
+                               return lhs < rhs, nil
+                       case ast.ArithmeticOpLessThanOrEqual:
+                               return lhs <= rhs, nil
+                       case ast.ArithmeticOpGreaterThan:
+                               return lhs > rhs, nil
+                       case ast.ArithmeticOpGreaterThanOrEqual:
+                               return lhs >= rhs, nil
+                       default:
+                               return nil, errors.New("invalid comparison operation")
+                       }
+               },
+       }
+}
+
+func builtinIntCompare() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeInt, ast.TypeInt, ast.TypeInt},
+               Variadic:   false,
+               ReturnType: ast.TypeBool,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       op := args[0].(ast.ArithmeticOp)
+                       lhs := args[1].(int)
+                       rhs := args[2].(int)
+
+                       switch op {
+                       case ast.ArithmeticOpEqual:
+                               return lhs == rhs, nil
+                       case ast.ArithmeticOpNotEqual:
+                               return lhs != rhs, nil
+                       case ast.ArithmeticOpLessThan:
+                               return lhs < rhs, nil
+                       case ast.ArithmeticOpLessThanOrEqual:
+                               return lhs <= rhs, nil
+                       case ast.ArithmeticOpGreaterThan:
+                               return lhs > rhs, nil
+                       case ast.ArithmeticOpGreaterThanOrEqual:
+                               return lhs >= rhs, nil
+                       default:
+                               return nil, errors.New("invalid comparison operation")
+                       }
+               },
+       }
+}
+
+func builtinStringCompare() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeInt, ast.TypeString, ast.TypeString},
+               Variadic:   false,
+               ReturnType: ast.TypeBool,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       op := args[0].(ast.ArithmeticOp)
+                       lhs := args[1].(string)
+                       rhs := args[2].(string)
+
+                       switch op {
+                       case ast.ArithmeticOpEqual:
+                               return lhs == rhs, nil
+                       case ast.ArithmeticOpNotEqual:
+                               return lhs != rhs, nil
+                       default:
+                               return nil, errors.New("invalid comparison operation")
+                       }
+               },
+       }
+}
+
+func builtinLogical() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeInt},
+               Variadic:     true,
+               VariadicType: ast.TypeBool,
+               ReturnType:   ast.TypeBool,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       op := args[0].(ast.ArithmeticOp)
+                       result := args[1].(bool)
+                       for _, raw := range args[2:] {
+                               arg := raw.(bool)
+                               switch op {
+                               case ast.ArithmeticOpLogicalOr:
+                                       result = result || arg
+                               case ast.ArithmeticOpLogicalAnd:
+                                       result = result && arg
+                               default:
+                                       return nil, errors.New("invalid logical operator")
+                               }
+                       }
+
+                       return result, nil
+               },
+       }
+}
+
+func builtinFloatToInt() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeFloat},
+               ReturnType: ast.TypeInt,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return int(args[0].(float64)), nil
+               },
+       }
+}
+
+func builtinFloatToString() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeFloat},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return strconv.FormatFloat(
+                               args[0].(float64), 'g', -1, 64), nil
+               },
+       }
+}
+
+func builtinIntToFloat() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeInt},
+               ReturnType: ast.TypeFloat,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return float64(args[0].(int)), nil
+               },
+       }
+}
+
+func builtinIntToString() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeInt},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return strconv.FormatInt(int64(args[0].(int)), 10), nil
+               },
+       }
+}
+
+func builtinStringToInt() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeInt,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       v, err := strconv.ParseInt(args[0].(string), 0, 0)
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       return int(v), nil
+               },
+       }
+}
+
+func builtinStringToFloat() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeFloat,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       v, err := strconv.ParseFloat(args[0].(string), 64)
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       return v, nil
+               },
+       }
+}
+
+func builtinBoolToString() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeBool},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return strconv.FormatBool(args[0].(bool)), nil
+               },
+       }
+}
+
+func builtinStringToBool() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeBool,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       v, err := strconv.ParseBool(args[0].(string))
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       return v, nil
+               },
+       }
+}
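All of these builtins share one calling convention: the operator arrives as the first (TypeInt) argument and the operands follow. A minimal package-internal sketch (builtinIntMath is unexported, so this only compiles inside package hil):

f := builtinIntMath()
out, err := f.Callback([]interface{}{ast.ArithmeticOpAdd, 1, 2, 3})
// out == 6, err == nil. With ast.ArithmeticOpDiv or ast.ArithmeticOpMod and a
// zero operand, the callback returns the "divide by zero" error instead.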
diff --git a/vendor/github.com/hashicorp/hil/check_identifier.go b/vendor/github.com/hashicorp/hil/check_identifier.go
new file mode 100644 (file)
index 0000000..474f505
--- /dev/null
@@ -0,0 +1,88 @@
+package hil
+
+import (
+       "fmt"
+       "sync"
+
+       "github.com/hashicorp/hil/ast"
+)
+
+// IdentifierCheck is a SemanticChecker that checks that all identifiers
+// resolve properly and that the right number of arguments is passed
+// to functions.
+type IdentifierCheck struct {
+       Scope ast.Scope
+
+       err  error
+       lock sync.Mutex
+}
+
+func (c *IdentifierCheck) Visit(root ast.Node) error {
+       c.lock.Lock()
+       defer c.lock.Unlock()
+       defer c.reset()
+       root.Accept(c.visit)
+       return c.err
+}
+
+func (c *IdentifierCheck) visit(raw ast.Node) ast.Node {
+       if c.err != nil {
+               return raw
+       }
+
+       switch n := raw.(type) {
+       case *ast.Call:
+               c.visitCall(n)
+       case *ast.VariableAccess:
+               c.visitVariableAccess(n)
+       case *ast.Output:
+               // Ignore
+       case *ast.LiteralNode:
+               // Ignore
+       default:
+               // Ignore
+       }
+
+       // We never do replacement with this visitor
+       return raw
+}
+
+func (c *IdentifierCheck) visitCall(n *ast.Call) {
+       // Look up the function in the map
+       function, ok := c.Scope.LookupFunc(n.Func)
+       if !ok {
+               c.createErr(n, fmt.Sprintf("unknown function called: %s", n.Func))
+               return
+       }
+
+       // Break up the args into what is variadic and what is required
+       args := n.Args
+       if function.Variadic && len(args) > len(function.ArgTypes) {
+               args = n.Args[:len(function.ArgTypes)]
+       }
+
+       // Verify the number of arguments
+       if len(args) != len(function.ArgTypes) {
+               c.createErr(n, fmt.Sprintf(
+                       "%s: expected %d arguments, got %d",
+                       n.Func, len(function.ArgTypes), len(n.Args)))
+               return
+       }
+}
+
+func (c *IdentifierCheck) visitVariableAccess(n *ast.VariableAccess) {
+       // Look up the variable in the map
+       if _, ok := c.Scope.LookupVar(n.Name); !ok {
+               c.createErr(n, fmt.Sprintf(
+                       "unknown variable accessed: %s", n.Name))
+               return
+       }
+}
+
+func (c *IdentifierCheck) createErr(n ast.Node, str string) {
+       c.err = fmt.Errorf("%s: %s", n.Pos(), str)
+}
+
+func (c *IdentifierCheck) reset() {
+       c.err = nil
+}
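A sketch of running the check over a parsed tree, assuming hil.Parse from this package's parser; the expression is hypothetical:

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	tree, err := hil.Parse(`${format("%s", var.name)}`)
	if err != nil {
		panic(err)
	}

	// An empty scope knows neither the function nor the variable.
	check := &hil.IdentifierCheck{Scope: &ast.BasicScope{}}
	fmt.Println(check.Visit(tree)) // reports the unknown identifier with its position
}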
diff --git a/vendor/github.com/hashicorp/hil/check_types.go b/vendor/github.com/hashicorp/hil/check_types.go
new file mode 100644 (file)
index 0000000..f16da39
--- /dev/null
@@ -0,0 +1,668 @@
+package hil
+
+import (
+       "fmt"
+       "sync"
+
+       "github.com/hashicorp/hil/ast"
+)
+
+// TypeCheck implements ast.Visitor for type checking an AST tree.
+// It requires some configuration to look up the type of nodes.
+//
+// It also, optionally, will not raise a type error but will instead
+// insert implicit type conversions for specific types, as specified by
+// the Implicit field. Note that this is organizationally weird to put
+// into this structure, but we'd rather do that than duplicate the type
+// checking logic multiple times.
+type TypeCheck struct {
+       Scope ast.Scope
+
+       // Implicit is a map of implicit type conversions that we can do,
+       // and that shouldn't error. The key of the first map is the from type,
+       // the key of the second map is the to type, and the final string
+       // value is the function to call (which must be registered in the Scope).
+       Implicit map[ast.Type]map[ast.Type]string
+
+       // Stack of types. This shouldn't be used directly except by implementations
+       // of TypeCheckNode.
+       Stack []ast.Type
+
+       err  error
+       lock sync.Mutex
+}
+
+// TypeCheckNode is the interface that must be implemented by any
+// ast.Node that wants to support type-checking. If the type checker
+// encounters a node that doesn't implement this, it will error.
+type TypeCheckNode interface {
+       TypeCheck(*TypeCheck) (ast.Node, error)
+}
+
+func (v *TypeCheck) Visit(root ast.Node) error {
+       v.lock.Lock()
+       defer v.lock.Unlock()
+       defer v.reset()
+       root.Accept(v.visit)
+
+       // If the resulting type is unknown, then just let the whole thing go.
+       if v.err == errExitUnknown {
+               v.err = nil
+       }
+
+       return v.err
+}
+
+func (v *TypeCheck) visit(raw ast.Node) ast.Node {
+       if v.err != nil {
+               return raw
+       }
+
+       var result ast.Node
+       var err error
+       switch n := raw.(type) {
+       case *ast.Arithmetic:
+               tc := &typeCheckArithmetic{n}
+               result, err = tc.TypeCheck(v)
+       case *ast.Call:
+               tc := &typeCheckCall{n}
+               result, err = tc.TypeCheck(v)
+       case *ast.Conditional:
+               tc := &typeCheckConditional{n}
+               result, err = tc.TypeCheck(v)
+       case *ast.Index:
+               tc := &typeCheckIndex{n}
+               result, err = tc.TypeCheck(v)
+       case *ast.Output:
+               tc := &typeCheckOutput{n}
+               result, err = tc.TypeCheck(v)
+       case *ast.LiteralNode:
+               tc := &typeCheckLiteral{n}
+               result, err = tc.TypeCheck(v)
+       case *ast.VariableAccess:
+               tc := &typeCheckVariableAccess{n}
+               result, err = tc.TypeCheck(v)
+       default:
+               tc, ok := raw.(TypeCheckNode)
+               if !ok {
+                       err = fmt.Errorf("unknown node for type check: %#v", raw)
+                       break
+               }
+
+               result, err = tc.TypeCheck(v)
+       }
+
+       if err != nil {
+               pos := raw.Pos()
+               v.err = fmt.Errorf("At column %d, line %d: %s",
+                       pos.Column, pos.Line, err)
+       }
+
+       return result
+}
+
+type typeCheckArithmetic struct {
+       n *ast.Arithmetic
+}
+
+func (tc *typeCheckArithmetic) TypeCheck(v *TypeCheck) (ast.Node, error) {
+       // The arguments are on the stack in reverse order, so pop them off.
+       exprs := make([]ast.Type, len(tc.n.Exprs))
+       for i := range tc.n.Exprs {
+               exprs[len(tc.n.Exprs)-1-i] = v.StackPop()
+       }
+
+       // If any operand is unknown then our result is automatically unknown
+       for _, ty := range exprs {
+               if ty == ast.TypeUnknown {
+                       v.StackPush(ast.TypeUnknown)
+                       return tc.n, nil
+               }
+       }
+
+       switch tc.n.Op {
+       case ast.ArithmeticOpLogicalAnd, ast.ArithmeticOpLogicalOr:
+               return tc.checkLogical(v, exprs)
+       case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual,
+               ast.ArithmeticOpLessThan, ast.ArithmeticOpGreaterThan,
+               ast.ArithmeticOpGreaterThanOrEqual, ast.ArithmeticOpLessThanOrEqual:
+               return tc.checkComparison(v, exprs)
+       default:
+               return tc.checkNumeric(v, exprs)
+       }
+
+}
+
+func (tc *typeCheckArithmetic) checkNumeric(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+       // Determine the resulting type we want. We do this by going over
+       // every expression until we find one with a type we recognize.
+       // We do this because the first expr might be a string ("var.foo")
+       // and we need to know what to implicitly convert it to.
+       mathFunc := "__builtin_IntMath"
+       mathType := ast.TypeInt
+       for _, v := range exprs {
+               // We assume int math but if we find ANY float, the entire
+               // expression turns into floating point math.
+               if v == ast.TypeFloat {
+                       mathFunc = "__builtin_FloatMath"
+                       mathType = v
+                       break
+               }
+       }
+
+       // Verify the args
+       for i, arg := range exprs {
+               if arg != mathType {
+                       cn := v.ImplicitConversion(exprs[i], mathType, tc.n.Exprs[i])
+                       if cn != nil {
+                               tc.n.Exprs[i] = cn
+                               continue
+                       }
+
+                       return nil, fmt.Errorf(
+                               "operand %d should be %s, got %s",
+                               i+1, mathType, arg)
+               }
+       }
+
+       // Modulo doesn't work for floats
+       if mathType == ast.TypeFloat && tc.n.Op == ast.ArithmeticOpMod {
+               return nil, fmt.Errorf("modulo cannot be used with floats")
+       }
+
+       // Return type
+       v.StackPush(mathType)
+
+       // Replace our node with a call to the proper function. This isn't
+       // type checked but we already verified types.
+       args := make([]ast.Node, len(tc.n.Exprs)+1)
+       args[0] = &ast.LiteralNode{
+               Value: tc.n.Op,
+               Typex: ast.TypeInt,
+               Posx:  tc.n.Pos(),
+       }
+       copy(args[1:], tc.n.Exprs)
+       return &ast.Call{
+               Func: mathFunc,
+               Args: args,
+               Posx: tc.n.Pos(),
+       }, nil
+}
+
+func (tc *typeCheckArithmetic) checkComparison(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+       if len(exprs) != 2 {
+               // This should never happen, because the parser never produces
+               // nodes that violate this.
+               return nil, fmt.Errorf(
+                       "comparison operators must have exactly two operands",
+               )
+       }
+
+       // The first operand always dictates the type for a comparison.
+       compareFunc := ""
+       compareType := exprs[0]
+       switch compareType {
+       case ast.TypeBool:
+               compareFunc = "__builtin_BoolCompare"
+       case ast.TypeFloat:
+               compareFunc = "__builtin_FloatCompare"
+       case ast.TypeInt:
+               compareFunc = "__builtin_IntCompare"
+       case ast.TypeString:
+               compareFunc = "__builtin_StringCompare"
+       default:
+               return nil, fmt.Errorf(
+                       "comparison operators apply only to bool, float, int, and string",
+               )
+       }
+
+       // For non-equality comparisons, we will do implicit conversions to
+       // numeric types if possible. In this case, we need to go through and
+       // determine the type of comparison we're doing to enable the implicit
+       // conversion.
+       if tc.n.Op != ast.ArithmeticOpEqual && tc.n.Op != ast.ArithmeticOpNotEqual {
+               compareFunc = "__builtin_IntCompare"
+               compareType = ast.TypeInt
+               for _, expr := range exprs {
+                       if expr == ast.TypeFloat {
+                               compareFunc = "__builtin_FloatCompare"
+                               compareType = ast.TypeFloat
+                               break
+                       }
+               }
+       }
+
+       // Verify (and possibly, convert) the args
+       for i, arg := range exprs {
+               if arg != compareType {
+                       cn := v.ImplicitConversion(exprs[i], compareType, tc.n.Exprs[i])
+                       if cn != nil {
+                               tc.n.Exprs[i] = cn
+                               continue
+                       }
+
+                       return nil, fmt.Errorf(
+                               "operand %d should be %s, got %s",
+                               i+1, compareType, arg,
+                       )
+               }
+       }
+
+       // Only ints and floats can have the <, >, <= and >= operators applied
+       switch tc.n.Op {
+       case ast.ArithmeticOpEqual, ast.ArithmeticOpNotEqual:
+               // anything goes
+       default:
+               switch compareType {
+               case ast.TypeFloat, ast.TypeInt:
+                       // fine
+               default:
+                       return nil, fmt.Errorf(
+                               "<, >, <= and >= may apply only to int and float values",
+                       )
+               }
+       }
+
+       // Comparison operators always return bool
+       v.StackPush(ast.TypeBool)
+
+       // Replace our node with a call to the proper function. This isn't
+       // type checked but we already verified types.
+       args := make([]ast.Node, len(tc.n.Exprs)+1)
+       args[0] = &ast.LiteralNode{
+               Value: tc.n.Op,
+               Typex: ast.TypeInt,
+               Posx:  tc.n.Pos(),
+       }
+       copy(args[1:], tc.n.Exprs)
+       return &ast.Call{
+               Func: compareFunc,
+               Args: args,
+               Posx: tc.n.Pos(),
+       }, nil
+}
+
+func (tc *typeCheckArithmetic) checkLogical(v *TypeCheck, exprs []ast.Type) (ast.Node, error) {
+       for i, t := range exprs {
+               if t != ast.TypeBool {
+                       cn := v.ImplicitConversion(t, ast.TypeBool, tc.n.Exprs[i])
+                       if cn == nil {
+                               return nil, fmt.Errorf(
+                                       "logical operators require boolean operands, not %s",
+                                       t,
+                               )
+                       }
+                       tc.n.Exprs[i] = cn
+               }
+       }
+
+       // Return type is always boolean
+       v.StackPush(ast.TypeBool)
+
+       // Arithmetic nodes are replaced with a call to a built-in function
+       args := make([]ast.Node, len(tc.n.Exprs)+1)
+       args[0] = &ast.LiteralNode{
+               Value: tc.n.Op,
+               Typex: ast.TypeInt,
+               Posx:  tc.n.Pos(),
+       }
+       copy(args[1:], tc.n.Exprs)
+       return &ast.Call{
+               Func: "__builtin_Logical",
+               Args: args,
+               Posx: tc.n.Pos(),
+       }, nil
+}
+
+type typeCheckCall struct {
+       n *ast.Call
+}
+
+func (tc *typeCheckCall) TypeCheck(v *TypeCheck) (ast.Node, error) {
+       // Look up the function in the map
+       function, ok := v.Scope.LookupFunc(tc.n.Func)
+       if !ok {
+               return nil, fmt.Errorf("unknown function called: %s", tc.n.Func)
+       }
+
+       // The arguments are on the stack in reverse order, so pop them off.
+       args := make([]ast.Type, len(tc.n.Args))
+       for i := range tc.n.Args {
+               args[len(tc.n.Args)-1-i] = v.StackPop()
+       }
+
+       // Verify the args
+       for i, expected := range function.ArgTypes {
+               if expected == ast.TypeAny {
+                       continue
+               }
+
+               if args[i] == ast.TypeUnknown {
+                       v.StackPush(ast.TypeUnknown)
+                       return tc.n, nil
+               }
+
+               if args[i] != expected {
+                       cn := v.ImplicitConversion(args[i], expected, tc.n.Args[i])
+                       if cn != nil {
+                               tc.n.Args[i] = cn
+                               continue
+                       }
+
+                       return nil, fmt.Errorf(
+                               "%s: argument %d should be %s, got %s",
+                               tc.n.Func, i+1, expected.Printable(), args[i].Printable())
+               }
+       }
+
+       // If we're variadic, then verify the types there
+       if function.Variadic && function.VariadicType != ast.TypeAny {
+               args = args[len(function.ArgTypes):]
+               for i, t := range args {
+                       if t == ast.TypeUnknown {
+                               v.StackPush(ast.TypeUnknown)
+                               return tc.n, nil
+                       }
+
+                       if t != function.VariadicType {
+                               realI := i + len(function.ArgTypes)
+                               cn := v.ImplicitConversion(
+                                       t, function.VariadicType, tc.n.Args[realI])
+                               if cn != nil {
+                                       tc.n.Args[realI] = cn
+                                       continue
+                               }
+
+                               return nil, fmt.Errorf(
+                                       "%s: argument %d should be %s, got %s",
+                                       tc.n.Func, realI+1,
+                                       function.VariadicType.Printable(), t.Printable())
+                       }
+               }
+       }
+
+       // Return type
+       v.StackPush(function.ReturnType)
+
+       return tc.n, nil
+}
+
+type typeCheckConditional struct {
+       n *ast.Conditional
+}
+
+func (tc *typeCheckConditional) TypeCheck(v *TypeCheck) (ast.Node, error) {
+       // On the stack we have the types of the condition, true and false
+       // expressions, but they are in reverse order.
+       falseType := v.StackPop()
+       trueType := v.StackPop()
+       condType := v.StackPop()
+
+       if condType == ast.TypeUnknown {
+               v.StackPush(ast.TypeUnknown)
+               return tc.n, nil
+       }
+
+       if condType != ast.TypeBool {
+               cn := v.ImplicitConversion(condType, ast.TypeBool, tc.n.CondExpr)
+               if cn == nil {
+                       return nil, fmt.Errorf(
+                               "condition must be type bool, not %s", condType.Printable(),
+                       )
+               }
+               tc.n.CondExpr = cn
+       }
+
+       // The types of the true and false expression must match
+       if trueType != falseType && trueType != ast.TypeUnknown && falseType != ast.TypeUnknown {
+
+               // Since passing around stringified versions of other types is
+               // common, we pragmatically allow the false expression to dictate
+               // the result type when the true expression is a string.
+               if trueType == ast.TypeString {
+                       cn := v.ImplicitConversion(trueType, falseType, tc.n.TrueExpr)
+                       if cn == nil {
+                               return nil, fmt.Errorf(
+                                       "true and false expression types must match; have %s and %s",
+                                       trueType.Printable(), falseType.Printable(),
+                               )
+                       }
+                       tc.n.TrueExpr = cn
+                       trueType = falseType
+               } else {
+                       cn := v.ImplicitConversion(falseType, trueType, tc.n.FalseExpr)
+                       if cn == nil {
+                               return nil, fmt.Errorf(
+                                       "true and false expression types must match; have %s and %s",
+                                       trueType.Printable(), falseType.Printable(),
+                               )
+                       }
+                       tc.n.FalseExpr = cn
+                       falseType = trueType
+               }
+       }
+
+       // Currently list and map types cannot be used, because we cannot
+       // generally assert that their element types are consistent.
+       // Such support might be added later, either by improving the type
+       // system or restricting usage to only variable and literal expressions,
+       // but for now this is simply prohibited because it doesn't seem to
+       // be a common enough case to be worth the complexity.
+       switch trueType {
+       case ast.TypeList:
+               return nil, fmt.Errorf(
+                       "conditional operator cannot be used with list values",
+               )
+       case ast.TypeMap:
+               return nil, fmt.Errorf(
+                       "conditional operator cannot be used with map values",
+               )
+       }
+
+       // Result type (guaranteed to also match falseType due to the above)
+       if trueType == ast.TypeUnknown {
+               // falseType may also be unknown, but that's okay because two
+               // unknowns means our result is unknown anyway.
+               v.StackPush(falseType)
+       } else {
+               v.StackPush(trueType)
+       }
+
+       return tc.n, nil
+}
+
+type typeCheckOutput struct {
+       n *ast.Output
+}
+
+func (tc *typeCheckOutput) TypeCheck(v *TypeCheck) (ast.Node, error) {
+       n := tc.n
+       types := make([]ast.Type, len(n.Exprs))
+       for i := range n.Exprs {
+               types[len(n.Exprs)-1-i] = v.StackPop()
+       }
+
+       for _, ty := range types {
+               if ty == ast.TypeUnknown {
+                       v.StackPush(ast.TypeUnknown)
+                       return tc.n, nil
+               }
+       }
+
+       // If there is only one argument and it is a list, we evaluate to a list
+       // If there is only one argument and it is a list or a map, we evaluate to that type
+               switch t := types[0]; t {
+               case ast.TypeList:
+                       fallthrough
+               case ast.TypeMap:
+                       v.StackPush(t)
+                       return n, nil
+               }
+       }
+
+       // Otherwise, all concat args must be strings, so validate that
+       resultType := ast.TypeString
+       for i, t := range types {
+
+               if t == ast.TypeUnknown {
+                       resultType = ast.TypeUnknown
+                       continue
+               }
+
+               if t != ast.TypeString {
+                       cn := v.ImplicitConversion(t, ast.TypeString, n.Exprs[i])
+                       if cn != nil {
+                               n.Exprs[i] = cn
+                               continue
+                       }
+
+                       return nil, fmt.Errorf(
+                               "output of an HIL expression must be a string, or a single list (argument %d is %s)", i+1, t)
+               }
+       }
+
+       // This always results in type string, unless there are unknowns
+       v.StackPush(resultType)
+
+       return n, nil
+}
+
+type typeCheckLiteral struct {
+       n *ast.LiteralNode
+}
+
+func (tc *typeCheckLiteral) TypeCheck(v *TypeCheck) (ast.Node, error) {
+       v.StackPush(tc.n.Typex)
+       return tc.n, nil
+}
+
+type typeCheckVariableAccess struct {
+       n *ast.VariableAccess
+}
+
+func (tc *typeCheckVariableAccess) TypeCheck(v *TypeCheck) (ast.Node, error) {
+       // Look up the variable in the map
+       variable, ok := v.Scope.LookupVar(tc.n.Name)
+       if !ok {
+               return nil, fmt.Errorf(
+                       "unknown variable accessed: %s", tc.n.Name)
+       }
+
+       // Add the type to the stack
+       v.StackPush(variable.Type)
+
+       return tc.n, nil
+}
+
+type typeCheckIndex struct {
+       n *ast.Index
+}
+
+func (tc *typeCheckIndex) TypeCheck(v *TypeCheck) (ast.Node, error) {
+       keyType := v.StackPop()
+       targetType := v.StackPop()
+
+       if keyType == ast.TypeUnknown || targetType == ast.TypeUnknown {
+               v.StackPush(ast.TypeUnknown)
+               return tc.n, nil
+       }
+
+       // Ensure we have a VariableAccess as the target
+       varAccessNode, ok := tc.n.Target.(*ast.VariableAccess)
+       if !ok {
+               return nil, fmt.Errorf(
+                       "target of an index must be a VariableAccess node, was %T", tc.n.Target)
+       }
+
+       // Get the variable
+       variable, ok := v.Scope.LookupVar(varAccessNode.Name)
+       if !ok {
+               return nil, fmt.Errorf(
+                       "unknown variable accessed: %s", varAccessNode.Name)
+       }
+
+       switch targetType {
+       case ast.TypeList:
+               if keyType != ast.TypeInt {
+                       tc.n.Key = v.ImplicitConversion(keyType, ast.TypeInt, tc.n.Key)
+                       if tc.n.Key == nil {
+                               return nil, fmt.Errorf(
+                                       "key of an index must be an int, was %s", keyType)
+                       }
+               }
+
+               valType, err := ast.VariableListElementTypesAreHomogenous(
+                       varAccessNode.Name, variable.Value.([]ast.Variable))
+               if err != nil {
+                       return tc.n, err
+               }
+
+               v.StackPush(valType)
+               return tc.n, nil
+       case ast.TypeMap:
+               if keyType != ast.TypeString {
+                       tc.n.Key = v.ImplicitConversion(keyType, ast.TypeString, tc.n.Key)
+                       if tc.n.Key == nil {
+                               return nil, fmt.Errorf(
+                                       "key of an index must be a string, was %s", keyType)
+                       }
+               }
+
+               valType, err := ast.VariableMapValueTypesAreHomogenous(
+                       varAccessNode.Name, variable.Value.(map[string]ast.Variable))
+               if err != nil {
+                       return tc.n, err
+               }
+
+               v.StackPush(valType)
+               return tc.n, nil
+       default:
+               return nil, fmt.Errorf("invalid index operation into non-indexable type: %s", variable.Type)
+       }
+}
+
+func (v *TypeCheck) ImplicitConversion(
+       actual ast.Type, expected ast.Type, n ast.Node) ast.Node {
+       if v.Implicit == nil {
+               return nil
+       }
+
+       fromMap, ok := v.Implicit[actual]
+       if !ok {
+               return nil
+       }
+
+       toFunc, ok := fromMap[expected]
+       if !ok {
+               return nil
+       }
+
+       return &ast.Call{
+               Func: toFunc,
+               Args: []ast.Node{n},
+               Posx: n.Pos(),
+       }
+}
+
+func (v *TypeCheck) reset() {
+       v.Stack = nil
+       v.err = nil
+}
+
+func (v *TypeCheck) StackPush(t ast.Type) {
+       v.Stack = append(v.Stack, t)
+}
+
+func (v *TypeCheck) StackPop() ast.Type {
+       var x ast.Type
+       x, v.Stack = v.Stack[len(v.Stack)-1], v.Stack[:len(v.Stack)-1]
+       return x
+}
+
+func (v *TypeCheck) StackPeek() ast.Type {
+       if len(v.Stack) == 0 {
+               return ast.TypeInvalid
+       }
+
+       return v.Stack[len(v.Stack)-1]
+}
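A minimal sketch of wiring the type checker up by hand, assuming a tree from hil.Parse and a scope with the __builtin_* functions registered (mirroring what internalEval in eval.go, later in this diff, does automatically):

tv := &hil.TypeCheck{
	Scope: scope, // e.g. an *ast.BasicScope populated by registerBuiltins
	Implicit: map[ast.Type]map[ast.Type]string{
		ast.TypeInt: {ast.TypeString: "__builtin_IntToString"},
	},
}
if err := tv.Visit(tree); err != nil {
	// Type errors arrive prefixed with their position: "At column C, line L: ...".
}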
diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go
new file mode 100644 (file)
index 0000000..f2024d0
--- /dev/null
@@ -0,0 +1,159 @@
+package hil
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/hashicorp/hil/ast"
+       "github.com/mitchellh/mapstructure"
+)
+
+// UnknownValue is a sentinel value that can be used to denote
+// that a value of a variable (or map element, list element, etc.)
+// is unknown. This will always have the type ast.TypeUnknown.
+const UnknownValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
+
+var hilMapstructureDecodeHookSlice []interface{}
+var hilMapstructureDecodeHookStringSlice []string
+var hilMapstructureDecodeHookMap map[string]interface{}
+
+// hilMapstructureWeakDecode behaves in the same way as mapstructure.WeakDecode,
+// but has a DecodeHook which defeats mapstructure's backward-compatibility
+// mode, which would WeakDecode an empty []interface{} into an empty
+// map[string]interface{}. This keeps WeakDecode (desirable) without misclassifying empty lists.
+func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error {
+       config := &mapstructure.DecoderConfig{
+               DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
+                       sliceType := reflect.TypeOf(hilMapstructureDecodeHookSlice)
+                       stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
+                       mapType := reflect.TypeOf(hilMapstructureDecodeHookMap)
+
+                       if (source == sliceType || source == stringSliceType) && target == mapType {
+                               return nil, fmt.Errorf("Cannot convert %s into a %s", source, target)
+                       }
+
+                       return val, nil
+               },
+               WeaklyTypedInput: true,
+               Result:           rawVal,
+       }
+
+       decoder, err := mapstructure.NewDecoder(config)
+       if err != nil {
+               return err
+       }
+
+       return decoder.Decode(m)
+}
+
+func InterfaceToVariable(input interface{}) (ast.Variable, error) {
+       if inputVariable, ok := input.(ast.Variable); ok {
+               return inputVariable, nil
+       }
+
+       var stringVal string
+       if err := hilMapstructureWeakDecode(input, &stringVal); err == nil {
+               // Special case the unknown value to turn into "unknown"
+               if stringVal == UnknownValue {
+                       return ast.Variable{Value: UnknownValue, Type: ast.TypeUnknown}, nil
+               }
+
+               // Otherwise return the string value
+               return ast.Variable{
+                       Type:  ast.TypeString,
+                       Value: stringVal,
+               }, nil
+       }
+
+       var mapVal map[string]interface{}
+       if err := hilMapstructureWeakDecode(input, &mapVal); err == nil {
+               elements := make(map[string]ast.Variable)
+               for i, element := range mapVal {
+                       varElement, err := InterfaceToVariable(element)
+                       if err != nil {
+                               return ast.Variable{}, err
+                       }
+                       elements[i] = varElement
+               }
+
+               return ast.Variable{
+                       Type:  ast.TypeMap,
+                       Value: elements,
+               }, nil
+       }
+
+       var sliceVal []interface{}
+       if err := hilMapstructureWeakDecode(input, &sliceVal); err == nil {
+               elements := make([]ast.Variable, len(sliceVal))
+               for i, element := range sliceVal {
+                       varElement, err := InterfaceToVariable(element)
+                       if err != nil {
+                               return ast.Variable{}, err
+                       }
+                       elements[i] = varElement
+               }
+
+               return ast.Variable{
+                       Type:  ast.TypeList,
+                       Value: elements,
+               }, nil
+       }
+
+       return ast.Variable{}, fmt.Errorf("value for conversion must be a string, []interface{}, or map[string]interface{}: got %T", input)
+}
+
+func VariableToInterface(input ast.Variable) (interface{}, error) {
+       if input.Type == ast.TypeString {
+               if inputStr, ok := input.Value.(string); ok {
+                       return inputStr, nil
+               } else {
+                       return nil, fmt.Errorf("ast.Variable with type string has a value which is not a string")
+               }
+       }
+
+       if input.Type == ast.TypeList {
+               inputList, ok := input.Value.([]ast.Variable)
+               if !ok {
+                       return nil, fmt.Errorf("ast.Variable with type list has a value which is not a []ast.Variable")
+               }
+
+               result := make([]interface{}, 0)
+               if len(inputList) == 0 {
+                       return result, nil
+               }
+
+               for _, element := range inputList {
+                       if convertedElement, err := VariableToInterface(element); err == nil {
+                               result = append(result, convertedElement)
+                       } else {
+                               return nil, err
+                       }
+               }
+
+               return result, nil
+       }
+
+       if input.Type == ast.TypeMap {
+               inputMap, ok := input.Value.(map[string]ast.Variable)
+               if !ok {
+                       return nil, fmt.Errorf("ast.Variable with type map has a value which is not a map[string]ast.Variable")
+               }
+
+               result := make(map[string]interface{})
+               if len(inputMap) == 0 {
+                       return result, nil
+               }
+
+               for key, value := range inputMap {
+                       if convertedValue, err := VariableToInterface(value); err == nil {
+                               result[key] = convertedValue
+                       } else {
+                               return nil, err
+                       }
+               }
+
+               return result, nil
+       }
+
+       return nil, fmt.Errorf("unknown input type: %s", input.Type)
+}
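A round-trip sketch through the two converters above:

package main

import (
	"fmt"

	"github.com/hashicorp/hil"
)

func main() {
	v, err := hil.InterfaceToVariable([]interface{}{"a", "b"})
	if err != nil {
		panic(err)
	}
	// v.Type is ast.TypeList and v.Value holds TypeString elements.

	raw, err := hil.VariableToInterface(v)
	fmt.Println(raw, err) // [a b] <nil>
}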
diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go
new file mode 100644 (file)
index 0000000..2782076
--- /dev/null
@@ -0,0 +1,472 @@
+package hil
+
+import (
+       "bytes"
+       "errors"
+       "fmt"
+       "sync"
+
+       "github.com/hashicorp/hil/ast"
+)
+
+// EvalConfig is the configuration for evaluating.
+type EvalConfig struct {
+       // GlobalScope is the global scope of execution for evaluation.
+       GlobalScope *ast.BasicScope
+
+       // SemanticChecks is a list of additional semantic checks that will be run
+       // on the tree prior to evaluating it. The type checker, identifier checker,
+       // etc. will be run before these automatically.
+       SemanticChecks []SemanticChecker
+}
+
+// SemanticChecker is the type that must be implemented to do a
+// semantic check on an AST tree. This will be called with the root node.
+type SemanticChecker func(ast.Node) error
+
+// EvaluationResult is a struct returned from the hil.Eval function,
+// representing the result of an interpolation. Results are returned in their
+// "natural" Go structure rather than in terms of the HIL AST.  For the types
+// currently implemented, this means that the Value field can be interpreted as
+// the following Go types:
+//     TypeInvalid: undefined
+//     TypeString:  string
+//     TypeList:    []interface{}
+//     TypeMap:     map[string]interface{}
+//     TypeBool:    bool
+type EvaluationResult struct {
+       Type  EvalType
+       Value interface{}
+}
+
+// InvalidResult is a structure representing the result of a HIL interpolation
+// which has invalid syntax, missing variables, or some other type of error.
+// The error is described out of band in the accompanying error return value.
+var InvalidResult = EvaluationResult{Type: TypeInvalid, Value: nil}
+
+// errExitUnknown is an internal error that when returned means the result
+// is an unknown value. We use this for early exit.
+var errExitUnknown = errors.New("unknown value")
+
+func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) {
+       output, outputType, err := internalEval(root, config)
+       if err != nil {
+               return InvalidResult, err
+       }
+
+       // If the result contains any nested unknowns then the result as a whole
+       // is unknown, so that callers only have to deal with "entirely known"
+       // or "entirely unknown" as outcomes.
+       if ast.IsUnknown(ast.Variable{Type: outputType, Value: output}) {
+               outputType = ast.TypeUnknown
+               output = UnknownValue
+       }
+
+       switch outputType {
+       case ast.TypeList:
+               val, err := VariableToInterface(ast.Variable{
+                       Type:  ast.TypeList,
+                       Value: output,
+               })
+               return EvaluationResult{
+                       Type:  TypeList,
+                       Value: val,
+               }, err
+       case ast.TypeMap:
+               val, err := VariableToInterface(ast.Variable{
+                       Type:  ast.TypeMap,
+                       Value: output,
+               })
+               return EvaluationResult{
+                       Type:  TypeMap,
+                       Value: val,
+               }, err
+       case ast.TypeString:
+               return EvaluationResult{
+                       Type:  TypeString,
+                       Value: output,
+               }, nil
+       case ast.TypeBool:
+               return EvaluationResult{
+                       Type:  TypeBool,
+                       Value: output,
+               }, nil
+       case ast.TypeUnknown:
+               return EvaluationResult{
+                       Type:  TypeUnknown,
+                       Value: UnknownValue,
+               }, nil
+       default:
+               return InvalidResult, fmt.Errorf("unknown type %s as interpolation output", outputType)
+       }
+}
+
+// internalEval evaluates the given AST tree and returns its raw output
+// value, the type of the output, and any error that occurred.
+func internalEval(root ast.Node, config *EvalConfig) (interface{}, ast.Type, error) {
+       // Ensure we have a scope, then register the builtins into it
+       if config == nil {
+               config = new(EvalConfig)
+       }
+       scope := registerBuiltins(config.GlobalScope)
+       implicitMap := map[ast.Type]map[ast.Type]string{
+               ast.TypeFloat: {
+                       ast.TypeInt:    "__builtin_FloatToInt",
+                       ast.TypeString: "__builtin_FloatToString",
+               },
+               ast.TypeInt: {
+                       ast.TypeFloat:  "__builtin_IntToFloat",
+                       ast.TypeString: "__builtin_IntToString",
+               },
+               ast.TypeString: {
+                       ast.TypeInt:   "__builtin_StringToInt",
+                       ast.TypeFloat: "__builtin_StringToFloat",
+                       ast.TypeBool:  "__builtin_StringToBool",
+               },
+               ast.TypeBool: {
+                       ast.TypeString: "__builtin_BoolToString",
+               },
+       }
+
+       // Build our own semantic checks that we always run
+       tv := &TypeCheck{Scope: scope, Implicit: implicitMap}
+       ic := &IdentifierCheck{Scope: scope}
+
+       // Build up the semantic checks for execution
+       checks := make(
+               []SemanticChecker,
+               len(config.SemanticChecks),
+               len(config.SemanticChecks)+2)
+       copy(checks, config.SemanticChecks)
+       checks = append(checks, ic.Visit)
+       checks = append(checks, tv.Visit)
+
+       // Run the semantic checks
+       for _, check := range checks {
+               if err := check(root); err != nil {
+                       return nil, ast.TypeInvalid, err
+               }
+       }
+
+       // Execute
+       v := &evalVisitor{Scope: scope}
+       return v.Visit(root)
+}
+
+// EvalNode is the interface that must be implemented by any ast.Node
+// to support evaluation. This will be called in visitor pattern order.
+// The result of each call to Eval is automatically pushed onto the
+// stack as a LiteralNode. Pop elements off the stack to get child
+// values.
+type EvalNode interface {
+       Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error)
+}
+
+type evalVisitor struct {
+       Scope ast.Scope
+       Stack ast.Stack
+
+       err  error
+       lock sync.Mutex
+}
+
+func (v *evalVisitor) Visit(root ast.Node) (interface{}, ast.Type, error) {
+       // Run the actual visitor pattern
+       root.Accept(v.visit)
+
+       // Get our result and clear out everything else
+       var result *ast.LiteralNode
+       if v.Stack.Len() > 0 {
+               result = v.Stack.Pop().(*ast.LiteralNode)
+       } else {
+               result = new(ast.LiteralNode)
+       }
+       resultErr := v.err
+       if resultErr == errExitUnknown {
+               // This means the return value is unknown and we used the error
+               // as an early exit mechanism. Reset since the value on the stack
+               // should be the unknown value.
+               resultErr = nil
+       }
+
+       // Clear everything else so we aren't just dangling
+       v.Stack.Reset()
+       v.err = nil
+
+       t, err := result.Type(v.Scope)
+       if err != nil {
+               return nil, ast.TypeInvalid, err
+       }
+
+       return result.Value, t, resultErr
+}
+
+func (v *evalVisitor) visit(raw ast.Node) ast.Node {
+       if v.err != nil {
+               return raw
+       }
+
+       en, err := evalNode(raw)
+       if err != nil {
+               v.err = err
+               return raw
+       }
+
+       out, outType, err := en.Eval(v.Scope, &v.Stack)
+       if err != nil {
+               v.err = err
+               return raw
+       }
+
+       v.Stack.Push(&ast.LiteralNode{
+               Value: out,
+               Typex: outType,
+       })
+
+       if outType == ast.TypeUnknown {
+               // Halt immediately
+               v.err = errExitUnknown
+               return raw
+       }
+
+       return raw
+}
+
+// evalNode is a private function that returns an EvalNode for built-in
+// types as well as any other EvalNode implementations.
+func evalNode(raw ast.Node) (EvalNode, error) {
+       switch n := raw.(type) {
+       case *ast.Index:
+               return &evalIndex{n}, nil
+       case *ast.Call:
+               return &evalCall{n}, nil
+       case *ast.Conditional:
+               return &evalConditional{n}, nil
+       case *ast.Output:
+               return &evalOutput{n}, nil
+       case *ast.LiteralNode:
+               return &evalLiteralNode{n}, nil
+       case *ast.VariableAccess:
+               return &evalVariableAccess{n}, nil
+       default:
+               en, ok := n.(EvalNode)
+               if !ok {
+                       return nil, fmt.Errorf("node doesn't support evaluation: %#v", raw)
+               }
+
+               return en, nil
+       }
+}
+
+type evalCall struct{ *ast.Call }
+
+func (v *evalCall) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+       // Look up the function in the map
+       function, ok := s.LookupFunc(v.Func)
+       if !ok {
+               return nil, ast.TypeInvalid, fmt.Errorf(
+                       "unknown function called: %s", v.Func)
+       }
+
+       // The arguments are on the stack in reverse order, so pop them off.
+       args := make([]interface{}, len(v.Args))
+       for i := range v.Args {
+               node := stack.Pop().(*ast.LiteralNode)
+               if node.IsUnknown() {
+                       // If any arguments are unknown then the result is automatically unknown
+                       return UnknownValue, ast.TypeUnknown, nil
+               }
+               args[len(v.Args)-1-i] = node.Value
+       }
+
+       // Call the function
+       result, err := function.Callback(args)
+       if err != nil {
+               return nil, ast.TypeInvalid, fmt.Errorf("%s: %s", v.Func, err)
+       }
+
+       return result, function.ReturnType, nil
+}
+
+type evalConditional struct{ *ast.Conditional }
+
+func (v *evalConditional) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+       // On the stack we have literal nodes representing the resulting values
+       // of the condition, true and false expressions, but they are in reverse
+       // order.
+       falseLit := stack.Pop().(*ast.LiteralNode)
+       trueLit := stack.Pop().(*ast.LiteralNode)
+       condLit := stack.Pop().(*ast.LiteralNode)
+
+       if condLit.IsUnknown() {
+               // If our conditional is unknown then our result is also unknown
+               return UnknownValue, ast.TypeUnknown, nil
+       }
+
+       if condLit.Value.(bool) {
+               return trueLit.Value, trueLit.Typex, nil
+       } else {
+               return falseLit.Value, falseLit.Typex, nil
+       }
+}
+
+type evalIndex struct{ *ast.Index }
+
+func (v *evalIndex) Eval(scope ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+       key := stack.Pop().(*ast.LiteralNode)
+       target := stack.Pop().(*ast.LiteralNode)
+
+       variableName := v.Index.Target.(*ast.VariableAccess).Name
+
+       if key.IsUnknown() {
+               // If our key is unknown then our result is also unknown
+               return UnknownValue, ast.TypeUnknown, nil
+       }
+
+       // For target, we'll accept collections containing unknown values but
+       // we still need to catch when the collection itself is unknown, shallowly.
+       if target.Typex == ast.TypeUnknown {
+               return UnknownValue, ast.TypeUnknown, nil
+       }
+
+       switch target.Typex {
+       case ast.TypeList:
+               return v.evalListIndex(variableName, target.Value, key.Value)
+       case ast.TypeMap:
+               return v.evalMapIndex(variableName, target.Value, key.Value)
+       default:
+               return nil, ast.TypeInvalid, fmt.Errorf(
+                       "target %q for indexing must be ast.TypeList or ast.TypeMap, is %s",
+                       variableName, target.Typex)
+       }
+}
+
+func (v *evalIndex) evalListIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) {
+       // We assume type checking was already done and we can assume that target
+       // is a list and key is an int
+       list, ok := target.([]ast.Variable)
+       if !ok {
+               return nil, ast.TypeInvalid, fmt.Errorf(
+                       "cannot cast target to []Variable, is: %T", target)
+       }
+
+       keyInt, ok := key.(int)
+       if !ok {
+               return nil, ast.TypeInvalid, fmt.Errorf(
+                       "cannot cast key to int, is: %T", key)
+       }
+
+       if len(list) == 0 {
+               return nil, ast.TypeInvalid, fmt.Errorf("list %s is empty", variableName)
+       }
+
+       if keyInt < 0 || keyInt >= len(list) {
+               return nil, ast.TypeInvalid, fmt.Errorf(
+                       "index %d out of range for list %s (max index %d)",
+                       keyInt, variableName, len(list)-1)
+       }
+
+       returnVal := list[keyInt].Value
+       returnType := list[keyInt].Type
+       return returnVal, returnType, nil
+}
+
+func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key interface{}) (interface{}, ast.Type, error) {
+       // We assume type checking was already done and we can assume that target
+       // is a map and key is a string
+       vmap, ok := target.(map[string]ast.Variable)
+       if !ok {
+               return nil, ast.TypeInvalid, fmt.Errorf(
+                       "cannot cast target to map[string]Variable, is: %T", target)
+       }
+
+       keyString, ok := key.(string)
+       if !ok {
+               return nil, ast.TypeInvalid, fmt.Errorf(
+                       "cannot cast key to string, is: %T", key)
+       }
+
+       if len(vmap) == 0 {
+               return nil, ast.TypeInvalid, fmt.Errorf("map %s is empty", variableName)
+       }
+
+       value, ok := vmap[keyString]
+       if !ok {
+               return nil, ast.TypeInvalid, fmt.Errorf(
+                       "key %q does not exist in map %s", keyString, variableName)
+       }
+
+       return value.Value, value.Type, nil
+}
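+
+// Illustrative sketch (not part of the upstream source): with a scope where
+// list = ["a", "b"], evaluating ${list[1]} pops the key literal 1 and then
+// the target collection, and evalListIndex above yields ("b", ast.TypeString).
+// Map indexing flows through evalMapIndex in the same way.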
+
+type evalOutput struct{ *ast.Output }
+
+func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) {
+       // The expressions should all be on the stack in reverse
+       // order. So pop them off, reverse their order, and concatenate.
+       nodes := make([]*ast.LiteralNode, 0, len(v.Exprs))
+       haveUnknown := false
+       for range v.Exprs {
+               n := stack.Pop().(*ast.LiteralNode)
+               nodes = append(nodes, n)
+
+               // If we have any unknowns then the whole result is unknown
+               // (we must deal with this first, because the type checker can
+               // skip type conversions in the presence of unknowns, and thus
+               // any of our other nodes may be incorrectly typed.)
+               if n.IsUnknown() {
+                       haveUnknown = true
+               }
+       }
+
+       if haveUnknown {
+               return UnknownValue, ast.TypeUnknown, nil
+       }
+
+       // Special case: a single list, map, or unknown value is returned
+       // as-is rather than being concatenated as a string.
+       if len(nodes) == 1 {
+               switch t := nodes[0].Typex; t {
+               case ast.TypeList, ast.TypeMap, ast.TypeUnknown:
+                       return nodes[0].Value, t, nil
+               }
+       }
+
+       // Otherwise concatenate the strings
+       var buf bytes.Buffer
+       for i := len(nodes) - 1; i >= 0; i-- {
+               if nodes[i].Typex != ast.TypeString {
+                       return nil, ast.TypeInvalid, fmt.Errorf(
+                               "invalid output with %s value at index %d: %#v",
+                               nodes[i].Typex,
+                               i,
+                               nodes[i].Value,
+                       )
+               }
+               buf.WriteString(nodes[i].Value.(string))
+       }
+
+       return buf.String(), ast.TypeString, nil
+}
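+
+// Illustrative trace (not part of the upstream source): for "a${b}c" with
+// b == "B", the pops above yield nodes = ["c", "B", "a"], and the reverse
+// loop writes them back in source order, producing "aBc".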
+
+type evalLiteralNode struct{ *ast.LiteralNode }
+
+func (v *evalLiteralNode) Eval(ast.Scope, *ast.Stack) (interface{}, ast.Type, error) {
+       return v.Value, v.Typex, nil
+}
+
+type evalVariableAccess struct{ *ast.VariableAccess }
+
+func (v *evalVariableAccess) Eval(scope ast.Scope, _ *ast.Stack) (interface{}, ast.Type, error) {
+       // Look up the variable in the map
+       variable, ok := scope.LookupVar(v.Name)
+       if !ok {
+               return nil, ast.TypeInvalid, fmt.Errorf(
+                       "unknown variable accessed: %s", v.Name)
+       }
+
+       return variable.Value, variable.Type, nil
+}
diff --git a/vendor/github.com/hashicorp/hil/eval_type.go b/vendor/github.com/hashicorp/hil/eval_type.go
new file mode 100644 (file)
index 0000000..6946ecd
--- /dev/null
@@ -0,0 +1,16 @@
+package hil
+
+//go:generate stringer -type=EvalType eval_type.go
+
+// EvalType represents the type of the output returned from a HIL
+// evaluation.
+type EvalType uint32
+
+const (
+       TypeInvalid EvalType = 0
+       TypeString  EvalType = 1 << iota
+       TypeBool
+       TypeList
+       TypeMap
+       TypeUnknown
+)
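+
+// Derived from the declarations above (editor's illustration): iota is
+// already 1 on the TypeString line, so the shifts yield distinct bit flags
+// that match the generated String method in evaltype_string.go below:
+//
+//     TypeInvalid = 0
+//     TypeString  = 2
+//     TypeBool    = 4
+//     TypeList    = 8
+//     TypeMap     = 16
+//     TypeUnknown = 32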
diff --git a/vendor/github.com/hashicorp/hil/evaltype_string.go b/vendor/github.com/hashicorp/hil/evaltype_string.go
new file mode 100644 (file)
index 0000000..b107ddd
--- /dev/null
@@ -0,0 +1,42 @@
+// Code generated by "stringer -type=EvalType eval_type.go"; DO NOT EDIT
+
+package hil
+
+import "fmt"
+
+const (
+       _EvalType_name_0 = "TypeInvalid"
+       _EvalType_name_1 = "TypeString"
+       _EvalType_name_2 = "TypeBool"
+       _EvalType_name_3 = "TypeList"
+       _EvalType_name_4 = "TypeMap"
+       _EvalType_name_5 = "TypeUnknown"
+)
+
+var (
+       _EvalType_index_0 = [...]uint8{0, 11}
+       _EvalType_index_1 = [...]uint8{0, 10}
+       _EvalType_index_2 = [...]uint8{0, 8}
+       _EvalType_index_3 = [...]uint8{0, 8}
+       _EvalType_index_4 = [...]uint8{0, 7}
+       _EvalType_index_5 = [...]uint8{0, 11}
+)
+
+func (i EvalType) String() string {
+       switch {
+       case i == 0:
+               return _EvalType_name_0
+       case i == 2:
+               return _EvalType_name_1
+       case i == 4:
+               return _EvalType_name_2
+       case i == 8:
+               return _EvalType_name_3
+       case i == 16:
+               return _EvalType_name_4
+       case i == 32:
+               return _EvalType_name_5
+       default:
+               return fmt.Sprintf("EvalType(%d)", i)
+       }
+}
diff --git a/vendor/github.com/hashicorp/hil/parse.go b/vendor/github.com/hashicorp/hil/parse.go
new file mode 100644 (file)
index 0000000..ecbe1fd
--- /dev/null
@@ -0,0 +1,29 @@
+package hil
+
+import (
+       "github.com/hashicorp/hil/ast"
+       "github.com/hashicorp/hil/parser"
+       "github.com/hashicorp/hil/scanner"
+)
+
+// Parse parses the given program and returns an executable AST tree.
+//
+// Syntax errors are returned with error having the dynamic type
+// *parser.ParseError, which gives the caller access to the source position
+// where the error was found, which allows (for example) combining it with
+// a known source filename to add context to the error message.
+func Parse(v string) (ast.Node, error) {
+       return ParseWithPosition(v, ast.Pos{Line: 1, Column: 1})
+}
+
+// ParseWithPosition is like Parse except that it overrides the source
+// row and column position of the first character in the string, which should
+// be 1-based.
+//
+// This can be used when HIL is embedded in another language and the outer
+// parser knows the row and column where the HIL expression started within
+// the overall source file.
+func ParseWithPosition(v string, pos ast.Pos) (ast.Node, error) {
+       ch := scanner.Scan(v, pos)
+       return parser.Parse(ch)
+}
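+
+// Usage sketch (illustrative, based only on the API above; error handling
+// elided):
+//
+//     node, err := hil.Parse("hello ${name}")
+//     if err != nil {
+//             // err has dynamic type *parser.ParseError, carrying a Pos
+//     }
+//     // node is an *ast.Output whose Exprs hold a string literal
+//     // followed by a variable access named "name".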
diff --git a/vendor/github.com/hashicorp/hil/parser/binary_op.go b/vendor/github.com/hashicorp/hil/parser/binary_op.go
new file mode 100644 (file)
index 0000000..2e013e0
--- /dev/null
@@ -0,0 +1,45 @@
+package parser
+
+import (
+       "github.com/hashicorp/hil/ast"
+       "github.com/hashicorp/hil/scanner"
+)
+
+var binaryOps []map[scanner.TokenType]ast.ArithmeticOp
+
+func init() {
+       // This operation table maps from the operator's scanner token type
+       // to the AST arithmetic operation. All expressions produced from
+       // binary operators are *ast.Arithmetic nodes.
+       //
+       // Binary operator groups are listed in order of precedence, with
+       // the *lowest* precedence first. Operators within the same group
+       // have left-to-right associativity.
+       binaryOps = []map[scanner.TokenType]ast.ArithmeticOp{
+               {
+                       scanner.OR: ast.ArithmeticOpLogicalOr,
+               },
+               {
+                       scanner.AND: ast.ArithmeticOpLogicalAnd,
+               },
+               {
+                       scanner.EQUAL:    ast.ArithmeticOpEqual,
+                       scanner.NOTEQUAL: ast.ArithmeticOpNotEqual,
+               },
+               {
+                       scanner.GT:  ast.ArithmeticOpGreaterThan,
+                       scanner.GTE: ast.ArithmeticOpGreaterThanOrEqual,
+                       scanner.LT:  ast.ArithmeticOpLessThan,
+                       scanner.LTE: ast.ArithmeticOpLessThanOrEqual,
+               },
+               {
+                       scanner.PLUS:  ast.ArithmeticOpAdd,
+                       scanner.MINUS: ast.ArithmeticOpSub,
+               },
+               {
+                       scanner.STAR:    ast.ArithmeticOpMul,
+                       scanner.SLASH:   ast.ArithmeticOpDiv,
+                       scanner.PERCENT: ast.ArithmeticOpMod,
+               },
+       }
+}
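+
+// Illustrative note (not part of the upstream source): because the
+// multiplicative group is listed last (highest precedence), ${1 + 2 * 3}
+// parses as 1 + (2 * 3), while same-group operators associate left to
+// right, so 10 - 4 - 3 parses as (10 - 4) - 3.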
diff --git a/vendor/github.com/hashicorp/hil/parser/error.go b/vendor/github.com/hashicorp/hil/parser/error.go
new file mode 100644 (file)
index 0000000..bacd696
--- /dev/null
@@ -0,0 +1,38 @@
+package parser
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/hil/ast"
+       "github.com/hashicorp/hil/scanner"
+)
+
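+// ParseError is the error type returned for syntax errors found during
+// parsing, carrying the source position at which the problem was detected.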
+type ParseError struct {
+       Message string
+       Pos     ast.Pos
+}
+
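+// Errorf constructs a *ParseError at the given source position.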
+func Errorf(pos ast.Pos, format string, args ...interface{}) error {
+       return &ParseError{
+               Message: fmt.Sprintf(format, args...),
+               Pos:     pos,
+       }
+}
+
+// TokenErrorf is a convenient wrapper around Errorf that uses the
+// position of the given token.
+func TokenErrorf(token *scanner.Token, format string, args ...interface{}) error {
+       return Errorf(token.Pos, format, args...)
+}
+
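+// ExpectationError is a convenience wrapper around TokenErrorf for the
+// common case of reporting a token that doesn't match what the parser
+// required.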
+func ExpectationError(wanted string, got *scanner.Token) error {
+       return TokenErrorf(got, "expected %s but found %s", wanted, got)
+}
+
+func (e *ParseError) Error() string {
+       return fmt.Sprintf("parse error at %s: %s", e.Pos, e.Message)
+}
+
+func (e *ParseError) String() string {
+       return e.Error()
+}
diff --git a/vendor/github.com/hashicorp/hil/parser/fuzz.go b/vendor/github.com/hashicorp/hil/parser/fuzz.go
new file mode 100644 (file)
index 0000000..de954f3
--- /dev/null
@@ -0,0 +1,28 @@
+// +build gofuzz
+
+package parser
+
+import (
+       "github.com/hashicorp/hil/ast"
+       "github.com/hashicorp/hil/scanner"
+)
+
+// Fuzz is a fuzz-testing entry point designed to be used with go-fuzz:
+//    https://github.com/dvyukov/go-fuzz
+//
+// It's not included in a normal build due to the gofuzz build tag above.
+//
+// There are some input files that you can use as a seed corpus for go-fuzz
+// in the directory ./fuzz-corpus .
+func Fuzz(data []byte) int {
+       str := string(data)
+
+       ch := scanner.Scan(str, ast.Pos{Line: 1, Column: 1})
+       _, err := Parse(ch)
+       if err != nil {
+               return 0
+       }
+
+       return 1
+}
diff --git a/vendor/github.com/hashicorp/hil/parser/parser.go b/vendor/github.com/hashicorp/hil/parser/parser.go
new file mode 100644 (file)
index 0000000..376f1c4
--- /dev/null
@@ -0,0 +1,522 @@
+package parser
+
+import (
+       "strconv"
+       "unicode/utf8"
+
+       "github.com/hashicorp/hil/ast"
+       "github.com/hashicorp/hil/scanner"
+)
+
+func Parse(ch <-chan *scanner.Token) (ast.Node, error) {
+       peeker := scanner.NewPeeker(ch)
+       parser := &parser{peeker}
+       output, err := parser.ParseTopLevel()
+       peeker.Close()
+       return output, err
+}
+
+type parser struct {
+       peeker *scanner.Peeker
+}
+
+func (p *parser) ParseTopLevel() (ast.Node, error) {
+       return p.parseInterpolationSeq(false)
+}
+
+func (p *parser) ParseQuoted() (ast.Node, error) {
+       return p.parseInterpolationSeq(true)
+}
+
+// parseInterpolationSeq parses either the top-level sequence of literals
+// and interpolation expressions or a similar sequence within a quoted
+// string inside an interpolation expression. The latter case is requested
+// by setting 'quoted' to true.
+func (p *parser) parseInterpolationSeq(quoted bool) (ast.Node, error) {
+       literalType := scanner.LITERAL
+       endType := scanner.EOF
+       if quoted {
+               // exceptions for quoted sequences
+               literalType = scanner.STRING
+               endType = scanner.CQUOTE
+       }
+
+       startPos := p.peeker.Peek().Pos
+
+       if quoted {
+               tok := p.peeker.Read()
+               if tok.Type != scanner.OQUOTE {
+                       return nil, ExpectationError("open quote", tok)
+               }
+       }
+
+       var exprs []ast.Node
+       for {
+               tok := p.peeker.Read()
+
+               if tok.Type == endType {
+                       break
+               }
+
+               switch tok.Type {
+               case literalType:
+                       val, err := p.parseStringToken(tok)
+                       if err != nil {
+                               return nil, err
+                       }
+                       exprs = append(exprs, &ast.LiteralNode{
+                               Value: val,
+                               Typex: ast.TypeString,
+                               Posx:  tok.Pos,
+                       })
+               case scanner.BEGIN:
+                       expr, err := p.ParseInterpolation()
+                       if err != nil {
+                               return nil, err
+                       }
+                       exprs = append(exprs, expr)
+               default:
+                       return nil, ExpectationError(`"${"`, tok)
+               }
+       }
+
+       if len(exprs) == 0 {
+               // If we have no parts at all then the input must've
+               // been an empty string.
+               exprs = append(exprs, &ast.LiteralNode{
+                       Value: "",
+                       Typex: ast.TypeString,
+                       Posx:  startPos,
+               })
+       }
+
+       // As a special case, if our "Output" contains only one expression
+       // and it's a literal string then we'll hoist it up to be our
+       // direct return value, so callers can easily recognize a string
+       // that has no interpolations at all.
+       if len(exprs) == 1 {
+               if lit, ok := exprs[0].(*ast.LiteralNode); ok {
+                       if lit.Typex == ast.TypeString {
+                               return lit, nil
+                       }
+               }
+       }
+
+       return &ast.Output{
+               Exprs: exprs,
+               Posx:  startPos,
+       }, nil
+}
+
+// parseStringToken takes a token of either LITERAL or STRING type and
+// returns the interpreted string, after processing any relevant
+// escape sequences.
+func (p *parser) parseStringToken(tok *scanner.Token) (string, error) {
+       var backslashes bool
+       switch tok.Type {
+       case scanner.LITERAL:
+               backslashes = false
+       case scanner.STRING:
+               backslashes = true
+       default:
+               panic("unsupported string token type")
+       }
+
+       raw := []byte(tok.Content)
+       buf := make([]byte, 0, len(raw))
+
+       for i := 0; i < len(raw); i++ {
+               b := raw[i]
+               more := len(raw) > (i + 1)
+
+               if b == '$' {
+                       if more && raw[i+1] == '$' {
+                               // skip over the second dollar sign
+                               i++
+                       }
+               } else if backslashes && b == '\\' {
+                       if !more {
+                               return "", Errorf(
+                                       ast.Pos{
+                                               Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
+                                               Line:   tok.Pos.Line,
+                                       },
+                                       `unfinished backslash escape sequence`,
+                               )
+                       }
+                       escapeType := raw[i+1]
+                       switch escapeType {
+                       case '\\':
+                               // skip over the second slash
+                               i++
+                       case 'n':
+                               b = '\n'
+                               i++
+                       case '"':
+                               b = '"'
+                               i++
+                       default:
+                               return "", Errorf(
+                                       ast.Pos{
+                                               Column: tok.Pos.Column + utf8.RuneCount(raw[:i]),
+                                               Line:   tok.Pos.Line,
+                                       },
+                                       `invalid backslash escape sequence`,
+                               )
+                       }
+               }
+
+               buf = append(buf, b)
+       }
+
+       return string(buf), nil
+}
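+
+// Illustrative worked examples (not part of the upstream source) of the
+// escapes handled above:
+//
+//     LITERAL a$${b}  -> a${b}    ($$ collapses to a single $, both modes)
+//     STRING  a\nb    -> "a", newline, "b"  (backslash escapes, STRING only)
+//     STRING  \"hi\"  -> "hi"
+//     STRING  a\qb    -> error: invalid backslash escape sequence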
+
+func (p *parser) ParseInterpolation() (ast.Node, error) {
+       // By the time we're called, we're already "inside" the ${ sequence
+       // because the caller consumed the ${ token.
+
+       expr, err := p.ParseExpression()
+       if err != nil {
+               return nil, err
+       }
+
+       err = p.requireTokenType(scanner.END, `"}"`)
+       if err != nil {
+               return nil, err
+       }
+
+       return expr, nil
+}
+
+func (p *parser) ParseExpression() (ast.Node, error) {
+       return p.parseTernaryCond()
+}
+
+func (p *parser) parseTernaryCond() (ast.Node, error) {
+       // The ternary condition operator (.. ? .. : ..) behaves somewhat
+       // like a binary operator except that the "operator" is itself
+       // an expression enclosed in two punctuation characters.
+       // The middle expression is parsed as if the ? and : symbols
+       // were parentheses. The "rhs" (the "false expression") is then
+       // treated right-associatively so it behaves similarly to the
+       // middle in terms of precedence.
+
+       startPos := p.peeker.Peek().Pos
+
+       var cond, trueExpr, falseExpr ast.Node
+       var err error
+
+       cond, err = p.parseBinaryOps(binaryOps)
+       if err != nil {
+               return nil, err
+       }
+
+       next := p.peeker.Peek()
+       if next.Type != scanner.QUESTION {
+               return cond, nil
+       }
+
+       p.peeker.Read() // eat question mark
+
+       trueExpr, err = p.ParseExpression()
+       if err != nil {
+               return nil, err
+       }
+
+       colon := p.peeker.Read()
+       if colon.Type != scanner.COLON {
+               return nil, ExpectationError(":", colon)
+       }
+
+       falseExpr, err = p.ParseExpression()
+       if err != nil {
+               return nil, err
+       }
+
+       return &ast.Conditional{
+               CondExpr:  cond,
+               TrueExpr:  trueExpr,
+               FalseExpr: falseExpr,
+               Posx:      startPos,
+       }, nil
+}
+
+// parseBinaryOps calls itself recursively to work through all of the
+// operator precedence groups, and then eventually calls ParseExpressionTerm
+// for each operand.
+func (p *parser) parseBinaryOps(ops []map[scanner.TokenType]ast.ArithmeticOp) (ast.Node, error) {
+       if len(ops) == 0 {
+               // We've run out of operators, so now we'll just try to parse a term.
+               return p.ParseExpressionTerm()
+       }
+
+       thisLevel := ops[0]
+       remaining := ops[1:]
+
+       startPos := p.peeker.Peek().Pos
+
+       var lhs, rhs ast.Node
+       operator := ast.ArithmeticOpInvalid
+       var err error
+
+       // parse a term that might be the first operand of a binary
+       // expression or it might just be a standalone term, but
+       // we won't know until we've parsed it and can look ahead
+       // to see if there's an operator token.
+       lhs, err = p.parseBinaryOps(remaining)
+       if err != nil {
+               return nil, err
+       }
+
+       // We'll keep eating up arithmetic operators until we run
+       // out, so that operators with the same precedence will combine in a
+       // left-associative manner:
+       // a+b+c => (a+b)+c, not a+(b+c)
+       //
+       // Should we later want to have right-associative operators, a way
+       // to achieve that would be to call back up to ParseExpression here
+       // instead of iteratively parsing only the remaining operators.
+       for {
+               next := p.peeker.Peek()
+               var newOperator ast.ArithmeticOp
+               var ok bool
+               if newOperator, ok = thisLevel[next.Type]; !ok {
+                       break
+               }
+
+               // Are we extending an expression started on
+               // the previous iteration?
+               if operator != ast.ArithmeticOpInvalid {
+                       lhs = &ast.Arithmetic{
+                               Op:    operator,
+                               Exprs: []ast.Node{lhs, rhs},
+                               Posx:  startPos,
+                       }
+               }
+
+               operator = newOperator
+               p.peeker.Read() // eat operator token
+               rhs, err = p.parseBinaryOps(remaining)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       if operator != ast.ArithmeticOpInvalid {
+               return &ast.Arithmetic{
+                       Op:    operator,
+                       Exprs: []ast.Node{lhs, rhs},
+                       Posx:  startPos,
+               }, nil
+       }
+
+       return lhs, nil
+}
+
+func (p *parser) ParseExpressionTerm() (ast.Node, error) {
+
+       next := p.peeker.Peek()
+
+       switch next.Type {
+
+       case scanner.OPAREN:
+               p.peeker.Read()
+               expr, err := p.ParseExpression()
+               if err != nil {
+                       return nil, err
+               }
+               err = p.requireTokenType(scanner.CPAREN, `")"`)
+               return expr, err
+
+       case scanner.OQUOTE:
+               return p.ParseQuoted()
+
+       case scanner.INTEGER:
+               tok := p.peeker.Read()
+               val, err := strconv.Atoi(tok.Content)
+               if err != nil {
+                       return nil, TokenErrorf(tok, "invalid integer: %s", err)
+               }
+               return &ast.LiteralNode{
+                       Value: val,
+                       Typex: ast.TypeInt,
+                       Posx:  tok.Pos,
+               }, nil
+
+       case scanner.FLOAT:
+               tok := p.peeker.Read()
+               val, err := strconv.ParseFloat(tok.Content, 64)
+               if err != nil {
+                       return nil, TokenErrorf(tok, "invalid float: %s", err)
+               }
+               return &ast.LiteralNode{
+                       Value: val,
+                       Typex: ast.TypeFloat,
+                       Posx:  tok.Pos,
+               }, nil
+
+       case scanner.BOOL:
+               tok := p.peeker.Read()
+               // the scanner guarantees that tok.Content is either "true" or "false"
+               val := tok.Content[0] == 't'
+               return &ast.LiteralNode{
+                       Value: val,
+                       Typex: ast.TypeBool,
+                       Posx:  tok.Pos,
+               }, nil
+
+       case scanner.MINUS:
+               opTok := p.peeker.Read()
+               // important to use ParseExpressionTerm rather than ParseExpression
+               // here, otherwise we can capture a following binary expression into
+               // our negation.
+               // e.g. -46+5 should parse as (0-46)+5, not 0-(46+5)
+               operand, err := p.ParseExpressionTerm()
+               if err != nil {
+                       return nil, err
+               }
+               // The AST currently represents negative numbers as
+               // a binary subtraction of the number from zero.
+               return &ast.Arithmetic{
+                       Op: ast.ArithmeticOpSub,
+                       Exprs: []ast.Node{
+                               &ast.LiteralNode{
+                                       Value: 0,
+                                       Typex: ast.TypeInt,
+                                       Posx:  opTok.Pos,
+                               },
+                               operand,
+                       },
+                       Posx: opTok.Pos,
+               }, nil
+
+       case scanner.BANG:
+               opTok := p.peeker.Read()
+               // important to use ParseExpressionTerm rather than ParseExpression
+               // here, otherwise we can capture a following binary expression into
+               // our negation.
+               operand, err := p.ParseExpressionTerm()
+               if err != nil {
+                       return nil, err
+               }
+               // The AST currently represents boolean negation as an equality
+               // test against "false".
+               return &ast.Arithmetic{
+                       Op: ast.ArithmeticOpEqual,
+                       Exprs: []ast.Node{
+                               &ast.LiteralNode{
+                                       Value: false,
+                                       Typex: ast.TypeBool,
+                                       Posx:  opTok.Pos,
+                               },
+                               operand,
+                       },
+                       Posx: opTok.Pos,
+               }, nil
+
+       case scanner.IDENTIFIER:
+               return p.ParseScopeInteraction()
+
+       default:
+               return nil, ExpectationError("expression", next)
+       }
+}
+
+// ParseScopeInteraction parses the expression types that interact
+// with the evaluation scope: variable access, function calls, and
+// indexing.
+//
+// Indexing should actually be a distinct operator in its own right,
+// so that e.g. it can be applied to the result of a function call,
+// but for now we're preserving the behavior of the older yacc-based
+// parser.
+func (p *parser) ParseScopeInteraction() (ast.Node, error) {
+       first := p.peeker.Read()
+       startPos := first.Pos
+       if first.Type != scanner.IDENTIFIER {
+               return nil, ExpectationError("identifier", first)
+       }
+
+       next := p.peeker.Peek()
+       if next.Type == scanner.OPAREN {
+               // function call
+               funcName := first.Content
+               p.peeker.Read() // eat paren
+               var args []ast.Node
+
+               for {
+                       if p.peeker.Peek().Type == scanner.CPAREN {
+                               break
+                       }
+
+                       arg, err := p.ParseExpression()
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       args = append(args, arg)
+
+                       if p.peeker.Peek().Type == scanner.COMMA {
+                               p.peeker.Read() // eat comma
+                               continue
+                       } else {
+                               break
+                       }
+               }
+
+               err := p.requireTokenType(scanner.CPAREN, `")"`)
+               if err != nil {
+                       return nil, err
+               }
+
+               return &ast.Call{
+                       Func: funcName,
+                       Args: args,
+                       Posx: startPos,
+               }, nil
+       }
+
+       varNode := &ast.VariableAccess{
+               Name: first.Content,
+               Posx: startPos,
+       }
+
+       if p.peeker.Peek().Type == scanner.OBRACKET {
+               // index operator
+               startPos := p.peeker.Read().Pos // eat bracket
+               indexExpr, err := p.ParseExpression()
+               if err != nil {
+                       return nil, err
+               }
+               err = p.requireTokenType(scanner.CBRACKET, `"]"`)
+               if err != nil {
+                       return nil, err
+               }
+               return &ast.Index{
+                       Target: varNode,
+                       Key:    indexExpr,
+                       Posx:   startPos,
+               }, nil
+       }
+
+       return varNode, nil
+}
+
+// requireTokenType consumes the next token and returns an error if its
+// type does not match the given type. nil is returned if the type matches.
+//
+// This is a helper around peeker.Read() for situations where the parser just
+// wants to assert that a particular token type must be present.
+func (p *parser) requireTokenType(wantType scanner.TokenType, wantName string) error {
+       token := p.peeker.Read()
+       if token.Type != wantType {
+               return ExpectationError(wantName, token)
+       }
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/peeker.go b/vendor/github.com/hashicorp/hil/scanner/peeker.go
new file mode 100644 (file)
index 0000000..4de3728
--- /dev/null
@@ -0,0 +1,55 @@
+package scanner
+
+// Peeker is a utility that wraps a token channel returned by Scan and
+// provides an interface that allows a caller (e.g. the parser) to
+// work with the token stream in a mode that allows one token of lookahead,
+// and provides utilities for more convenient processing of the stream.
+type Peeker struct {
+       ch     <-chan *Token
+       peeked *Token
+}
+
+func NewPeeker(ch <-chan *Token) *Peeker {
+       return &Peeker{
+               ch: ch,
+       }
+}
+
+// Peek returns the next token in the stream without consuming it. A
+// subsequent call to Read will return the same token.
+func (p *Peeker) Peek() *Token {
+       if p.peeked == nil {
+               p.peeked = <-p.ch
+       }
+       return p.peeked
+}
+
+// Read consumes the next token in the stream and returns it.
+func (p *Peeker) Read() *Token {
+       token := p.Peek()
+
+       // As a special case, we will produce the EOF token forever once
+       // it is reached.
+       if token.Type != EOF {
+               p.peeked = nil
+       }
+
+       return token
+}
+
+// Close ensures that the token stream has been exhausted, to prevent
+// the goroutine in the underlying scanner from leaking.
+//
+// It's not necessary to call this if the caller reads the token stream
+// to EOF, since that implicitly closes the scanner.
+func (p *Peeker) Close() {
+       for range p.ch {
+               // discard
+       }
+       // Install a synthetic EOF token in 'peeked' in case someone
+       // erroneously calls Peek() or Read() after we've closed.
+       p.peeked = &Token{
+               Type:    EOF,
+               Content: "",
+       }
+}
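+
+// Usage sketch (illustrative, based only on the API above):
+//
+//     peeker := NewPeeker(Scan(src, ast.InitPos))
+//     if peeker.Peek().Type == BEGIN { // one token of lookahead
+//             tok := peeker.Read()     // now actually consumed
+//             _ = tok
+//     }
+//     peeker.Close() // drain the stream so the scan goroutine exits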
diff --git a/vendor/github.com/hashicorp/hil/scanner/scanner.go b/vendor/github.com/hashicorp/hil/scanner/scanner.go
new file mode 100644 (file)
index 0000000..bab86c6
--- /dev/null
@@ -0,0 +1,550 @@
+package scanner
+
+import (
+       "unicode"
+       "unicode/utf8"
+
+       "github.com/hashicorp/hil/ast"
+)
+
+// Scan returns a channel that receives Tokens from the given input string.
+//
+// The scanner's job is just to partition the string into meaningful parts.
+// It doesn't do any transformation of the raw input string, so the caller
+// must deal with any further interpretation required, such as parsing INTEGER
+// tokens into real ints, or dealing with escape sequences in LITERAL or
+// STRING tokens.
+//
+// Strings in the returned tokens are slices from the original string.
+//
+// startPos should be set to ast.InitPos unless the caller knows that
+// this interpolation string is part of a larger file and knows the position
+// of the first character in that larger file.
+func Scan(s string, startPos ast.Pos) <-chan *Token {
+       ch := make(chan *Token)
+       go scan(s, ch, startPos)
+       return ch
+}
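+
+// Usage sketch (illustrative): draining the channel to EOF lets the
+// goroutine started above finish and close the channel.
+//
+//     for tok := range Scan("hi ${name}", ast.InitPos) {
+//             // yields LITERAL "hi ", BEGIN "${", IDENTIFIER "name",
+//             // END "}", and finally EOF, after which the channel closes
+//             _ = tok
+//     }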
+
+func scan(s string, ch chan<- *Token, pos ast.Pos) {
+       // 'remain' starts off as the whole string, but we gradually
+       // slice off the front of it as we work our way through.
+       remain := s
+
+       // nesting keeps track of how many ${ .. } sequences we are
+       // inside, so we can recognize the minor differences in syntax
+       // between outer string literals (LITERAL tokens) and quoted
+       // string literals (STRING tokens).
+       nesting := 0
+
+       // We're going to flip back and forth between parsing literals/strings
+       // and parsing interpolation sequences ${ .. } until we reach EOF or
+       // some INVALID token.
+All:
+       for {
+               startPos := pos
+               // Literal string processing first, since the beginning of
+               // a string is always outside of an interpolation sequence.
+               literalVal, terminator := scanLiteral(remain, pos, nesting > 0)
+
+               if len(literalVal) > 0 {
+                       litType := LITERAL
+                       if nesting > 0 {
+                               litType = STRING
+                       }
+                       ch <- &Token{
+                               Type:    litType,
+                               Content: literalVal,
+                               Pos:     startPos,
+                       }
+                       remain = remain[len(literalVal):]
+               }
+
+               ch <- terminator
+               remain = remain[len(terminator.Content):]
+               pos = terminator.Pos
+               // Safe to use len() here because none of the terminator tokens
+               // can contain multi-byte UTF-8 sequences.
+               pos.Column = pos.Column + len(terminator.Content)
+
+               switch terminator.Type {
+               case INVALID:
+                       // Synthetic EOF after invalid token, since further scanning
+                       // is likely to just produce more garbage.
+                       ch <- &Token{
+                               Type:    EOF,
+                               Content: "",
+                               Pos:     pos,
+                       }
+                       break All
+               case EOF:
+                       // All done!
+                       break All
+               case BEGIN:
+                       nesting++
+               case CQUOTE:
+                       // nothing special to do
+               default:
+                       // Should never happen
+                       panic("invalid string/literal terminator")
+               }
+
+               // Now we do the processing of the insides of ${ .. } sequences.
+               // This loop terminates when we encounter either a closing } or
+               // an opening ", which will cause us to return to literal processing.
+       Interpolation:
+               for {
+
+                       token, size, newPos := scanInterpolationToken(remain, pos)
+                       ch <- token
+                       remain = remain[size:]
+                       pos = newPos
+
+                       switch token.Type {
+                       case INVALID:
+                               // Synthetic EOF after invalid token, since further scanning
+                               // is likely to just produce more garbage.
+                               ch <- &Token{
+                                       Type:    EOF,
+                                       Content: "",
+                                       Pos:     pos,
+                               }
+                               break All
+                       case EOF:
+                               // All done
+                               // (though a syntax error that we'll catch in the parser)
+                               break All
+                       case END:
+                               nesting--
+                               if nesting < 0 {
+                                       // Can happen if there are unbalanced ${ and } sequences
+                                       // in the input, which we'll catch in the parser.
+                                       nesting = 0
+                               }
+                               break Interpolation
+                       case OQUOTE:
+                               // Beginning of nested quoted string
+                               break Interpolation
+                       }
+               }
+       }
+
+       close(ch)
+}
+
+// Returns the token found at the start of the given string, followed by
+// the number of bytes that were consumed from the string and the adjusted
+// source position.
+//
+// Note that the number of bytes consumed can be more than the length of
+// the returned token contents if the string begins with whitespace, since
+// it will be silently consumed before reading the token.
+func scanInterpolationToken(s string, startPos ast.Pos) (*Token, int, ast.Pos) {
+       pos := startPos
+       size := 0
+
+       // Consume whitespace, if any
+       for len(s) > 0 && byteIsSpace(s[0]) {
+               if s[0] == '\n' {
+                       pos.Column = 1
+                       pos.Line++
+               } else {
+                       pos.Column++
+               }
+               size++
+               s = s[1:]
+       }
+
+       // Unexpected EOF during sequence
+       if len(s) == 0 {
+               return &Token{
+                       Type:    EOF,
+                       Content: "",
+                       Pos:     pos,
+               }, size, pos
+       }
+
+       next := s[0]
+       var token *Token
+
+       switch next {
+       case '(', ')', '[', ']', ',', '.', '+', '-', '*', '/', '%', '?', ':':
+               // Easy punctuation symbols that don't have any special meaning
+               // during scanning, and that stand for themselves in the
+               // TokenType enumeration.
+               token = &Token{
+                       Type:    TokenType(next),
+                       Content: s[:1],
+                       Pos:     pos,
+               }
+       case '}':
+               token = &Token{
+                       Type:    END,
+                       Content: s[:1],
+                       Pos:     pos,
+               }
+       case '"':
+               token = &Token{
+                       Type:    OQUOTE,
+                       Content: s[:1],
+                       Pos:     pos,
+               }
+       case '!':
+               if len(s) >= 2 && s[:2] == "!=" {
+                       token = &Token{
+                               Type:    NOTEQUAL,
+                               Content: s[:2],
+                               Pos:     pos,
+                       }
+               } else {
+                       token = &Token{
+                               Type:    BANG,
+                               Content: s[:1],
+                               Pos:     pos,
+                       }
+               }
+       case '<':
+               if len(s) >= 2 && s[:2] == "<=" {
+                       token = &Token{
+                               Type:    LTE,
+                               Content: s[:2],
+                               Pos:     pos,
+                       }
+               } else {
+                       token = &Token{
+                               Type:    LT,
+                               Content: s[:1],
+                               Pos:     pos,
+                       }
+               }
+       case '>':
+               if len(s) >= 2 && s[:2] == ">=" {
+                       token = &Token{
+                               Type:    GTE,
+                               Content: s[:2],
+                               Pos:     pos,
+                       }
+               } else {
+                       token = &Token{
+                               Type:    GT,
+                               Content: s[:1],
+                               Pos:     pos,
+                       }
+               }
+       case '=':
+               if len(s) >= 2 && s[:2] == "==" {
+                       token = &Token{
+                               Type:    EQUAL,
+                               Content: s[:2],
+                               Pos:     pos,
+                       }
+               } else {
+                       // A single equals is not a valid operator
+                       token = &Token{
+                               Type:    INVALID,
+                               Content: s[:1],
+                               Pos:     pos,
+                       }
+               }
+       case '&':
+               if len(s) >= 2 && s[:2] == "&&" {
+                       token = &Token{
+                               Type:    AND,
+                               Content: s[:2],
+                               Pos:     pos,
+                       }
+               } else {
+                       token = &Token{
+                               Type:    INVALID,
+                               Content: s[:1],
+                               Pos:     pos,
+                       }
+               }
+       case '|':
+               if len(s) >= 2 && s[:2] == "||" {
+                       token = &Token{
+                               Type:    OR,
+                               Content: s[:2],
+                               Pos:     pos,
+                       }
+               } else {
+                       token = &Token{
+                               Type:    INVALID,
+                               Content: s[:1],
+                               Pos:     pos,
+                       }
+               }
+       default:
+               if next >= '0' && next <= '9' {
+                       num, numType := scanNumber(s)
+                       token = &Token{
+                               Type:    numType,
+                               Content: num,
+                               Pos:     pos,
+                       }
+               } else if stringStartsWithIdentifier(s) {
+                       ident, runeLen := scanIdentifier(s)
+                       tokenType := IDENTIFIER
+                       if ident == "true" || ident == "false" {
+                               tokenType = BOOL
+                       }
+                       token = &Token{
+                               Type:    tokenType,
+                               Content: ident,
+                               Pos:     pos,
+                       }
+                       // Skip usual token handling because it doesn't
+                       // know how to deal with UTF-8 sequences.
+                       pos.Column = pos.Column + runeLen
+                       return token, size + len(ident), pos
+               } else {
+                       _, byteLen := utf8.DecodeRuneInString(s)
+                       token = &Token{
+                               Type:    INVALID,
+                               Content: s[:byteLen],
+                               Pos:     pos,
+                       }
+                       // Skip usual token handling because it doesn't
+                       // know how to deal with UTF-8 sequences.
+                       pos.Column = pos.Column + 1
+                       return token, size + byteLen, pos
+               }
+       }
+
+       // Here we assume that the token content contains no UTF-8 sequences,
+       // because we dealt with UTF-8 characters as a special case where
+       // necessary above.
+       size = size + len(token.Content)
+       pos.Column = pos.Column + len(token.Content)
+
+       return token, size, pos
+}
+
+// Returns the (possibly-empty) prefix of the given string that represents
+// a literal, followed by the token that marks the end of the literal.
+func scanLiteral(s string, startPos ast.Pos, nested bool) (string, *Token) {
+       litLen := 0
+       pos := startPos
+       var terminator *Token
+       for {
+
+               if litLen >= len(s) {
+                       if nested {
+                               // We've ended in the middle of a quoted string,
+                               // which means this token is actually invalid.
+                               return "", &Token{
+                                       Type:    INVALID,
+                                       Content: s,
+                                       Pos:     startPos,
+                               }
+                       }
+                       terminator = &Token{
+                               Type:    EOF,
+                               Content: "",
+                               Pos:     pos,
+                       }
+                       break
+               }
+
+               next := s[litLen]
+
+               if next == '$' && len(s) > litLen+1 {
+                       follow := s[litLen+1]
+
+                       if follow == '{' {
+                               terminator = &Token{
+                                       Type:    BEGIN,
+                                       Content: s[litLen : litLen+2],
+                                       Pos:     pos,
+                               }
+                               pos.Column = pos.Column + 2
+                               break
+                       } else if follow == '$' {
+                               // Double-$ escapes the special processing of $,
+                               // so we will consume both characters here.
+                               pos.Column = pos.Column + 2
+                               litLen = litLen + 2
+                               continue
+                       }
+               }
+
+               // special handling that applies only to quoted strings
+               if nested {
+                       if next == '"' {
+                               terminator = &Token{
+                                       Type:    CQUOTE,
+                                       Content: s[litLen : litLen+1],
+                                       Pos:     pos,
+                               }
+                               pos.Column = pos.Column + 1
+                               break
+                       }
+
+                       // Escaped quote marks do not terminate the string.
+                       //
+                       // All we do here in the scanner is avoid terminating a string
+                       // due to an escaped quote. The parser is responsible for the
+                       // full handling of escape sequences, since it's able to produce
+                       // better error messages than we can produce in here.
+                       if next == '\\' && len(s) > litLen+1 {
+                               follow := s[litLen+1]
+
+                               if follow == '"' {
+                                       // \" escapes the special processing of ",
+                                       // so we will consume both characters here.
+                                       pos.Column = pos.Column + 2
+                                       litLen = litLen + 2
+                                       continue
+                               }
+                       }
+               }
+
+               if next == '\n' {
+                       pos.Column = 1
+                       pos.Line++
+                       litLen++
+               } else {
+                       pos.Column++
+
+                       // "Column" measures runes, so we need to actually consume
+                       // a valid UTF-8 character here.
+                       _, size := utf8.DecodeRuneInString(s[litLen:])
+                       litLen = litLen + size
+               }
+
+       }
+
+       return s[:litLen], terminator
+}
+
+// scanNumber returns the extent of the prefix of the string that represents
+// a valid number, along with what type of number it represents: INT or FLOAT.
+//
+// scanNumber does only basic character analysis: numbers consist of digits
+// and periods, with at least one period signalling a FLOAT. It's the parser's
+// responsibility to validate the form and range of the number, such as ensuring
+// that a FLOAT actually contains only one period, etc.
+func scanNumber(s string) (string, TokenType) {
+       period := -1
+       byteLen := 0
+       numType := INTEGER
+       for {
+               if byteLen >= len(s) {
+                       break
+               }
+
+               next := s[byteLen]
+               if next != '.' && (next < '0' || next > '9') {
+                       // If our last value was a period, then we're not a float,
+                       // we're just an integer that ends in a period.
+                       if period == byteLen-1 {
+                               byteLen--
+                               numType = INTEGER
+                       }
+
+                       break
+               }
+
+               if next == '.' {
+                       // If we've already seen a period, break out
+                       if period >= 0 {
+                               break
+                       }
+
+                       period = byteLen
+                       numType = FLOAT
+               }
+
+               byteLen++
+       }
+
+       return s[:byteLen], numType
+}
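+
+// Illustrative worked examples (not part of the upstream source):
+//
+//     scanNumber("123abc") -> "123", INTEGER
+//     scanNumber("1.5)")   -> "1.5", FLOAT
+//     scanNumber("10.x")   -> "10",  INTEGER (trailing period not consumed)
+//     scanNumber("1.2.3")  -> "1.2", FLOAT   (stops at the second period)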
+
+// scanIdentifier returns the extent of the prefix of the string that
+// represents a valid identifier, along with the length of that prefix
+// in runes.
+//
+// Identifiers may contain utf8-encoded non-Latin letters, which will
+// cause the returned "rune length" to be shorter than the byte length
+// of the returned string.
+func scanIdentifier(s string) (string, int) {
+       byteLen := 0
+       runeLen := 0
+       for {
+               if byteLen >= len(s) {
+                       break
+               }
+
+               nextRune, size := utf8.DecodeRuneInString(s[byteLen:])
+               if !(nextRune == '_' ||
+                       nextRune == '-' ||
+                       nextRune == '.' ||
+                       nextRune == '*' ||
+                       unicode.IsNumber(nextRune) ||
+                       unicode.IsLetter(nextRune) ||
+                       unicode.IsMark(nextRune)) {
+                       break
+               }
+
+               // If we reach a star, it must be between periods to be part
+               // of the same identifier.
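+               // (scanIdentifier is only called on strings that begin with
+               // an identifier character, so byteLen > 0 whenever a star is
+               // seen and the s[byteLen-1] lookbehind is safe.)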
+               if nextRune == '*' && s[byteLen-1] != '.' {
+                       break
+               }
+
+               // If our previous character was a star, then the current must
+               // be period. Otherwise, undo that and exit.
+               if byteLen > 0 && s[byteLen-1] == '*' && nextRune != '.' {
+                       byteLen--
+                       if s[byteLen-1] == '.' {
+                               byteLen--
+                       }
+
+                       break
+               }
+
+               byteLen = byteLen + size
+               runeLen = runeLen + 1
+       }
+
+       return s[:byteLen], runeLen
+}
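+
+// Illustrative worked examples (not part of the upstream source), including
+// the splat rule enforced above, where a star must sit between periods:
+//
+//     scanIdentifier("foo.bar ")   -> "foo.bar",  7 runes
+//     scanIdentifier("aws.*.id+1") -> "aws.*.id", 8 runes
+//     scanIdentifier("foo*bar")    -> "foo",      3 runes (star not after .)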
+
+// byteIsSpace implements a restrictive interpretation of spaces that includes
+// only what's valid inside interpolation sequences: spaces, tabs, carriage
+// returns, and newlines.
+func byteIsSpace(b byte) bool {
+       switch b {
+       case ' ', '\t', '\r', '\n':
+               return true
+       default:
+               return false
+       }
+}
+
+// stringStartsWithIdentifier returns true if the given string begins with
+// a character that is a legal start of an identifier: an underscore or
+// any character that Unicode considers to be a letter.
+func stringStartsWithIdentifier(s string) bool {
+       if len(s) == 0 {
+               return false
+       }
+
+       first := s[0]
+
+       // Easy ASCII cases first
+       if (first >= 'a' && first <= 'z') || (first >= 'A' && first <= 'Z') || first == '_' {
+               return true
+       }
+
+       // If our first byte begins a UTF-8 sequence then the sequence might
+       // be a unicode letter.
+       if utf8.RuneStart(first) {
+               firstRune, _ := utf8.DecodeRuneInString(s)
+               if unicode.IsLetter(firstRune) {
+                       return true
+               }
+       }
+
+       return false
+}
diff --git a/vendor/github.com/hashicorp/hil/scanner/token.go b/vendor/github.com/hashicorp/hil/scanner/token.go
new file mode 100644 (file)
index 0000000..b6c82ae
--- /dev/null
@@ -0,0 +1,105 @@
+package scanner
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/hil/ast"
+)
+
+type Token struct {
+       Type    TokenType
+       Content string
+       Pos     ast.Pos
+}
+
+//go:generate stringer -type=TokenType
+type TokenType rune
+
+const (
+       // Raw string data outside of ${ .. } sequences
+       LITERAL TokenType = 'o'
+
+       // STRING is like a LITERAL but it's inside a quoted string
+       // within a ${ ... } sequence, and so it can contain backslash
+       // escaping.
+       STRING TokenType = 'S'
+
+       // Other Literals
+       INTEGER TokenType = 'I'
+       FLOAT   TokenType = 'F'
+       BOOL    TokenType = 'B'
+
+       BEGIN    TokenType = '$' // actually "${"
+       END      TokenType = '}'
+       OQUOTE   TokenType = '“' // Opening quote of a nested quoted sequence
+       CQUOTE   TokenType = '”' // Closing quote of a nested quoted sequence
+       OPAREN   TokenType = '('
+       CPAREN   TokenType = ')'
+       OBRACKET TokenType = '['
+       CBRACKET TokenType = ']'
+       COMMA    TokenType = ','
+
+       IDENTIFIER TokenType = 'i'
+
+       PERIOD  TokenType = '.'
+       PLUS    TokenType = '+'
+       MINUS   TokenType = '-'
+       STAR    TokenType = '*'
+       SLASH   TokenType = '/'
+       PERCENT TokenType = '%'
+
+       AND  TokenType = '∧'
+       OR   TokenType = '∨'
+       BANG TokenType = '!'
+
+       EQUAL    TokenType = '='
+       NOTEQUAL TokenType = '≠'
+       GT       TokenType = '>'
+       LT       TokenType = '<'
+       GTE      TokenType = '≥'
+       LTE      TokenType = '≤'
+
+       QUESTION TokenType = '?'
+       COLON    TokenType = ':'
+
+       EOF TokenType = '␄'
+
+       // Produced for sequences that cannot be understood as valid tokens
+       // e.g. due to use of unrecognized punctuation.
+       INVALID TokenType = '�'
+)
+
+func (t *Token) String() string {
+       switch t.Type {
+       case EOF:
+               return "end of string"
+       case INVALID:
+               return fmt.Sprintf("invalid sequence %q", t.Content)
+       case INTEGER:
+               return fmt.Sprintf("integer %s", t.Content)
+       case FLOAT:
+               return fmt.Sprintf("float %s", t.Content)
+       case STRING:
+               return fmt.Sprintf("string %q", t.Content)
+       case LITERAL:
+               return fmt.Sprintf("literal %q", t.Content)
+       case OQUOTE:
+               return "opening quote"
+       case CQUOTE:
+               return "closing quote"
+       case AND:
+               return "&&"
+       case OR:
+               return "||"
+       case NOTEQUAL:
+               return "!="
+       case GTE:
+               return ">="
+       case LTE:
+               return "<="
+       default:
+               // The remaining token types have content that
+               // speaks for itself.
+               return fmt.Sprintf("%q", t.Content)
+       }
+}
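
A small sketch of how these token types render through Token.String (editorial; assumes the vendored package is importable at its canonical path):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil/scanner"
)

func main() {
	toks := []scanner.Token{
		{Type: scanner.INTEGER, Content: "42"},
		{Type: scanner.AND, Content: "&&"},
		{Type: scanner.IDENTIFIER, Content: "var.foo"},
	}
	for _, tok := range toks {
		fmt.Println(tok.String()) // integer 42, &&, "var.foo"
	}
}
```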
diff --git a/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go b/vendor/github.com/hashicorp/hil/scanner/tokentype_string.go
new file mode 100644 (file)
index 0000000..a602f5f
--- /dev/null
@@ -0,0 +1,51 @@
+// Code generated by "stringer -type=TokenType"; DO NOT EDIT
+
+package scanner
+
+import "fmt"
+
+const _TokenType_name = "BANGBEGINPERCENTOPARENCPARENSTARPLUSCOMMAMINUSPERIODSLASHCOLONLTEQUALGTQUESTIONBOOLFLOATINTEGERSTRINGOBRACKETCBRACKETIDENTIFIERLITERALENDOQUOTECQUOTEANDORNOTEQUALLTEGTEEOFINVALID"
+
+var _TokenType_map = map[TokenType]string{
+       33:    _TokenType_name[0:4],
+       36:    _TokenType_name[4:9],
+       37:    _TokenType_name[9:16],
+       40:    _TokenType_name[16:22],
+       41:    _TokenType_name[22:28],
+       42:    _TokenType_name[28:32],
+       43:    _TokenType_name[32:36],
+       44:    _TokenType_name[36:41],
+       45:    _TokenType_name[41:46],
+       46:    _TokenType_name[46:52],
+       47:    _TokenType_name[52:57],
+       58:    _TokenType_name[57:62],
+       60:    _TokenType_name[62:64],
+       61:    _TokenType_name[64:69],
+       62:    _TokenType_name[69:71],
+       63:    _TokenType_name[71:79],
+       66:    _TokenType_name[79:83],
+       70:    _TokenType_name[83:88],
+       73:    _TokenType_name[88:95],
+       83:    _TokenType_name[95:101],
+       91:    _TokenType_name[101:109],
+       93:    _TokenType_name[109:117],
+       105:   _TokenType_name[117:127],
+       111:   _TokenType_name[127:134],
+       125:   _TokenType_name[134:137],
+       8220:  _TokenType_name[137:143],
+       8221:  _TokenType_name[143:149],
+       8743:  _TokenType_name[149:152],
+       8744:  _TokenType_name[152:154],
+       8800:  _TokenType_name[154:162],
+       8804:  _TokenType_name[162:165],
+       8805:  _TokenType_name[165:168],
+       9220:  _TokenType_name[168:171],
+       65533: _TokenType_name[171:178],
+}
+
+func (i TokenType) String() string {
+       if str, ok := _TokenType_map[i]; ok {
+               return str
+       }
+       return fmt.Sprintf("TokenType(%d)", i)
+}
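
The generated map keys are simply the rune values of the constants above, so the Stringer yields readable names for debugging; a brief sketch (editorial):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil/scanner"
)

func main() {
	fmt.Println(scanner.BANG)           // BANG  (rune '!' == 33)
	fmt.Println(scanner.TokenType('∧')) // AND   (rune '∧' == 8743)
	fmt.Println(scanner.TokenType('Z')) // TokenType(90): not a defined token
}
```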
diff --git a/vendor/github.com/hashicorp/hil/transform_fixed.go b/vendor/github.com/hashicorp/hil/transform_fixed.go
new file mode 100644 (file)
index 0000000..e69df29
--- /dev/null
@@ -0,0 +1,29 @@
+package hil
+
+import (
+       "github.com/hashicorp/hil/ast"
+)
+
+// FixedValueTransform transforms an AST to return a fixed value for
+// all interpolations; i.e. you can make "hi ${anything}" always
+// turn into "hi foo".
+//
+// The primary use case for this is for config validations where you can
+// verify that interpolations result in a certain type of string.
+func FixedValueTransform(root ast.Node, value *ast.LiteralNode) ast.Node {
+       // We visit the nodes in top-down order
+       result := root
+       switch n := result.(type) {
+       case *ast.Output:
+               for i, v := range n.Exprs {
+                       n.Exprs[i] = FixedValueTransform(v, value)
+               }
+       case *ast.LiteralNode:
+               // We keep it as-is
+       default:
+               // Anything else we replace
+               result = value
+       }
+
+       return result
+}
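
A sketch of the validation use case described above (editorial; assumes hil.Parse and the ast.LiteralNode field names, Value and Typex, from this vintage of the library):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil"
	"github.com/hashicorp/hil/ast"
)

func main() {
	root, err := hil.Parse("hi ${var.anything}")
	if err != nil {
		panic(err)
	}

	// Force every interpolation to the literal "foo".
	fixed := &ast.LiteralNode{Value: "foo", Typex: ast.TypeString}
	root = hil.FixedValueTransform(root, fixed)

	// Evaluating root now behaves as if the input were "hi foo",
	// which lets a validator check the resulting string's shape.
	fmt.Printf("%T\n", root)
}
```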
diff --git a/vendor/github.com/hashicorp/hil/walk.go b/vendor/github.com/hashicorp/hil/walk.go
new file mode 100644 (file)
index 0000000..0ace830
--- /dev/null
@@ -0,0 +1,266 @@
+package hil
+
+import (
+       "fmt"
+       "reflect"
+       "strings"
+
+       "github.com/hashicorp/hil/ast"
+       "github.com/mitchellh/reflectwalk"
+)
+
+// WalkFn is the type of function to pass to Walk. Modify fields within
+// WalkData to control whether replacement happens.
+type WalkFn func(*WalkData) error
+
+// WalkData is the structure passed to the callback of the Walk function.
+//
+// This structure contains data passed in as well as fields that are expected
+// to be written by the caller as a result. Please see the documentation for
+// each field for more information.
+type WalkData struct {
+       // Root is the parsed root of this HIL program
+       Root ast.Node
+
+       // Location is the location within the structure where this
+       // value was found. This can be used to modify behavior within
+       // slices and so on.
+       Location reflectwalk.Location
+
+       // The below two values must be set by the callback to have any effect.
+       //
+       // Replace, if true, will replace the value in the structure with
+       // ReplaceValue. It is up to the caller to make sure this is a string.
+       Replace      bool
+       ReplaceValue string
+}
+
+// Walk walks an arbitrary Go structure, parses any string it finds as a
+// HIL program, and calls the callback cb to determine what to replace it
+// with.
+//
+// This function is very useful for arbitrary HIL program interpolation
+// across a complex configuration structure. Due to the heavy use of
+// reflection in this function, it is recommended to write many unit tests
+// with your typical configuration structures to help mitigate the risk
+// of panics.
+func Walk(v interface{}, cb WalkFn) error {
+       walker := &interpolationWalker{F: cb}
+       return reflectwalk.Walk(v, walker)
+}
+
+// interpolationWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// execute a callback for an interpolation.
+type interpolationWalker struct {
+       F WalkFn
+
+       key         []string
+       lastValue   reflect.Value
+       loc         reflectwalk.Location
+       cs          []reflect.Value
+       csKey       []reflect.Value
+       csData      interface{}
+       sliceIndex  int
+       unknownKeys []string
+}
+
+func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
+       w.loc = loc
+       return nil
+}
+
+func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
+       w.loc = reflectwalk.None
+
+       switch loc {
+       case reflectwalk.Map:
+               w.cs = w.cs[:len(w.cs)-1]
+       case reflectwalk.MapValue:
+               w.key = w.key[:len(w.key)-1]
+               w.csKey = w.csKey[:len(w.csKey)-1]
+       case reflectwalk.Slice:
+               // Split any values that need to be split
+               w.splitSlice()
+               w.cs = w.cs[:len(w.cs)-1]
+       case reflectwalk.SliceElem:
+               w.csKey = w.csKey[:len(w.csKey)-1]
+       }
+
+       return nil
+}
+
+func (w *interpolationWalker) Map(m reflect.Value) error {
+       w.cs = append(w.cs, m)
+       return nil
+}
+
+func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
+       w.csData = k
+       w.csKey = append(w.csKey, k)
+       w.key = append(w.key, k.String())
+       w.lastValue = v
+       return nil
+}
+
+func (w *interpolationWalker) Slice(s reflect.Value) error {
+       w.cs = append(w.cs, s)
+       return nil
+}
+
+func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
+       w.csKey = append(w.csKey, reflect.ValueOf(i))
+       w.sliceIndex = i
+       return nil
+}
+
+func (w *interpolationWalker) Primitive(v reflect.Value) error {
+       setV := v
+
+       // We only care about strings
+       if v.Kind() == reflect.Interface {
+               v = v.Elem()
+       }
+       if v.Kind() != reflect.String {
+               return nil
+       }
+
+       astRoot, err := Parse(v.String())
+       if err != nil {
+               return err
+       }
+
+       // If the AST we got is just a literal string value with the same
+       // value then we ignore it. We have to check if it's the same value
+       // because it is possible to input a string, get out a string, and
+       // have it be different. For example: "foo-$${bar}" turns into
+       // "foo-${bar}"
+       if n, ok := astRoot.(*ast.LiteralNode); ok {
+               if s, ok := n.Value.(string); ok && s == v.String() {
+                       return nil
+               }
+       }
+
+       if w.F == nil {
+               return nil
+       }
+
+       data := WalkData{Root: astRoot, Location: w.loc}
+       if err := w.F(&data); err != nil {
+               return fmt.Errorf(
+                       "%s in:\n\n%s",
+                       err, v.String())
+       }
+
+       if data.Replace {
+               /*
+                       if remove {
+                               w.removeCurrent()
+                               return nil
+                       }
+               */
+
+               resultVal := reflect.ValueOf(data.ReplaceValue)
+               switch w.loc {
+               case reflectwalk.MapKey:
+                       m := w.cs[len(w.cs)-1]
+
+                       // Delete the old value
+                       var zero reflect.Value
+                       m.SetMapIndex(w.csData.(reflect.Value), zero)
+
+                       // Set the new key with the existing value
+                       m.SetMapIndex(resultVal, w.lastValue)
+
+                       // Set the key to be the new key
+                       w.csData = resultVal
+               case reflectwalk.MapValue:
+                       // If we're in a map, then the only way to set a map value is
+                       // to set it directly.
+                       m := w.cs[len(w.cs)-1]
+                       mk := w.csData.(reflect.Value)
+                       m.SetMapIndex(mk, resultVal)
+               default:
+                       // Otherwise, we should be addressable
+                       setV.Set(resultVal)
+               }
+       }
+
+       return nil
+}
+
+func (w *interpolationWalker) removeCurrent() {
+       // Append the key to the unknown keys
+       w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
+
+       for i := 1; i <= len(w.cs); i++ {
+               c := w.cs[len(w.cs)-i]
+               switch c.Kind() {
+               case reflect.Map:
+                       // Zero value so that we delete the map key
+                       var val reflect.Value
+
+                       // Get the key and delete it
+                       k := w.csData.(reflect.Value)
+                       c.SetMapIndex(k, val)
+                       return
+               }
+       }
+
+       panic("No container found for removeCurrent")
+}
+
+func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
+       c := w.cs[len(w.cs)-2]
+       switch c.Kind() {
+       case reflect.Map:
+               // Get the key and delete it
+               k := w.csKey[len(w.csKey)-1]
+               c.SetMapIndex(k, v)
+       }
+}
+
+func (w *interpolationWalker) splitSlice() {
+       // Get the []interface{} slice so we can do some operations on
+       // it without dealing with reflection. We'll document each step
+       // here to be clear.
+       var s []interface{}
+       raw := w.cs[len(w.cs)-1]
+       switch v := raw.Interface().(type) {
+       case []interface{}:
+               s = v
+       case []map[string]interface{}:
+               return
+       default:
+               panic("Unknown kind: " + raw.Kind().String())
+       }
+
+       // Check if we have any elements that we need to split. If not, then
+       // just return since we're done. Note that split is never set to true
+       // here, so as written the function always returns at this point.
+       split := false
+       if !split {
+               return
+       }
+
+       // Make a new result slice that is twice the capacity to fit our growth.
+       result := make([]interface{}, 0, len(s)*2)
+
+       // Go over each element of the original slice and start building up
+       // the resulting slice by splitting where we have to.
+       for _, v := range s {
+               sv, ok := v.(string)
+               if !ok {
+                       // Not a string, so just set it
+                       result = append(result, v)
+                       continue
+               }
+
+               // It's a plain string; append it unchanged
+               result = append(result, sv)
+       }
+
+       // Our slice is now done; replace the original slice in the
+       // containing structure with the new one.
+       w.replaceCurrent(reflect.ValueOf(result))
+}
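
A sketch of Walk replacing every interpolated string in a structure, per the WalkData contract above (editorial example):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/hil"
)

func main() {
	cfg := map[string]interface{}{
		"greeting": "hi ${var.name}",
		"plain":    "no interpolation here",
	}

	err := hil.Walk(cfg, func(d *hil.WalkData) error {
		// d.Root is the parsed HIL program for this string;
		// ask the walker to substitute a fixed placeholder.
		d.Replace = true
		d.ReplaceValue = "REDACTED"
		return nil
	})
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg["greeting"]) // REDACTED
	fmt.Println(cfg["plain"])    // unchanged: pure literals are skipped
}
```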
diff --git a/vendor/github.com/hashicorp/logutils/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE
new file mode 100644 (file)
index 0000000..c33dcc7
--- /dev/null
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md
new file mode 100644 (file)
index 0000000..49490ea
--- /dev/null
@@ -0,0 +1,36 @@
+# logutils
+
+logutils is a Go package that augments the standard library "log" package
+to make logging a bit more modern, without fragmenting the Go ecosystem
+with new logging packages.
+
+## The simplest thing that could possibly work
+
+Presumably your application already uses the default `log` package. To switch, you'll want your code to look like the following:
+
+```go
+package main
+
+import (
+       "log"
+       "os"
+
+       "github.com/hashicorp/logutils"
+)
+
+func main() {
+       filter := &logutils.LevelFilter{
+               Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"},
+               MinLevel: logutils.LogLevel("WARN"),
+               Writer: os.Stderr,
+       }
+       log.SetOutput(filter)
+
+       log.Print("[DEBUG] Debugging") // this will not print
+       log.Print("[WARN] Warning") // this will
+       log.Print("[ERROR] Erring") // and so will this
+       log.Print("Message I haven't updated") // and so will this
+}
+```
+
+This logs to standard error exactly like Go's standard logger. Any log messages you haven't converted to have a level will continue to print as before.
diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go
new file mode 100644 (file)
index 0000000..6381bf1
--- /dev/null
@@ -0,0 +1,81 @@
+// Package logutils augments the standard log package with levels.
+package logutils
+
+import (
+       "bytes"
+       "io"
+       "sync"
+)
+
+type LogLevel string
+
+// LevelFilter is an io.Writer that can be used with a logger; it filters
+// out log messages that aren't at least a certain level.
+//
+// Once the filter is in use somewhere, it is not safe to modify
+// the structure.
+type LevelFilter struct {
+       // Levels is the list of log levels, in increasing order of
+       // severity. Example might be: {"DEBUG", "WARN", "ERROR"}.
+       Levels []LogLevel
+
+       // MinLevel is the minimum level allowed through
+       MinLevel LogLevel
+
+       // The underlying io.Writer where log messages that pass the filter
+       // will be written.
+       Writer io.Writer
+
+       badLevels map[LogLevel]struct{}
+       once      sync.Once
+}
+
+// Check reports whether a given line would be allowed through the level
+// filter.
+func (f *LevelFilter) Check(line []byte) bool {
+       f.once.Do(f.init)
+
+       // Check for a log level
+       var level LogLevel
+       x := bytes.IndexByte(line, '[')
+       if x >= 0 {
+               y := bytes.IndexByte(line[x:], ']')
+               if y >= 0 {
+                       level = LogLevel(line[x+1 : x+y])
+               }
+       }
+
+       _, ok := f.badLevels[level]
+       return !ok
+}
+
+func (f *LevelFilter) Write(p []byte) (n int, err error) {
+       // Note in general that io.Writer can receive any byte sequence
+       // to write, but the "log" package always guarantees that we only
+       // get a single line. We use that as a slight optimization within
+       // this method, assuming we're dealing with a single, complete line
+       // of log data.
+
+       if !f.Check(p) {
+               return len(p), nil
+       }
+
+       return f.Writer.Write(p)
+}
+
+// SetMinLevel is used to update the minimum log level
+func (f *LevelFilter) SetMinLevel(min LogLevel) {
+       f.MinLevel = min
+       f.init()
+}
+
+func (f *LevelFilter) init() {
+       badLevels := make(map[LogLevel]struct{})
+       for _, level := range f.Levels {
+               if level == f.MinLevel {
+                       break
+               }
+               badLevels[level] = struct{}{}
+       }
+       f.badLevels = badLevels
+}
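
Check and SetMinLevel can also be exercised directly, e.g. to verify a filter before wiring it into log.SetOutput; a sketch (editorial):

```go
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/logutils"
)

func main() {
	filter := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "WARN", "ERROR"},
		MinLevel: logutils.LogLevel("WARN"),
		Writer:   os.Stderr,
	}

	fmt.Println(filter.Check([]byte("[DEBUG] noisy"))) // false: below WARN
	fmt.Println(filter.Check([]byte("[ERROR] oops")))  // true
	fmt.Println(filter.Check([]byte("no level here"))) // true: unleveled lines pass

	filter.SetMinLevel("ERROR")
	fmt.Println(filter.Check([]byte("[WARN] hmm"))) // now false
}
```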
diff --git a/vendor/github.com/hashicorp/terraform/LICENSE b/vendor/github.com/hashicorp/terraform/LICENSE
new file mode 100644 (file)
index 0000000..c33dcc7
--- /dev/null
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/terraform/config/append.go b/vendor/github.com/hashicorp/terraform/config/append.go
new file mode 100644 (file)
index 0000000..5f4e89e
--- /dev/null
@@ -0,0 +1,86 @@
+package config
+
+// Append appends one configuration to another.
+//
+// Append assumes that both configurations will not have
+// conflicting variables, resources, etc. If they do, the
+// problems will be caught in the validation phase.
+//
+// It is possible that c1, c2 on their own are not valid. For
+// example, a resource in c2 may reference a variable in c1. But
+// together, they would be valid.
+func Append(c1, c2 *Config) (*Config, error) {
+       c := new(Config)
+
+       // Append unknown keys, but keep them unique since it is a set
+       unknowns := make(map[string]struct{})
+       for _, k := range c1.unknownKeys {
+               _, present := unknowns[k]
+               if !present {
+                       unknowns[k] = struct{}{}
+                       c.unknownKeys = append(c.unknownKeys, k)
+               }
+       }
+
+       for _, k := range c2.unknownKeys {
+               _, present := unknowns[k]
+               if !present {
+                       unknowns[k] = struct{}{}
+                       c.unknownKeys = append(c.unknownKeys, k)
+               }
+       }
+
+       c.Atlas = c1.Atlas
+       if c2.Atlas != nil {
+               c.Atlas = c2.Atlas
+       }
+
+       // merge Terraform blocks
+       if c1.Terraform != nil {
+               c.Terraform = c1.Terraform
+               if c2.Terraform != nil {
+                       c.Terraform.Merge(c2.Terraform)
+               }
+       } else {
+               c.Terraform = c2.Terraform
+       }
+
+       if len(c1.Modules) > 0 || len(c2.Modules) > 0 {
+               c.Modules = make(
+                       []*Module, 0, len(c1.Modules)+len(c2.Modules))
+               c.Modules = append(c.Modules, c1.Modules...)
+               c.Modules = append(c.Modules, c2.Modules...)
+       }
+
+       if len(c1.Outputs) > 0 || len(c2.Outputs) > 0 {
+               c.Outputs = make(
+                       []*Output, 0, len(c1.Outputs)+len(c2.Outputs))
+               c.Outputs = append(c.Outputs, c1.Outputs...)
+               c.Outputs = append(c.Outputs, c2.Outputs...)
+       }
+
+       if len(c1.ProviderConfigs) > 0 || len(c2.ProviderConfigs) > 0 {
+               c.ProviderConfigs = make(
+                       []*ProviderConfig,
+                       0, len(c1.ProviderConfigs)+len(c2.ProviderConfigs))
+               c.ProviderConfigs = append(c.ProviderConfigs, c1.ProviderConfigs...)
+               c.ProviderConfigs = append(c.ProviderConfigs, c2.ProviderConfigs...)
+       }
+
+       if len(c1.Resources) > 0 || len(c2.Resources) > 0 {
+               c.Resources = make(
+                       []*Resource,
+                       0, len(c1.Resources)+len(c2.Resources))
+               c.Resources = append(c.Resources, c1.Resources...)
+               c.Resources = append(c.Resources, c2.Resources...)
+       }
+
+       if len(c1.Variables) > 0 || len(c2.Variables) > 0 {
+               c.Variables = make(
+                       []*Variable, 0, len(c1.Variables)+len(c2.Variables))
+               c.Variables = append(c.Variables, c1.Variables...)
+               c.Variables = append(c.Variables, c2.Variables...)
+       }
+
+       return c, nil
+}
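
A minimal sketch of Append merging two partial configurations (editorial; field names as declared in config.go below):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/config"
)

func main() {
	// Two partial configurations, e.g. from two files in one module.
	c1 := &config.Config{
		Variables: []*config.Variable{{Name: "region"}},
	}
	c2 := &config.Config{
		Outputs: []*config.Output{{Name: "address"}},
	}

	merged, err := config.Append(c1, c2)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(merged.Variables), len(merged.Outputs)) // 1 1
}
```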
diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go
new file mode 100644 (file)
index 0000000..9a764ac
--- /dev/null
@@ -0,0 +1,1096 @@
+// Package config is responsible for loading and validating the
+// configuration.
+package config
+
+import (
+       "fmt"
+       "regexp"
+       "strconv"
+       "strings"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/hil"
+       "github.com/hashicorp/hil/ast"
+       "github.com/hashicorp/terraform/helper/hilmapstructure"
+       "github.com/mitchellh/reflectwalk"
+)
+
+// NameRegexp is the regular expression that all names (modules, providers,
+// resources, etc.) must follow.
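+// For example, "aws_instance", "_hidden" and "web-01" all match, while
+// "-bad" and the empty string do not.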
+var NameRegexp = regexp.MustCompile(`(?i)\A[A-Z0-9_][A-Z0-9\-\_]*\z`)
+
+// Config is the configuration that comes from loading a collection
+// of Terraform templates.
+type Config struct {
+       // Dir is the path to the directory where this configuration was
+       // loaded from. If it is blank, this configuration wasn't loaded from
+       // any meaningful directory.
+       Dir string
+
+       Terraform       *Terraform
+       Atlas           *AtlasConfig
+       Modules         []*Module
+       ProviderConfigs []*ProviderConfig
+       Resources       []*Resource
+       Variables       []*Variable
+       Outputs         []*Output
+
+       // The fields below can be filled in by loaders for validation
+       // purposes.
+       unknownKeys []string
+}
+
+// AtlasConfig is the configuration for building in HashiCorp's Atlas.
+type AtlasConfig struct {
+       Name    string
+       Include []string
+       Exclude []string
+}
+
+// Module is a module used within a configuration.
+//
+// This does not represent a module itself, this represents a module
+// call-site within an existing configuration.
+type Module struct {
+       Name      string
+       Source    string
+       RawConfig *RawConfig
+}
+
+// ProviderConfig is the configuration for a resource provider.
+//
+// For example, Terraform needs to set the AWS access keys for the AWS
+// resource provider.
+type ProviderConfig struct {
+       Name      string
+       Alias     string
+       RawConfig *RawConfig
+}
+
+// Resource represents a single Terraform resource in the configuration.
+// A Terraform resource is something that supports some or all of the
+// usual "create, read, update, delete" operations, depending on
+// the given Mode.
+type Resource struct {
+       Mode         ResourceMode // which operations the resource supports
+       Name         string
+       Type         string
+       RawCount     *RawConfig
+       RawConfig    *RawConfig
+       Provisioners []*Provisioner
+       Provider     string
+       DependsOn    []string
+       Lifecycle    ResourceLifecycle
+}
+
+// Copy returns a copy of this Resource. Helpful for avoiding shared
+// config pointers across multiple pieces of the graph that need to do
+// interpolation.
+func (r *Resource) Copy() *Resource {
+       n := &Resource{
+               Mode:         r.Mode,
+               Name:         r.Name,
+               Type:         r.Type,
+               RawCount:     r.RawCount.Copy(),
+               RawConfig:    r.RawConfig.Copy(),
+               Provisioners: make([]*Provisioner, 0, len(r.Provisioners)),
+               Provider:     r.Provider,
+               DependsOn:    make([]string, len(r.DependsOn)),
+               Lifecycle:    *r.Lifecycle.Copy(),
+       }
+       for _, p := range r.Provisioners {
+               n.Provisioners = append(n.Provisioners, p.Copy())
+       }
+       copy(n.DependsOn, r.DependsOn)
+       return n
+}
+
+// ResourceLifecycle is used to store the lifecycle tuning parameters
+// that allow customized resource behavior.
+type ResourceLifecycle struct {
+       CreateBeforeDestroy bool     `mapstructure:"create_before_destroy"`
+       PreventDestroy      bool     `mapstructure:"prevent_destroy"`
+       IgnoreChanges       []string `mapstructure:"ignore_changes"`
+}
+
+// Copy returns a copy of this ResourceLifecycle
+func (r *ResourceLifecycle) Copy() *ResourceLifecycle {
+       n := &ResourceLifecycle{
+               CreateBeforeDestroy: r.CreateBeforeDestroy,
+               PreventDestroy:      r.PreventDestroy,
+               IgnoreChanges:       make([]string, len(r.IgnoreChanges)),
+       }
+       copy(n.IgnoreChanges, r.IgnoreChanges)
+       return n
+}
+
+// Provisioner is a configured provisioner step on a resource.
+type Provisioner struct {
+       Type      string
+       RawConfig *RawConfig
+       ConnInfo  *RawConfig
+
+       When      ProvisionerWhen
+       OnFailure ProvisionerOnFailure
+}
+
+// Copy returns a copy of this Provisioner
+func (p *Provisioner) Copy() *Provisioner {
+       return &Provisioner{
+               Type:      p.Type,
+               RawConfig: p.RawConfig.Copy(),
+               ConnInfo:  p.ConnInfo.Copy(),
+               When:      p.When,
+               OnFailure: p.OnFailure,
+       }
+}
+
+// Variable is a variable defined within the configuration.
+type Variable struct {
+       Name         string
+       DeclaredType string `mapstructure:"type"`
+       Default      interface{}
+       Description  string
+}
+
+// Output is an output defined within the configuration. An output is
+// data that Terraform highlights to the user when an operation finishes.
+// An output marked Sensitive will be displayed in a masked form after
+// apply, but will still be available in the state.
+type Output struct {
+       Name        string
+       DependsOn   []string
+       Description string
+       Sensitive   bool
+       RawConfig   *RawConfig
+}
+
+// VariableType is the type of value a variable holds, and is returned
+// by the Type() function on variables.
+type VariableType byte
+
+const (
+       VariableTypeUnknown VariableType = iota
+       VariableTypeString
+       VariableTypeList
+       VariableTypeMap
+)
+
+// Printable returns a human-readable name for this variable type.
+func (v VariableType) Printable() string {
+       switch v {
+       case VariableTypeString:
+               return "string"
+       case VariableTypeMap:
+               return "map"
+       case VariableTypeList:
+               return "list"
+       default:
+               return "unknown"
+       }
+}
+
+// ProviderConfigName returns the name of the provider configuration in
+// the given list that matches the given resource type, preferring the
+// longest matching name.
+func ProviderConfigName(t string, pcs []*ProviderConfig) string {
+       lk := ""
+       for _, v := range pcs {
+               k := v.Name
+               if strings.HasPrefix(t, k) && len(k) > len(lk) {
+                       lk = k
+               }
+       }
+
+       return lk
+}
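+
+// For example (illustrative): given provider configs named "aws" and
+// "google", ProviderConfigName("aws_instance", pcs) returns "aws"; if no
+// provider name prefixes the resource type, the empty string is returned.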
+
+// Id returns a unique identifier for this module.
+func (r *Module) Id() string {
+       return r.Name
+}
+
+// Count returns the count of this resource.
+func (r *Resource) Count() (int, error) {
+       raw := r.RawCount.Value()
+       count, ok := raw.(string)
+       if !ok {
+               return 0, fmt.Errorf(
+                       "expected count to be a string or int, got %T", raw)
+       }
+
+       v, err := strconv.ParseInt(count, 0, 0)
+       if err != nil {
+               return 0, err
+       }
+
+       return int(v), nil
+}
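+
+// For example (illustrative): a RawCount whose interpolated value is the
+// string "3" yields 3, while a non-numeric value such as "many" returns a
+// parse error. ParseInt is called with base 0, so prefixed forms like
+// "0x10" are accepted as well.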
+
+// Id returns a unique identifier for this resource.
+func (r *Resource) Id() string {
+       switch r.Mode {
+       case ManagedResourceMode:
+               return fmt.Sprintf("%s.%s", r.Type, r.Name)
+       case DataResourceMode:
+               return fmt.Sprintf("data.%s.%s", r.Type, r.Name)
+       default:
+               panic(fmt.Errorf("unknown resource mode %s", r.Mode))
+       }
+}
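+
+// For example (illustrative): a managed resource of type "aws_instance"
+// named "web" has the id "aws_instance.web", while a data resource of
+// type "aws_ami" named "ubuntu" has the id "data.aws_ami.ubuntu".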
+
+// Validate does some basic semantic checking of the configuration.
+func (c *Config) Validate() error {
+       if c == nil {
+               return nil
+       }
+
+       var errs []error
+
+       for _, k := range c.unknownKeys {
+               errs = append(errs, fmt.Errorf(
+                       "Unknown root level key: %s", k))
+       }
+
+       // Validate the Terraform config
+       if tf := c.Terraform; tf != nil {
+               errs = append(errs, c.Terraform.Validate()...)
+       }
+
+       vars := c.InterpolatedVariables()
+       varMap := make(map[string]*Variable)
+       for _, v := range c.Variables {
+               if _, ok := varMap[v.Name]; ok {
+                       errs = append(errs, fmt.Errorf(
+                               "Variable '%s': duplicate found. Variable names must be unique.",
+                               v.Name))
+               }
+
+               varMap[v.Name] = v
+       }
+
+       for k := range varMap {
+               if !NameRegexp.MatchString(k) {
+                       errs = append(errs, fmt.Errorf(
+                               "variable %q: variable name must match regular expresion %s",
+                               k, NameRegexp))
+               }
+       }
+
+       for _, v := range c.Variables {
+               if v.Type() == VariableTypeUnknown {
+                       errs = append(errs, fmt.Errorf(
+                               "Variable '%s': must be a string or a map",
+                               v.Name))
+                       continue
+               }
+
+               interp := false
+               fn := func(n ast.Node) (interface{}, error) {
+                       // LiteralNode is a literal string (outside of a ${ ... } sequence).
+                       // interpolationWalker skips most of these. but in particular it
+                       // visits those that have escaped sequences (like $${foo}) as a
+                       // signal that *some* processing is required on this string. For
+                       // our purposes here though, this is fine and not an interpolation.
+                       if _, ok := n.(*ast.LiteralNode); !ok {
+                               interp = true
+                       }
+                       return "", nil
+               }
+
+               w := &interpolationWalker{F: fn}
+               if v.Default != nil {
+                       if err := reflectwalk.Walk(v.Default, w); err == nil {
+                               if interp {
+                                       errs = append(errs, fmt.Errorf(
+                                               "Variable '%s': cannot contain interpolations",
+                                               v.Name))
+                               }
+                       }
+               }
+       }
+
+       // Check for references to user variables that do not actually
+       // exist and record those errors.
+       for source, vs := range vars {
+               for _, v := range vs {
+                       uv, ok := v.(*UserVariable)
+                       if !ok {
+                               continue
+                       }
+
+                       if _, ok := varMap[uv.Name]; !ok {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: unknown variable referenced: '%s'. define it with 'variable' blocks",
+                                       source,
+                                       uv.Name))
+                       }
+               }
+       }
+
+       // Check that all count variables are valid.
+       for source, vs := range vars {
+               for _, rawV := range vs {
+                       switch v := rawV.(type) {
+                       case *CountVariable:
+                               if v.Type == CountValueInvalid {
+                                       errs = append(errs, fmt.Errorf(
+                                               "%s: invalid count variable: %s",
+                                               source,
+                                               v.FullKey()))
+                               }
+                       case *PathVariable:
+                               if v.Type == PathValueInvalid {
+                                       errs = append(errs, fmt.Errorf(
+                                               "%s: invalid path variable: %s",
+                                               source,
+                                               v.FullKey()))
+                               }
+                       }
+               }
+       }
+
+       // Check that providers aren't declared multiple times.
+       providerSet := make(map[string]struct{})
+       for _, p := range c.ProviderConfigs {
+               name := p.FullName()
+               if _, ok := providerSet[name]; ok {
+                       errs = append(errs, fmt.Errorf(
+                               "provider.%s: declared multiple times, you can only declare a provider once",
+                               name))
+                       continue
+               }
+
+               providerSet[name] = struct{}{}
+       }
+
+       // Check that all references to modules are valid
+       modules := make(map[string]*Module)
+       dupped := make(map[string]struct{})
+       for _, m := range c.Modules {
+               // Check for duplicates
+               if _, ok := modules[m.Id()]; ok {
+                       if _, ok := dupped[m.Id()]; !ok {
+                               dupped[m.Id()] = struct{}{}
+
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: module repeated multiple times",
+                                       m.Id()))
+                       }
+
+                       // Already seen this module, just skip it
+                       continue
+               }
+
+               modules[m.Id()] = m
+
+               // Check that the source has no interpolations
+               rc, err := NewRawConfig(map[string]interface{}{
+                       "root": m.Source,
+               })
+               if err != nil {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: module source error: %s",
+                               m.Id(), err))
+               } else if len(rc.Interpolations) > 0 {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: module source cannot contain interpolations",
+                               m.Id()))
+               }
+
+               // Check that the name matches our regexp
+               if !NameRegexp.Match([]byte(m.Name)) {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: module name can only contain letters, numbers, "+
+                                       "dashes, and underscores",
+                               m.Id()))
+               }
+
+               // Check that the configuration can all be strings, lists or maps
+               raw := make(map[string]interface{})
+               for k, v := range m.RawConfig.Raw {
+                       var strVal string
+                       if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
+                               raw[k] = strVal
+                               continue
+                       }
+
+                       var mapVal map[string]interface{}
+                       if err := hilmapstructure.WeakDecode(v, &mapVal); err == nil {
+                               raw[k] = mapVal
+                               continue
+                       }
+
+                       var sliceVal []interface{}
+                       if err := hilmapstructure.WeakDecode(v, &sliceVal); err == nil {
+                               raw[k] = sliceVal
+                               continue
+                       }
+
+                       errs = append(errs, fmt.Errorf(
+                               "%s: variable %s must be a string, list or map value",
+                               m.Id(), k))
+               }
+
+               // Check for invalid count variables
+               for _, v := range m.RawConfig.Variables {
+                       switch v.(type) {
+                       case *CountVariable:
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: count variables are only valid within resources", m.Name))
+                       case *SelfVariable:
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: self variables are only valid within resources", m.Name))
+                       }
+               }
+
+               // Update the raw configuration to only contain the string values
+               m.RawConfig, err = NewRawConfig(raw)
+               if err != nil {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: can't initialize configuration: %s",
+                               m.Id(), err))
+               }
+       }
+       dupped = nil
+
+       // Check that all variables for modules reference modules that
+       // exist.
+       for source, vs := range vars {
+               for _, v := range vs {
+                       mv, ok := v.(*ModuleVariable)
+                       if !ok {
+                               continue
+                       }
+
+                       if _, ok := modules[mv.Name]; !ok {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: unknown module referenced: %s",
+                                       source,
+                                       mv.Name))
+                       }
+               }
+       }
+
+       // Check that all references to resources are valid
+       resources := make(map[string]*Resource)
+       dupped = make(map[string]struct{})
+       for _, r := range c.Resources {
+               if _, ok := resources[r.Id()]; ok {
+                       if _, ok := dupped[r.Id()]; !ok {
+                               dupped[r.Id()] = struct{}{}
+
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: resource repeated multiple times",
+                                       r.Id()))
+                       }
+               }
+
+               resources[r.Id()] = r
+       }
+       dupped = nil
+
+       // Validate resources
+       for n, r := range resources {
+               // Verify count variables
+               for _, v := range r.RawCount.Variables {
+                       switch v.(type) {
+                       case *CountVariable:
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: resource count can't reference count variable: %s",
+                                       n,
+                                       v.FullKey()))
+                       case *SimpleVariable:
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: resource count can't reference variable: %s",
+                                       n,
+                                       v.FullKey()))
+
+                       // Good
+                       case *ModuleVariable:
+                       case *ResourceVariable:
+                       case *TerraformVariable:
+                       case *UserVariable:
+
+                       default:
+                               errs = append(errs, fmt.Errorf(
+                                       "Internal error. Unknown type in count var in %s: %T",
+                                       n, v))
+                       }
+               }
+
+               // Interpolate with a fixed number to verify that it's a number.
+               r.RawCount.interpolate(func(root ast.Node) (interface{}, error) {
+                       // Execute the node but transform the AST so that it returns
+                       // a fixed value of "5" for all interpolations.
+                       result, err := hil.Eval(
+                               hil.FixedValueTransform(
+                                       root, &ast.LiteralNode{Value: "5", Typex: ast.TypeString}),
+                               nil)
+                       if err != nil {
+                               return "", err
+                       }
+
+                       return result.Value, nil
+               })
+               _, err := strconv.ParseInt(r.RawCount.Value().(string), 0, 0)
+               if err != nil {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: resource count must be an integer",
+                               n))
+               }
+               r.RawCount.init()
+
+               // Validate DependsOn
+               errs = append(errs, c.validateDependsOn(n, r.DependsOn, resources, modules)...)
+
+               // Verify provisioners
+               for _, p := range r.Provisioners {
+                       // This validation checks that there are no splat variables
+                       // referencing ourselves. This currently is not allowed.
+
+                       for _, v := range p.ConnInfo.Variables {
+                               rv, ok := v.(*ResourceVariable)
+                               if !ok {
+                                       continue
+                               }
+
+                               if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
+                                       errs = append(errs, fmt.Errorf(
+                                               "%s: connection info cannot contain splat variable "+
+                                                       "referencing itself", n))
+                                       break
+                               }
+                       }
+
+                       for _, v := range p.RawConfig.Variables {
+                               rv, ok := v.(*ResourceVariable)
+                               if !ok {
+                                       continue
+                               }
+
+                               if rv.Multi && rv.Index == -1 && rv.Type == r.Type && rv.Name == r.Name {
+                                       errs = append(errs, fmt.Errorf(
+                                               "%s: connection info cannot contain splat variable "+
+                                                       "referencing itself", n))
+                                       break
+                               }
+                       }
+
+                       // Check for invalid when/onFailure values. Though this should be
+                       // picked up by the loader, we check here just in case.
+                       if p.When == ProvisionerWhenInvalid {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: provisioner 'when' value is invalid", n))
+                       }
+                       if p.OnFailure == ProvisionerOnFailureInvalid {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: provisioner 'on_failure' value is invalid", n))
+                       }
+               }
+
+               // Verify ignore_changes contains valid entries
+               for _, v := range r.Lifecycle.IgnoreChanges {
+                       if strings.Contains(v, "*") && v != "*" {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: ignore_changes does not support using a partial string "+
+                                               "together with a wildcard: %s", n, v))
+                       }
+               }
+
+               // Verify ignore_changes has no interpolations
+               rc, err := NewRawConfig(map[string]interface{}{
+                       "root": r.Lifecycle.IgnoreChanges,
+               })
+               if err != nil {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: lifecycle ignore_changes error: %s",
+                               n, err))
+               } else if len(rc.Interpolations) > 0 {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: lifecycle ignore_changes cannot contain interpolations",
+                               n))
+               }
+
+               // If it is a data source then it can't have provisioners
+               if r.Mode == DataResourceMode {
+                       if _, ok := r.RawConfig.Raw["provisioner"]; ok {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: data sources cannot have provisioners",
+                                       n))
+                       }
+               }
+       }
+
+       for source, vs := range vars {
+               for _, v := range vs {
+                       rv, ok := v.(*ResourceVariable)
+                       if !ok {
+                               continue
+                       }
+
+                       id := rv.ResourceId()
+                       if _, ok := resources[id]; !ok {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: unknown resource '%s' referenced in variable %s",
+                                       source,
+                                       id,
+                                       rv.FullKey()))
+                               continue
+                       }
+               }
+       }
+
+       // Check that all outputs are valid
+       {
+               found := make(map[string]struct{})
+               for _, o := range c.Outputs {
+                       // Verify the output is new
+                       if _, ok := found[o.Name]; ok {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: duplicate output. output names must be unique.",
+                                       o.Name))
+                               continue
+                       }
+                       found[o.Name] = struct{}{}
+
+                       var invalidKeys []string
+                       valueKeyFound := false
+                       for k := range o.RawConfig.Raw {
+                               if k == "value" {
+                                       valueKeyFound = true
+                                       continue
+                               }
+                               if k == "sensitive" {
+                                       if sensitive, ok := o.RawConfig.config[k].(bool); ok {
+                                               if sensitive {
+                                                       o.Sensitive = true
+                                               }
+                                               continue
+                                       }
+
+                                       errs = append(errs, fmt.Errorf(
+                                               "%s: value for 'sensitive' must be boolean",
+                                               o.Name))
+                                       continue
+                               }
+                               if k == "description" {
+                                       if desc, ok := o.RawConfig.config[k].(string); ok {
+                                               o.Description = desc
+                                               continue
+                                       }
+
+                                       errs = append(errs, fmt.Errorf(
+                                               "%s: value for 'description' must be string",
+                                               o.Name))
+                                       continue
+                               }
+                               invalidKeys = append(invalidKeys, k)
+                       }
+                       if len(invalidKeys) > 0 {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: output has invalid keys: %s",
+                                       o.Name, strings.Join(invalidKeys, ", ")))
+                       }
+                       if !valueKeyFound {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: output is missing required 'value' key", o.Name))
+                       }
+
+                       for _, v := range o.RawConfig.Variables {
+                               if _, ok := v.(*CountVariable); ok {
+                                       errs = append(errs, fmt.Errorf(
+                                               "%s: count variables are only valid within resources", o.Name))
+                               }
+                       }
+               }
+       }
+
+       // Check that all variables are in the proper context
+       for source, rc := range c.rawConfigs() {
+               walker := &interpolationWalker{
+                       ContextF: c.validateVarContextFn(source, &errs),
+               }
+               if err := reflectwalk.Walk(rc.Raw, walker); err != nil {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: error reading config: %s", source, err))
+               }
+       }
+
+       // Validate the self variable
+       for source, rc := range c.rawConfigs() {
+               // Ignore provisioners. This is a pretty brittle way to do this,
+               // but better than also repeating all the resources.
+               if strings.Contains(source, "provision") {
+                       continue
+               }
+
+               for _, v := range rc.Variables {
+                       if _, ok := v.(*SelfVariable); ok {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: cannot contain self-reference %s", source, v.FullKey()))
+                       }
+               }
+       }
+
+       if len(errs) > 0 {
+               return &multierror.Error{Errors: errs}
+       }
+
+       return nil
+}
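+
+// A minimal usage sketch (cfg is a *Config obtained from this package's
+// loader, which lies outside this hunk):
+//
+//     if err := cfg.Validate(); err != nil {
+//             // err is a *multierror.Error aggregating every problem found
+//             return err
+//     }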
+
+// InterpolatedVariables is a helper that returns a mapping of all the interpolated
+// variables within the configuration. This is used to verify references
+// are valid in the Validate step.
+func (c *Config) InterpolatedVariables() map[string][]InterpolatedVariable {
+       result := make(map[string][]InterpolatedVariable)
+       for source, rc := range c.rawConfigs() {
+               for _, v := range rc.Variables {
+                       result[source] = append(result[source], v)
+               }
+       }
+       return result
+}
+
+// rawConfigs returns all of the RawConfigs that are available keyed by
+// a human-friendly source.
+func (c *Config) rawConfigs() map[string]*RawConfig {
+       result := make(map[string]*RawConfig)
+       for _, m := range c.Modules {
+               source := fmt.Sprintf("module '%s'", m.Name)
+               result[source] = m.RawConfig
+       }
+
+       for _, pc := range c.ProviderConfigs {
+               source := fmt.Sprintf("provider config '%s'", pc.Name)
+               result[source] = pc.RawConfig
+       }
+
+       for _, rc := range c.Resources {
+               source := fmt.Sprintf("resource '%s'", rc.Id())
+               result[source+" count"] = rc.RawCount
+               result[source+" config"] = rc.RawConfig
+
+               for i, p := range rc.Provisioners {
+                       subsource := fmt.Sprintf(
+                               "%s provisioner %s (#%d)",
+                               source, p.Type, i+1)
+                       result[subsource] = p.RawConfig
+               }
+       }
+
+       for _, o := range c.Outputs {
+               source := fmt.Sprintf("output '%s'", o.Name)
+               result[source] = o.RawConfig
+       }
+
+       return result
+}
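+
+// For example (illustrative): a resource "aws_instance.web" with one
+// "remote-exec" provisioner contributes the keys
+// "resource 'aws_instance.web' count", "resource 'aws_instance.web' config"
+// and "resource 'aws_instance.web' provisioner remote-exec (#1)".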
+
+func (c *Config) validateVarContextFn(
+       source string, errs *[]error) interpolationWalkerContextFunc {
+       return func(loc reflectwalk.Location, node ast.Node) {
+               // If we're in a slice element, then it's fine, since you can do
+               // anything in there.
+               if loc == reflectwalk.SliceElem {
+                       return
+               }
+
+               // Otherwise, let's check if there is a splat resource variable
+               // at the top level in here. We do this by doing a transform that
+               // replaces everything with a noop node unless it's a variable
+               // access or concat. This should turn the AST into a flat tree
+               // of Concat(Noop, ...). If there are any variables left that are
+               // multi-access, then it's still broken.
+               node = node.Accept(func(n ast.Node) ast.Node {
+                       // If it is a concat or variable access, we allow it.
+                       switch n.(type) {
+                       case *ast.Output:
+                               return n
+                       case *ast.VariableAccess:
+                               return n
+                       }
+
+                       // Otherwise, noop
+                       return &noopNode{}
+               })
+
+               vars, err := DetectVariables(node)
+               if err != nil {
+                       // Ignore it since this will be caught during parse. This
+                       // actually probably should never happen by the time this
+                       // is called, but it's okay.
+                       return
+               }
+
+               for _, v := range vars {
+                       rv, ok := v.(*ResourceVariable)
+                       if !ok {
+                               return
+                       }
+
+                       if rv.Multi && rv.Index == -1 {
+                               *errs = append(*errs, fmt.Errorf(
+                                       "%s: use of the splat ('*') operator must be wrapped in a list declaration",
+                                       source))
+                       }
+               }
+       }
+}
+
+func (c *Config) validateDependsOn(
+       n string,
+       v []string,
+       resources map[string]*Resource,
+       modules map[string]*Module) []error {
+       // Verify depends on points to resources that all exist
+       var errs []error
+       for _, d := range v {
+               // Check if we contain interpolations
+               rc, err := NewRawConfig(map[string]interface{}{
+                       "value": d,
+               })
+               if err == nil && len(rc.Variables) > 0 {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: depends on value cannot contain interpolations: %s",
+                               n, d))
+                       continue
+               }
+
+               // If it is a module, verify it is a module
+               if strings.HasPrefix(d, "module.") {
+                       name := d[len("module."):]
+                       if _, ok := modules[name]; !ok {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: resource depends on non-existent module '%s'",
+                                       n, name))
+                       }
+
+                       continue
+               }
+
+               // Check resources
+               if _, ok := resources[d]; !ok {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: resource depends on non-existent resource '%s'",
+                               n, d))
+               }
+       }
+
+       return errs
+}
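+
+// For example (illustrative): "aws_instance.web" and "module.vpc" are
+// accepted when that resource and module exist, while an entry such as
+// "${aws_instance.web.id}" is rejected because depends_on values cannot
+// contain interpolations.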
+
+func (m *Module) mergerName() string {
+       return m.Id()
+}
+
+func (m *Module) mergerMerge(other merger) merger {
+       m2 := other.(*Module)
+
+       result := *m
+       result.Name = m2.Name
+       result.RawConfig = result.RawConfig.merge(m2.RawConfig)
+
+       if m2.Source != "" {
+               result.Source = m2.Source
+       }
+
+       return &result
+}
+
+func (o *Output) mergerName() string {
+       return o.Name
+}
+
+func (o *Output) mergerMerge(m merger) merger {
+       o2 := m.(*Output)
+
+       result := *o
+       result.Name = o2.Name
+       result.Description = o2.Description
+       result.RawConfig = result.RawConfig.merge(o2.RawConfig)
+       result.Sensitive = o2.Sensitive
+       result.DependsOn = o2.DependsOn
+
+       return &result
+}
+
+// GoString implements the fmt.GoStringer interface for debug output.
+func (c *ProviderConfig) GoString() string {
+       return fmt.Sprintf("*%#v", *c)
+}
+
+// FullName returns the provider config name, including the alias if one
+// is set (e.g. "aws.west").
+func (c *ProviderConfig) FullName() string {
+       if c.Alias == "" {
+               return c.Name
+       }
+
+       return fmt.Sprintf("%s.%s", c.Name, c.Alias)
+}
+
+func (c *ProviderConfig) mergerName() string {
+       return c.Name
+}
+
+func (c *ProviderConfig) mergerMerge(m merger) merger {
+       c2 := m.(*ProviderConfig)
+
+       result := *c
+       result.Name = c2.Name
+       result.RawConfig = result.RawConfig.merge(c2.RawConfig)
+
+       if c2.Alias != "" {
+               result.Alias = c2.Alias
+       }
+
+       return &result
+}
+
+func (r *Resource) mergerName() string {
+       return r.Id()
+}
+
+func (r *Resource) mergerMerge(m merger) merger {
+       r2 := m.(*Resource)
+
+       result := *r
+       result.Mode = r2.Mode
+       result.Name = r2.Name
+       result.Type = r2.Type
+       result.RawConfig = result.RawConfig.merge(r2.RawConfig)
+
+       if r2.RawCount.Value() != "1" {
+               result.RawCount = r2.RawCount
+       }
+
+       if len(r2.Provisioners) > 0 {
+               result.Provisioners = r2.Provisioners
+       }
+
+       return &result
+}
+
+// Merge merges two variables to create a new third variable.
+func (v *Variable) Merge(v2 *Variable) *Variable {
+       // Shallow copy the variable
+       result := *v
+
+       // The names should be the same, but the second name always wins.
+       result.Name = v2.Name
+
+       if v2.DeclaredType != "" {
+               result.DeclaredType = v2.DeclaredType
+       }
+       if v2.Default != nil {
+               result.Default = v2.Default
+       }
+       if v2.Description != "" {
+               result.Description = v2.Description
+       }
+
+       return &result
+}
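+
+// For example (illustrative): merging {Name: "region", Default: "us-east-1"}
+// with {Name: "region", Description: "AWS region"} keeps the first
+// variable's default and takes the second's description, since v2 only
+// overrides the fields it actually sets.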
+
+var typeStringMap = map[string]VariableType{
+       "string": VariableTypeString,
+       "map":    VariableTypeMap,
+       "list":   VariableTypeList,
+}
+
+// Type returns the type of variable this is.
+func (v *Variable) Type() VariableType {
+       if v.DeclaredType != "" {
+               declaredType, ok := typeStringMap[v.DeclaredType]
+               if !ok {
+                       return VariableTypeUnknown
+               }
+
+               return declaredType
+       }
+
+       return v.inferTypeFromDefault()
+}
+
+// ValidateTypeAndDefault ensures that the default variable value is
+// compatible with the declared type (if one exists), and that the type is
+// one that is known to Terraform.
+func (v *Variable) ValidateTypeAndDefault() error {
+       // If an explicit type is declared, ensure it is valid
+       if v.DeclaredType != "" {
+               if _, ok := typeStringMap[v.DeclaredType]; !ok {
+                       validTypes := []string{}
+                       for k := range typeStringMap {
+                               validTypes = append(validTypes, k)
+                       }
+                       return fmt.Errorf(
+                               "Variable '%s' type must be one of [%s] - '%s' is not a valid type",
+                               v.Name,
+                               strings.Join(validTypes, ", "),
+                               v.DeclaredType,
+                       )
+               }
+       }
+
+       if v.DeclaredType == "" || v.Default == nil {
+               return nil
+       }
+
+       if v.inferTypeFromDefault() != v.Type() {
+               return fmt.Errorf("'%s' has a default value which is not of type '%s' (got '%s')",
+                       v.Name, v.DeclaredType, v.inferTypeFromDefault().Printable())
+       }
+
+       return nil
+}
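+
+// For example (illustrative): a variable declared with type "list" whose
+// default is the string "foo" fails validation, because the default is
+// inferred as type "string".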
+
+func (v *Variable) mergerName() string {
+       return v.Name
+}
+
+func (v *Variable) mergerMerge(m merger) merger {
+       return v.Merge(m.(*Variable))
+}
+
+// Required tests whether a variable is required or not.
+func (v *Variable) Required() bool {
+       return v.Default == nil
+}
+
+// inferTypeFromDefault contains the logic for the old method of inferring
+// variable types; we can also use this for validating that the declared
+// type matches the type of the default value.
+func (v *Variable) inferTypeFromDefault() VariableType {
+       if v.Default == nil {
+               return VariableTypeString
+       }
+
+       var s string
+       if err := hilmapstructure.WeakDecode(v.Default, &s); err == nil {
+               v.Default = s
+               return VariableTypeString
+       }
+
+       var m map[string]interface{}
+       if err := hilmapstructure.WeakDecode(v.Default, &m); err == nil {
+               v.Default = m
+               return VariableTypeMap
+       }
+
+       var l []interface{}
+       if err := hilmapstructure.WeakDecode(v.Default, &l); err == nil {
+               v.Default = l
+               return VariableTypeList
+       }
+
+       return VariableTypeUnknown
+}
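+
+// For example (illustrative): a nil default infers "string",
+// map[string]interface{}{"a": "b"} infers "map", and []interface{}{"a"}
+// infers "list". Note that the successful WeakDecode also normalizes
+// v.Default to the decoded value as a side effect.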
+
+// Taintable returns whether resources of this mode can be tainted.
+func (m ResourceMode) Taintable() bool {
+       switch m {
+       case ManagedResourceMode:
+               return true
+       case DataResourceMode:
+               return false
+       default:
+               panic(fmt.Errorf("unsupported ResourceMode value %s", m))
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_string.go b/vendor/github.com/hashicorp/terraform/config/config_string.go
new file mode 100644 (file)
index 0000000..0b3abbc
--- /dev/null
@@ -0,0 +1,338 @@
+package config
+
+import (
+       "bytes"
+       "fmt"
+       "sort"
+       "strings"
+)
+
+// TestString is a Stringer-like function that outputs a string that can
+// be used to easily compare multiple Config structures in unit tests.
+//
+// This function has no practical use outside of unit tests and debugging.
+func (c *Config) TestString() string {
+       if c == nil {
+               return "<nil config>"
+       }
+
+       var buf bytes.Buffer
+       if len(c.Modules) > 0 {
+               buf.WriteString("Modules:\n\n")
+               buf.WriteString(modulesStr(c.Modules))
+               buf.WriteString("\n\n")
+       }
+
+       if len(c.Variables) > 0 {
+               buf.WriteString("Variables:\n\n")
+               buf.WriteString(variablesStr(c.Variables))
+               buf.WriteString("\n\n")
+       }
+
+       if len(c.ProviderConfigs) > 0 {
+               buf.WriteString("Provider Configs:\n\n")
+               buf.WriteString(providerConfigsStr(c.ProviderConfigs))
+               buf.WriteString("\n\n")
+       }
+
+       if len(c.Resources) > 0 {
+               buf.WriteString("Resources:\n\n")
+               buf.WriteString(resourcesStr(c.Resources))
+               buf.WriteString("\n\n")
+       }
+
+       if len(c.Outputs) > 0 {
+               buf.WriteString("Outputs:\n\n")
+               buf.WriteString(outputsStr(c.Outputs))
+               buf.WriteString("\n")
+       }
+
+       return strings.TrimSpace(buf.String())
+}
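+
+// A typical test usage sketch (hypothetical test code, not part of this
+// diff):
+//
+//     actual := cfg.TestString()
+//     if actual != strings.TrimSpace(expected) {
+//             t.Fatalf("bad:\n%s", actual)
+//     }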
+
+func terraformStr(t *Terraform) string {
+       result := ""
+
+       if b := t.Backend; b != nil {
+               result += fmt.Sprintf("backend (%s)\n", b.Type)
+
+               keys := make([]string, 0, len(b.RawConfig.Raw))
+               for k := range b.RawConfig.Raw {
+                       keys = append(keys, k)
+               }
+               sort.Strings(keys)
+
+               for _, k := range keys {
+                       result += fmt.Sprintf("  %s\n", k)
+               }
+       }
+
+       return strings.TrimSpace(result)
+}
+
+func modulesStr(ms []*Module) string {
+       result := ""
+       order := make([]int, 0, len(ms))
+       ks := make([]string, 0, len(ms))
+       mapping := make(map[string]int)
+       for i, m := range ms {
+               k := m.Id()
+               ks = append(ks, k)
+               mapping[k] = i
+       }
+       sort.Strings(ks)
+       for _, k := range ks {
+               order = append(order, mapping[k])
+       }
+
+       for _, i := range order {
+               m := ms[i]
+               result += fmt.Sprintf("%s\n", m.Id())
+
+               ks := make([]string, 0, len(m.RawConfig.Raw))
+               for k := range m.RawConfig.Raw {
+                       ks = append(ks, k)
+               }
+               sort.Strings(ks)
+
+               result += fmt.Sprintf("  source = %s\n", m.Source)
+
+               for _, k := range ks {
+                       result += fmt.Sprintf("  %s\n", k)
+               }
+       }
+
+       return strings.TrimSpace(result)
+}
+
+func outputsStr(os []*Output) string {
+       ns := make([]string, 0, len(os))
+       m := make(map[string]*Output)
+       for _, o := range os {
+               ns = append(ns, o.Name)
+               m[o.Name] = o
+       }
+       sort.Strings(ns)
+
+       result := ""
+       for _, n := range ns {
+               o := m[n]
+
+               result += fmt.Sprintf("%s\n", n)
+
+               if len(o.DependsOn) > 0 {
+                       result += fmt.Sprintf("  dependsOn\n")
+                       for _, d := range o.DependsOn {
+                               result += fmt.Sprintf("    %s\n", d)
+                       }
+               }
+
+               if len(o.RawConfig.Variables) > 0 {
+                       result += fmt.Sprintf("  vars\n")
+                       for _, rawV := range o.RawConfig.Variables {
+                               kind := "unknown"
+                               str := rawV.FullKey()
+
+                               switch rawV.(type) {
+                               case *ResourceVariable:
+                                       kind = "resource"
+                               case *UserVariable:
+                                       kind = "user"
+                               }
+
+                               result += fmt.Sprintf("    %s: %s\n", kind, str)
+                       }
+               }
+       }
+
+       return strings.TrimSpace(result)
+}
+
+// This helper turns the provider configs field into a deterministic
+// string value for comparison in tests.
+func providerConfigsStr(pcs []*ProviderConfig) string {
+       result := ""
+
+       ns := make([]string, 0, len(pcs))
+       m := make(map[string]*ProviderConfig)
+       for _, n := range pcs {
+               ns = append(ns, n.Name)
+               m[n.Name] = n
+       }
+       sort.Strings(ns)
+
+       for _, n := range ns {
+               pc := m[n]
+
+               result += fmt.Sprintf("%s\n", n)
+
+               keys := make([]string, 0, len(pc.RawConfig.Raw))
+               for k := range pc.RawConfig.Raw {
+                       keys = append(keys, k)
+               }
+               sort.Strings(keys)
+
+               for _, k := range keys {
+                       result += fmt.Sprintf("  %s\n", k)
+               }
+
+               if len(pc.RawConfig.Variables) > 0 {
+                       result += fmt.Sprintf("  vars\n")
+                       for _, rawV := range pc.RawConfig.Variables {
+                               kind := "unknown"
+                               str := rawV.FullKey()
+
+                               switch rawV.(type) {
+                               case *ResourceVariable:
+                                       kind = "resource"
+                               case *UserVariable:
+                                       kind = "user"
+                               }
+
+                               result += fmt.Sprintf("    %s: %s\n", kind, str)
+                       }
+               }
+       }
+
+       return strings.TrimSpace(result)
+}
+
+// This helper turns the resources field into a deterministic
+// string value for comparison in tests.
+func resourcesStr(rs []*Resource) string {
+       result := ""
+       order := make([]int, 0, len(rs))
+       ks := make([]string, 0, len(rs))
+       mapping := make(map[string]int)
+       for i, r := range rs {
+               k := r.Id()
+               ks = append(ks, k)
+               mapping[k] = i
+       }
+       sort.Strings(ks)
+       for _, k := range ks {
+               order = append(order, mapping[k])
+       }
+
+       for _, i := range order {
+               r := rs[i]
+               result += fmt.Sprintf(
+                       "%s (x%s)\n",
+                       r.Id(),
+                       r.RawCount.Value())
+
+               ks := make([]string, 0, len(r.RawConfig.Raw))
+               for k := range r.RawConfig.Raw {
+                       ks = append(ks, k)
+               }
+               sort.Strings(ks)
+
+               for _, k := range ks {
+                       result += fmt.Sprintf("  %s\n", k)
+               }
+
+               if len(r.Provisioners) > 0 {
+                       result += fmt.Sprintf("  provisioners\n")
+                       for _, p := range r.Provisioners {
+                               when := ""
+                               if p.When != ProvisionerWhenCreate {
+                                       when = fmt.Sprintf(" (%s)", p.When.String())
+                               }
+
+                               result += fmt.Sprintf("    %s%s\n", p.Type, when)
+
+                               if p.OnFailure != ProvisionerOnFailureFail {
+                                       result += fmt.Sprintf("      on_failure = %s\n", p.OnFailure.String())
+                               }
+
+                               ks := make([]string, 0, len(p.RawConfig.Raw))
+                               for k := range p.RawConfig.Raw {
+                                       ks = append(ks, k)
+                               }
+                               sort.Strings(ks)
+
+                               for _, k := range ks {
+                                       result += fmt.Sprintf("      %s\n", k)
+                               }
+                       }
+               }
+
+               if len(r.DependsOn) > 0 {
+                       result += fmt.Sprintf("  dependsOn\n")
+                       for _, d := range r.DependsOn {
+                               result += fmt.Sprintf("    %s\n", d)
+                       }
+               }
+
+               if len(r.RawConfig.Variables) > 0 {
+                       result += fmt.Sprintf("  vars\n")
+
+                       ks := make([]string, 0, len(r.RawConfig.Variables))
+                       for k := range r.RawConfig.Variables {
+                               ks = append(ks, k)
+                       }
+                       sort.Strings(ks)
+
+                       for _, k := range ks {
+                               rawV := r.RawConfig.Variables[k]
+                               kind := "unknown"
+                               str := rawV.FullKey()
+
+                               switch rawV.(type) {
+                               case *ResourceVariable:
+                                       kind = "resource"
+                               case *UserVariable:
+                                       kind = "user"
+                               }
+
+                               result += fmt.Sprintf("    %s: %s\n", kind, str)
+                       }
+               }
+       }
+
+       return strings.TrimSpace(result)
+}
+
+// This helper turns the variables field into a deterministic
+// string value for comparison in tests.
+func variablesStr(vs []*Variable) string {
+       result := ""
+       ks := make([]string, 0, len(vs))
+       m := make(map[string]*Variable)
+       for _, v := range vs {
+               ks = append(ks, v.Name)
+               m[v.Name] = v
+       }
+       sort.Strings(ks)
+
+       for _, k := range ks {
+               v := m[k]
+
+               required := ""
+               if v.Required() {
+                       required = " (required)"
+               }
+
+               declaredType := ""
+               if v.DeclaredType != "" {
+                       declaredType = fmt.Sprintf(" (%s)", v.DeclaredType)
+               }
+
+               if v.Default == nil || v.Default == "" {
+                       v.Default = "<>"
+               }
+               if v.Description == "" {
+                       v.Description = "<>"
+               }
+
+               result += fmt.Sprintf(
+                       "%s%s%s\n  %v\n  %s\n",
+                       k,
+                       required,
+                       declaredType,
+                       v.Default,
+                       v.Description)
+       }
+
+       return strings.TrimSpace(result)
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/config_terraform.go b/vendor/github.com/hashicorp/terraform/config/config_terraform.go
new file mode 100644 (file)
index 0000000..8535c96
--- /dev/null
@@ -0,0 +1,117 @@
+package config
+
+import (
+       "fmt"
+       "strings"
+
+       "github.com/hashicorp/go-version"
+       "github.com/mitchellh/hashstructure"
+)
+
+// Terraform is the Terraform meta-configuration that can be present
+// in configuration files for configuring Terraform itself.
+type Terraform struct {
+       RequiredVersion string   `hcl:"required_version"` // Required Terraform version (constraint)
+       Backend         *Backend // See Backend struct docs
+}
+
+// Validate performs the validation for just the Terraform configuration.
+func (t *Terraform) Validate() []error {
+       var errs []error
+
+       if raw := t.RequiredVersion; raw != "" {
+               // Check that the value has no interpolations
+               rc, err := NewRawConfig(map[string]interface{}{
+                       "root": raw,
+               })
+               if err != nil {
+                       errs = append(errs, fmt.Errorf(
+                               "terraform.required_version: %s", err))
+               } else if len(rc.Interpolations) > 0 {
+                       errs = append(errs, fmt.Errorf(
+                               "terraform.required_version: cannot contain interpolations"))
+               } else {
+                       // Check it is valid
+                       _, err := version.NewConstraint(raw)
+                       if err != nil {
+                               errs = append(errs, fmt.Errorf(
+                                       "terraform.required_version: invalid syntax: %s", err))
+                       }
+               }
+       }
+
+       if t.Backend != nil {
+               errs = append(errs, t.Backend.Validate()...)
+       }
+
+       return errs
+}
+
+// Merge t with t2.
+// Any conflicting fields are overwritten by t2.
+func (t *Terraform) Merge(t2 *Terraform) {
+       if t2.RequiredVersion != "" {
+               t.RequiredVersion = t2.RequiredVersion
+       }
+
+       if t2.Backend != nil {
+               t.Backend = t2.Backend
+       }
+}
+
+// Backend is the configuration for the "backend" to use with Terraform.
+// A backend is responsible for all major behavior of Terraform's core.
+// The abstraction layer above the core (the "backend") allows for behavior
+// such as remote operation.
+type Backend struct {
+       Type      string
+       RawConfig *RawConfig
+
+       // Hash is a unique hash code representing the original configuration
+       // of the backend. This won't be recomputed unless Rehash is called.
+       Hash uint64
+}
+
+// Rehash returns a unique content hash for this backend's configuration
+// as a uint64 value.
+func (b *Backend) Rehash() uint64 {
+       // If we have no backend, the value is zero
+       if b == nil {
+               return 0
+       }
+
+       // Use hashstructure to hash only our type with the config.
+       code, err := hashstructure.Hash(map[string]interface{}{
+               "type":   b.Type,
+               "config": b.RawConfig.Raw,
+       }, nil)
+
+       // This should never happen since we have just some basic primitives,
+       // so panic if there is an error.
+       if err != nil {
+               panic(err)
+       }
+
+       return code
+}
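+
+// A usage sketch (illustrative): because the hash covers only the backend
+// type and its raw config, comparing a previously stored Hash against a
+// fresh Rehash() is a cheap way to detect whether the backend
+// configuration has changed:
+//
+//     if saved.Hash != b.Rehash() {
+//             // backend configuration differs from what was initialized
+//     }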
+
+func (b *Backend) Validate() []error {
+       if len(b.RawConfig.Interpolations) > 0 {
+               return []error{fmt.Errorf(strings.TrimSpace(errBackendInterpolations))}
+       }
+
+       return nil
+}
+
+const errBackendInterpolations = `
+terraform.backend: configuration cannot contain interpolations
+
+The backend configuration is loaded by Terraform extremely early, before
+the core of Terraform can be initialized. This is necessary because the backend
+dictates the behavior of that core. The core is what handles interpolation
+processing. Because of this, interpolations cannot be used in backend
+configuration.
+
+If you'd like to parameterize backend configuration, we recommend using
+partial configuration with the "-backend-config" flag to "terraform init".
+`
diff --git a/vendor/github.com/hashicorp/terraform/config/config_tree.go b/vendor/github.com/hashicorp/terraform/config/config_tree.go
new file mode 100644 (file)
index 0000000..08dc0fe
--- /dev/null
@@ -0,0 +1,43 @@
+package config
+
+// configTree represents a tree of configurations where the root is the
+// first file and its children are the configurations it has imported.
+type configTree struct {
+       Path     string
+       Config   *Config
+       Children []*configTree
+}
+
+// Flatten flattens the entire tree down to a single merged Config
+// structure.
+func (t *configTree) Flatten() (*Config, error) {
+       // No children is easy: we're already merged!
+       if len(t.Children) == 0 {
+               return t.Config, nil
+       }
+
+       // Depth-first, merge all the children first.
+       childConfigs := make([]*Config, len(t.Children))
+       for i, ct := range t.Children {
+               c, err := ct.Flatten()
+               if err != nil {
+                       return nil, err
+               }
+
+               childConfigs[i] = c
+       }
+
+       // Merge all the children in order
+       config := childConfigs[0]
+       childConfigs = childConfigs[1:]
+       for _, config2 := range childConfigs {
+               var err error
+               config, err = Merge(config, config2)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Merge the final merged child config with our own
+       return Merge(config, t.Config)
+}
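+
+// For example (illustrative): with children c1 and c2 and this tree's own
+// config c0, Flatten computes Merge(Merge(c1, c2), c0); assuming Merge
+// favors its second argument, the tree's own configuration wins conflicts.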
diff --git a/vendor/github.com/hashicorp/terraform/config/import_tree.go b/vendor/github.com/hashicorp/terraform/config/import_tree.go
new file mode 100644 (file)
index 0000000..37ec11a
--- /dev/null
@@ -0,0 +1,113 @@
+package config
+
+import (
+       "fmt"
+       "io"
+)
+
+// configurable is an interface that must be implemented by any configuration
+// formats of Terraform in order to return a *Config.
+type configurable interface {
+       Config() (*Config, error)
+}
+
+// importTree is the result of the first-pass load of the configuration
+// files. It is a tree of raw configurables and then any children (their
+// imports).
+//
+// An importTree can be turned into a configTree.
+type importTree struct {
+       Path     string
+       Raw      configurable
+       Children []*importTree
+}
+
+// This is the function type that must be implemented by the configuration
+// file loader to turn a single file into a configurable and any additional
+// imports.
+type fileLoaderFunc func(path string) (configurable, []string, error)
+
+// loadTree takes a single file and loads the entire importTree for that
+// file. This function detects what kind of configuration file it is and
+// executes the proper fileLoaderFunc.
+func loadTree(root string) (*importTree, error) {
+       var f fileLoaderFunc
+       switch ext(root) {
+       case ".tf", ".tf.json":
+               f = loadFileHcl
+       default:
+       }
+
+       if f == nil {
+               return nil, fmt.Errorf(
+                       "%s: unknown configuration format. Use '.tf' or '.tf.json' extension",
+                       root)
+       }
+
+       c, imps, err := f(root)
+       if err != nil {
+               return nil, err
+       }
+
+       children := make([]*importTree, len(imps))
+       for i, imp := range imps {
+               t, err := loadTree(imp)
+               if err != nil {
+                       return nil, err
+               }
+
+               children[i] = t
+       }
+
+       return &importTree{
+               Path:     root,
+               Raw:      c,
+               Children: children,
+       }, nil
+}
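+
+// A usage sketch built from the functions in this file (the path is
+// illustrative):
+//
+//     tree, err := loadTree("main.tf")
+//     if err != nil {
+//             return nil, err
+//     }
+//     defer tree.Close()
+//     configTree, err := tree.ConfigTree()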
+
+// Close releases any resources we might be holding open for the importTree.
+//
+// This can safely be called even while ConfigTree results are alive. The
+// importTree is not bound to these.
+func (t *importTree) Close() error {
+       if c, ok := t.Raw.(io.Closer); ok {
+               c.Close()
+       }
+       for _, ct := range t.Children {
+               ct.Close()
+       }
+
+       return nil
+}
+
+// ConfigTree traverses the importTree and turns each node into a *Config
+// object, ultimately returning a *configTree.
+func (t *importTree) ConfigTree() (*configTree, error) {
+       config, err := t.Raw.Config()
+       if err != nil {
+               return nil, fmt.Errorf(
+                       "Error loading %s: %s",
+                       t.Path,
+                       err)
+       }
+
+       // Build our result
+       result := &configTree{
+               Path:   t.Path,
+               Config: config,
+       }
+
+       // Build the config trees for the children
+       result.Children = make([]*configTree, len(t.Children))
+       for i, ct := range t.Children {
+               t, err := ct.ConfigTree()
+               if err != nil {
+                       return nil, err
+               }
+
+               result.Children[i] = t
+       }
+
+       return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate.go b/vendor/github.com/hashicorp/terraform/config/interpolate.go
new file mode 100644 (file)
index 0000000..bbb3555
--- /dev/null
@@ -0,0 +1,386 @@
+package config
+
+import (
+       "fmt"
+       "strconv"
+       "strings"
+
+       "github.com/hashicorp/hil/ast"
+)
+
+// An InterpolatedVariable is a variable reference within an interpolation.
+//
+// Implementations of this interface represent various sources where
+// variables can come from: user variables, resources, etc.
+type InterpolatedVariable interface {
+       FullKey() string
+}
+
+// CountVariable is a variable for referencing information about
+// the count.
+type CountVariable struct {
+       Type CountValueType
+       key  string
+}
+
+// CountValueType is the type of the count variable that is referenced.
+type CountValueType byte
+
+const (
+       CountValueInvalid CountValueType = iota
+       CountValueIndex
+)
+
+// A ModuleVariable is a variable that is referencing the output
+// of a module, such as "${module.foo.bar}"
+type ModuleVariable struct {
+       Name  string
+       Field string
+       key   string
+}
+
+// A PathVariable is a variable that references path information about the
+// module.
+type PathVariable struct {
+       Type PathValueType
+       key  string
+}
+
+type PathValueType byte
+
+const (
+       PathValueInvalid PathValueType = iota
+       PathValueCwd
+       PathValueModule
+       PathValueRoot
+)
+
+// A ResourceVariable is a variable that is referencing the field
+// of a resource, such as "${aws_instance.foo.ami}"
+type ResourceVariable struct {
+       Mode  ResourceMode
+       Type  string // Resource type, i.e. "aws_instance"
+       Name  string // Resource name
+       Field string // Resource field
+
+       Multi bool // True if multi-variable: aws_instance.foo.*.id
+       Index int  // Index for multi-variable: aws_instance.foo.1.id == 1
+
+       key string
+}
+
+// SelfVariable is a variable that is referencing the same resource
+// it is running on: "${self.address}"
+type SelfVariable struct {
+       Field string
+
+       key string
+}
+
+// SimpleVariable is an unprefixed variable, which can show up when users have
+// strings they are passing down to resources that use interpolation
+// internally. The template_file resource is an example of this.
+type SimpleVariable struct {
+       Key string
+}
+
+// TerraformVariable is a "terraform."-prefixed variable used to access
+// metadata about the Terraform run.
+type TerraformVariable struct {
+       Field string
+       key   string
+}
+
+// A UserVariable is a variable that is referencing a user variable
+// that is input from outside the configuration. This looks like
+// "${var.foo}"
+type UserVariable struct {
+       Name string
+       Elem string
+
+       key string
+}
+
+func NewInterpolatedVariable(v string) (InterpolatedVariable, error) {
+       if strings.HasPrefix(v, "count.") {
+               return NewCountVariable(v)
+       } else if strings.HasPrefix(v, "path.") {
+               return NewPathVariable(v)
+       } else if strings.HasPrefix(v, "self.") {
+               return NewSelfVariable(v)
+       } else if strings.HasPrefix(v, "terraform.") {
+               return NewTerraformVariable(v)
+       } else if strings.HasPrefix(v, "var.") {
+               return NewUserVariable(v)
+       } else if strings.HasPrefix(v, "module.") {
+               return NewModuleVariable(v)
+       } else if !strings.ContainsRune(v, '.') {
+               return NewSimpleVariable(v)
+       } else {
+               return NewResourceVariable(v)
+       }
+}
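+
+// For illustration (hypothetical inputs):
+//
+//     v, _ := NewInterpolatedVariable("var.region")         // *UserVariable
+//     v, _ = NewInterpolatedVariable("aws_instance.web.id") // *ResourceVariable
+//     v, _ = NewInterpolatedVariable("count.index")         // *CountVariable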
+
+func NewCountVariable(key string) (*CountVariable, error) {
+       var fieldType CountValueType
+       parts := strings.SplitN(key, ".", 2)
+       switch parts[1] {
+       case "index":
+               fieldType = CountValueIndex
+       }
+
+       return &CountVariable{
+               Type: fieldType,
+               key:  key,
+       }, nil
+}
+
+func (c *CountVariable) FullKey() string {
+       return c.key
+}
+
+func NewModuleVariable(key string) (*ModuleVariable, error) {
+       parts := strings.SplitN(key, ".", 3)
+       if len(parts) < 3 {
+               return nil, fmt.Errorf(
+                       "%s: module variables must be three parts: module.name.attr",
+                       key)
+       }
+
+       return &ModuleVariable{
+               Name:  parts[1],
+               Field: parts[2],
+               key:   key,
+       }, nil
+}
+
+func (v *ModuleVariable) FullKey() string {
+       return v.key
+}
+
+func (v *ModuleVariable) GoString() string {
+       return fmt.Sprintf("*%#v", *v)
+}
+
+func NewPathVariable(key string) (*PathVariable, error) {
+       var fieldType PathValueType
+       parts := strings.SplitN(key, ".", 2)
+       switch parts[1] {
+       case "cwd":
+               fieldType = PathValueCwd
+       case "module":
+               fieldType = PathValueModule
+       case "root":
+               fieldType = PathValueRoot
+       }
+
+       return &PathVariable{
+               Type: fieldType,
+               key:  key,
+       }, nil
+}
+
+func (v *PathVariable) FullKey() string {
+       return v.key
+}
+
+func NewResourceVariable(key string) (*ResourceVariable, error) {
+       var mode ResourceMode
+       var parts []string
+       if strings.HasPrefix(key, "data.") {
+               mode = DataResourceMode
+               parts = strings.SplitN(key, ".", 4)
+               if len(parts) < 4 {
+                       return nil, fmt.Errorf(
+                               "%s: data variables must be four parts: data.TYPE.NAME.ATTR",
+                               key)
+               }
+
+               // Don't actually need the "data." prefix for parsing, since it's
+               // always constant.
+               parts = parts[1:]
+       } else {
+               mode = ManagedResourceMode
+               parts = strings.SplitN(key, ".", 3)
+               if len(parts) < 3 {
+                       return nil, fmt.Errorf(
+                               "%s: resource variables must be three parts: TYPE.NAME.ATTR",
+                               key)
+               }
+       }
+
+       field := parts[2]
+       multi := false
+       var index int
+
+       if idx := strings.Index(field, "."); idx != -1 {
+               indexStr := field[:idx]
+               multi = indexStr == "*"
+               index = -1
+
+               if !multi {
+                       indexInt, err := strconv.ParseInt(indexStr, 0, 0)
+                       if err == nil {
+                               multi = true
+                               index = int(indexInt)
+                       }
+               }
+
+               if multi {
+                       field = field[idx+1:]
+               }
+       }
+
+       return &ResourceVariable{
+               Mode:  mode,
+               Type:  parts[0],
+               Name:  parts[1],
+               Field: field,
+               Multi: multi,
+               Index: index,
+               key:   key,
+       }, nil
+}
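+
+// Parsing sketch (hypothetical keys):
+//
+//     "aws_instance.foo.ami"  -> Field "ami", Multi false
+//     "aws_instance.foo.*.id" -> Field "id",  Multi true, Index -1
+//     "aws_instance.foo.1.id" -> Field "id",  Multi true, Index 1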
+
+func (v *ResourceVariable) ResourceId() string {
+       switch v.Mode {
+       case ManagedResourceMode:
+               return fmt.Sprintf("%s.%s", v.Type, v.Name)
+       case DataResourceMode:
+               return fmt.Sprintf("data.%s.%s", v.Type, v.Name)
+       default:
+               panic(fmt.Errorf("unknown resource mode %s", v.Mode))
+       }
+}
+
+func (v *ResourceVariable) FullKey() string {
+       return v.key
+}
+
+func NewSelfVariable(key string) (*SelfVariable, error) {
+       field := key[len("self."):]
+
+       return &SelfVariable{
+               Field: field,
+
+               key: key,
+       }, nil
+}
+
+func (v *SelfVariable) FullKey() string {
+       return v.key
+}
+
+func (v *SelfVariable) GoString() string {
+       return fmt.Sprintf("*%#v", *v)
+}
+
+func NewSimpleVariable(key string) (*SimpleVariable, error) {
+       return &SimpleVariable{key}, nil
+}
+
+func (v *SimpleVariable) FullKey() string {
+       return v.Key
+}
+
+func (v *SimpleVariable) GoString() string {
+       return fmt.Sprintf("*%#v", *v)
+}
+
+func NewTerraformVariable(key string) (*TerraformVariable, error) {
+       field := key[len("terraform."):]
+       return &TerraformVariable{
+               Field: field,
+               key:   key,
+       }, nil
+}
+
+func (v *TerraformVariable) FullKey() string {
+       return v.key
+}
+
+func (v *TerraformVariable) GoString() string {
+       return fmt.Sprintf("*%#v", *v)
+}
+
+func NewUserVariable(key string) (*UserVariable, error) {
+       name := key[len("var."):]
+       elem := ""
+       if idx := strings.Index(name, "."); idx > -1 {
+               elem = name[idx+1:]
+               name = name[:idx]
+       }
+
+       if len(elem) > 0 {
+               return nil, fmt.Errorf("Invalid dot index found: 'var.%s.%s'. Values in maps and lists can be referenced using square bracket indexing, like: 'var.mymap[\"key\"]' or 'var.mylist[1]'.", name, elem)
+       }
+
+       return &UserVariable{
+               key: key,
+
+               Name: name,
+               Elem: elem,
+       }, nil
+}
+
+func (v *UserVariable) FullKey() string {
+       return v.key
+}
+
+func (v *UserVariable) GoString() string {
+       return fmt.Sprintf("*%#v", *v)
+}
+
+// DetectVariables takes an AST root and returns all the interpolated
+// variables that are detected in the AST tree.
+func DetectVariables(root ast.Node) ([]InterpolatedVariable, error) {
+       var result []InterpolatedVariable
+       var resultErr error
+
+       // Visitor callback
+       fn := func(n ast.Node) ast.Node {
+               if resultErr != nil {
+                       return n
+               }
+
+               switch vn := n.(type) {
+               case *ast.VariableAccess:
+                       v, err := NewInterpolatedVariable(vn.Name)
+                       if err != nil {
+                               resultErr = err
+                               return n
+                       }
+                       result = append(result, v)
+               case *ast.Index:
+                       if va, ok := vn.Target.(*ast.VariableAccess); ok {
+                               v, err := NewInterpolatedVariable(va.Name)
+                               if err != nil {
+                                       resultErr = err
+                                       return n
+                               }
+                               result = append(result, v)
+                       }
+                       if va, ok := vn.Key.(*ast.VariableAccess); ok {
+                               v, err := NewInterpolatedVariable(va.Name)
+                               if err != nil {
+                                       resultErr = err
+                                       return n
+                               }
+                               result = append(result, v)
+                       }
+               default:
+                       return n
+               }
+
+               return n
+       }
+
+       // Visitor pattern
+       root.Accept(fn)
+
+       if resultErr != nil {
+               return nil, resultErr
+       }
+
+       return result, nil
+}
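+
+// A minimal sketch, assuming an AST produced by hil.Parse:
+//
+//     root, err := hil.Parse(`${var.foo} ${aws_instance.web.id}`)
+//     if err == nil {
+//             vars, _ := DetectVariables(root)
+//             // vars holds a *UserVariable and a *ResourceVariable
+//     }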
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go
new file mode 100644 (file)
index 0000000..f1f97b0
--- /dev/null
@@ -0,0 +1,1390 @@
+package config
+
+import (
+       "crypto/md5"
+       "crypto/sha1"
+       "crypto/sha256"
+       "crypto/sha512"
+       "encoding/base64"
+       "encoding/hex"
+       "encoding/json"
+       "fmt"
+       "io/ioutil"
+       "math"
+       "net"
+       "path/filepath"
+       "regexp"
+       "sort"
+       "strconv"
+       "strings"
+       "time"
+
+       "github.com/apparentlymart/go-cidr/cidr"
+       "github.com/hashicorp/go-uuid"
+       "github.com/hashicorp/hil"
+       "github.com/hashicorp/hil/ast"
+       "github.com/mitchellh/go-homedir"
+)
+
+// stringSliceToVariableValue converts a string slice into the value
+// required to be returned from interpolation functions which return
+// TypeList.
+func stringSliceToVariableValue(values []string) []ast.Variable {
+       output := make([]ast.Variable, len(values))
+       for index, value := range values {
+               output[index] = ast.Variable{
+                       Type:  ast.TypeString,
+                       Value: value,
+               }
+       }
+       return output
+}
+
+func listVariableValueToStringSlice(values []ast.Variable) ([]string, error) {
+       output := make([]string, len(values))
+       for index, value := range values {
+               if value.Type != ast.TypeString {
+                       return []string{}, fmt.Errorf("list has non-string element (%T)", value.Type.String())
+               }
+               output[index] = value.Value.(string)
+       }
+       return output, nil
+}
+
+// Funcs is the mapping of built-in functions for configuration.
+func Funcs() map[string]ast.Function {
+       return map[string]ast.Function{
+               "basename":     interpolationFuncBasename(),
+               "base64decode": interpolationFuncBase64Decode(),
+               "base64encode": interpolationFuncBase64Encode(),
+               "base64sha256": interpolationFuncBase64Sha256(),
+               "base64sha512": interpolationFuncBase64Sha512(),
+               "ceil":         interpolationFuncCeil(),
+               "chomp":        interpolationFuncChomp(),
+               "cidrhost":     interpolationFuncCidrHost(),
+               "cidrnetmask":  interpolationFuncCidrNetmask(),
+               "cidrsubnet":   interpolationFuncCidrSubnet(),
+               "coalesce":     interpolationFuncCoalesce(),
+               "coalescelist": interpolationFuncCoalesceList(),
+               "compact":      interpolationFuncCompact(),
+               "concat":       interpolationFuncConcat(),
+               "dirname":      interpolationFuncDirname(),
+               "distinct":     interpolationFuncDistinct(),
+               "element":      interpolationFuncElement(),
+               "file":         interpolationFuncFile(),
+               "matchkeys":    interpolationFuncMatchKeys(),
+               "floor":        interpolationFuncFloor(),
+               "format":       interpolationFuncFormat(),
+               "formatlist":   interpolationFuncFormatList(),
+               "index":        interpolationFuncIndex(),
+               "join":         interpolationFuncJoin(),
+               "jsonencode":   interpolationFuncJSONEncode(),
+               "length":       interpolationFuncLength(),
+               "list":         interpolationFuncList(),
+               "log":          interpolationFuncLog(),
+               "lower":        interpolationFuncLower(),
+               "map":          interpolationFuncMap(),
+               "max":          interpolationFuncMax(),
+               "md5":          interpolationFuncMd5(),
+               "merge":        interpolationFuncMerge(),
+               "min":          interpolationFuncMin(),
+               "pathexpand":   interpolationFuncPathExpand(),
+               "uuid":         interpolationFuncUUID(),
+               "replace":      interpolationFuncReplace(),
+               "sha1":         interpolationFuncSha1(),
+               "sha256":       interpolationFuncSha256(),
+               "sha512":       interpolationFuncSha512(),
+               "signum":       interpolationFuncSignum(),
+               "slice":        interpolationFuncSlice(),
+               "sort":         interpolationFuncSort(),
+               "split":        interpolationFuncSplit(),
+               "substr":       interpolationFuncSubstr(),
+               "timestamp":    interpolationFuncTimestamp(),
+               "title":        interpolationFuncTitle(),
+               "trimspace":    interpolationFuncTrimSpace(),
+               "upper":        interpolationFuncUpper(),
+               "zipmap":       interpolationFuncZipMap(),
+       }
+}
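+
+// A minimal evaluation sketch (assuming hil.Parse/hil.Eval from
+// github.com/hashicorp/hil):
+//
+//     root, _ := hil.Parse(`${upper("terraform")}`)
+//     result, _ := hil.Eval(root, &hil.EvalConfig{
+//             GlobalScope: &ast.BasicScope{FuncMap: Funcs()},
+//     })
+//     // result.Value == "TERRAFORM"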
+
+// interpolationFuncList creates a list from the parameters passed
+// to it.
+func interpolationFuncList() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{},
+               ReturnType:   ast.TypeList,
+               Variadic:     true,
+               VariadicType: ast.TypeAny,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       var outputList []ast.Variable
+
+                       for i, val := range args {
+                               switch v := val.(type) {
+                               case string:
+                                       outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: v})
+                               case []ast.Variable:
+                                       outputList = append(outputList, ast.Variable{Type: ast.TypeList, Value: v})
+                               case map[string]ast.Variable:
+                                       outputList = append(outputList, ast.Variable{Type: ast.TypeMap, Value: v})
+                               default:
+                                       return nil, fmt.Errorf("unexpected type %T for argument %d in list", v, i)
+                               }
+                       }
+
+                       // we don't support heterogeneous types, so make sure all types match the first
+                       if len(outputList) > 0 {
+                               firstType := outputList[0].Type
+                               for i, v := range outputList[1:] {
+                                       if v.Type != firstType {
+                                               return nil, fmt.Errorf("unexpected type %s for argument %d in list", v.Type, i+1)
+                                       }
+                               }
+                       }
+
+                       return outputList, nil
+               },
+       }
+}
+
+// interpolationFuncMap creates a map from the parameters passed
+// to it.
+func interpolationFuncMap() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{},
+               ReturnType:   ast.TypeMap,
+               Variadic:     true,
+               VariadicType: ast.TypeAny,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       outputMap := make(map[string]ast.Variable)
+
+                       if len(args)%2 != 0 {
+                               return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args))
+                       }
+
+                       var firstType *ast.Type
+                       for i := 0; i < len(args); i += 2 {
+                               key, ok := args[i].(string)
+                               if !ok {
+                                       return nil, fmt.Errorf("argument %d represents a key, so it must be a string", i+1)
+                               }
+                               val := args[i+1]
+                               variable, err := hil.InterfaceToVariable(val)
+                               if err != nil {
+                                       return nil, err
+                               }
+                               // Enforce map type homogeneity
+                               if firstType == nil {
+                                       firstType = &variable.Type
+                               } else if variable.Type != *firstType {
+                                       return nil, fmt.Errorf("all map values must have the same type, got %s then %s", firstType.Printable(), variable.Type.Printable())
+                               }
+                               // Check for duplicate keys
+                               if _, ok := outputMap[key]; ok {
+                                       return nil, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key)
+                               }
+                               outputMap[key] = variable
+                       }
+
+                       return outputMap, nil
+               },
+       }
+}
+
+// interpolationFuncCompact strips a list of multi-variable values
+// (e.g. as returned by "split") of any empty strings.
+func interpolationFuncCompact() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeList},
+               ReturnType: ast.TypeList,
+               Variadic:   false,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       inputList := args[0].([]ast.Variable)
+
+                       var outputList []string
+                       for _, val := range inputList {
+                               strVal, ok := val.Value.(string)
+                               if !ok {
+                                       return nil, fmt.Errorf(
+                                               "compact() may only be used with flat lists, this list contains elements of %s",
+                                               val.Type.Printable())
+                               }
+                               if strVal == "" {
+                                       continue
+                               }
+
+                               outputList = append(outputList, strVal)
+                       }
+                       return stringSliceToVariableValue(outputList), nil
+               },
+       }
+}
+
+// interpolationFuncCidrHost implements the "cidrhost" function that
+// fills in the host part of a CIDR range address to create a single
+// host address
+func interpolationFuncCidrHost() ast.Function {
+       return ast.Function{
+               ArgTypes: []ast.Type{
+                       ast.TypeString, // starting CIDR mask
+                       ast.TypeInt,    // host number to insert
+               },
+               ReturnType: ast.TypeString,
+               Variadic:   false,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       hostNum := args[1].(int)
+                       _, network, err := net.ParseCIDR(args[0].(string))
+                       if err != nil {
+                               return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+                       }
+
+                       ip, err := cidr.Host(network, hostNum)
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       return ip.String(), nil
+               },
+       }
+}
+
+// interpolationFuncCidrNetmask implements the "cidrnetmask" function
+// that returns the subnet mask in IP address notation.
+func interpolationFuncCidrNetmask() ast.Function {
+       return ast.Function{
+               ArgTypes: []ast.Type{
+                       ast.TypeString, // CIDR mask
+               },
+               ReturnType: ast.TypeString,
+               Variadic:   false,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       _, network, err := net.ParseCIDR(args[0].(string))
+                       if err != nil {
+                               return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+                       }
+
+                       return net.IP(network.Mask).String(), nil
+               },
+       }
+}
+
+// interpolationFuncCidrSubnet implements the "cidrsubnet" function that
+// adds an additional subnet of the given length onto an existing
+// IP block expressed in CIDR notation.
+func interpolationFuncCidrSubnet() ast.Function {
+       return ast.Function{
+               ArgTypes: []ast.Type{
+                       ast.TypeString, // starting CIDR mask
+                       ast.TypeInt,    // number of bits to extend the prefix
+                       ast.TypeInt,    // network number to append to the prefix
+               },
+               ReturnType: ast.TypeString,
+               Variadic:   false,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       extraBits := args[1].(int)
+                       subnetNum := args[2].(int)
+                       _, network, err := net.ParseCIDR(args[0].(string))
+                       if err != nil {
+                               return nil, fmt.Errorf("invalid CIDR expression: %s", err)
+                       }
+
+                       // For portability with 32-bit systems where the subnet number
+                       // will be a 32-bit int, we only allow extension of 32 bits in
+                       // one call even if we're running on a 64-bit machine.
+                       // (Of course, this is significant only for IPv6.)
+                       if extraBits > 32 {
+                               return nil, fmt.Errorf("may not extend prefix by more than 32 bits")
+                       }
+
+                       newNetwork, err := cidr.Subnet(network, extraBits, subnetNum)
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       return newNetwork.String(), nil
+               },
+       }
+}
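+
+// For example (illustrative values):
+//
+//     cidrsubnet("10.0.0.0/8", 8, 2) -> "10.2.0.0/16"
+//     cidrhost("10.0.0.0/8", 5)      -> "10.0.0.5"
+//     cidrnetmask("192.168.1.0/24")  -> "255.255.255.0"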
+
+// interpolationFuncCoalesce implements the "coalesce" function that
+// returns the first non-empty string from the provided input
+func interpolationFuncCoalesce() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeString},
+               ReturnType:   ast.TypeString,
+               Variadic:     true,
+               VariadicType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       if len(args) < 2 {
+                               return nil, fmt.Errorf("must provide at least two arguments")
+                       }
+                       for _, arg := range args {
+                               argument := arg.(string)
+
+                               if argument != "" {
+                                       return argument, nil
+                               }
+                       }
+                       return "", nil
+               },
+       }
+}
+
+// interpolationFuncCoalesceList implements the "coalescelist" function that
+// returns the first non-empty list from the provided input
+func interpolationFuncCoalesceList() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeList},
+               ReturnType:   ast.TypeList,
+               Variadic:     true,
+               VariadicType: ast.TypeList,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       if len(args) < 2 {
+                               return nil, fmt.Errorf("must provide at least two arguments")
+                       }
+                       for _, arg := range args {
+                               argument := arg.([]ast.Variable)
+
+                               if len(argument) > 0 {
+                                       return argument, nil
+                               }
+                       }
+                       return make([]ast.Variable, 0), nil
+               },
+       }
+}
+
+// interpolationFuncConcat implements the "concat" function that concatenates
+// multiple lists.
+func interpolationFuncConcat() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeList},
+               ReturnType:   ast.TypeList,
+               Variadic:     true,
+               VariadicType: ast.TypeList,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       var outputList []ast.Variable
+
+                       for _, arg := range args {
+                               for _, v := range arg.([]ast.Variable) {
+                                       switch v.Type {
+                                       case ast.TypeString:
+                                               outputList = append(outputList, v)
+                                       case ast.TypeList:
+                                               outputList = append(outputList, v)
+                                       case ast.TypeMap:
+                                               outputList = append(outputList, v)
+                                       default:
+                                               return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable())
+                                       }
+                               }
+                       }
+
+                       // we don't support heterogeneous types, so make sure all types match the first
+                       if len(outputList) > 0 {
+                               firstType := outputList[0].Type
+                               for _, v := range outputList[1:] {
+                                       if v.Type != firstType {
+                                               return nil, fmt.Errorf("unexpected %s in list of %s", v.Type.Printable(), firstType.Printable())
+                                       }
+                               }
+                       }
+
+                       return outputList, nil
+               },
+       }
+}
+
+// interpolationFuncFile implements the "file" function that allows
+// loading contents from a file.
+func interpolationFuncFile() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       path, err := homedir.Expand(args[0].(string))
+                       if err != nil {
+                               return "", err
+                       }
+                       data, err := ioutil.ReadFile(path)
+                       if err != nil {
+                               return "", err
+                       }
+
+                       return string(data), nil
+               },
+       }
+}
+
+// interpolationFuncFormat implements the "format" function that does
+// string formatting.
+func interpolationFuncFormat() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeString},
+               Variadic:     true,
+               VariadicType: ast.TypeAny,
+               ReturnType:   ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       format := args[0].(string)
+                       return fmt.Sprintf(format, args[1:]...), nil
+               },
+       }
+}
+
+// interpolationFuncMax returns the maximum of the numeric arguments
+func interpolationFuncMax() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeFloat},
+               ReturnType:   ast.TypeFloat,
+               Variadic:     true,
+               VariadicType: ast.TypeFloat,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       max := args[0].(float64)
+
+                       for i := 1; i < len(args); i++ {
+                               max = math.Max(max, args[i].(float64))
+                       }
+
+                       return max, nil
+               },
+       }
+}
+
+// interpolationFuncMin returns the minimum of the numeric arguments
+func interpolationFuncMin() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeFloat},
+               ReturnType:   ast.TypeFloat,
+               Variadic:     true,
+               VariadicType: ast.TypeFloat,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       min := args[0].(float64)
+
+                       for i := 1; i < len(args); i++ {
+                               min = math.Min(min, args[i].(float64))
+                       }
+
+                       return min, nil
+               },
+       }
+}
+
+// interpolationFuncPathExpand expands a leading "~" in a path to the user's home directory
+func interpolationFuncPathExpand() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return homedir.Expand(args[0].(string))
+               },
+       }
+}
+
+// interpolationFuncCeil returns the least integer value greater than or equal to the argument
+func interpolationFuncCeil() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeFloat},
+               ReturnType: ast.TypeInt,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return int(math.Ceil(args[0].(float64))), nil
+               },
+       }
+}
+
+// interpolationFuncLog returns the logarithm of the first argument in the base given by the second.
+func interpolationFuncLog() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeFloat, ast.TypeFloat},
+               ReturnType: ast.TypeFloat,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return math.Log(args[0].(float64)) / math.Log(args[1].(float64)), nil
+               },
+       }
+}
+
+// interpolationFuncChomp removes trailing newlines from the given string
+func interpolationFuncChomp() ast.Function {
+       newlines := regexp.MustCompile(`(?:\r\n?|\n)*\z`)
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return newlines.ReplaceAllString(args[0].(string), ""), nil
+               },
+       }
+}
+
+// interpolationFuncFloor returns the greatest integer value less than or equal to the argument
+func interpolationFuncFloor() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeFloat},
+               ReturnType: ast.TypeInt,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return int(math.Floor(args[0].(float64))), nil
+               },
+       }
+}
+
+func interpolationFuncZipMap() ast.Function {
+       return ast.Function{
+               ArgTypes: []ast.Type{
+                       ast.TypeList, // Keys
+                       ast.TypeList, // Values
+               },
+               ReturnType: ast.TypeMap,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       keys := args[0].([]ast.Variable)
+                       values := args[1].([]ast.Variable)
+
+                       if len(keys) != len(values) {
+                               return nil, fmt.Errorf("count of keys (%d) does not match count of values (%d)",
+                                       len(keys), len(values))
+                       }
+
+                       for i, val := range keys {
+                               if val.Type != ast.TypeString {
+                                       return nil, fmt.Errorf("keys must be strings. value at position %d is %s",
+                                               i, val.Type.Printable())
+                               }
+                       }
+
+                       result := map[string]ast.Variable{}
+                       for i := 0; i < len(keys); i++ {
+                               result[keys[i].Value.(string)] = values[i]
+                       }
+
+                       return result, nil
+               },
+       }
+}
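+
+// For example (illustrative values):
+//
+//     zipmap(list("a", "b"), list("1", "2")) -> { a = "1", b = "2" }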
+
+// interpolationFuncFormatList implements the "formatlist" function that does
+// string formatting on lists.
+func interpolationFuncFormatList() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeAny},
+               Variadic:     true,
+               VariadicType: ast.TypeAny,
+               ReturnType:   ast.TypeList,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       // Make a copy of the variadic part of args
+                       // to avoid modifying the original.
+                       varargs := make([]interface{}, len(args)-1)
+                       copy(varargs, args[1:])
+
+                       // Verify we have some arguments
+                       if len(varargs) == 0 {
+                               return nil, fmt.Errorf("no arguments to formatlist")
+                       }
+
+                       // Convert arguments that are lists into slices.
+                       // Confirm along the way that all lists have the same length (n).
+                       var n int
+                       listSeen := false
+                       for i := 1; i < len(args); i++ {
+                               s, ok := args[i].([]ast.Variable)
+                               if !ok {
+                                       continue
+                               }
+
+                               // Mark that we've seen at least one list
+                               listSeen = true
+
+                               // Convert the ast.Variable to a slice of strings
+                               parts, err := listVariableValueToStringSlice(s)
+                               if err != nil {
+                                       return nil, err
+                               }
+
+                               // Store the converted slice so it can be indexed per element below
+                               varargs[i-1] = parts
+
+                               // Check length
+                               if n == 0 {
+                                       // first list we've seen
+                                       n = len(parts)
+                                       continue
+                               }
+                               if n != len(parts) {
+                                       return nil, fmt.Errorf("format: mismatched list lengths: %d != %d", n, len(parts))
+                               }
+                       }
+
+                       // If we didn't see a list this is an error because we
+                       // can't determine the return value length.
+                       if !listSeen {
+                               return nil, fmt.Errorf(
+                                       "formatlist requires at least one list argument")
+                       }
+
+                       // Do the formatting.
+                       format := args[0].(string)
+
+                       // Generate a list of formatted strings.
+                       list := make([]string, n)
+                       fmtargs := make([]interface{}, len(varargs))
+                       for i := 0; i < n; i++ {
+                               for j, arg := range varargs {
+                                       switch arg := arg.(type) {
+                                       default:
+                                               fmtargs[j] = arg
+                                       case []string:
+                                               fmtargs[j] = arg[i]
+                                       }
+                               }
+                               list[i] = fmt.Sprintf(format, fmtargs...)
+                       }
+                       return stringSliceToVariableValue(list), nil
+               },
+       }
+}
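+
+// For example (illustrative values):
+//
+//     formatlist("%s:%s", "db", list("one", "two")) -> ["db:one", "db:two"]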
+
+// interpolationFuncIndex implements the "index" function that allows one to
+// find the index of a specific element in a list
+func interpolationFuncIndex() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
+               ReturnType: ast.TypeInt,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       haystack := args[0].([]ast.Variable)
+                       needle := args[1].(string)
+                       for index, element := range haystack {
+                               if needle == element.Value {
+                                       return index, nil
+                               }
+                       }
+                       return nil, fmt.Errorf("Could not find '%s' in '%s'", needle, haystack)
+               },
+       }
+}
+
+// interpolationFuncBasename implements the "dirname" function.
+func interpolationFuncDirname() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return filepath.Dir(args[0].(string)), nil
+               },
+       }
+}
+
+// interpolationFuncDistinct implements the "distinct" function that
+// removes duplicate elements from a list.
+func interpolationFuncDistinct() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeList},
+               ReturnType:   ast.TypeList,
+               Variadic:     true,
+               VariadicType: ast.TypeList,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       var list []string
+
+                       if len(args) != 1 {
+                               return nil, fmt.Errorf("accepts only one argument.")
+                       }
+
+                       if argument, ok := args[0].([]ast.Variable); ok {
+                               for _, element := range argument {
+                                       if element.Type != ast.TypeString {
+                                               return nil, fmt.Errorf(
+                                                       "only works for flat lists, this list contains elements of %s",
+                                                       element.Type.Printable())
+                                       }
+                                       list = appendIfMissing(list, element.Value.(string))
+                               }
+                       }
+
+                       return stringSliceToVariableValue(list), nil
+               },
+       }
+}
+
+// appendIfMissing is a helper that appends an element to a slice only if it
+// is not already present.
+func appendIfMissing(slice []string, element string) []string {
+       for _, ele := range slice {
+               if ele == element {
+                       return slice
+               }
+       }
+       return append(slice, element)
+}
+
+// interpolationFuncMatchKeys implements the "matchkeys" function: for two
+// lists `values` and `keys` of equal length, it returns all elements from
+// `values` where the corresponding element from `keys` is in `searchset`.
+func interpolationFuncMatchKeys() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeList, ast.TypeList, ast.TypeList},
+               ReturnType: ast.TypeList,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       output := make([]ast.Variable, 0)
+
+                       values, _ := args[0].([]ast.Variable)
+                       keys, _ := args[1].([]ast.Variable)
+                       searchset, _ := args[2].([]ast.Variable)
+
+                       if len(keys) != len(values) {
+                               return nil, fmt.Errorf("length of keys and values should be equal")
+                       }
+
+                       for i, key := range keys {
+                               for _, search := range searchset {
+                                       if res, err := compareSimpleVariables(key, search); err != nil {
+                                               return nil, err
+                                       } else if res {
+                                               output = append(output, values[i])
+                                               break
+                                       }
+                               }
+                       }
+                       // if searchset is empty, then output is an empty list as well.
+                       // if we haven't matched any key, then output is an empty list.
+                       return output, nil
+               },
+       }
+}
+
+// compare two variables of the same type, i.e. non complex one, such as TypeList or TypeMap
+func compareSimpleVariables(a, b ast.Variable) (bool, error) {
+       if a.Type != b.Type {
+               return false, fmt.Errorf(
+                       "won't compare items of different types %s and %s",
+                       a.Type.Printable(), b.Type.Printable())
+       }
+       switch a.Type {
+       case ast.TypeString:
+               return a.Value.(string) == b.Value.(string), nil
+       default:
+               return false, fmt.Errorf(
+                       "can't compare items of type %s",
+                       a.Type.Printable())
+       }
+}
+
+// interpolationFuncJoin implements the "join" function that allows
+// multi-variable values to be joined by some character.
+func interpolationFuncJoin() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeString},
+               Variadic:     true,
+               VariadicType: ast.TypeList,
+               ReturnType:   ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       var list []string
+
+                       if len(args) < 2 {
+                               return nil, fmt.Errorf("not enough arguments to join()")
+                       }
+
+                       for _, arg := range args[1:] {
+                               for _, part := range arg.([]ast.Variable) {
+                                       if part.Type != ast.TypeString {
+                                               return nil, fmt.Errorf(
+                                                       "only works on flat lists, this list contains elements of %s",
+                                                       part.Type.Printable())
+                                       }
+                                       list = append(list, part.Value.(string))
+                               }
+                       }
+
+                       return strings.Join(list, args[0].(string)), nil
+               },
+       }
+}
+
+// interpolationFuncJSONEncode implements the "jsonencode" function that encodes
+// a string, list, or map as its JSON representation. For now, values in the
+// list or map may only be strings.
+func interpolationFuncJSONEncode() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeAny},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       var toEncode interface{}
+
+                       switch typedArg := args[0].(type) {
+                       case string:
+                               toEncode = typedArg
+
+                       case []ast.Variable:
+                               // We preallocate the list here. Note that it's important that in
+                               // the length 0 case, we have an empty list rather than nil, as
+                               // they encode differently.
+                               // XXX It would be nice to support arbitrarily nested data here. Is
+                               // there an inverse of hil.InterfaceToVariable?
+                               strings := make([]string, len(typedArg))
+
+                               for i, v := range typedArg {
+                                       if v.Type != ast.TypeString {
+                                               return "", fmt.Errorf("list elements must be strings")
+                                       }
+                                       strings[i] = v.Value.(string)
+                               }
+                               toEncode = strings
+
+                       case map[string]ast.Variable:
+                               // XXX It would be nice to support arbitrarily nested data here. Is
+                               // there an inverse of hil.InterfaceToVariable?
+                               stringMap := make(map[string]string)
+                               for k, v := range typedArg {
+                                       if v.Type != ast.TypeString {
+                                               return "", fmt.Errorf("map values must be strings")
+                                       }
+                                       stringMap[k] = v.Value.(string)
+                               }
+                               toEncode = stringMap
+
+                       default:
+                               return "", fmt.Errorf("unknown type for JSON encoding: %T", args[0])
+                       }
+
+                       jEnc, err := json.Marshal(toEncode)
+                       if err != nil {
+                               return "", fmt.Errorf("failed to encode JSON data '%s'", toEncode)
+                       }
+                       return string(jEnc), nil
+               },
+       }
+}
+
+// interpolationFuncReplace implements the "replace" function that does
+// string replacement.
+func interpolationFuncReplace() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString, ast.TypeString, ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       s := args[0].(string)
+                       search := args[1].(string)
+                       replace := args[2].(string)
+
+                       // We search/replace using a regexp if the string is surrounded
+                       // in forward slashes.
+                       if len(search) > 1 && search[0] == '/' && search[len(search)-1] == '/' {
+                               re, err := regexp.Compile(search[1 : len(search)-1])
+                               if err != nil {
+                                       return nil, err
+                               }
+
+                               return re.ReplaceAllString(s, replace), nil
+                       }
+
+                       return strings.Replace(s, search, replace, -1), nil
+               },
+       }
+}
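+
+// For example (illustrative values):
+//
+//     replace("hello", "l", "L")    -> "heLLo"
+//     replace("hello", "/l+/", "L") -> "heLo"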
+
+func interpolationFuncLength() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeAny},
+               ReturnType: ast.TypeInt,
+               Variadic:   false,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       subject := args[0]
+
+                       switch typedSubject := subject.(type) {
+                       case string:
+                               return len(typedSubject), nil
+                       case []ast.Variable:
+                               return len(typedSubject), nil
+                       case map[string]ast.Variable:
+                               return len(typedSubject), nil
+                       }
+
+                       return 0, fmt.Errorf("arguments to length() must be a string, list, or map")
+               },
+       }
+}
+
+func interpolationFuncSignum() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeInt},
+               ReturnType: ast.TypeInt,
+               Variadic:   false,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       num := args[0].(int)
+                       switch {
+                       case num < 0:
+                               return -1, nil
+                       case num > 0:
+                               return +1, nil
+                       default:
+                               return 0, nil
+                       }
+               },
+       }
+}
+
+// interpolationFuncSlice returns a portion of the input list between from, inclusive and to, exclusive.
+func interpolationFuncSlice() ast.Function {
+       return ast.Function{
+               ArgTypes: []ast.Type{
+                       ast.TypeList, // inputList
+                       ast.TypeInt,  // from
+                       ast.TypeInt,  // to
+               },
+               ReturnType: ast.TypeList,
+               Variadic:   false,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       inputList := args[0].([]ast.Variable)
+                       from := args[1].(int)
+                       to := args[2].(int)
+
+                       if from < 0 {
+                               return nil, fmt.Errorf("from index must be >= 0")
+                       }
+                       if to > len(inputList) {
+                               return nil, fmt.Errorf("to index must be <= length of the input list")
+                       }
+                       if from > to {
+                               return nil, fmt.Errorf("from index must be <= to index")
+                       }
+
+                       var outputList []ast.Variable
+                       for i, val := range inputList {
+                               if i >= from && i < to {
+                                       outputList = append(outputList, val)
+                               }
+                       }
+                       return outputList, nil
+               },
+       }
+}
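+
+// For example (illustrative values):
+//
+//     slice(list("a", "b", "c", "d"), 1, 3) -> ["b", "c"]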
+
+// interpolationFuncSort sorts a list of strings lexicographically
+func interpolationFuncSort() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeList},
+               ReturnType: ast.TypeList,
+               Variadic:   false,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       inputList := args[0].([]ast.Variable)
+
+                       // Ensure that all the list members are strings and
+                       // create a string slice from them
+                       members := make([]string, len(inputList))
+                       for i, val := range inputList {
+                               if val.Type != ast.TypeString {
+                                       return nil, fmt.Errorf(
+                                               "sort() may only be used with lists of strings - %s at index %d",
+                                               val.Type.String(), i)
+                               }
+
+                               members[i] = val.Value.(string)
+                       }
+
+                       sort.Strings(members)
+                       return stringSliceToVariableValue(members), nil
+               },
+       }
+}
+
+// interpolationFuncSplit implements the "split" function that allows
+// strings to split into multi-variable values
+func interpolationFuncSplit() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString, ast.TypeString},
+               ReturnType: ast.TypeList,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       sep := args[0].(string)
+                       s := args[1].(string)
+                       elements := strings.Split(s, sep)
+                       return stringSliceToVariableValue(elements), nil
+               },
+       }
+}
+
+// interpolationFuncLookup implements the "lookup" function that allows
+// dynamic lookups of map types within a Terraform configuration.
+func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeMap, ast.TypeString},
+               ReturnType:   ast.TypeString,
+               Variadic:     true,
+               VariadicType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       defaultValue := ""
+                       defaultValueSet := false
+                       if len(args) > 2 {
+                               defaultValue = args[2].(string)
+                               defaultValueSet = true
+                       }
+                       if len(args) > 3 {
+                               return "", fmt.Errorf("lookup() takes no more than three arguments")
+                       }
+                       index := args[1].(string)
+                       mapVar := args[0].(map[string]ast.Variable)
+
+                       v, ok := mapVar[index]
+                       if !ok {
+                               if defaultValueSet {
+                                       return defaultValue, nil
+                               } else {
+                                       return "", fmt.Errorf(
+                                               "lookup failed to find '%s'",
+                                               args[1].(string))
+                               }
+                       }
+                       if v.Type != ast.TypeString {
+                               return nil, fmt.Errorf(
+                                       "lookup() may only be used with flat maps, this map contains elements of %s",
+                                       v.Type.Printable())
+                       }
+
+                       return v.Value.(string), nil
+               },
+       }
+}
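+
+// Illustrative usage (not part of the original source; var.amis and the
+// AMI IDs are assumptions):
+//
+//     ${lookup(var.amis, "us-east-1")}              // error if the key is absent
+//     ${lookup(var.amis, "us-east-1", "ami-12345")} // default if the key is absent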
+
+// interpolationFuncElement implements the "element" function that allows
+// a specific index to be looked up in a multi-variable value. Note that this will
+// wrap if the index is larger than the number of elements in the multi-variable value.
+func interpolationFuncElement() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeList, ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       list := args[0].([]ast.Variable)
+                       if len(list) == 0 {
+                               return nil, fmt.Errorf("element() may not be used with an empty list")
+                       }
+
+                       index, err := strconv.Atoi(args[1].(string))
+                       if err != nil || index < 0 {
+                               return "", fmt.Errorf(
+                                       "invalid number for index, got %s", args[1])
+                       }
+
+                       resolvedIndex := index % len(list)
+
+                       v := list[resolvedIndex]
+                       if v.Type != ast.TypeString {
+                               return nil, fmt.Errorf(
+                                       "element() may only be used with flat lists, this list contains elements of %s",
+                                       v.Type.Printable())
+                       }
+                       return v.Value, nil
+               },
+       }
+}
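+
+// Illustrative behavior (not part of the original source; var.azs is an
+// assumed three-element list): indices wrap modulo the list length, so
+//
+//     ${element(var.azs, 3)}   // same as element(var.azs, 0)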
+
+// interpolationFuncKeys implements the "keys" function that yields a list of
+// keys of map types within a Terraform configuration.
+func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeMap},
+               ReturnType: ast.TypeList,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       mapVar := args[0].(map[string]ast.Variable)
+                       keys := make([]string, 0)
+
+                       for k := range mapVar {
+                               keys = append(keys, k)
+                       }
+
+                       sort.Strings(keys)
+
+                       // Keys are guaranteed to be strings
+                       return stringSliceToVariableValue(keys), nil
+               },
+       }
+}
+
+// interpolationFuncValues implements the "values" function that yields a list of
+// values of map types within a Terraform configuration.
+func interpolationFuncValues(vs map[string]ast.Variable) ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeMap},
+               ReturnType: ast.TypeList,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       mapVar := args[0].(map[string]ast.Variable)
+                       keys := make([]string, 0)
+
+                       for k := range mapVar {
+                               keys = append(keys, k)
+                       }
+
+                       sort.Strings(keys)
+
+                       values := make([]string, len(keys))
+                       for index, key := range keys {
+                               if value, ok := mapVar[key].Value.(string); ok {
+                                       values[index] = value
+                               } else {
+                                       return "", fmt.Errorf("values(): %q has element with bad type %s",
+                                               key, mapVar[key].Type)
+                               }
+                       }
+
+                       variable, err := hil.InterfaceToVariable(values)
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       return variable.Value, nil
+               },
+       }
+}
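+
+// Illustrative behavior (not part of the original source; var.tags is an
+// assumed map {Env = "prod", Name = "web"}):
+//
+//     ${keys(var.tags)}     // ["Env", "Name"] (sorted)
+//     ${values(var.tags)}   // ["prod", "web"] (ordered to match keys)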
+
+// interpolationFuncBasename implements the "basename" function.
+func interpolationFuncBasename() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return filepath.Base(args[0].(string)), nil
+               },
+       }
+}
+
+// interpolationFuncBase64Encode implements the "base64encode" function that
+// allows Base64 encoding.
+func interpolationFuncBase64Encode() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       s := args[0].(string)
+                       return base64.StdEncoding.EncodeToString([]byte(s)), nil
+               },
+       }
+}
+
+// interpolationFuncBase64Decode implements the "base64decode" function that
+// allows Base64 decoding.
+func interpolationFuncBase64Decode() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       s := args[0].(string)
+                       sDec, err := base64.StdEncoding.DecodeString(s)
+                       if err != nil {
+                               return "", fmt.Errorf("failed to decode base64 data '%s'", s)
+                       }
+                       return string(sDec), nil
+               },
+       }
+}
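+
+// Illustrative round trip (not part of the original source):
+//
+//     ${base64encode("hello")}      // "aGVsbG8="
+//     ${base64decode("aGVsbG8=")}   // "hello"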
+
+// interpolationFuncLower implements the "lower" function that converts
+// the given string to lowercase.
+func interpolationFuncLower() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       toLower := args[0].(string)
+                       return strings.ToLower(toLower), nil
+               },
+       }
+}
+
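+// interpolationFuncMd5 implements the "md5" function that returns the
+// hex-encoded MD5 hash of the given string.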
+func interpolationFuncMd5() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       s := args[0].(string)
+                       h := md5.New()
+                       h.Write([]byte(s))
+                       hash := hex.EncodeToString(h.Sum(nil))
+                       return hash, nil
+               },
+       }
+}
+
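+// interpolationFuncMerge implements the "merge" function that merges
+// two or more maps; keys in later arguments override earlier ones.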
+func interpolationFuncMerge() ast.Function {
+       return ast.Function{
+               ArgTypes:     []ast.Type{ast.TypeMap},
+               ReturnType:   ast.TypeMap,
+               Variadic:     true,
+               VariadicType: ast.TypeMap,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       outputMap := make(map[string]ast.Variable)
+
+                       for _, arg := range args {
+                               for k, v := range arg.(map[string]ast.Variable) {
+                                       outputMap[k] = v
+                               }
+                       }
+
+                       return outputMap, nil
+               },
+       }
+}
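+
+// Illustrative usage (not part of the original source; both variables are
+// assumed maps):
+//
+//     ${merge(var.default_tags, var.extra_tags)}   // extra_tags wins on conflicts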
+
+// interpolationFuncUpper implements the "upper" function that converts
+// the given string to uppercase.
+func interpolationFuncUpper() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       toUpper := args[0].(string)
+                       return strings.ToUpper(toUpper), nil
+               },
+       }
+}
+
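+// interpolationFuncSha1 implements the "sha1" function that returns the
+// hex-encoded SHA-1 hash of the given string.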
+func interpolationFuncSha1() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       s := args[0].(string)
+                       h := sha1.New()
+                       h.Write([]byte(s))
+                       hash := hex.EncodeToString(h.Sum(nil))
+                       return hash, nil
+               },
+       }
+}
+
+// interpolationFuncSha256 implements the "sha256" function that returns
+// the hex-encoded SHA-256 sum of the given string.
+func interpolationFuncSha256() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       s := args[0].(string)
+                       h := sha256.New()
+                       h.Write([]byte(s))
+                       hash := hex.EncodeToString(h.Sum(nil))
+                       return hash, nil
+               },
+       }
+}
+
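+// interpolationFuncSha512 implements the "sha512" function that returns
+// the hex-encoded SHA-512 hash of the given string.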
+func interpolationFuncSha512() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       s := args[0].(string)
+                       h := sha512.New()
+                       h.Write([]byte(s))
+                       hash := hex.EncodeToString(h.Sum(nil))
+                       return hash, nil
+               },
+       }
+}
+
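+// interpolationFuncTrimSpace implements the "trimspace" function that
+// removes leading and trailing whitespace from the given string.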
+func interpolationFuncTrimSpace() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       trimSpace := args[0].(string)
+                       return strings.TrimSpace(trimSpace), nil
+               },
+       }
+}
+
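+// interpolationFuncBase64Sha256 implements the "base64sha256" function
+// that returns the SHA-256 hash of the given string, Base64-encoded.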
+func interpolationFuncBase64Sha256() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       s := args[0].(string)
+                       h := sha256.New()
+                       h.Write([]byte(s))
+                       shaSum := h.Sum(nil)
+                       encoded := base64.StdEncoding.EncodeToString(shaSum[:])
+                       return encoded, nil
+               },
+       }
+}
+
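+// interpolationFuncBase64Sha512 implements the "base64sha512" function
+// that returns the SHA-512 hash of the given string, Base64-encoded.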
+func interpolationFuncBase64Sha512() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       s := args[0].(string)
+                       h := sha512.New()
+                       h.Write([]byte(s))
+                       shaSum := h.Sum(nil)
+                       encoded := base64.StdEncoding.EncodeToString(shaSum[:])
+                       return encoded, nil
+               },
+       }
+}
+
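+// interpolationFuncUUID implements the "uuid" function that returns a
+// random UUID string on each call.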
+func interpolationFuncUUID() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return uuid.GenerateUUID()
+               },
+       }
+}
+
+// interpolationFuncTimestamp implements the "timestamp" function that
+// returns the current UTC time formatted as an RFC 3339 string.
+func interpolationFuncTimestamp() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       return time.Now().UTC().Format(time.RFC3339), nil
+               },
+       }
+}
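+
+// Illustrative output (not part of the original source): an RFC 3339 UTC
+// timestamp such as "2017-06-06T16:40:07Z".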
+
+// interpolationFuncTitle implements the "title" function that returns a copy of
+// the string in which the first character of each word is capitalized.
+func interpolationFuncTitle() ast.Function {
+       return ast.Function{
+               ArgTypes:   []ast.Type{ast.TypeString},
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       toTitle := args[0].(string)
+                       return strings.Title(toTitle), nil
+               },
+       }
+}
+
+// interpolationFuncSubstr implements the "substr" function that extracts
+// a substring from a string by offset and length.
+func interpolationFuncSubstr() ast.Function {
+       return ast.Function{
+               ArgTypes: []ast.Type{
+                       ast.TypeString, // input string
+                       ast.TypeInt,    // offset
+                       ast.TypeInt,    // length
+               },
+               ReturnType: ast.TypeString,
+               Callback: func(args []interface{}) (interface{}, error) {
+                       str := args[0].(string)
+                       offset := args[1].(int)
+                       length := args[2].(int)
+
+                       // Interpret a negative offset as being equivalent to a positive
+                       // offset taken from the end of the string.
+                       if offset < 0 {
+                               offset += len(str)
+                       }
+
+                       // Interpret a length of `-1` as indicating that the substring
+                       // should start at `offset` and continue until the end of the
+                       // string. Any other negative length (other than `-1`) is invalid.
+                       if length == -1 {
+                               length = len(str)
+                       } else if length >= 0 {
+                               length += offset
+                       } else {
+                               return nil, fmt.Errorf("length should be a non-negative integer")
+                       }
+
+                       if offset > len(str) {
+                               return nil, fmt.Errorf("offset cannot be larger than the length of the string")
+                       }
+
+                       if length > len(str) {
+                               return nil, fmt.Errorf("'offset + length' cannot be larger than the length of the string")
+                       }
+
+                       return str[offset:length], nil
+               },
+       }
+}
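+
+// Illustrative behavior (not part of the original source):
+//
+//     ${substr("hello world", 0, 5)}    // "hello"
+//     ${substr("hello world", -5, -1)}  // "world" (offset from the end, to the end)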
diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go b/vendor/github.com/hashicorp/terraform/config/interpolate_walk.go
new file mode 100644 (file)
index 0000000..ead3d10
--- /dev/null
@@ -0,0 +1,283 @@
+package config
+
+import (
+       "fmt"
+       "reflect"
+       "strings"
+
+       "github.com/hashicorp/hil"
+       "github.com/hashicorp/hil/ast"
+       "github.com/mitchellh/reflectwalk"
+)
+
+// interpolationWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// execute a callback for an interpolation.
+type interpolationWalker struct {
+       // F is the function to call for every interpolation. It can be nil.
+       //
+       // If Replace is true, then the return value of F will be used to
+       // replace the interpolation.
+       F       interpolationWalkerFunc
+       Replace bool
+
+       // ContextF is an advanced version of F that also receives the
+       // location of where it is in the structure. This lets you do
+       // context-aware validation.
+       ContextF interpolationWalkerContextFunc
+
+       key         []string
+       lastValue   reflect.Value
+       loc         reflectwalk.Location
+       cs          []reflect.Value
+       csKey       []reflect.Value
+       csData      interface{}
+       sliceIndex  []int
+       unknownKeys []string
+}
+
+// interpolationWalkerFunc is the callback called by interpolationWalk.
+// It is called with any interpolation found. It should return a value
+// to replace the interpolation with, along with any errors.
+//
+// If Replace is set to false in interpolationWalker, then the replace
+// value can be anything as it will have no effect.
+type interpolationWalkerFunc func(ast.Node) (interface{}, error)
+
+// interpolationWalkerContextFunc is called by interpolationWalk if
+// ContextF is set. This receives both the interpolation and the location
+// where the interpolation is.
+//
+// This callback can be used to validate the location of the interpolation
+// within the configuration.
+type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node)
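+
+// A minimal usage sketch (illustrative, not part of the original source);
+// rawValue is a hypothetical input structure. reflectwalk.Walk drives the
+// walker over the structure, and the walker calls F on every interpolation
+// it finds:
+//
+//     w := &interpolationWalker{
+//             F: func(n ast.Node) (interface{}, error) {
+//                     // inspect or replace the interpolation here
+//                     return UnknownVariableValue, nil
+//             },
+//             Replace: true,
+//     }
+//     err := reflectwalk.Walk(rawValue, w)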
+
+func (w *interpolationWalker) Enter(loc reflectwalk.Location) error {
+       w.loc = loc
+       return nil
+}
+
+func (w *interpolationWalker) Exit(loc reflectwalk.Location) error {
+       w.loc = reflectwalk.None
+
+       switch loc {
+       case reflectwalk.Map:
+               w.cs = w.cs[:len(w.cs)-1]
+       case reflectwalk.MapValue:
+               w.key = w.key[:len(w.key)-1]
+               w.csKey = w.csKey[:len(w.csKey)-1]
+       case reflectwalk.Slice:
+               // Split any values that need to be split
+               w.splitSlice()
+               w.cs = w.cs[:len(w.cs)-1]
+       case reflectwalk.SliceElem:
+               w.csKey = w.csKey[:len(w.csKey)-1]
+               w.sliceIndex = w.sliceIndex[:len(w.sliceIndex)-1]
+       }
+
+       return nil
+}
+
+func (w *interpolationWalker) Map(m reflect.Value) error {
+       w.cs = append(w.cs, m)
+       return nil
+}
+
+func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error {
+       w.csData = k
+       w.csKey = append(w.csKey, k)
+
+       if l := len(w.sliceIndex); l > 0 {
+               w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex[l-1], k.String()))
+       } else {
+               w.key = append(w.key, k.String())
+       }
+
+       w.lastValue = v
+       return nil
+}
+
+func (w *interpolationWalker) Slice(s reflect.Value) error {
+       w.cs = append(w.cs, s)
+       return nil
+}
+
+func (w *interpolationWalker) SliceElem(i int, elem reflect.Value) error {
+       w.csKey = append(w.csKey, reflect.ValueOf(i))
+       w.sliceIndex = append(w.sliceIndex, i)
+       return nil
+}
+
+func (w *interpolationWalker) Primitive(v reflect.Value) error {
+       setV := v
+
+       // We only care about strings
+       if v.Kind() == reflect.Interface {
+               setV = v
+               v = v.Elem()
+       }
+       if v.Kind() != reflect.String {
+               return nil
+       }
+
+       astRoot, err := hil.Parse(v.String())
+       if err != nil {
+               return err
+       }
+
+       // If the AST we got is just a literal string value with the same
+// value then we ignore it. We have to check if it's the same value
+       // because it is possible to input a string, get out a string, and
+       // have it be different. For example: "foo-$${bar}" turns into
+       // "foo-${bar}"
+       if n, ok := astRoot.(*ast.LiteralNode); ok {
+               if s, ok := n.Value.(string); ok && s == v.String() {
+                       return nil
+               }
+       }
+
+       if w.ContextF != nil {
+               w.ContextF(w.loc, astRoot)
+       }
+
+       if w.F == nil {
+               return nil
+       }
+
+       replaceVal, err := w.F(astRoot)
+       if err != nil {
+               return fmt.Errorf(
+                       "%s in:\n\n%s",
+                       err, v.String())
+       }
+
+       if w.Replace {
+               // We need to determine if we need to remove this element
+               // if the result contains any "UnknownVariableValue" which is
+               // set if it is computed. This behavior is different if we're
+               // splitting (in a SliceElem) or not.
+               remove := false
+               if w.loc == reflectwalk.SliceElem {
+                       switch typedReplaceVal := replaceVal.(type) {
+                       case string:
+                               if typedReplaceVal == UnknownVariableValue {
+                                       remove = true
+                               }
+                       case []interface{}:
+                               if hasUnknownValue(typedReplaceVal) {
+                                       remove = true
+                               }
+                       }
+               } else if replaceVal == UnknownVariableValue {
+                       remove = true
+               }
+
+               if remove {
+                       w.unknownKeys = append(w.unknownKeys, strings.Join(w.key, "."))
+               }
+
+               resultVal := reflect.ValueOf(replaceVal)
+               switch w.loc {
+               case reflectwalk.MapKey:
+                       m := w.cs[len(w.cs)-1]
+
+                       // Delete the old value
+                       var zero reflect.Value
+                       m.SetMapIndex(w.csData.(reflect.Value), zero)
+
+                       // Set the new key with the existing value
+                       m.SetMapIndex(resultVal, w.lastValue)
+
+                       // Set the key to be the new key
+                       w.csData = resultVal
+               case reflectwalk.MapValue:
+                       // If we're in a map, then the only way to set a map value is
+                       // to set it directly.
+                       m := w.cs[len(w.cs)-1]
+                       mk := w.csData.(reflect.Value)
+                       m.SetMapIndex(mk, resultVal)
+               default:
+                       // Otherwise, we should be addressable
+                       setV.Set(resultVal)
+               }
+       }
+
+       return nil
+}
+
+func (w *interpolationWalker) replaceCurrent(v reflect.Value) {
+       // If we don't have at least 2 values, we're not going to find a map,
+       // and indexing any further would panic.
+       if len(w.cs) < 2 {
+               return
+       }
+
+       c := w.cs[len(w.cs)-2]
+       switch c.Kind() {
+       case reflect.Map:
+               // Get the key and delete it
+               k := w.csKey[len(w.csKey)-1]
+               c.SetMapIndex(k, v)
+       }
+}
+
+func hasUnknownValue(variable []interface{}) bool {
+       for _, value := range variable {
+               if strVal, ok := value.(string); ok {
+                       if strVal == UnknownVariableValue {
+                               return true
+                       }
+               }
+       }
+       return false
+}
+
+func (w *interpolationWalker) splitSlice() {
+       raw := w.cs[len(w.cs)-1]
+
+       var s []interface{}
+       switch v := raw.Interface().(type) {
+       case []interface{}:
+               s = v
+       case []map[string]interface{}:
+               return
+       }
+
+       split := false
+       for _, val := range s {
+               if varVal, ok := val.(ast.Variable); ok && varVal.Type == ast.TypeList {
+                       split = true
+               }
+               if _, ok := val.([]interface{}); ok {
+                       split = true
+               }
+       }
+
+       if !split {
+               return
+       }
+
+       result := make([]interface{}, 0)
+       for _, v := range s {
+               switch val := v.(type) {
+               case ast.Variable:
+                       switch val.Type {
+                       case ast.TypeList:
+                               elements := val.Value.([]ast.Variable)
+                               for _, element := range elements {
+                                       result = append(result, element.Value)
+                               }
+                       default:
+                               result = append(result, val.Value)
+                       }
+               case []interface{}:
+                       for _, element := range val {
+                               result = append(result, element)
+                       }
+               default:
+                       result = append(result, v)
+               }
+       }
+
+       w.replaceCurrent(reflect.ValueOf(result))
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/lang.go b/vendor/github.com/hashicorp/terraform/config/lang.go
new file mode 100644 (file)
index 0000000..890d30b
--- /dev/null
@@ -0,0 +1,11 @@
+package config
+
+import (
+       "github.com/hashicorp/hil/ast"
+)
+
+type noopNode struct{}
+
+func (n *noopNode) Accept(ast.Visitor) ast.Node      { return n }
+func (n *noopNode) Pos() ast.Pos                     { return ast.Pos{} }
+func (n *noopNode) Type(ast.Scope) (ast.Type, error) { return ast.TypeString, nil }
diff --git a/vendor/github.com/hashicorp/terraform/config/loader.go b/vendor/github.com/hashicorp/terraform/config/loader.go
new file mode 100644 (file)
index 0000000..0bfa89c
--- /dev/null
@@ -0,0 +1,224 @@
+package config
+
+import (
+       "encoding/json"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+       "sort"
+       "strings"
+
+       "github.com/hashicorp/hcl"
+)
+
+// ErrNoConfigsFound is the error returned by LoadDir if no
+// Terraform configuration files were found in the given directory.
+type ErrNoConfigsFound struct {
+       Dir string
+}
+
+func (e ErrNoConfigsFound) Error() string {
+       return fmt.Sprintf(
+               "No Terraform configuration files found in directory: %s",
+               e.Dir)
+}
+
+// LoadJSON loads a single Terraform configuration from a given JSON document.
+//
+// The document must be a complete Terraform configuration. This function will
+// NOT try to load any additional modules so only the given document is loaded.
+func LoadJSON(raw json.RawMessage) (*Config, error) {
+       obj, err := hcl.Parse(string(raw))
+       if err != nil {
+               return nil, fmt.Errorf(
+                       "Error parsing JSON document as HCL: %s", err)
+       }
+
+       // Start building the result
+       hclConfig := &hclConfigurable{
+               Root: obj,
+       }
+
+       return hclConfig.Config()
+}
+
+// LoadFile loads the Terraform configuration from a given file.
+//
+// This file can be in any format that Terraform recognizes, and it may
+// import other files in any format that Terraform recognizes.
+func LoadFile(path string) (*Config, error) {
+       importTree, err := loadTree(path)
+       if err != nil {
+               return nil, err
+       }
+
+       configTree, err := importTree.ConfigTree()
+
+       // Close the importTree now so that we can clear resources as quickly
+       // as possible.
+       importTree.Close()
+
+       if err != nil {
+               return nil, err
+       }
+
+       return configTree.Flatten()
+}
+
+// LoadDir loads all the Terraform configuration files in a single
+// directory and appends them together.
+//
+// Special files known as "override files" can also be present, which
+// are merged into the loaded configuration. That is, the non-override
+// files are loaded first to create the configuration. Then, the overrides
+// are merged into the configuration to create the final configuration.
+//
+// Files are loaded in lexical order.
+func LoadDir(root string) (*Config, error) {
+       files, overrides, err := dirFiles(root)
+       if err != nil {
+               return nil, err
+       }
+       if len(files) == 0 {
+               return nil, &ErrNoConfigsFound{Dir: root}
+       }
+
+       // Determine the absolute path to the directory.
+       rootAbs, err := filepath.Abs(root)
+       if err != nil {
+               return nil, err
+       }
+
+       var result *Config
+
+       // Sort the files and overrides so we have a deterministic order
+       sort.Strings(files)
+       sort.Strings(overrides)
+
+       // Load all the regular files, append them to each other.
+       for _, f := range files {
+               c, err := LoadFile(f)
+               if err != nil {
+                       return nil, err
+               }
+
+               if result != nil {
+                       result, err = Append(result, c)
+                       if err != nil {
+                               return nil, err
+                       }
+               } else {
+                       result = c
+               }
+       }
+
+       // Load all the overrides, and merge them into the config
+       for _, f := range overrides {
+               c, err := LoadFile(f)
+               if err != nil {
+                       return nil, err
+               }
+
+               result, err = Merge(result, c)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Mark the directory
+       result.Dir = rootAbs
+
+       return result, nil
+}
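+
+// A hypothetical caller sketch (not part of the original source), showing
+// how the ErrNoConfigsFound case can be distinguished:
+//
+//     cfg, err := config.LoadDir("./infra")
+//     if _, ok := err.(*config.ErrNoConfigsFound); ok {
+//             // the directory exists but holds no .tf / .tf.json files
+//     }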
+
+// IsEmptyDir returns true if the directory given has no Terraform
+// configuration files.
+func IsEmptyDir(root string) (bool, error) {
+       if _, err := os.Stat(root); err != nil && os.IsNotExist(err) {
+               return true, nil
+       }
+
+       files, overrides, err := dirFiles(root)
+       if err != nil {
+               return false, err
+       }
+
+       return len(files) == 0 && len(overrides) == 0, nil
+}
+
+// ext returns the Terraform configuration extension of the given
+// path, or a blank string if the path has no recognized extension.
+func ext(path string) string {
+       if strings.HasSuffix(path, ".tf") {
+               return ".tf"
+       } else if strings.HasSuffix(path, ".tf.json") {
+               return ".tf.json"
+       } else {
+               return ""
+       }
+}
+
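+// dirFiles returns the paths of the Terraform configuration files in dir,
+// partitioned into regular files and override files.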
+func dirFiles(dir string) ([]string, []string, error) {
+       f, err := os.Open(dir)
+       if err != nil {
+               return nil, nil, err
+       }
+       defer f.Close()
+
+       fi, err := f.Stat()
+       if err != nil {
+               return nil, nil, err
+       }
+       if !fi.IsDir() {
+               return nil, nil, fmt.Errorf(
+                       "configuration path must be a directory: %s",
+                       dir)
+       }
+
+       var files, overrides []string
+       err = nil
+       for err != io.EOF {
+               var fis []os.FileInfo
+               fis, err = f.Readdir(128)
+               if err != nil && err != io.EOF {
+                       return nil, nil, err
+               }
+
+               for _, fi := range fis {
+                       // Ignore directories
+                       if fi.IsDir() {
+                               continue
+                       }
+
+                       // Only care about files that are valid to load
+                       name := fi.Name()
+                       extValue := ext(name)
+                       if extValue == "" || isIgnoredFile(name) {
+                               continue
+                       }
+
+                       // Determine if we're dealing with an override
+                       nameNoExt := name[:len(name)-len(extValue)]
+                       override := nameNoExt == "override" ||
+                               strings.HasSuffix(nameNoExt, "_override")
+
+                       path := filepath.Join(dir, name)
+                       if override {
+                               overrides = append(overrides, path)
+                       } else {
+                               files = append(files, path)
+                       }
+               }
+       }
+
+       return files, overrides, nil
+}
+
+// isIgnoredFile returns true if the given file name is one that should
+// be ignored when loading configuration.
+func isIgnoredFile(name string) bool {
+       return strings.HasPrefix(name, ".") || // Unix-like hidden files
+               strings.HasSuffix(name, "~") || // vim
+               strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#") // emacs
+}
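+
+// Illustrative examples (not part of the original source):
+//
+//     isIgnoredFile(".hidden.tf")   // true  (Unix-like hidden file)
+//     isIgnoredFile("main.tf~")     // true  (vim backup)
+//     isIgnoredFile("#main.tf#")    // true  (emacs autosave)
+//     isIgnoredFile("main.tf")      // false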
diff --git a/vendor/github.com/hashicorp/terraform/config/loader_hcl.go b/vendor/github.com/hashicorp/terraform/config/loader_hcl.go
new file mode 100644 (file)
index 0000000..9abb196
--- /dev/null
@@ -0,0 +1,1130 @@
+package config
+
+import (
+       "fmt"
+       "io/ioutil"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/hcl"
+       "github.com/hashicorp/hcl/hcl/ast"
+       "github.com/mitchellh/mapstructure"
+)
+
+// hclConfigurable is an implementation of configurable that knows
+// how to turn HCL configuration into a *Config object.
+type hclConfigurable struct {
+       File string
+       Root *ast.File
+}
+
+func (t *hclConfigurable) Config() (*Config, error) {
+       validKeys := map[string]struct{}{
+               "atlas":     struct{}{},
+               "data":      struct{}{},
+               "module":    struct{}{},
+               "output":    struct{}{},
+               "provider":  struct{}{},
+               "resource":  struct{}{},
+               "terraform": struct{}{},
+               "variable":  struct{}{},
+       }
+
+       // Top-level item should be the object list
+       list, ok := t.Root.Node.(*ast.ObjectList)
+       if !ok {
+               return nil, fmt.Errorf("error parsing: file doesn't contain a root object")
+       }
+
+       // Start building up the actual configuration.
+       config := new(Config)
+
+       // Terraform config
+       if o := list.Filter("terraform"); len(o.Items) > 0 {
+               var err error
+               config.Terraform, err = loadTerraformHcl(o)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Build the variables
+       if vars := list.Filter("variable"); len(vars.Items) > 0 {
+               var err error
+               config.Variables, err = loadVariablesHcl(vars)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Get Atlas configuration
+       if atlas := list.Filter("atlas"); len(atlas.Items) > 0 {
+               var err error
+               config.Atlas, err = loadAtlasHcl(atlas)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Build the modules
+       if modules := list.Filter("module"); len(modules.Items) > 0 {
+               var err error
+               config.Modules, err = loadModulesHcl(modules)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Build the provider configs
+       if providers := list.Filter("provider"); len(providers.Items) > 0 {
+               var err error
+               config.ProviderConfigs, err = loadProvidersHcl(providers)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Build the resources
+       {
+               var err error
+               managedResourceConfigs := list.Filter("resource")
+               dataResourceConfigs := list.Filter("data")
+
+               config.Resources = make(
+                       []*Resource, 0,
+                       len(managedResourceConfigs.Items)+len(dataResourceConfigs.Items),
+               )
+
+               managedResources, err := loadManagedResourcesHcl(managedResourceConfigs)
+               if err != nil {
+                       return nil, err
+               }
+               dataResources, err := loadDataResourcesHcl(dataResourceConfigs)
+               if err != nil {
+                       return nil, err
+               }
+
+               config.Resources = append(config.Resources, dataResources...)
+               config.Resources = append(config.Resources, managedResources...)
+       }
+
+       // Build the outputs
+       if outputs := list.Filter("output"); len(outputs.Items) > 0 {
+               var err error
+               config.Outputs, err = loadOutputsHcl(outputs)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Check for invalid keys
+       for _, item := range list.Items {
+               if len(item.Keys) == 0 {
+                       // Not sure how this would happen, but let's avoid a panic
+                       continue
+               }
+
+               k := item.Keys[0].Token.Value().(string)
+               if _, ok := validKeys[k]; ok {
+                       continue
+               }
+
+               config.unknownKeys = append(config.unknownKeys, k)
+       }
+
+       return config, nil
+}
+
+// loadFileHcl is a fileLoaderFunc that knows how to read HCL
+// files and turn them into hclConfigurables.
+func loadFileHcl(root string) (configurable, []string, error) {
+       // Read the HCL file and prepare for parsing
+       d, err := ioutil.ReadFile(root)
+       if err != nil {
+               return nil, nil, fmt.Errorf(
+                       "Error reading %s: %s", root, err)
+       }
+
+       // Parse it
+       hclRoot, err := hcl.Parse(string(d))
+       if err != nil {
+               return nil, nil, fmt.Errorf(
+                       "Error parsing %s: %s", root, err)
+       }
+
+       // Start building the result
+       result := &hclConfigurable{
+               File: root,
+               Root: hclRoot,
+       }
+
+       // Dive in, find the imports. This is disabled for now since
+       // imports were removed prior to Terraform 0.1. The code remains
+       // here, commented out, for historical purposes.
+       /*
+               imports := obj.Get("import")
+               if imports == nil {
+                       result.Object.Ref()
+                       return result, nil, nil
+               }
+
+               if imports.Type() != libucl.ObjectTypeString {
+                       imports.Close()
+
+                       return nil, nil, fmt.Errorf(
+                               "Error in %s: all 'import' declarations should be in the format\n"+
+                                       "`import \"foo\"` (Got type %s)",
+                               root,
+                               imports.Type())
+               }
+
+               // Gather all the import paths
+               importPaths := make([]string, 0, imports.Len())
+               iter := imports.Iterate(false)
+               for imp := iter.Next(); imp != nil; imp = iter.Next() {
+                       path := imp.ToString()
+                       if !filepath.IsAbs(path) {
+                               // Relative paths are relative to the Terraform file itself
+                               dir := filepath.Dir(root)
+                               path = filepath.Join(dir, path)
+                       }
+
+                       importPaths = append(importPaths, path)
+                       imp.Close()
+               }
+               iter.Close()
+               imports.Close()
+
+               result.Object.Ref()
+       */
+
+       return result, nil, nil
+}
+
+// Given a handle to an HCL object, this transforms it into the Terraform config.
+func loadTerraformHcl(list *ast.ObjectList) (*Terraform, error) {
+       if len(list.Items) > 1 {
+               return nil, fmt.Errorf("only one 'terraform' block allowed per module")
+       }
+
+       // Get our one item
+       item := list.Items[0]
+
+       // This block should have an empty top level ObjectItem.  If there are keys
+       // here, it's likely because we have a flattened JSON object, and we can
+       // lift this into a nested ObjectList to decode properly.
+       if len(item.Keys) > 0 {
+               item = &ast.ObjectItem{
+                       Val: &ast.ObjectType{
+                               List: &ast.ObjectList{
+                                       Items: []*ast.ObjectItem{item},
+                               },
+                       },
+               }
+       }
+
+       // We need the item value as an ObjectList
+       var listVal *ast.ObjectList
+       if ot, ok := item.Val.(*ast.ObjectType); ok {
+               listVal = ot.List
+       } else {
+               return nil, fmt.Errorf("terraform block: should be an object")
+       }
+
+       // NOTE: We purposely don't validate unknown HCL keys here so that
+       // we can potentially read _future_ Terraform version config (to
+       // still be able to validate the required version).
+       //
+       // We should still keep track of unknown keys to validate later, but
+       // HCL doesn't currently support that.
+
+       var config Terraform
+       if err := hcl.DecodeObject(&config, item.Val); err != nil {
+               return nil, fmt.Errorf(
+                       "Error reading terraform config: %s",
+                       err)
+       }
+
+       // If we have provisioners, then parse those out
+       // If we have a backend block, then parse it out
+               var err error
+               config.Backend, err = loadTerraformBackendHcl(os)
+               if err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading backend config for terraform block: %s",
+                               err)
+               }
+       }
+
+       return &config, nil
+}
+
+// loadTerraformBackendHcl loads the backend configuration from an object list.
+func loadTerraformBackendHcl(list *ast.ObjectList) (*Backend, error) {
+       if len(list.Items) > 1 {
+               return nil, fmt.Errorf("only one 'backend' block allowed")
+       }
+
+       // Get our one item
+       item := list.Items[0]
+
+       // Verify the keys
+       if len(item.Keys) != 1 {
+               return nil, fmt.Errorf(
+                       "position %s: 'backend' must be followed by exactly one string: a type",
+                       item.Pos())
+       }
+
+       typ := item.Keys[0].Token.Value().(string)
+
+       // Decode the raw config
+       var config map[string]interface{}
+       if err := hcl.DecodeObject(&config, item.Val); err != nil {
+               return nil, fmt.Errorf(
+                       "Error reading backend config: %s",
+                       err)
+       }
+
+       rawConfig, err := NewRawConfig(config)
+       if err != nil {
+               return nil, fmt.Errorf(
+                       "Error reading backend config: %s",
+                       err)
+       }
+
+       b := &Backend{
+               Type:      typ,
+               RawConfig: rawConfig,
+       }
+       b.Hash = b.Rehash()
+
+       return b, nil
+}
+
+// Given a handle to an HCL object, this transforms it into the Atlas
+// configuration.
+func loadAtlasHcl(list *ast.ObjectList) (*AtlasConfig, error) {
+       if len(list.Items) > 1 {
+               return nil, fmt.Errorf("only one 'atlas' block allowed")
+       }
+
+       // Get our one item
+       item := list.Items[0]
+
+       var config AtlasConfig
+       if err := hcl.DecodeObject(&config, item.Val); err != nil {
+               return nil, fmt.Errorf(
+                       "Error reading atlas config: %s",
+                       err)
+       }
+
+       return &config, nil
+}
+
+// Given a handle to an HCL object, this recurses into the structure
+// and pulls out a list of modules.
+//
+// The resulting modules may not be unique, but each module
+// represents exactly one module definition in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadModulesHcl(list *ast.ObjectList) ([]*Module, error) {
+       if err := assertAllBlocksHaveNames("module", list); err != nil {
+               return nil, err
+       }
+
+       list = list.Children()
+       if len(list.Items) == 0 {
+               return nil, nil
+       }
+
+       // Where all the results will go
+       var result []*Module
+
+       // Now go over all the types and their children in order to get
+       // all of the actual resources.
+       for _, item := range list.Items {
+               k := item.Keys[0].Token.Value().(string)
+
+               var listVal *ast.ObjectList
+               if ot, ok := item.Val.(*ast.ObjectType); ok {
+                       listVal = ot.List
+               } else {
+                       return nil, fmt.Errorf("module '%s': should be an object", k)
+               }
+
+               var config map[string]interface{}
+               if err := hcl.DecodeObject(&config, item.Val); err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading config for %s: %s",
+                               k,
+                               err)
+               }
+
+               // Remove the fields we handle specially
+               delete(config, "source")
+
+               rawConfig, err := NewRawConfig(config)
+               if err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading config for %s: %s",
+                               k,
+                               err)
+               }
+
+               // If we have a source, then parse it out
+               var source string
+               if o := listVal.Filter("source"); len(o.Items) > 0 {
+                       err = hcl.DecodeObject(&source, o.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error parsing source for %s: %s",
+                                       k,
+                                       err)
+                       }
+               }
+
+               result = append(result, &Module{
+                       Name:      k,
+                       Source:    source,
+                       RawConfig: rawConfig,
+               })
+       }
+
+       return result, nil
+}
+
+// loadOutputsHcl recurses into the given HCL object and turns
+// it into a mapping of outputs.
+func loadOutputsHcl(list *ast.ObjectList) ([]*Output, error) {
+       if err := assertAllBlocksHaveNames("output", list); err != nil {
+               return nil, err
+       }
+
+       list = list.Children()
+
+       // Go through each object and turn it into an actual result.
+       result := make([]*Output, 0, len(list.Items))
+       for _, item := range list.Items {
+               n := item.Keys[0].Token.Value().(string)
+
+               var listVal *ast.ObjectList
+               if ot, ok := item.Val.(*ast.ObjectType); ok {
+                       listVal = ot.List
+               } else {
+                       return nil, fmt.Errorf("output '%s': should be an object", n)
+               }
+
+               var config map[string]interface{}
+               if err := hcl.DecodeObject(&config, item.Val); err != nil {
+                       return nil, err
+               }
+
+               // Delete special keys
+               delete(config, "depends_on")
+
+               rawConfig, err := NewRawConfig(config)
+               if err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading config for output %s: %s",
+                               n,
+                               err)
+               }
+
+               // If we have depends fields, then add those in
+               var dependsOn []string
+               if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+                       err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error reading depends_on for output %q: %s",
+                                       n,
+                                       err)
+                       }
+               }
+
+               result = append(result, &Output{
+                       Name:      n,
+                       RawConfig: rawConfig,
+                       DependsOn: dependsOn,
+               })
+       }
+
+       return result, nil
+}
+
+// loadVariablesHcl recurses into the given HCL object and turns
+// it into a list of variables.
+func loadVariablesHcl(list *ast.ObjectList) ([]*Variable, error) {
+       if err := assertAllBlocksHaveNames("variable", list); err != nil {
+               return nil, err
+       }
+
+       list = list.Children()
+
+       // hclVariable is the structure each variable is decoded into
+       type hclVariable struct {
+               DeclaredType string `hcl:"type"`
+               Default      interface{}
+               Description  string
+               Fields       []string `hcl:",decodedFields"`
+       }
+
+       // Go through each object and turn it into an actual result.
+       result := make([]*Variable, 0, len(list.Items))
+       for _, item := range list.Items {
+               // Clean up items from JSON
+               unwrapHCLObjectKeysFromJSON(item, 1)
+
+               // Verify the keys
+               if len(item.Keys) != 1 {
+                       return nil, fmt.Errorf(
+                               "position %s: 'variable' must be followed by exactly one string: a name",
+                               item.Pos())
+               }
+
+               n := item.Keys[0].Token.Value().(string)
+               if !NameRegexp.MatchString(n) {
+                       return nil, fmt.Errorf(
+                               "position %s: 'variable' name must match regular expression: %s",
+                               item.Pos(), NameRegexp)
+               }
+
+               // Check for invalid keys
+               valid := []string{"type", "default", "description"}
+               if err := checkHCLKeys(item.Val, valid); err != nil {
+                       return nil, multierror.Prefix(err, fmt.Sprintf(
+                               "variable[%s]:", n))
+               }
+
+               // Decode into hclVariable to get typed values
+               var hclVar hclVariable
+               if err := hcl.DecodeObject(&hclVar, item.Val); err != nil {
+                       return nil, err
+               }
+
+               // Defaults turn into a slice of map[string]interface{} and
+               // we need to make sure to convert that down into the
+               // proper type for Config.
+               if ms, ok := hclVar.Default.([]map[string]interface{}); ok {
+                       def := make(map[string]interface{})
+                       for _, m := range ms {
+                               for k, v := range m {
+                                       def[k] = v
+                               }
+                       }
+
+                       hclVar.Default = def
+               }
+
+               // Build the new variable and do some basic validation
+               newVar := &Variable{
+                       Name:         n,
+                       DeclaredType: hclVar.DeclaredType,
+                       Default:      hclVar.Default,
+                       Description:  hclVar.Description,
+               }
+               if err := newVar.ValidateTypeAndDefault(); err != nil {
+                       return nil, err
+               }
+
+               result = append(result, newVar)
+       }
+
+       return result, nil
+}
+
+// loadProvidersHcl recurses into the given HCL object and turns
+// it into a mapping of provider configs.
+func loadProvidersHcl(list *ast.ObjectList) ([]*ProviderConfig, error) {
+       if err := assertAllBlocksHaveNames("provider", list); err != nil {
+               return nil, err
+       }
+
+       list = list.Children()
+       if len(list.Items) == 0 {
+               return nil, nil
+       }
+
+       // Go through each object and turn it into an actual result.
+       result := make([]*ProviderConfig, 0, len(list.Items))
+       for _, item := range list.Items {
+               n := item.Keys[0].Token.Value().(string)
+
+               var listVal *ast.ObjectList
+               if ot, ok := item.Val.(*ast.ObjectType); ok {
+                       listVal = ot.List
+               } else {
+                       return nil, fmt.Errorf("provider '%s': should be an object", n)
+               }
+
+               var config map[string]interface{}
+               if err := hcl.DecodeObject(&config, item.Val); err != nil {
+                       return nil, err
+               }
+
+               delete(config, "alias")
+
+               rawConfig, err := NewRawConfig(config)
+               if err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading config for provider config %s: %s",
+                               n,
+                               err)
+               }
+
+               // If we have an alias field, then add those in
+               var alias string
+               if a := listVal.Filter("alias"); len(a.Items) > 0 {
+                       err := hcl.DecodeObject(&alias, a.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error reading alias for provider[%s]: %s",
+                                       n,
+                                       err)
+                       }
+               }
+
+               result = append(result, &ProviderConfig{
+                       Name:      n,
+                       Alias:     alias,
+                       RawConfig: rawConfig,
+               })
+       }
+
+       return result, nil
+}
+
+// Given a handle to an HCL object, this recurses into the structure
+// and pulls out a list of data sources.
+//
+// The resulting data sources may not be unique, but each one
+// represents exactly one data definition in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadDataResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+       if err := assertAllBlocksHaveNames("data", list); err != nil {
+               return nil, err
+       }
+
+       list = list.Children()
+       if len(list.Items) == 0 {
+               return nil, nil
+       }
+
+       // Where all the results will go
+       var result []*Resource
+
+       // Now go over all the types and their children in order to get
+       // all of the actual resources.
+       for _, item := range list.Items {
+               if len(item.Keys) != 2 {
+                       return nil, fmt.Errorf(
+                               "position %s: 'data' must be followed by exactly two strings: a type and a name",
+                               item.Pos())
+               }
+
+               t := item.Keys[0].Token.Value().(string)
+               k := item.Keys[1].Token.Value().(string)
+
+               var listVal *ast.ObjectList
+               if ot, ok := item.Val.(*ast.ObjectType); ok {
+                       listVal = ot.List
+               } else {
+                       return nil, fmt.Errorf("data source %s[%s]: should be an object", t, k)
+               }
+
+               var config map[string]interface{}
+               if err := hcl.DecodeObject(&config, item.Val); err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading config for %s[%s]: %s",
+                               t,
+                               k,
+                               err)
+               }
+
+               // Remove the fields we handle specially
+               delete(config, "depends_on")
+               delete(config, "provider")
+               delete(config, "count")
+
+               rawConfig, err := NewRawConfig(config)
+               if err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading config for %s[%s]: %s",
+                               t,
+                               k,
+                               err)
+               }
+
+               // If we have a count, then figure it out
+               count := "1"
+               if o := listVal.Filter("count"); len(o.Items) > 0 {
+                       err = hcl.DecodeObject(&count, o.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error parsing count for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+               }
+               countConfig, err := NewRawConfig(map[string]interface{}{
+                       "count": count,
+               })
+               if err != nil {
+                       return nil, err
+               }
+               countConfig.Key = "count"
+
+               // If we have depends fields, then add those in
+               var dependsOn []string
+               if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+                       err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error reading depends_on for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+               }
+
+               // If we have a provider, then parse it out
+               var provider string
+               if o := listVal.Filter("provider"); len(o.Items) > 0 {
+                       err := hcl.DecodeObject(&provider, o.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error reading provider for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+               }
+
+               result = append(result, &Resource{
+                       Mode:         DataResourceMode,
+                       Name:         k,
+                       Type:         t,
+                       RawCount:     countConfig,
+                       RawConfig:    rawConfig,
+                       Provider:     provider,
+                       Provisioners: []*Provisioner{},
+                       DependsOn:    dependsOn,
+                       Lifecycle:    ResourceLifecycle{},
+               })
+       }
+
+       return result, nil
+}
+
+// Given a handle to a HCL object, this recurses into the structure
+// and pulls out a list of managed resources.
+//
+// The resulting resources may not be unique, but each resource
+// represents exactly one "resource" block in the HCL configuration.
+// We leave it up to another pass to merge them together.
+func loadManagedResourcesHcl(list *ast.ObjectList) ([]*Resource, error) {
+       list = list.Children()
+       if len(list.Items) == 0 {
+               return nil, nil
+       }
+
+       // Where all the results will go
+       var result []*Resource
+
+       // Now go over all the types and their children in order to get
+       // all of the actual resources.
+       for _, item := range list.Items {
+               // GH-4385: We detect a pure provisioner resource and give the user
+               // an error about how to do it cleanly.
+               if len(item.Keys) == 4 && item.Keys[2].Token.Value().(string) == "provisioner" {
+                       return nil, fmt.Errorf(
+                               "position %s: provisioners in a resource should be wrapped in a list\n\n"+
+                                       "Example: \"provisioner\": [ { \"local-exec\": ... } ]",
+                               item.Pos())
+               }
+
+               // Fix up JSON input
+               unwrapHCLObjectKeysFromJSON(item, 2)
+
+               if len(item.Keys) != 2 {
+                       return nil, fmt.Errorf(
+                               "position %s: 'resource' must be followed by exactly two strings: a type and a name",
+                               item.Pos())
+               }
+
+               t := item.Keys[0].Token.Value().(string)
+               k := item.Keys[1].Token.Value().(string)
+
+               var listVal *ast.ObjectList
+               if ot, ok := item.Val.(*ast.ObjectType); ok {
+                       listVal = ot.List
+               } else {
+                       return nil, fmt.Errorf("resource %s[%s]: should be an object", t, k)
+               }
+
+               var config map[string]interface{}
+               if err := hcl.DecodeObject(&config, item.Val); err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading config for %s[%s]: %s",
+                               t,
+                               k,
+                               err)
+               }
+
+               // Remove the fields we handle specially
+               delete(config, "connection")
+               delete(config, "count")
+               delete(config, "depends_on")
+               delete(config, "provisioner")
+               delete(config, "provider")
+               delete(config, "lifecycle")
+
+               rawConfig, err := NewRawConfig(config)
+               if err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading config for %s[%s]: %s",
+                               t,
+                               k,
+                               err)
+               }
+
+               // If we have a count, then figure it out
+               count := "1"
+               if o := listVal.Filter("count"); len(o.Items) > 0 {
+                       err = hcl.DecodeObject(&count, o.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error parsing count for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+               }
+               countConfig, err := NewRawConfig(map[string]interface{}{
+                       "count": count,
+               })
+               if err != nil {
+                       return nil, err
+               }
+               countConfig.Key = "count"
+
+               // If we have depends fields, then add those in
+               var dependsOn []string
+               if o := listVal.Filter("depends_on"); len(o.Items) > 0 {
+                       err := hcl.DecodeObject(&dependsOn, o.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error reading depends_on for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+               }
+
+               // If we have connection info, then parse those out
+               var connInfo map[string]interface{}
+               if o := listVal.Filter("connection"); len(o.Items) > 0 {
+                       err := hcl.DecodeObject(&connInfo, o.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error reading connection info for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+               }
+
+               // If we have provisioners, then parse those out
+               var provisioners []*Provisioner
+               if os := listVal.Filter("provisioner"); len(os.Items) > 0 {
+                       var err error
+                       provisioners, err = loadProvisionersHcl(os, connInfo)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error reading provisioners for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+               }
+
+               // If we have a provider, then parse it out
+               var provider string
+               if o := listVal.Filter("provider"); len(o.Items) > 0 {
+                       err := hcl.DecodeObject(&provider, o.Items[0].Val)
+                       if err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error reading provider for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+               }
+
+               // Check if the resource should be re-created before
+               // destroying the existing instance
+               var lifecycle ResourceLifecycle
+               if o := listVal.Filter("lifecycle"); len(o.Items) > 0 {
+                       if len(o.Items) > 1 {
+                               return nil, fmt.Errorf(
+                                       "%s[%s]: Multiple lifecycle blocks found, expected one",
+                                       t, k)
+                       }
+
+                       // Check for invalid keys
+                       valid := []string{"create_before_destroy", "ignore_changes", "prevent_destroy"}
+                       if err := checkHCLKeys(o.Items[0].Val, valid); err != nil {
+                               return nil, multierror.Prefix(err, fmt.Sprintf(
+                                       "%s[%s]:", t, k))
+                       }
+
+                       var raw map[string]interface{}
+                       if err = hcl.DecodeObject(&raw, o.Items[0].Val); err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error parsing lifecycle for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+
+                       if err := mapstructure.WeakDecode(raw, &lifecycle); err != nil {
+                               return nil, fmt.Errorf(
+                                       "Error parsing lifecycle for %s[%s]: %s",
+                                       t,
+                                       k,
+                                       err)
+                       }
+               }
+
+               result = append(result, &Resource{
+                       Mode:         ManagedResourceMode,
+                       Name:         k,
+                       Type:         t,
+                       RawCount:     countConfig,
+                       RawConfig:    rawConfig,
+                       Provisioners: provisioners,
+                       Provider:     provider,
+                       DependsOn:    dependsOn,
+                       Lifecycle:    lifecycle,
+               })
+       }
+
+       return result, nil
+}
+
+func loadProvisionersHcl(list *ast.ObjectList, connInfo map[string]interface{}) ([]*Provisioner, error) {
+       if err := assertAllBlocksHaveNames("provisioner", list); err != nil {
+               return nil, err
+       }
+
+       list = list.Children()
+       if len(list.Items) == 0 {
+               return nil, nil
+       }
+
+       // Go through each object and turn it into an actual result.
+       result := make([]*Provisioner, 0, len(list.Items))
+       for _, item := range list.Items {
+               n := item.Keys[0].Token.Value().(string)
+
+               var listVal *ast.ObjectList
+               if ot, ok := item.Val.(*ast.ObjectType); ok {
+                       listVal = ot.List
+               } else {
+                       return nil, fmt.Errorf("provisioner '%s': should be an object", n)
+               }
+
+               var config map[string]interface{}
+               if err := hcl.DecodeObject(&config, item.Val); err != nil {
+                       return nil, err
+               }
+
+               // Parse the "when" value
+               when := ProvisionerWhenCreate
+               if v, ok := config["when"]; ok {
+                       switch v {
+                       case "create":
+                               when = ProvisionerWhenCreate
+                       case "destroy":
+                               when = ProvisionerWhenDestroy
+                       default:
+                               return nil, fmt.Errorf(
+                                       "position %s: 'provisioner' when must be 'create' or 'destroy'",
+                                       item.Pos())
+                       }
+               }
+
+               // Parse the "on_failure" value
+               onFailure := ProvisionerOnFailureFail
+               if v, ok := config["on_failure"]; ok {
+                       switch v {
+                       case "continue":
+                               onFailure = ProvisionerOnFailureContinue
+                       case "fail":
+                               onFailure = ProvisionerOnFailureFail
+                       default:
+                               return nil, fmt.Errorf(
+                                       "position %s: 'provisioner' on_failure must be 'continue' or 'fail'",
+                                       item.Pos())
+                       }
+               }
+
+               // Delete fields we special case
+               delete(config, "connection")
+               delete(config, "when")
+               delete(config, "on_failure")
+
+               rawConfig, err := NewRawConfig(config)
+               if err != nil {
+                       return nil, err
+               }
+
+               // Check if we have a provisioner-level connection
+               // block that overrides the resource-level
+               var subConnInfo map[string]interface{}
+               if o := listVal.Filter("connection"); len(o.Items) > 0 {
+                       err := hcl.DecodeObject(&subConnInfo, o.Items[0].Val)
+                       if err != nil {
+                               return nil, err
+                       }
+               }
+
+               // Inherit from the resource connInfo any keys
+               // that are not explicitly overridden.
+               if connInfo != nil && subConnInfo != nil {
+                       for k, v := range connInfo {
+                               if _, ok := subConnInfo[k]; !ok {
+                                       subConnInfo[k] = v
+                               }
+                       }
+               } else if subConnInfo == nil {
+                       subConnInfo = connInfo
+               }
+
+               // Parse the connInfo
+               connRaw, err := NewRawConfig(subConnInfo)
+               if err != nil {
+                       return nil, err
+               }
+
+               result = append(result, &Provisioner{
+                       Type:      n,
+                       RawConfig: rawConfig,
+                       ConnInfo:  connRaw,
+                       When:      when,
+                       OnFailure: onFailure,
+               })
+       }
+
+       return result, nil
+}
+
+/*
+func hclObjectMap(os *hclobj.Object) map[string]ast.ListNode {
+       objects := make(map[string][]*hclobj.Object)
+
+       for _, o := range os.Elem(false) {
+               for _, elem := range o.Elem(true) {
+                       val, ok := objects[elem.Key]
+                       if !ok {
+                               val = make([]*hclobj.Object, 0, 1)
+                       }
+
+                       val = append(val, elem)
+                       objects[elem.Key] = val
+               }
+       }
+
+       return objects
+}
+*/
+
+// assertAllBlocksHaveNames returns an error if any of the items in
+// the given object list are blocks without keys (like "module {}")
+// or simple assignments (like "module = 1"). It returns nil if
+// neither of these things are true.
+//
+// The given name is used in any generated error messages, and should
+// be the name of the block we're dealing with. The given list should
+// be the result of calling .Filter on an object list with that same
+// name.
+func assertAllBlocksHaveNames(name string, list *ast.ObjectList) error {
+       if elem := list.Elem(); len(elem.Items) != 0 {
+               switch et := elem.Items[0].Val.(type) {
+               case *ast.ObjectType:
+                       pos := et.Lbrace
+                       return fmt.Errorf("%s: %q must be followed by a name", pos, name)
+               default:
+                       pos := elem.Items[0].Val.Pos()
+                       return fmt.Errorf("%s: %q must be a configuration block", pos, name)
+               }
+       }
+       return nil
+}
+
+func checkHCLKeys(node ast.Node, valid []string) error {
+       var list *ast.ObjectList
+       switch n := node.(type) {
+       case *ast.ObjectList:
+               list = n
+       case *ast.ObjectType:
+               list = n.List
+       default:
+               return fmt.Errorf("cannot check HCL keys of type %T", n)
+       }
+
+       validMap := make(map[string]struct{}, len(valid))
+       for _, v := range valid {
+               validMap[v] = struct{}{}
+       }
+
+       var result error
+       for _, item := range list.Items {
+               key := item.Keys[0].Token.Value().(string)
+               if _, ok := validMap[key]; !ok {
+                       result = multierror.Append(result, fmt.Errorf(
+                               "invalid key: %s", key))
+               }
+       }
+
+       return result
+}
+
+// unwrapHCLObjectKeysFromJSON cleans up an edge case that can occur when
+// parsing JSON as input: if we're parsing JSON then directly nested
+// items will show up as additional "keys".
+//
+// For objects that expect a fixed number of keys, this breaks the
+// decoding process. This function unwraps the object into what it would've
+// looked like if it came directly from HCL by specifying the number of keys
+// you expect.
+//
+// Example:
+//
+// { "foo": { "baz": {} } }
+//
+// Will show up with Keys being: []string{"foo", "baz"}
+// when we really just want the first two. This function will fix this.
+func unwrapHCLObjectKeysFromJSON(item *ast.ObjectItem, depth int) {
+       if len(item.Keys) > depth && item.Keys[0].Token.JSON {
+               for len(item.Keys) > depth {
+                       // Pop off the last key
+                       n := len(item.Keys)
+                       key := item.Keys[n-1]
+                       item.Keys[n-1] = nil
+                       item.Keys = item.Keys[:n-1]
+
+                       // Wrap our value in a list
+                       item.Val = &ast.ObjectType{
+                               List: &ast.ObjectList{
+                                       Items: []*ast.ObjectItem{
+                                               &ast.ObjectItem{
+                                                       Keys: []*ast.ObjectKey{key},
+                                                       Val:  item.Val,
+                                               },
+                                       },
+                               },
+                       }
+               }
+       }
+}
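
The JSON unwrapping above is easiest to see on a concrete input. A minimal sketch, assuming the HCL v1 JSON parser (github.com/hashicorp/hcl/json/parser) produces the flattened key chain that the unwrapHCLObjectKeysFromJSON comment describes:

package main

import (
        "fmt"

        "github.com/hashicorp/hcl/hcl/ast"
        "github.com/hashicorp/hcl/json/parser"
)

func main() {
        // Directly nested JSON objects collapse onto a single item as a chain
        // of keys; this is the shape the loader has to fold back into the
        // two-key form that a native HCL block would have produced.
        f, err := parser.Parse([]byte(`{"foo": {"baz": {}}}`))
        if err != nil {
                panic(err)
        }

        item := f.Node.(*ast.ObjectList).Items[0]
        for _, key := range item.Keys {
                fmt.Println(key.Token.Value()) // "foo", then "baz"
        }
}
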
diff --git a/vendor/github.com/hashicorp/terraform/config/merge.go b/vendor/github.com/hashicorp/terraform/config/merge.go
new file mode 100644 (file)
index 0000000..db214be
--- /dev/null
@@ -0,0 +1,193 @@
+package config
+
+// Merge merges two configurations into a single configuration.
+//
+// Merge allows for the two configurations to have duplicate resources,
+// because the resources will be merged. This differs from a single
+// Config which must only have unique resources.
+func Merge(c1, c2 *Config) (*Config, error) {
+       c := new(Config)
+
+       // Merge unknown keys
+       unknowns := make(map[string]struct{})
+       for _, k := range c1.unknownKeys {
+               _, present := unknowns[k]
+               if !present {
+                       unknowns[k] = struct{}{}
+                       c.unknownKeys = append(c.unknownKeys, k)
+               }
+       }
+       for _, k := range c2.unknownKeys {
+               _, present := unknowns[k]
+               if !present {
+                       unknowns[k] = struct{}{}
+                       c.unknownKeys = append(c.unknownKeys, k)
+               }
+       }
+
+       // Merge Atlas configuration. This is a simple "one overrides the
+       // other" sort of merge.
+       c.Atlas = c1.Atlas
+       if c2.Atlas != nil {
+               c.Atlas = c2.Atlas
+       }
+
+       // Merge the Terraform configuration
+       if c1.Terraform != nil {
+               c.Terraform = c1.Terraform
+               if c2.Terraform != nil {
+                       c.Terraform.Merge(c2.Terraform)
+               }
+       } else {
+               c.Terraform = c2.Terraform
+       }
+
+       // NOTE: Everything below is pretty gross. Due to the lack of generics
+       // in Go, there is some hoop-jumping involved to make this merging a
+       // little more test-friendly and less repetitive. Ironically, making it
+       // less repetitive involves being a little repetitive, but I prefer to
+       // be repetitive with things that are less error prone than things that
+       // are more error prone (more logic). Type conversions to an interface
+       // are pretty low-error.
+
+       var m1, m2, mresult []merger
+
+       // Modules
+       m1 = make([]merger, 0, len(c1.Modules))
+       m2 = make([]merger, 0, len(c2.Modules))
+       for _, v := range c1.Modules {
+               m1 = append(m1, v)
+       }
+       for _, v := range c2.Modules {
+               m2 = append(m2, v)
+       }
+       mresult = mergeSlice(m1, m2)
+       if len(mresult) > 0 {
+               c.Modules = make([]*Module, len(mresult))
+               for i, v := range mresult {
+                       c.Modules[i] = v.(*Module)
+               }
+       }
+
+       // Outputs
+       m1 = make([]merger, 0, len(c1.Outputs))
+       m2 = make([]merger, 0, len(c2.Outputs))
+       for _, v := range c1.Outputs {
+               m1 = append(m1, v)
+       }
+       for _, v := range c2.Outputs {
+               m2 = append(m2, v)
+       }
+       mresult = mergeSlice(m1, m2)
+       if len(mresult) > 0 {
+               c.Outputs = make([]*Output, len(mresult))
+               for i, v := range mresult {
+                       c.Outputs[i] = v.(*Output)
+               }
+       }
+
+       // Provider Configs
+       m1 = make([]merger, 0, len(c1.ProviderConfigs))
+       m2 = make([]merger, 0, len(c2.ProviderConfigs))
+       for _, v := range c1.ProviderConfigs {
+               m1 = append(m1, v)
+       }
+       for _, v := range c2.ProviderConfigs {
+               m2 = append(m2, v)
+       }
+       mresult = mergeSlice(m1, m2)
+       if len(mresult) > 0 {
+               c.ProviderConfigs = make([]*ProviderConfig, len(mresult))
+               for i, v := range mresult {
+                       c.ProviderConfigs[i] = v.(*ProviderConfig)
+               }
+       }
+
+       // Resources
+       m1 = make([]merger, 0, len(c1.Resources))
+       m2 = make([]merger, 0, len(c2.Resources))
+       for _, v := range c1.Resources {
+               m1 = append(m1, v)
+       }
+       for _, v := range c2.Resources {
+               m2 = append(m2, v)
+       }
+       mresult = mergeSlice(m1, m2)
+       if len(mresult) > 0 {
+               c.Resources = make([]*Resource, len(mresult))
+               for i, v := range mresult {
+                       c.Resources[i] = v.(*Resource)
+               }
+       }
+
+       // Variables
+       m1 = make([]merger, 0, len(c1.Variables))
+       m2 = make([]merger, 0, len(c2.Variables))
+       for _, v := range c1.Variables {
+               m1 = append(m1, v)
+       }
+       for _, v := range c2.Variables {
+               m2 = append(m2, v)
+       }
+       mresult = mergeSlice(m1, m2)
+       if len(mresult) > 0 {
+               c.Variables = make([]*Variable, len(mresult))
+               for i, v := range mresult {
+                       c.Variables[i] = v.(*Variable)
+               }
+       }
+
+       return c, nil
+}
+
+// merger is an interface that must be implemented by types that are
+// merge-able. This simplifies the implementation of Merge for the various
+// components of a Config.
+type merger interface {
+       mergerName() string
+       mergerMerge(merger) merger
+}
+
+// mergeSlice merges a slice of mergers.
+func mergeSlice(m1, m2 []merger) []merger {
+       r := make([]merger, len(m1), len(m1)+len(m2))
+       copy(r, m1)
+
+       m := map[string]struct{}{}
+       for _, v2 := range m2 {
+               // If we already saw it, just append it because it's a
+               // duplicate and invalid...
+               name := v2.mergerName()
+               if _, ok := m[name]; ok {
+                       r = append(r, v2)
+                       continue
+               }
+               m[name] = struct{}{}
+
+               // Find an original to override
+               var original merger
+               originalIndex := -1
+               for i, v := range m1 {
+                       if v.mergerName() == name {
+                               originalIndex = i
+                               original = v
+                               break
+                       }
+               }
+
+               var v merger
+               if original == nil {
+                       v = v2
+               } else {
+                       v = original.mergerMerge(v2)
+               }
+
+               if originalIndex == -1 {
+                       r = append(r, v)
+               } else {
+                       r[originalIndex] = v
+               }
+       }
+
+       return r
+}
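
The merger interface keeps Merge generic without language-level generics: each config type reports a name and merges itself with a same-named override. A standalone sketch of the same semantics (the variable type here is illustrative, not the package's own):

package main

import "fmt"

// variable stands in for the package's mergeable types (Module, Output,
// ProviderConfig, Resource, Variable).
type variable struct {
        name, def string
}

func (v *variable) mergerName() string { return v.name }

func (v *variable) mergerMerge(other *variable) *variable {
        out := *v
        if other.def != "" {
                out.def = other.def // the later config wins, field by field
        }
        return &out
}

func main() {
        base := []*variable{{name: "region", def: "us-east-1"}}
        override := []*variable{{name: "region", def: "eu-west-1"}}

        // As in mergeSlice: a same-named entry overwrites the original slot,
        // while new names append to the end.
        index := map[string]int{}
        for i, v := range base {
                index[v.mergerName()] = i
        }
        result := append([]*variable{}, base...)
        for _, v2 := range override {
                if i, ok := index[v2.mergerName()]; ok {
                        result[i] = result[i].mergerMerge(v2)
                } else {
                        result = append(result, v2)
                }
        }

        fmt.Println(result[0].def) // eu-west-1
}
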
diff --git a/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go b/vendor/github.com/hashicorp/terraform/config/module/copy_dir.go
new file mode 100644 (file)
index 0000000..095f61d
--- /dev/null
@@ -0,0 +1,114 @@
+package module
+
+import (
+       "io"
+       "os"
+       "path/filepath"
+       "strings"
+)
+
+// copyDir copies the src directory contents into dst. Both directories
+// should already exist.
+func copyDir(dst, src string) error {
+       src, err := filepath.EvalSymlinks(src)
+       if err != nil {
+               return err
+       }
+
+       walkFn := func(path string, info os.FileInfo, err error) error {
+               if err != nil {
+                       return err
+               }
+
+               if path == src {
+                       return nil
+               }
+
+               if strings.HasPrefix(filepath.Base(path), ".") {
+                       // Skip any dot files
+                       if info.IsDir() {
+                               return filepath.SkipDir
+                       } else {
+                               return nil
+                       }
+               }
+
+               // The "path" has the src prefixed to it. We need to join our
+               // destination with the path without the src on it.
+               dstPath := filepath.Join(dst, path[len(src):])
+
+               // We don't want to try to copy the same file over itself.
+               if eq, err := sameFile(path, dstPath); eq {
+                       return nil
+               } else if err != nil {
+                       return err
+               }
+
+               // If we have a directory, make that subdirectory, then continue
+               // the walk.
+               if info.IsDir() {
+                       if path == filepath.Join(src, dst) {
+                               // dst is in src; don't walk it.
+                               return nil
+                       }
+
+                       if err := os.MkdirAll(dstPath, 0755); err != nil {
+                               return err
+                       }
+
+                       return nil
+               }
+
+               // If we have a file, copy the contents.
+               srcF, err := os.Open(path)
+               if err != nil {
+                       return err
+               }
+               defer srcF.Close()
+
+               dstF, err := os.Create(dstPath)
+               if err != nil {
+                       return err
+               }
+               defer dstF.Close()
+
+               if _, err := io.Copy(dstF, srcF); err != nil {
+                       return err
+               }
+
+               // Chmod it
+               return os.Chmod(dstPath, info.Mode())
+       }
+
+       return filepath.Walk(src, walkFn)
+}
+
+// sameFile tries to determine if two paths refer to the same file.
+// If the paths don't match, we look up the inode on supported systems.
+func sameFile(a, b string) (bool, error) {
+       if a == b {
+               return true, nil
+       }
+
+       aIno, err := inode(a)
+       if err != nil {
+               if os.IsNotExist(err) {
+                       return false, nil
+               }
+               return false, err
+       }
+
+       bIno, err := inode(b)
+       if err != nil {
+               if os.IsNotExist(err) {
+                       return false, nil
+               }
+               return false, err
+       }
+
+       if aIno > 0 && aIno == bIno {
+               return true, nil
+       }
+
+       return false, nil
+}
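
The dot-file rule in copyDir relies on a filepath.Walk idiom worth calling out: returning filepath.SkipDir prunes a whole hidden directory in one step, while a hidden file is skipped individually by returning nil. A minimal standalone sketch:

package main

import (
        "fmt"
        "os"
        "path/filepath"
        "strings"
)

func main() {
        // Walk the current directory as a stand-in for a module source tree.
        root := "."
        filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
                if err != nil {
                        return err
                }
                if path != root && strings.HasPrefix(filepath.Base(path), ".") {
                        if info.IsDir() {
                                return filepath.SkipDir // prune .git/, .hg/, etc. entirely
                        }
                        return nil // skip a single dot file
                }
                fmt.Println("would copy:", path)
                return nil
        })
}
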
diff --git a/vendor/github.com/hashicorp/terraform/config/module/get.go b/vendor/github.com/hashicorp/terraform/config/module/get.go
new file mode 100644 (file)
index 0000000..96b4a63
--- /dev/null
@@ -0,0 +1,71 @@
+package module
+
+import (
+       "io/ioutil"
+       "os"
+
+       "github.com/hashicorp/go-getter"
+)
+
+// GetMode is an enum that describes how modules are loaded.
+//
+// GetModeNone says that modules will not be downloaded or updated; they will
+// only be loaded from the storage.
+//
+// GetModeGet says that modules can be initially downloaded if they don't
+// exist, but otherwise to just load from the current version in storage.
+//
+// GetModeUpdate says that modules should be checked for updates and
+// downloaded prior to loading. If there are no updates, we load the version
+// from disk, otherwise we download first and then load.
+type GetMode byte
+
+const (
+       GetModeNone GetMode = iota
+       GetModeGet
+       GetModeUpdate
+)
+
+// GetCopy is the same as Get except that it downloads a copy of the
+// module represented by source.
+//
+// This copy will omit any dot-prefixed files (such as .git/, .hg/) and
+// can't be updated on its own.
+func GetCopy(dst, src string) error {
+       // Create the temporary directory to do the real Get to
+       tmpDir, err := ioutil.TempDir("", "tf")
+       if err != nil {
+               return err
+       }
+       // FIXME: This isn't completely safe. Creating and then removing our temp
+       //        path leaves a window in which an attacker could race us to inject files.
+       if err := os.RemoveAll(tmpDir); err != nil {
+               return err
+       }
+       defer os.RemoveAll(tmpDir)
+
+       // Get to that temporary dir
+       if err := getter.Get(tmpDir, src); err != nil {
+               return err
+       }
+
+       // Make sure the destination exists
+       if err := os.MkdirAll(dst, 0755); err != nil {
+               return err
+       }
+
+       // Copy to the final location
+       return copyDir(dst, tmpDir)
+}
+
+func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
+       // Get the module with the level specified if we were told to.
+       if mode > GetModeNone {
+               if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
+                       return "", false, err
+               }
+       }
+
+       // Get the directory where the module is.
+       return s.Dir(key)
+}
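
A usage sketch for GetCopy (the destination and module source address below are purely illustrative):

package main

import (
        "log"

        "github.com/hashicorp/terraform/config/module"
)

func main() {
        // Fetch a module into ./example-module. Because GetCopy downloads to a
        // temp dir and then runs copyDir, dot-prefixed VCS metadata such as
        // .git/ is omitted from the result.
        if err := module.GetCopy("./example-module", "github.com/hashicorp/example"); err != nil {
                log.Fatal(err)
        }
}
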
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go
new file mode 100644 (file)
index 0000000..8603ee2
--- /dev/null
@@ -0,0 +1,21 @@
+// +build linux darwin openbsd netbsd solaris
+
+package module
+
+import (
+       "fmt"
+       "os"
+       "syscall"
+)
+
+// inode looks up the inode of a file on POSIX systems.
+func inode(path string) (uint64, error) {
+       stat, err := os.Stat(path)
+       if err != nil {
+               return 0, err
+       }
+       if st, ok := stat.Sys().(*syscall.Stat_t); ok {
+               return st.Ino, nil
+       }
+       return 0, fmt.Errorf("could not determine file inode")
+}
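
For comparison, the standard library exposes the same identity check portably: os.SameFile compares the underlying FileInfo (the inode on Unix), which is what the inode-based sameFile in copy_dir.go approximates. A small sketch, assuming some file example.txt exists in the working directory:

package main

import (
        "fmt"
        "log"
        "os"
)

func main() {
        a, err := os.Stat("example.txt")
        if err != nil {
                log.Fatal(err)
        }
        b, err := os.Stat("./example.txt")
        if err != nil {
                log.Fatal(err)
        }
        fmt.Println(os.SameFile(a, b)) // true: both paths resolve to one file
}
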
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go b/vendor/github.com/hashicorp/terraform/config/module/inode_freebsd.go
new file mode 100644 (file)
index 0000000..0d95730
--- /dev/null
@@ -0,0 +1,21 @@
+// +build freebsd
+
+package module
+
+import (
+       "fmt"
+       "os"
+       "syscall"
+)
+
+// inode looks up the inode of a file on POSIX systems.
+func inode(path string) (uint64, error) {
+       stat, err := os.Stat(path)
+       if err != nil {
+               return 0, err
+       }
+       if st, ok := stat.Sys().(*syscall.Stat_t); ok {
+               return uint64(st.Ino), nil
+       }
+       return 0, fmt.Errorf("could not determine file inode")
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go b/vendor/github.com/hashicorp/terraform/config/module/inode_windows.go
new file mode 100644 (file)
index 0000000..c0cf455
--- /dev/null
@@ -0,0 +1,8 @@
+// +build windows
+
+package module
+
+// no syscall.Stat_t on windows, return 0 for inodes
+func inode(path string) (uint64, error) {
+       return 0, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/module.go b/vendor/github.com/hashicorp/terraform/config/module/module.go
new file mode 100644 (file)
index 0000000..f8649f6
--- /dev/null
@@ -0,0 +1,7 @@
+package module
+
+// Module represents the metadata for a single module.
+type Module struct {
+       Name   string
+       Source string
+}
diff --git a/vendor/github.com/hashicorp/terraform/config/module/testing.go b/vendor/github.com/hashicorp/terraform/config/module/testing.go
new file mode 100644 (file)
index 0000000..fc9e733
--- /dev/null
@@ -0,0 +1,38 @@
+package module
+
+import (
+       "io/ioutil"
+       "os"
+       "testing"
+
+       "github.com/hashicorp/go-getter"
+)
+
+// TestTree loads a module at the given path and returns the tree as well
+// as a function that should be deferred to clean up resources.
+func TestTree(t *testing.T, path string) (*Tree, func()) {
+       // Create a temporary directory for module storage
+       dir, err := ioutil.TempDir("", "tf")
+       if err != nil {
+               t.Fatalf("err: %s", err)
+               return nil, nil
+       }
+
+       // Load the module
+       mod, err := NewTreeModule("", path)
+       if err != nil {
+               t.Fatalf("err: %s", err)
+               return nil, nil
+       }
+
+       // Get the child modules
+       s := &getter.FolderStorage{StorageDir: dir}
+       if err := mod.Load(s, GetModeGet); err != nil {
+               t.Fatalf("err: %s", err)
+               return nil, nil
+       }
+
+       return mod, func() {
+               os.RemoveAll(dir)
+       }
+}
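
A usage sketch for TestTree in a test (the fixture path is hypothetical; point it at a directory containing Terraform configuration):

package module_test

import (
        "testing"

        "github.com/hashicorp/terraform/config/module"
)

func TestExampleTree(t *testing.T) {
        tree, cleanup := module.TestTree(t, "testdata/basic")
        defer cleanup() // removes the temporary module storage dir

        t.Log(tree.String())
}
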
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree.go b/vendor/github.com/hashicorp/terraform/config/module/tree.go
new file mode 100644 (file)
index 0000000..b6f90fd
--- /dev/null
@@ -0,0 +1,428 @@
+package module
+
+import (
+       "bufio"
+       "bytes"
+       "fmt"
+       "path/filepath"
+       "strings"
+       "sync"
+
+       "github.com/hashicorp/go-getter"
+       "github.com/hashicorp/terraform/config"
+)
+
+// RootName is the name of the root tree.
+const RootName = "root"
+
+// Tree represents the module import tree of configurations.
+//
+// This Tree structure can be used to get (download) new modules, load
+// all the modules without getting, flatten the tree into something
+// Terraform can use, etc.
+type Tree struct {
+       name     string
+       config   *config.Config
+       children map[string]*Tree
+       path     []string
+       lock     sync.RWMutex
+}
+
+// NewTree returns a new Tree for the given config structure.
+func NewTree(name string, c *config.Config) *Tree {
+       return &Tree{config: c, name: name}
+}
+
+// NewEmptyTree returns a new tree that is empty (contains no configuration).
+func NewEmptyTree() *Tree {
+       t := &Tree{config: &config.Config{}}
+
+       // We do this dummy load so that the tree is marked as "loaded". It
+       // should never fail because this is essentially a no-op. If it does
+       // fail, we panic so we know it's a bug.
+       if err := t.Load(nil, GetModeGet); err != nil {
+               panic(err)
+       }
+
+       return t
+}
+
+// NewTreeModule is like NewTree except it parses the configuration in
+// the directory and gives it a specific name. Use a blank name "" to specify
+// the root module.
+func NewTreeModule(name, dir string) (*Tree, error) {
+       c, err := config.LoadDir(dir)
+       if err != nil {
+               return nil, err
+       }
+
+       return NewTree(name, c), nil
+}
+
+// Config returns the configuration for this module.
+func (t *Tree) Config() *config.Config {
+       return t.config
+}
+
+// Child returns the child with the given path (by name).
+func (t *Tree) Child(path []string) *Tree {
+       if t == nil {
+               return nil
+       }
+
+       if len(path) == 0 {
+               return t
+       }
+
+       c := t.Children()[path[0]]
+       if c == nil {
+               return nil
+       }
+
+       return c.Child(path[1:])
+}
+
+// Children returns the children of this tree (the modules that are
+// imported by this root).
+//
+// This will only return a non-nil value after Load is called.
+func (t *Tree) Children() map[string]*Tree {
+       t.lock.RLock()
+       defer t.lock.RUnlock()
+       return t.children
+}
+
+// Loaded reports whether this tree has been loaded.
+func (t *Tree) Loaded() bool {
+       t.lock.RLock()
+       defer t.lock.RUnlock()
+       return t.children != nil
+}
+
+// Modules returns the list of modules that this tree imports.
+//
+// This is only the imports of _this_ level of the tree. To retrieve the
+// full nested imports, you'll have to traverse the tree.
+func (t *Tree) Modules() []*Module {
+       result := make([]*Module, len(t.config.Modules))
+       for i, m := range t.config.Modules {
+               result[i] = &Module{
+                       Name:   m.Name,
+                       Source: m.Source,
+               }
+       }
+
+       return result
+}
+
+// Name returns the name of the tree. This will be RootName ("root") for
+// the root tree and the module name given for any children.
+func (t *Tree) Name() string {
+       if t.name == "" {
+               return RootName
+       }
+
+       return t.name
+}
+
+// Load loads the configuration of the entire tree.
+//
+// The parameters are used to tell the tree where to find modules and
+// whether it can download/update modules along the way.
+//
+// Calling this multiple times will reload the tree.
+//
+// Various semantic checks are made while loading, since module trees
+// inherently require the configuration to be in a reasonably sane state:
+// no circular dependencies, proper module sources, etc. A full suite of
+// validations can be done by running Validate (after loading).
+func (t *Tree) Load(s getter.Storage, mode GetMode) error {
+       t.lock.Lock()
+       defer t.lock.Unlock()
+
+       // Reset the children if we have any
+       t.children = nil
+
+       modules := t.Modules()
+       children := make(map[string]*Tree)
+
+       // Go through all the modules and get the directory for them.
+       for _, m := range modules {
+               if _, ok := children[m.Name]; ok {
+                       return fmt.Errorf(
+                               "module %s: duplicated. module names must be unique", m.Name)
+               }
+
+               // Determine the path to this child
+               path := make([]string, len(t.path), len(t.path)+1)
+               copy(path, t.path)
+               path = append(path, m.Name)
+
+               // Split out the subdir if we have one
+               source, subDir := getter.SourceDirSubdir(m.Source)
+
+               source, err := getter.Detect(source, t.config.Dir, getter.Detectors)
+               if err != nil {
+                       return fmt.Errorf("module %s: %s", m.Name, err)
+               }
+
+               // Check if the detector introduced something new.
+               source, subDir2 := getter.SourceDirSubdir(source)
+               if subDir2 != "" {
+                       subDir = filepath.Join(subDir2, subDir)
+               }
+
+               // Get the directory where this module is so we can load it
+               key := strings.Join(path, ".")
+               key = fmt.Sprintf("root.%s-%s", key, m.Source)
+               dir, ok, err := getStorage(s, key, source, mode)
+               if err != nil {
+                       return err
+               }
+               if !ok {
+                       return fmt.Errorf(
+                               "module %s: not found, may need to be downloaded using 'terraform get'", m.Name)
+               }
+
+               // If we have a subdirectory, then merge that in
+               if subDir != "" {
+                       dir = filepath.Join(dir, subDir)
+               }
+
+               // Load the configuration for this module from the resolved directory.
+               children[m.Name], err = NewTreeModule(m.Name, dir)
+               if err != nil {
+                       return fmt.Errorf(
+                               "module %s: %s", m.Name, err)
+               }
+
+               // Set the path of this child
+               children[m.Name].path = path
+       }
+
+       // Go through all the children and load them.
+       for _, c := range children {
+               if err := c.Load(s, mode); err != nil {
+                       return err
+               }
+       }
+
+       // Set our tree up
+       t.children = children
+
+       return nil
+}
+
+// Path is the full path to this tree.
+func (t *Tree) Path() []string {
+       return t.path
+}
+
+// String returns a human-readable description of the tree.
+func (t *Tree) String() string {
+       var result bytes.Buffer
+       path := strings.Join(t.path, ", ")
+       if path != "" {
+               path = fmt.Sprintf(" (path: %s)", path)
+       }
+       result.WriteString(t.Name() + path + "\n")
+
+       cs := t.Children()
+       if cs == nil {
+               result.WriteString("  not loaded")
+       } else {
+               // Go through each child and get its string value, then indent it
+               // by two.
+               for _, c := range cs {
+                       r := strings.NewReader(c.String())
+                       scanner := bufio.NewScanner(r)
+                       for scanner.Scan() {
+                               result.WriteString("  ")
+                               result.WriteString(scanner.Text())
+                               result.WriteString("\n")
+                       }
+               }
+       }
+
+       return result.String()
+}
+
+// Validate does semantic checks on the entire tree of configurations.
+//
+// This will call the respective config.Config.Validate() functions as well
+// as verifying things such as parameters/outputs between the various modules.
+//
+// Load must be called prior to calling Validate or an error will be returned.
+func (t *Tree) Validate() error {
+       if !t.Loaded() {
+               return fmt.Errorf("tree must be loaded before calling Validate")
+       }
+
+       // If something goes wrong, here is our error template
+       newErr := &treeError{Name: []string{t.Name()}}
+
+       // Terraform core does not handle root module children named "root".
+       // We plan to fix this in the future, but the bug was discovered in the
+       // middle of a release and we don't want to introduce sweeping changes
+       // at that time.
+       if len(t.path) == 1 && t.name == "root" {
+               return fmt.Errorf("root module cannot contain module named 'root'")
+       }
+
+       // Validate our configuration first.
+       if err := t.config.Validate(); err != nil {
+               newErr.Add(err)
+       }
+
+       // If we're the root, we do extra validation. This validation usually
+       // requires the entire tree (since children don't have parent pointers).
+       if len(t.path) == 0 {
+               if err := t.validateProviderAlias(); err != nil {
+                       newErr.Add(err)
+               }
+       }
+
+       // Get the child trees
+       children := t.Children()
+
+       // Validate all our children
+       for _, c := range children {
+               err := c.Validate()
+               if err == nil {
+                       continue
+               }
+
+               verr, ok := err.(*treeError)
+               if !ok {
+                       // Unknown error, just return...
+                       return err
+               }
+
+               // Append ourselves to the error and then return
+               verr.Name = append(verr.Name, t.Name())
+               newErr.AddChild(verr)
+       }
+
+       // Go over all the modules and verify that any parameters are valid
+       // variables into the module in question.
+       for _, m := range t.config.Modules {
+               tree, ok := children[m.Name]
+               if !ok {
+                       // This should never happen because Load builds a child for every module
+                       panic("module not found in children: " + m.Name)
+               }
+
+               // Build the variables that the module defines
+               requiredMap := make(map[string]struct{})
+               varMap := make(map[string]struct{})
+               for _, v := range tree.config.Variables {
+                       varMap[v.Name] = struct{}{}
+
+                       if v.Required() {
+                               requiredMap[v.Name] = struct{}{}
+                       }
+               }
+
+               // Compare to the keys in our raw config for the module
+               for k := range m.RawConfig.Raw {
+                       if _, ok := varMap[k]; !ok {
+                               newErr.Add(fmt.Errorf(
+                                       "module %s: %s is not a valid parameter",
+                                       m.Name, k))
+                       }
+
+                       // Remove the required
+                       delete(requiredMap, k)
+               }
+
+               // If we have any required left over, they aren't set.
+               for k := range requiredMap {
+                       newErr.Add(fmt.Errorf(
+                               "module %s: required variable %q not set",
+                               m.Name, k))
+               }
+       }
+
+       // Go over all the variables used and make sure that any module
+       // variables represent outputs properly.
+       for source, vs := range t.config.InterpolatedVariables() {
+               for _, v := range vs {
+                       mv, ok := v.(*config.ModuleVariable)
+                       if !ok {
+                               continue
+                       }
+
+                       tree, ok := children[mv.Name]
+                       if !ok {
+                               newErr.Add(fmt.Errorf(
+                                       "%s: undefined module referenced %s",
+                                       source, mv.Name))
+                               continue
+                       }
+
+                       found := false
+                       for _, o := range tree.config.Outputs {
+                               if o.Name == mv.Field {
+                                       found = true
+                                       break
+                               }
+                       }
+                       if !found {
+                               newErr.Add(fmt.Errorf(
+                                       "%s: %s is not a valid output for module %s",
+                                       source, mv.Field, mv.Name))
+                       }
+               }
+       }
+
+       return newErr.ErrOrNil()
+}
+
+// treeError is an error used by Tree.Validate to accumulate all
+// validation errors.
+type treeError struct {
+       Name     []string
+       Errs     []error
+       Children []*treeError
+}
+
+func (e *treeError) Add(err error) {
+       e.Errs = append(e.Errs, err)
+}
+
+func (e *treeError) AddChild(err *treeError) {
+       e.Children = append(e.Children, err)
+}
+
+func (e *treeError) ErrOrNil() error {
+       if len(e.Errs) > 0 || len(e.Children) > 0 {
+               return e
+       }
+       return nil
+}
+
+func (e *treeError) Error() string {
+       name := strings.Join(e.Name, ".")
+       var out bytes.Buffer
+       fmt.Fprintf(&out, "module %s: ", name)
+
+       if len(e.Errs) == 1 {
+               // single-line error
+               out.WriteString(e.Errs[0].Error())
+       } else {
+               // multi-line error
+               for _, err := range e.Errs {
+                       fmt.Fprintf(&out, "\n    %s", err)
+               }
+       }
+
+       if len(e.Children) > 0 {
+               // start the next error on a new line
+               out.WriteString("\n  ")
+       }
+       for _, child := range e.Children {
+               out.WriteString(child.Error())
+       }
+
+       return out.String()
+}
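
The intended call order for Tree is: construct the root, Load it with a storage backend, then Validate. A sketch of that flow (both paths here are illustrative):

package main

import (
        "fmt"
        "log"

        "github.com/hashicorp/go-getter"
        "github.com/hashicorp/terraform/config/module"
)

func main() {
        tree, err := module.NewTreeModule("", "./example-config")
        if err != nil {
                log.Fatal(err)
        }

        // GetModeGet downloads modules that aren't in storage yet, then loads
        // every child recursively.
        s := &getter.FolderStorage{StorageDir: ".terraform/modules"}
        if err := tree.Load(s, module.GetModeGet); err != nil {
                log.Fatal(err)
        }

        // Validate returns an error unless Load ran first.
        if err := tree.Validate(); err != nil {
                log.Fatal(err)
        }

        fmt.Println(tree.String())
}
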
diff --git a/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go b/vendor/github.com/hashicorp/terraform/config/module/tree_gob.go
new file mode 100644 (file)
index 0000000..fcd37f4
--- /dev/null
@@ -0,0 +1,57 @@
+package module
+
+import (
+       "bytes"
+       "encoding/gob"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+func (t *Tree) GobDecode(bs []byte) error {
+       t.lock.Lock()
+       defer t.lock.Unlock()
+
+       // Decode the gob data
+       var data treeGob
+       dec := gob.NewDecoder(bytes.NewReader(bs))
+       if err := dec.Decode(&data); err != nil {
+               return err
+       }
+
+       // Set the fields
+       t.name = data.Name
+       t.config = data.Config
+       t.children = data.Children
+       t.path = data.Path
+
+       return nil
+}
+
+func (t *Tree) GobEncode() ([]byte, error) {
+       data := &treeGob{
+               Config:   t.config,
+               Children: t.children,
+               Name:     t.name,
+               Path:     t.path,
+       }
+
+       var buf bytes.Buffer
+       enc := gob.NewEncoder(&buf)
+       if err := enc.Encode(data); err != nil {
+               return nil, err
+       }
+
+       return buf.Bytes(), nil
+}
+
+// treeGob is used as a structure to Gob encode a tree.
+//
+// This structure is private so it can't be referenced but the fields are
+// public, allowing Gob to properly encode this. When we decode this, we are
+// able to turn it into a Tree.
+type treeGob struct {
+       Config   *config.Config
+       Children map[string]*Tree
+       Name     string
+       Path     []string
+}
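
Because Tree implements both gob interfaces, a plain gob Encoder/Decoder round-trips it; the private treeGob carrier never appears in calling code. A minimal sketch:

package main

import (
        "bytes"
        "encoding/gob"
        "fmt"
        "log"

        "github.com/hashicorp/terraform/config/module"
)

func main() {
        in := module.NewEmptyTree()

        var buf bytes.Buffer
        if err := gob.NewEncoder(&buf).Encode(in); err != nil {
                log.Fatal(err)
        }

        var out module.Tree
        if err := gob.NewDecoder(&buf).Decode(&out); err != nil {
                log.Fatal(err)
        }

        fmt.Println(out.Name()) // "root": name, config, children, and path survive
}
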
diff --git a/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go b/vendor/github.com/hashicorp/terraform/config/module/validate_provider_alias.go
new file mode 100644 (file)
index 0000000..090d4f7
--- /dev/null
@@ -0,0 +1,118 @@
+package module
+
+import (
+       "fmt"
+       "strings"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// validateProviderAlias validates that all provider alias references are
+// defined at some point in the parent tree. This improves UX by catching
+// alias typos at the slight cost of requiring a declaration of usage. This
+// is usually a good tradeoff since not many aliases are used.
+func (t *Tree) validateProviderAlias() error {
+       // If we're not the root, don't perform this validation. We must be the
+       // root since we require full tree visibility.
+       if len(t.path) != 0 {
+               return nil
+       }
+
+       // We'll use a graph to keep track of defined aliases at each level.
+       // As long as a parent defines an alias, it is okay.
+       var g dag.AcyclicGraph
+       t.buildProviderAliasGraph(&g, nil)
+
+       // Go through the graph and check that the usage is all good.
+       var err error
+       for _, v := range g.Vertices() {
+               pv, ok := v.(*providerAliasVertex)
+               if !ok {
+                       // This shouldn't happen, just ignore it.
+                       continue
+               }
+
+               // If we're not using any aliases, fast track and just continue
+               if len(pv.Used) == 0 {
+                       continue
+               }
+
+               // Grab the ancestors since we're going to have to check if our
+               // parents define any of our aliases.
+               var parents []*providerAliasVertex
+               ancestors, _ := g.Ancestors(v)
+               for _, raw := range ancestors.List() {
+                       if pv, ok := raw.(*providerAliasVertex); ok {
+                               parents = append(parents, pv)
+                       }
+               }
+               for k := range pv.Used {
+                       // Check if we define this
+                       if _, ok := pv.Defined[k]; ok {
+                               continue
+                       }
+
+                       // Check for a parent
+                       found := false
+                       for _, parent := range parents {
+                               _, found = parent.Defined[k]
+                               if found {
+                                       break
+                               }
+                       }
+                       if found {
+                               continue
+                       }
+
+                       // We didn't find the alias, error!
+                       err = multierror.Append(err, fmt.Errorf(
+                               "module %s: provider alias must be defined by the module or a parent: %s",
+                               strings.Join(pv.Path, "."), k))
+               }
+       }
+
+       return err
+}
+
+func (t *Tree) buildProviderAliasGraph(g *dag.AcyclicGraph, parent dag.Vertex) {
+       // Add all our defined aliases
+       defined := make(map[string]struct{})
+       for _, p := range t.config.ProviderConfigs {
+               defined[p.FullName()] = struct{}{}
+       }
+
+       // Add all our used aliases
+       used := make(map[string]struct{})
+       for _, r := range t.config.Resources {
+               if r.Provider != "" {
+                       used[r.Provider] = struct{}{}
+               }
+       }
+
+       // Add it to the graph
+       vertex := &providerAliasVertex{
+               Path:    t.Path(),
+               Defined: defined,
+               Used:    used,
+       }
+       g.Add(vertex)
+
+       // Connect to our parent if we have one
+       if parent != nil {
+               g.Connect(dag.BasicEdge(vertex, parent))
+       }
+
+       // Build all our children
+       for _, c := range t.Children() {
+               c.buildProviderAliasGraph(g, vertex)
+       }
+}
+
+// providerAliasVertex is the vertex for the graph that keeps track of
+// defined provider aliases.
+type providerAliasVertex struct {
+       Path    []string
+       Defined map[string]struct{}
+       Used    map[string]struct{}
+}
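
The graph direction matters here: edges run from child to parent, so Ancestors(child) yields exactly the modules whose alias definitions the child may rely on. A reduced sketch using string vertices in place of providerAliasVertex:

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/dag"
)

func main() {
        var g dag.AcyclicGraph
        g.Add("root")
        g.Add("child")
        g.Connect(dag.BasicEdge("child", "root")) // edge points child -> parent

        ancestors, err := g.Ancestors("child")
        if err != nil {
                panic(err)
        }
        fmt.Println(ancestors.List()) // contains "root"
}
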
diff --git a/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go b/vendor/github.com/hashicorp/terraform/config/provisioner_enums.go
new file mode 100644 (file)
index 0000000..00fd43f
--- /dev/null
@@ -0,0 +1,40 @@
+package config
+
+// ProvisionerWhen is an enum for valid values for when to run provisioners.
+type ProvisionerWhen int
+
+const (
+       ProvisionerWhenInvalid ProvisionerWhen = iota
+       ProvisionerWhenCreate
+       ProvisionerWhenDestroy
+)
+
+var provisionerWhenStrs = map[ProvisionerWhen]string{
+       ProvisionerWhenInvalid: "invalid",
+       ProvisionerWhenCreate:  "create",
+       ProvisionerWhenDestroy: "destroy",
+}
+
+func (v ProvisionerWhen) String() string {
+       return provisionerWhenStrs[v]
+}
+
+// ProvisionerOnFailure is an enum for valid values for on_failure options
+// for provisioners.
+type ProvisionerOnFailure int
+
+const (
+       ProvisionerOnFailureInvalid ProvisionerOnFailure = iota
+       ProvisionerOnFailureContinue
+       ProvisionerOnFailureFail
+)
+
+var provisionerOnFailureStrs = map[ProvisionerOnFailure]string{
+       ProvisionerOnFailureInvalid:  "invalid",
+       ProvisionerOnFailureContinue: "continue",
+       ProvisionerOnFailureFail:     "fail",
+}
+
+func (v ProvisionerOnFailure) String() string {
+       return provisionerOnFailureStrs[v]
+}
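A quick usage sketch of these enums, assuming the vendored config package is importable; because the Invalid variant is the zero value, an unset field stringifies to "invalid" rather than silently becoming a real option:

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/config"
    )

    func main() {
        fmt.Println(config.ProvisionerWhenCreate) // "create"

        var unset config.ProvisionerOnFailure
        fmt.Println(unset) // "invalid": the zero value is deliberately not a real option
    }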
diff --git a/vendor/github.com/hashicorp/terraform/config/raw_config.go b/vendor/github.com/hashicorp/terraform/config/raw_config.go
new file mode 100644 (file)
index 0000000..f8498d8
--- /dev/null
@@ -0,0 +1,335 @@
+package config
+
+import (
+       "bytes"
+       "encoding/gob"
+       "sync"
+
+       "github.com/hashicorp/hil"
+       "github.com/hashicorp/hil/ast"
+       "github.com/mitchellh/copystructure"
+       "github.com/mitchellh/reflectwalk"
+)
+
+// UnknownVariableValue is a sentinel value that can be used
+// to denote that the value of a variable is unknown at this time.
+// RawConfig uses this information to build up data about
+// unknown keys.
+const UnknownVariableValue = "74D93920-ED26-11E3-AC10-0800200C9A66"
+
+// RawConfig is a structure that holds a piece of configuration
+// where the overall structure is unknown since it will be used
+// to configure a plugin or some other similar external component.
+//
+// RawConfigs can be interpolated with variables that come from
+// other resources, user variables, etc.
+//
+// RawConfig supports a query-like interface to request
+// information from deep within the structure.
+type RawConfig struct {
+       Key            string
+       Raw            map[string]interface{}
+       Interpolations []ast.Node
+       Variables      map[string]InterpolatedVariable
+
+       lock        sync.Mutex
+       config      map[string]interface{}
+       unknownKeys []string
+}
+
+// NewRawConfig creates a new RawConfig structure and populates the
+// publicly readable struct fields.
+func NewRawConfig(raw map[string]interface{}) (*RawConfig, error) {
+       result := &RawConfig{Raw: raw}
+       if err := result.init(); err != nil {
+               return nil, err
+       }
+
+       return result, nil
+}
+
+// RawMap returns a copy of the RawConfig.Raw map.
+func (r *RawConfig) RawMap() map[string]interface{} {
+       r.lock.Lock()
+       defer r.lock.Unlock()
+
+       m := make(map[string]interface{})
+       for k, v := range r.Raw {
+               m[k] = v
+       }
+       return m
+}
+
+// Copy returns a copy of this RawConfig, uninterpolated.
+func (r *RawConfig) Copy() *RawConfig {
+       if r == nil {
+               return nil
+       }
+
+       r.lock.Lock()
+       defer r.lock.Unlock()
+
+       newRaw := make(map[string]interface{})
+       for k, v := range r.Raw {
+               newRaw[k] = v
+       }
+
+       result, err := NewRawConfig(newRaw)
+       if err != nil {
+               panic("copy failed: " + err.Error())
+       }
+
+       result.Key = r.Key
+       return result
+}
+
+// Value returns the value of the configuration if this configuration
+// has a Key set. If this does not have a Key set, nil will be returned.
+func (r *RawConfig) Value() interface{} {
+       if c := r.Config(); c != nil {
+               if v, ok := c[r.Key]; ok {
+                       return v
+               }
+       }
+
+       r.lock.Lock()
+       defer r.lock.Unlock()
+       return r.Raw[r.Key]
+}
+
+// Config returns the entire configuration with the variables
+// interpolated from any call to Interpolate.
+//
+// If any interpolated variables are unknown (value set to
+// UnknownVariableValue), the first non-container (map, slice, etc.) element
+// will be removed from the config. The keys of unknown variables
+// can be found using the UnknownKeys function.
+//
+// By pruning out unknown keys from the configuration, the raw
+// structure will always successfully decode into its ultimate
+// structure using something like mapstructure.
+func (r *RawConfig) Config() map[string]interface{} {
+       r.lock.Lock()
+       defer r.lock.Unlock()
+       return r.config
+}
+
+// Interpolate uses the given mapping of variable values and uses
+// those as the values to replace any variables in this raw
+// configuration.
+//
+// Any prior calls to Interpolate are replaced with this one.
+//
+// If a variable key is missing, this will panic.
+func (r *RawConfig) Interpolate(vs map[string]ast.Variable) error {
+       r.lock.Lock()
+       defer r.lock.Unlock()
+
+       config := langEvalConfig(vs)
+       return r.interpolate(func(root ast.Node) (interface{}, error) {
+               // None of the variables we need are computed, meaning we should
+               // be able to properly evaluate.
+               result, err := hil.Eval(root, config)
+               if err != nil {
+                       return "", err
+               }
+
+               return result.Value, nil
+       })
+}
+
+// Merge merges another RawConfig into this one (overriding any conflicting
+// values in this config) and returns a new config. The original config
+// is not modified.
+func (r *RawConfig) Merge(other *RawConfig) *RawConfig {
+       r.lock.Lock()
+       defer r.lock.Unlock()
+
+       // Merge the raw configurations
+       raw := make(map[string]interface{})
+       for k, v := range r.Raw {
+               raw[k] = v
+       }
+       for k, v := range other.Raw {
+               raw[k] = v
+       }
+
+       // Create the result
+       result, err := NewRawConfig(raw)
+       if err != nil {
+               panic(err)
+       }
+
+       // Merge the interpolated results
+       result.config = make(map[string]interface{})
+       for k, v := range r.config {
+               result.config[k] = v
+       }
+       for k, v := range other.config {
+               result.config[k] = v
+       }
+
+       // Build the unknown keys
+       if len(r.unknownKeys) > 0 || len(other.unknownKeys) > 0 {
+               unknownKeys := make(map[string]struct{})
+               for _, k := range r.unknownKeys {
+                       unknownKeys[k] = struct{}{}
+               }
+               for _, k := range other.unknownKeys {
+                       unknownKeys[k] = struct{}{}
+               }
+
+               result.unknownKeys = make([]string, 0, len(unknownKeys))
+               for k := range unknownKeys {
+                       result.unknownKeys = append(result.unknownKeys, k)
+               }
+       }
+
+       return result
+}
+
+func (r *RawConfig) init() error {
+       r.lock.Lock()
+       defer r.lock.Unlock()
+
+       r.config = r.Raw
+       r.Interpolations = nil
+       r.Variables = nil
+
+       fn := func(node ast.Node) (interface{}, error) {
+               r.Interpolations = append(r.Interpolations, node)
+               vars, err := DetectVariables(node)
+               if err != nil {
+                       return "", err
+               }
+
+               for _, v := range vars {
+                       if r.Variables == nil {
+                               r.Variables = make(map[string]InterpolatedVariable)
+                       }
+
+                       r.Variables[v.FullKey()] = v
+               }
+
+               return "", nil
+       }
+
+       walker := &interpolationWalker{F: fn}
+       if err := reflectwalk.Walk(r.Raw, walker); err != nil {
+               return err
+       }
+
+       return nil
+}
+
+func (r *RawConfig) interpolate(fn interpolationWalkerFunc) error {
+       config, err := copystructure.Copy(r.Raw)
+       if err != nil {
+               return err
+       }
+       r.config = config.(map[string]interface{})
+
+       w := &interpolationWalker{F: fn, Replace: true}
+       err = reflectwalk.Walk(r.config, w)
+       if err != nil {
+               return err
+       }
+
+       r.unknownKeys = w.unknownKeys
+       return nil
+}
+
+func (r *RawConfig) merge(r2 *RawConfig) *RawConfig {
+       if r == nil && r2 == nil {
+               return nil
+       }
+
+       if r == nil {
+               r = &RawConfig{}
+       }
+
+       rawRaw, err := copystructure.Copy(r.Raw)
+       if err != nil {
+               panic(err)
+       }
+
+       raw := rawRaw.(map[string]interface{})
+       if r2 != nil {
+               for k, v := range r2.Raw {
+                       raw[k] = v
+               }
+       }
+
+       result, err := NewRawConfig(raw)
+       if err != nil {
+               panic(err)
+       }
+
+       return result
+}
+
+// UnknownKeys returns the keys of the configuration that are unknown
+// because they had interpolated variables that must be computed.
+func (r *RawConfig) UnknownKeys() []string {
+       r.lock.Lock()
+       defer r.lock.Unlock()
+       return r.unknownKeys
+}
+
+// See GobEncode
+func (r *RawConfig) GobDecode(b []byte) error {
+       var data gobRawConfig
+       err := gob.NewDecoder(bytes.NewReader(b)).Decode(&data)
+       if err != nil {
+               return err
+       }
+
+       r.Key = data.Key
+       r.Raw = data.Raw
+
+       return r.init()
+}
+
+// GobEncode is a custom Gob encoder to use so that we only include the
+// raw configuration. Interpolated variables and such are lost and the
+// tree of interpolated variables is recomputed on decode, since it is
+// referentially transparent.
+func (r *RawConfig) GobEncode() ([]byte, error) {
+       r.lock.Lock()
+       defer r.lock.Unlock()
+
+       data := gobRawConfig{
+               Key: r.Key,
+               Raw: r.Raw,
+       }
+
+       var buf bytes.Buffer
+       if err := gob.NewEncoder(&buf).Encode(data); err != nil {
+               return nil, err
+       }
+
+       return buf.Bytes(), nil
+}
+
+type gobRawConfig struct {
+       Key string
+       Raw map[string]interface{}
+}
+
+// langEvalConfig returns the evaluation configuration we use to execute.
+func langEvalConfig(vs map[string]ast.Variable) *hil.EvalConfig {
+       funcMap := make(map[string]ast.Function)
+       for k, v := range Funcs() {
+               funcMap[k] = v
+       }
+       funcMap["lookup"] = interpolationFuncLookup(vs)
+       funcMap["keys"] = interpolationFuncKeys(vs)
+       funcMap["values"] = interpolationFuncValues(vs)
+
+       return &hil.EvalConfig{
+               GlobalScope: &ast.BasicScope{
+                       VarMap:  vs,
+                       FuncMap: funcMap,
+               },
+       }
+}
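A usage sketch of the RawConfig lifecycle, with a hypothetical "var.env" variable: build from a raw map, interpolate against a hil variable scope keyed by full variable name, then read results back through Config. Per the docs above, an unknown value should be pruned from Config and reported by UnknownKeys instead:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/hil/ast"
        "github.com/hashicorp/terraform/config"
    )

    func main() {
        raw, err := config.NewRawConfig(map[string]interface{}{
            "name": "web-${var.env}",
        })
        if err != nil {
            log.Fatal(err)
        }

        // Interpolate evaluates ${var.env} against the supplied scope.
        if err := raw.Interpolate(map[string]ast.Variable{
            "var.env": {Type: ast.TypeString, Value: "prod"},
        }); err != nil {
            log.Fatal(err)
        }
        fmt.Println(raw.Config()["name"]) // "web-prod"

        // An unknown value should prune the key from Config() and report it here.
        if err := raw.Interpolate(map[string]ast.Variable{
            "var.env": {Type: ast.TypeString, Value: config.UnknownVariableValue},
        }); err != nil {
            log.Fatal(err)
        }
        fmt.Println(raw.UnknownKeys()) // expected to include "name"
    }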
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode.go b/vendor/github.com/hashicorp/terraform/config/resource_mode.go
new file mode 100644 (file)
index 0000000..877c6e8
--- /dev/null
@@ -0,0 +1,9 @@
+package config
+
+//go:generate stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go
+type ResourceMode int
+
+const (
+       ManagedResourceMode ResourceMode = iota
+       DataResourceMode
+)
diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go
new file mode 100644 (file)
index 0000000..ea68b4f
--- /dev/null
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=ResourceMode -output=resource_mode_string.go resource_mode.go"; DO NOT EDIT.
+
+package config
+
+import "fmt"
+
+const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
+
+var _ResourceMode_index = [...]uint8{0, 19, 35}
+
+func (i ResourceMode) String() string {
+       if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
+               return fmt.Sprintf("ResourceMode(%d)", i)
+       }
+       return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
+}
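The generated method slices one packed name string at the recorded offsets: bytes [0,19) spell "ManagedResourceMode" and bytes [19,35) spell "DataResourceMode", which keeps String allocation-free. A small sketch of the resulting behavior:

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/config"
    )

    func main() {
        fmt.Println(config.ManagedResourceMode) // "ManagedResourceMode" = name[0:19]
        fmt.Println(config.DataResourceMode)    // "DataResourceMode"    = name[19:35]
        fmt.Println(config.ResourceMode(7))     // "ResourceMode(7)" for out-of-range values
    }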
diff --git a/vendor/github.com/hashicorp/terraform/config/testing.go b/vendor/github.com/hashicorp/terraform/config/testing.go
new file mode 100644 (file)
index 0000000..f7bfadd
--- /dev/null
@@ -0,0 +1,15 @@
+package config
+
+import (
+       "testing"
+)
+
+// TestRawConfig is used to create a RawConfig for testing.
+func TestRawConfig(t *testing.T, c map[string]interface{}) *RawConfig {
+       cfg, err := NewRawConfig(c)
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+
+       return cfg
+}
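A minimal sketch of the intended use inside a _test.go file; the test name and map contents are invented:

    package config_test

    import (
        "testing"

        "github.com/hashicorp/terraform/config"
    )

    func TestMyResourceConfig(t *testing.T) {
        rc := config.TestRawConfig(t, map[string]interface{}{
            "foo": "bar",
        })

        // Any construction error has already failed the test via t.Fatalf.
        if rc.Raw["foo"] != "bar" {
            t.Fatalf("unexpected raw value: %#v", rc.Raw)
        }
    }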
diff --git a/vendor/github.com/hashicorp/terraform/dag/dag.go b/vendor/github.com/hashicorp/terraform/dag/dag.go
new file mode 100644 (file)
index 0000000..f8776bc
--- /dev/null
@@ -0,0 +1,286 @@
+package dag
+
+import (
+       "fmt"
+       "sort"
+       "strings"
+
+       "github.com/hashicorp/go-multierror"
+)
+
+// AcyclicGraph is a specialization of Graph that cannot have cycles. With
+// this property, we get sane, well-defined graph traversal.
+type AcyclicGraph struct {
+       Graph
+}
+
+// WalkFunc is the callback used for walking the graph.
+type WalkFunc func(Vertex) error
+
+// DepthWalkFunc is a walk function that also receives the current depth of the
+// walk as an argument
+type DepthWalkFunc func(Vertex, int) error
+
+func (g *AcyclicGraph) DirectedGraph() Grapher {
+       return g
+}
+
+// Ancestors returns a Set that includes every Vertex yielded by walking down
+// from the provided starting Vertex v.
+func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) {
+       s := new(Set)
+       start := AsVertexList(g.DownEdges(v))
+       memoFunc := func(v Vertex, d int) error {
+               s.Add(v)
+               return nil
+       }
+
+       if err := g.DepthFirstWalk(start, memoFunc); err != nil {
+               return nil, err
+       }
+
+       return s, nil
+}
+
+// Descendents returns a Set that includes every Vertex yielded by walking up
+// from the provided starting Vertex v.
+func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) {
+       s := new(Set)
+       start := AsVertexList(g.UpEdges(v))
+       memoFunc := func(v Vertex, d int) error {
+               s.Add(v)
+               return nil
+       }
+
+       if err := g.ReverseDepthFirstWalk(start, memoFunc); err != nil {
+               return nil, err
+       }
+
+       return s, nil
+}
+
+// Root returns the root of the DAG, or an error.
+//
+// Complexity: O(V)
+func (g *AcyclicGraph) Root() (Vertex, error) {
+       roots := make([]Vertex, 0, 1)
+       for _, v := range g.Vertices() {
+               if g.UpEdges(v).Len() == 0 {
+                       roots = append(roots, v)
+               }
+       }
+
+       if len(roots) > 1 {
+               // TODO(mitchellh): make this error message a lot better
+               return nil, fmt.Errorf("multiple roots: %#v", roots)
+       }
+
+       if len(roots) == 0 {
+               return nil, fmt.Errorf("no roots found")
+       }
+
+       return roots[0], nil
+}
+
+// TransitiveReduction performs the transitive reduction of graph g in place.
+// The transitive reduction of a graph is a graph with as few edges as
+// possible with the same reachability as the original graph. This means
+// that if there are three nodes A => B => C, and A connects to both
+// B and C, and B connects to C, then the transitive reduction is the
+// same graph with only a single edge between A and B, and a single edge
+// between B and C.
+//
+// The graph must be valid for this operation to behave properly. If
+// Validate() returns an error, the behavior is undefined and the results
+// will likely be unexpected.
+//
+// Complexity: O(V(V+E)), or asymptotically O(VE)
+func (g *AcyclicGraph) TransitiveReduction() {
+       // For each vertex u in graph g, do a DFS starting from each vertex
+       // v such that the edge (u,v) exists (v is a direct descendant of u).
+       //
+       // For each v-prime reachable from v, remove the edge (u, v-prime).
+       defer g.debug.BeginOperation("TransitiveReduction", "").End("")
+
+       for _, u := range g.Vertices() {
+               uTargets := g.DownEdges(u)
+               vs := AsVertexList(g.DownEdges(u))
+
+               g.DepthFirstWalk(vs, func(v Vertex, d int) error {
+                       shared := uTargets.Intersection(g.DownEdges(v))
+                       for _, vPrime := range AsVertexList(shared) {
+                               g.RemoveEdge(BasicEdge(u, vPrime))
+                       }
+
+                       return nil
+               })
+       }
+}
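A small sketch of the reduction just described, using plain strings as vertices (string values are valid Vertices):

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/dag"
    )

    func main() {
        var g dag.AcyclicGraph
        g.Add("A")
        g.Add("B")
        g.Add("C")
        g.Connect(dag.BasicEdge("A", "B"))
        g.Connect(dag.BasicEdge("B", "C"))
        g.Connect(dag.BasicEdge("A", "C")) // redundant: C is already reachable via B

        g.TransitiveReduction()

        // Only A -> B and B -> C remain; reachability is unchanged.
        fmt.Println(g.String())
    }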
+
+// Validate validates the DAG. A DAG is valid if it has a single root
+// with no cycles.
+func (g *AcyclicGraph) Validate() error {
+       if _, err := g.Root(); err != nil {
+               return err
+       }
+
+       // Look for cycles (strongly connected components with more than one vertex)
+       var err error
+       cycles := g.Cycles()
+       if len(cycles) > 0 {
+               for _, cycle := range cycles {
+                       cycleStr := make([]string, len(cycle))
+                       for j, vertex := range cycle {
+                               cycleStr[j] = VertexName(vertex)
+                       }
+
+                       err = multierror.Append(err, fmt.Errorf(
+                               "Cycle: %s", strings.Join(cycleStr, ", ")))
+               }
+       }
+
+       // Look for cycles to self
+       for _, e := range g.Edges() {
+               if e.Source() == e.Target() {
+                       err = multierror.Append(err, fmt.Errorf(
+                               "Self reference: %s", VertexName(e.Source())))
+               }
+       }
+
+       return err
+}
+
+func (g *AcyclicGraph) Cycles() [][]Vertex {
+       var cycles [][]Vertex
+       for _, cycle := range StronglyConnected(&g.Graph) {
+               if len(cycle) > 1 {
+                       cycles = append(cycles, cycle)
+               }
+       }
+       return cycles
+}
+
+// Walk walks the graph, calling your callback as each node is visited.
+// This will walk nodes in parallel if it can. Because the walk is done
+// in parallel, the error returned will be a multierror.
+func (g *AcyclicGraph) Walk(cb WalkFunc) error {
+       defer g.debug.BeginOperation(typeWalk, "").End("")
+
+       w := &Walker{Callback: cb, Reverse: true}
+       w.Update(g)
+       return w.Wait()
+}
+
+// AsVertexList is a simple convenience helper for converting a dag.Set to a []Vertex.
+func AsVertexList(s *Set) []Vertex {
+       rawList := s.List()
+       vertexList := make([]Vertex, len(rawList))
+       for i, raw := range rawList {
+               vertexList[i] = raw.(Vertex)
+       }
+       return vertexList
+}
+
+type vertexAtDepth struct {
+       Vertex Vertex
+       Depth  int
+}
+
+// DepthFirstWalk does a depth-first walk of the graph starting from
+// the vertices in start, calling f with each vertex and its depth in
+// the traversal.
+func (g *AcyclicGraph) DepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+       defer g.debug.BeginOperation(typeDepthFirstWalk, "").End("")
+
+       seen := make(map[Vertex]struct{})
+       frontier := make([]*vertexAtDepth, len(start))
+       for i, v := range start {
+               frontier[i] = &vertexAtDepth{
+                       Vertex: v,
+                       Depth:  0,
+               }
+       }
+       for len(frontier) > 0 {
+               // Pop the current vertex
+               n := len(frontier)
+               current := frontier[n-1]
+               frontier = frontier[:n-1]
+
+               // Skip this vertex if we've already visited it
+               if _, ok := seen[current.Vertex]; ok {
+                       continue
+               }
+               seen[current.Vertex] = struct{}{}
+
+               // Visit the current node
+               if err := f(current.Vertex, current.Depth); err != nil {
+                       return err
+               }
+
+               // Visit targets of this in a consistent order.
+               targets := AsVertexList(g.DownEdges(current.Vertex))
+               sort.Sort(byVertexName(targets))
+               for _, t := range targets {
+                       frontier = append(frontier, &vertexAtDepth{
+                               Vertex: t,
+                               Depth:  current.Depth + 1,
+                       })
+               }
+       }
+
+       return nil
+}
+
+// ReverseDepthFirstWalk does a depth-first walk _up_ the graph starting from
+// the vertices in start.
+func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) error {
+       defer g.debug.BeginOperation(typeReverseDepthFirstWalk, "").End("")
+
+       seen := make(map[Vertex]struct{})
+       frontier := make([]*vertexAtDepth, len(start))
+       for i, v := range start {
+               frontier[i] = &vertexAtDepth{
+                       Vertex: v,
+                       Depth:  0,
+               }
+       }
+       for len(frontier) > 0 {
+               // Pop the current vertex
+               n := len(frontier)
+               current := frontier[n-1]
+               frontier = frontier[:n-1]
+
+               // Skip this vertex if we've already visited it
+               if _, ok := seen[current.Vertex]; ok {
+                       continue
+               }
+               seen[current.Vertex] = struct{}{}
+
+               // Add next set of targets in a consistent order.
+               targets := AsVertexList(g.UpEdges(current.Vertex))
+               sort.Sort(byVertexName(targets))
+               for _, t := range targets {
+                       frontier = append(frontier, &vertexAtDepth{
+                               Vertex: t,
+                               Depth:  current.Depth + 1,
+                       })
+               }
+
+               // Visit the current node
+               if err := f(current.Vertex, current.Depth); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+// byVertexName implements sort.Interface so a list of Vertices can be sorted
+// consistently by their VertexName
+type byVertexName []Vertex
+
+func (b byVertexName) Len() int      { return len(b) }
+func (b byVertexName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+func (b byVertexName) Less(i, j int) bool {
+       return VertexName(b[i]) < VertexName(b[j])
+}
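Putting the pieces together, a sketch of building, validating, and walking a DAG. It assumes the usual Terraform edge convention that an edge points from a dependent vertex to its dependency, so the walk completes dependencies first:

    package main

    import (
        "fmt"
        "log"

        "github.com/hashicorp/terraform/dag"
    )

    func main() {
        var g dag.AcyclicGraph
        g.Add(1)
        g.Add(2)
        g.Add(3)
        g.Connect(dag.BasicEdge(3, 2)) // 3 depends on 2
        g.Connect(dag.BasicEdge(2, 1)) // 2 depends on 1

        if err := g.Validate(); err != nil {
            log.Fatal(err) // single root, no cycles
        }

        // Walk visits dependencies before dependents: 1, then 2, then 3.
        err := g.Walk(func(v dag.Vertex) error {
            fmt.Println(v)
            return nil
        })
        if err != nil {
            log.Fatal(err)
        }
    }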
diff --git a/vendor/github.com/hashicorp/terraform/dag/dot.go b/vendor/github.com/hashicorp/terraform/dag/dot.go
new file mode 100644 (file)
index 0000000..7e6d2af
--- /dev/null
@@ -0,0 +1,282 @@
+package dag
+
+import (
+       "bytes"
+       "fmt"
+       "sort"
+       "strings"
+)
+
+// DotOpts are the options for generating a dot formatted Graph.
+type DotOpts struct {
+       // Allows some nodes to decide to only show themselves when the user has
+       // requested the "verbose" graph.
+       Verbose bool
+
+       // Highlight Cycles
+       DrawCycles bool
+
+       // How many levels to expand modules as we draw
+       MaxDepth int
+
+       // use this to keep the cluster_ naming convention from the previous dot writer
+       cluster bool
+}
+
+// GraphNodeDotter can be implemented by a node to cause it to be included
+// in the dot graph. The Dot method will be called which is expected to
+// return a representation of this node.
+type GraphNodeDotter interface {
+       // Dot is called to return the dot formatting for the node.
+       // The first parameter is the title of the node.
+       // The second parameter includes user-specified options that affect the dot
+       // graph. See DotOpts above for details.
+       DotNode(string, *DotOpts) *DotNode
+}
+
+// DotNode provides a structure for Vertices to return in order to specify their
+// dot format.
+type DotNode struct {
+       Name  string
+       Attrs map[string]string
+}
+
+// Dot returns the DOT representation of this Graph.
+func (g *marshalGraph) Dot(opts *DotOpts) []byte {
+       if opts == nil {
+               opts = &DotOpts{
+                       DrawCycles: true,
+                       MaxDepth:   -1,
+                       Verbose:    true,
+               }
+       }
+
+       var w indentWriter
+       w.WriteString("digraph {\n")
+       w.Indent()
+
+       // some dot defaults
+       w.WriteString(`compound = "true"` + "\n")
+       w.WriteString(`newrank = "true"` + "\n")
+
+       // the top level graph is written as the first subgraph
+       w.WriteString(`subgraph "root" {` + "\n")
+       g.writeBody(opts, &w)
+
+       // cluster isn't really used other than for naming purposes in some graphs
+       opts.cluster = opts.MaxDepth != 0
+       maxDepth := opts.MaxDepth
+       if maxDepth == 0 {
+               maxDepth = -1
+       }
+
+       for _, s := range g.Subgraphs {
+               g.writeSubgraph(s, opts, maxDepth, &w)
+       }
+
+       w.Unindent()
+       w.WriteString("}\n")
+       return w.Bytes()
+}
+
+func (v *marshalVertex) dot(g *marshalGraph, opts *DotOpts) []byte {
+       var buf bytes.Buffer
+       graphName := g.Name
+       if graphName == "" {
+               graphName = "root"
+       }
+
+       name := v.Name
+       attrs := v.Attrs
+       if v.graphNodeDotter != nil {
+               node := v.graphNodeDotter.DotNode(name, opts)
+               if node == nil {
+                       return []byte{}
+               }
+
+               newAttrs := make(map[string]string)
+               for k, v := range attrs {
+                       newAttrs[k] = v
+               }
+               for k, v := range node.Attrs {
+                       newAttrs[k] = v
+               }
+
+               name = node.Name
+               attrs = newAttrs
+       }
+
+       buf.WriteString(fmt.Sprintf(`"[%s] %s"`, graphName, name))
+       writeAttrs(&buf, attrs)
+       buf.WriteByte('\n')
+
+       return buf.Bytes()
+}
+
+func (e *marshalEdge) dot(g *marshalGraph) string {
+       var buf bytes.Buffer
+       graphName := g.Name
+       if graphName == "" {
+               graphName = "root"
+       }
+
+       sourceName := g.vertexByID(e.Source).Name
+       targetName := g.vertexByID(e.Target).Name
+       s := fmt.Sprintf(`"[%s] %s" -> "[%s] %s"`, graphName, sourceName, graphName, targetName)
+       buf.WriteString(s)
+       writeAttrs(&buf, e.Attrs)
+
+       return buf.String()
+}
+
+func cycleDot(e *marshalEdge, g *marshalGraph) string {
+       return e.dot(g) + ` [color = "red", penwidth = "2.0"]`
+}
+
+// writeSubgraph writes the subgraph body. This is recursive, and the depth
+// argument is used to record the current depth of iteration.
+func (g *marshalGraph) writeSubgraph(sg *marshalGraph, opts *DotOpts, depth int, w *indentWriter) {
+       if depth == 0 {
+               return
+       }
+       depth--
+
+       name := sg.Name
+       if opts.cluster {
+               // we prefix with cluster_ to match the old dot output
+               name = "cluster_" + name
+               sg.Attrs["label"] = sg.Name
+       }
+       w.WriteString(fmt.Sprintf("subgraph %q {\n", name))
+       sg.writeBody(opts, w)
+
+       for _, sg := range sg.Subgraphs {
+               g.writeSubgraph(sg, opts, depth, w)
+       }
+}
+
+func (g *marshalGraph) writeBody(opts *DotOpts, w *indentWriter) {
+       w.Indent()
+
+       for _, as := range attrStrings(g.Attrs) {
+               w.WriteString(as + "\n")
+       }
+
+       // list of Vertices that aren't to be included in the dot output
+       skip := map[string]bool{}
+
+       for _, v := range g.Vertices {
+               if v.graphNodeDotter == nil {
+                       skip[v.ID] = true
+                       continue
+               }
+
+               w.Write(v.dot(g, opts))
+       }
+
+       var dotEdges []string
+
+       if opts.DrawCycles {
+               for _, c := range g.Cycles {
+                       if len(c) < 2 {
+                               continue
+                       }
+
+                       for i, j := 0, 1; i < len(c); i, j = i+1, j+1 {
+                               if j >= len(c) {
+                                       j = 0
+                               }
+                               src := c[i]
+                               tgt := c[j]
+
+                               if skip[src.ID] || skip[tgt.ID] {
+                                       continue
+                               }
+
+                               e := &marshalEdge{
+                                       Name:   fmt.Sprintf("%s|%s", src.Name, tgt.Name),
+                                       Source: src.ID,
+                                       Target: tgt.ID,
+                                       Attrs:  make(map[string]string),
+                               }
+
+                               dotEdges = append(dotEdges, cycleDot(e, g))
+                               src = tgt
+                       }
+               }
+       }
+
+       for _, e := range g.Edges {
+               dotEdges = append(dotEdges, e.dot(g))
+       }
+
+       // sort these again to match the old output
+       sort.Strings(dotEdges)
+
+       for _, e := range dotEdges {
+               w.WriteString(e + "\n")
+       }
+
+       w.Unindent()
+       w.WriteString("}\n")
+}
+
+func writeAttrs(buf *bytes.Buffer, attrs map[string]string) {
+       if len(attrs) > 0 {
+               buf.WriteString(" [")
+               buf.WriteString(strings.Join(attrStrings(attrs), ", "))
+               buf.WriteString("]")
+       }
+}
+
+func attrStrings(attrs map[string]string) []string {
+       strings := make([]string, 0, len(attrs))
+       for k, v := range attrs {
+               strings = append(strings, fmt.Sprintf("%s = %q", k, v))
+       }
+       sort.Strings(strings)
+       return strings
+}
+
+// indentWriter provides a bytes.Buffer-like structure that indents when
+// starting a new line.
+type indentWriter struct {
+       bytes.Buffer
+       level int
+}
+
+func (w *indentWriter) indent() {
+       newline := []byte("\n")
+       if !bytes.HasSuffix(w.Bytes(), newline) {
+               return
+       }
+       for i := 0; i < w.level; i++ {
+               w.Buffer.WriteString("\t")
+       }
+}
+
+// Indent increases indentation by 1
+func (w *indentWriter) Indent() { w.level++ }
+
+// Unindent decreases indentation by 1
+func (w *indentWriter) Unindent() { w.level-- }
+
+// the following methods intercept the bytes.Buffer writes and insert the
+// indentation when starting a new line.
+func (w *indentWriter) Write(b []byte) (int, error) {
+       w.indent()
+       return w.Buffer.Write(b)
+}
+
+func (w *indentWriter) WriteString(s string) (int, error) {
+       w.indent()
+       return w.Buffer.WriteString(s)
+}
+func (w *indentWriter) WriteByte(b byte) error {
+       w.indent()
+       return w.Buffer.WriteByte(b)
+}
+func (w *indentWriter) WriteRune(r rune) (int, error) {
+       w.indent()
+       return w.Buffer.WriteRune(r)
+}
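A sketch of opting a vertex into DOT output; the node type here is hypothetical. Only vertices implementing GraphNodeDotter are rendered (others are skipped by writeBody above):

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/dag"
    )

    // node opts in to DOT output by implementing GraphNodeDotter.
    type node string

    func (n node) Name() string { return string(n) }

    func (n node) DotNode(title string, _ *dag.DotOpts) *dag.DotNode {
        return &dag.DotNode{Name: title, Attrs: map[string]string{"shape": "box"}}
    }

    func main() {
        var g dag.Graph
        g.Add(node("a"))
        g.Add(node("b"))
        g.Connect(dag.BasicEdge(node("a"), node("b")))

        fmt.Println(string(g.Dot(&dag.DotOpts{Verbose: true, DrawCycles: true, MaxDepth: -1})))
    }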
diff --git a/vendor/github.com/hashicorp/terraform/dag/edge.go b/vendor/github.com/hashicorp/terraform/dag/edge.go
new file mode 100644 (file)
index 0000000..f0d99ee
--- /dev/null
@@ -0,0 +1,37 @@
+package dag
+
+import (
+       "fmt"
+)
+
+// Edge represents an edge in the graph, with a source and target vertex.
+type Edge interface {
+       Source() Vertex
+       Target() Vertex
+
+       Hashable
+}
+
+// BasicEdge returns an Edge implementation that simply tracks the source
+// and target given as-is.
+func BasicEdge(source, target Vertex) Edge {
+       return &basicEdge{S: source, T: target}
+}
+
+// basicEdge is a basic implementation of Edge that has the source and
+// target vertex.
+type basicEdge struct {
+       S, T Vertex
+}
+
+func (e *basicEdge) Hashcode() interface{} {
+       return fmt.Sprintf("%p-%p", e.S, e.T)
+}
+
+func (e *basicEdge) Source() Vertex {
+       return e.S
+}
+
+func (e *basicEdge) Target() Vertex {
+       return e.T
+}
diff --git a/vendor/github.com/hashicorp/terraform/dag/graph.go b/vendor/github.com/hashicorp/terraform/dag/graph.go
new file mode 100644 (file)
index 0000000..e7517a2
--- /dev/null
@@ -0,0 +1,391 @@
+package dag
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "io"
+       "sort"
+)
+
+// Graph is used to represent a dependency graph.
+type Graph struct {
+       vertices  *Set
+       edges     *Set
+       downEdges map[interface{}]*Set
+       upEdges   map[interface{}]*Set
+
+       // JSON encoder for recording debug information
+       debug *encoder
+}
+
+// Subgrapher allows a Vertex to be a Graph itself, by returning a Grapher.
+type Subgrapher interface {
+       Subgraph() Grapher
+}
+
+// A Grapher is any type that returns a Grapher, mainly used to identify
+// dag.Graph and dag.AcyclicGraph.  In the case of Graph and AcyclicGraph, they
+// return themselves.
+type Grapher interface {
+       DirectedGraph() Grapher
+}
+
+// Vertex of the graph.
+type Vertex interface{}
+
+// NamedVertex is an optional interface that can be implemented by Vertex
+// to give it a human-friendly name that is used for outputting the graph.
+type NamedVertex interface {
+       Vertex
+       Name() string
+}
+
+func (g *Graph) DirectedGraph() Grapher {
+       return g
+}
+
+// Vertices returns the list of all the vertices in the graph.
+func (g *Graph) Vertices() []Vertex {
+       list := g.vertices.List()
+       result := make([]Vertex, len(list))
+       for i, v := range list {
+               result[i] = v.(Vertex)
+       }
+
+       return result
+}
+
+// Edges returns the list of all the edges in the graph.
+func (g *Graph) Edges() []Edge {
+       list := g.edges.List()
+       result := make([]Edge, len(list))
+       for i, v := range list {
+               result[i] = v.(Edge)
+       }
+
+       return result
+}
+
+// EdgesFrom returns the list of edges from the given source.
+func (g *Graph) EdgesFrom(v Vertex) []Edge {
+       var result []Edge
+       from := hashcode(v)
+       for _, e := range g.Edges() {
+               if hashcode(e.Source()) == from {
+                       result = append(result, e)
+               }
+       }
+
+       return result
+}
+
+// EdgesTo returns the list of edges to the given target.
+func (g *Graph) EdgesTo(v Vertex) []Edge {
+       var result []Edge
+       search := hashcode(v)
+       for _, e := range g.Edges() {
+               if hashcode(e.Target()) == search {
+                       result = append(result, e)
+               }
+       }
+
+       return result
+}
+
+// HasVertex checks if the given Vertex is present in the graph.
+func (g *Graph) HasVertex(v Vertex) bool {
+       return g.vertices.Include(v)
+}
+
+// HasEdge checks if the given Edge is present in the graph.
+func (g *Graph) HasEdge(e Edge) bool {
+       return g.edges.Include(e)
+}
+
+// Add adds a vertex to the graph. This is safe to call multiple times with
+// the same Vertex.
+func (g *Graph) Add(v Vertex) Vertex {
+       g.init()
+       g.vertices.Add(v)
+       g.debug.Add(v)
+       return v
+}
+
+// Remove removes a vertex from the graph. This will also remove any
+// edges with this vertex as a source or target.
+func (g *Graph) Remove(v Vertex) Vertex {
+       // Delete the vertex itself
+       g.vertices.Delete(v)
+       g.debug.Remove(v)
+
+       // Delete the edges to non-existent things
+       for _, target := range g.DownEdges(v).List() {
+               g.RemoveEdge(BasicEdge(v, target))
+       }
+       for _, source := range g.UpEdges(v).List() {
+               g.RemoveEdge(BasicEdge(source, v))
+       }
+
+       return nil
+}
+
+// Replace replaces the original Vertex with replacement. If the original
+// does not exist within the graph, then false is returned. Otherwise, true
+// is returned.
+func (g *Graph) Replace(original, replacement Vertex) bool {
+       // If we don't have the original, we can't do anything
+       if !g.vertices.Include(original) {
+               return false
+       }
+
+       defer g.debug.BeginOperation("Replace", "").End("")
+
+       // If they're the same, then don't do anything
+       if original == replacement {
+               return true
+       }
+
+       // Add our new vertex, then copy all the edges
+       g.Add(replacement)
+       for _, target := range g.DownEdges(original).List() {
+               g.Connect(BasicEdge(replacement, target))
+       }
+       for _, source := range g.UpEdges(original).List() {
+               g.Connect(BasicEdge(source, replacement))
+       }
+
+       // Remove our old vertex, which will also remove all the edges
+       g.Remove(original)
+
+       return true
+}
+
+// RemoveEdge removes an edge from the graph.
+func (g *Graph) RemoveEdge(edge Edge) {
+       g.init()
+       g.debug.RemoveEdge(edge)
+
+       // Delete the edge from the set
+       g.edges.Delete(edge)
+
+       // Delete the up/down edges
+       if s, ok := g.downEdges[hashcode(edge.Source())]; ok {
+               s.Delete(edge.Target())
+       }
+       if s, ok := g.upEdges[hashcode(edge.Target())]; ok {
+               s.Delete(edge.Source())
+       }
+}
+
+// DownEdges returns the set of vertices that v points to (the targets of its outward edges).
+func (g *Graph) DownEdges(v Vertex) *Set {
+       g.init()
+       return g.downEdges[hashcode(v)]
+}
+
+// UpEdges returns the set of vertices that point to v (the sources of its inward edges).
+func (g *Graph) UpEdges(v Vertex) *Set {
+       g.init()
+       return g.upEdges[hashcode(v)]
+}
+
+// Connect adds an edge with the given source and target. This is safe to
+// call multiple times with the same value. Note that the same value is
+// verified through pointer equality of the vertices, not through the
+// value of the edge itself.
+func (g *Graph) Connect(edge Edge) {
+       g.init()
+       g.debug.Connect(edge)
+
+       source := edge.Source()
+       target := edge.Target()
+       sourceCode := hashcode(source)
+       targetCode := hashcode(target)
+
+       // Do we have this already? If so, don't add it again.
+       if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) {
+               return
+       }
+
+       // Add the edge to the set
+       g.edges.Add(edge)
+
+       // Add the down edge
+       s, ok := g.downEdges[sourceCode]
+       if !ok {
+               s = new(Set)
+               g.downEdges[sourceCode] = s
+       }
+       s.Add(target)
+
+       // Add the up edge
+       s, ok = g.upEdges[targetCode]
+       if !ok {
+               s = new(Set)
+               g.upEdges[targetCode] = s
+       }
+       s.Add(source)
+}
+
+// StringWithNodeTypes outputs some human-friendly output for the graph structure, including each node's Go type.
+func (g *Graph) StringWithNodeTypes() string {
+       var buf bytes.Buffer
+
+       // Build the list of node names and a mapping so that we can more
+       // easily alphabetize the output to remain deterministic.
+       vertices := g.Vertices()
+       names := make([]string, 0, len(vertices))
+       mapping := make(map[string]Vertex, len(vertices))
+       for _, v := range vertices {
+               name := VertexName(v)
+               names = append(names, name)
+               mapping[name] = v
+       }
+       sort.Strings(names)
+
+       // Write each node in order...
+       for _, name := range names {
+               v := mapping[name]
+               targets := g.downEdges[hashcode(v)]
+
+               buf.WriteString(fmt.Sprintf("%s - %T\n", name, v))
+
+               // Alphabetize dependencies
+               deps := make([]string, 0, targets.Len())
+               targetNodes := make(map[string]Vertex)
+               for _, target := range targets.List() {
+                       dep := VertexName(target)
+                       deps = append(deps, dep)
+                       targetNodes[dep] = target
+               }
+               sort.Strings(deps)
+
+               // Write dependencies
+               for _, d := range deps {
+                       buf.WriteString(fmt.Sprintf("  %s - %T\n", d, targetNodes[d]))
+               }
+       }
+
+       return buf.String()
+}
+
+// String outputs some human-friendly output for the graph structure.
+func (g *Graph) String() string {
+       var buf bytes.Buffer
+
+       // Build the list of node names and a mapping so that we can more
+       // easily alphabetize the output to remain deterministic.
+       vertices := g.Vertices()
+       names := make([]string, 0, len(vertices))
+       mapping := make(map[string]Vertex, len(vertices))
+       for _, v := range vertices {
+               name := VertexName(v)
+               names = append(names, name)
+               mapping[name] = v
+       }
+       sort.Strings(names)
+
+       // Write each node in order...
+       for _, name := range names {
+               v := mapping[name]
+               targets := g.downEdges[hashcode(v)]
+
+               buf.WriteString(fmt.Sprintf("%s\n", name))
+
+               // Alphabetize dependencies
+               deps := make([]string, 0, targets.Len())
+               for _, target := range targets.List() {
+                       deps = append(deps, VertexName(target))
+               }
+               sort.Strings(deps)
+
+               // Write dependencies
+               for _, d := range deps {
+                       buf.WriteString(fmt.Sprintf("  %s\n", d))
+               }
+       }
+
+       return buf.String()
+}
+
+func (g *Graph) init() {
+       if g.vertices == nil {
+               g.vertices = new(Set)
+       }
+       if g.edges == nil {
+               g.edges = new(Set)
+       }
+       if g.downEdges == nil {
+               g.downEdges = make(map[interface{}]*Set)
+       }
+       if g.upEdges == nil {
+               g.upEdges = make(map[interface{}]*Set)
+       }
+}
+
+// Dot returns a dot-formatted representation of the Graph.
+func (g *Graph) Dot(opts *DotOpts) []byte {
+       return newMarshalGraph("", g).Dot(opts)
+}
+
+// MarshalJSON returns a JSON representation of the entire Graph.
+func (g *Graph) MarshalJSON() ([]byte, error) {
+       dg := newMarshalGraph("root", g)
+       return json.MarshalIndent(dg, "", "  ")
+}
+
+// SetDebugWriter sets the io.Writer where the Graph will record debug
+// information. After this is set, the graph will immediately encode itself to
+// the stream, and continue to record all subsequent operations.
+func (g *Graph) SetDebugWriter(w io.Writer) {
+       g.debug = &encoder{w: w}
+       g.debug.Encode(newMarshalGraph("root", g))
+}
+
+// DebugVertexInfo encodes arbitrary information about a vertex in the graph
+// debug logs.
+func (g *Graph) DebugVertexInfo(v Vertex, info string) {
+       va := newVertexInfo(typeVertexInfo, v, info)
+       g.debug.Encode(va)
+}
+
+// DebugEdgeInfo encodes arbitrary information about an edge in the graph debug
+// logs.
+func (g *Graph) DebugEdgeInfo(e Edge, info string) {
+       ea := newEdgeInfo(typeEdgeInfo, e, info)
+       g.debug.Encode(ea)
+}
+
+// DebugVisitInfo records a visit to a Vertex during a walk operation.
+func (g *Graph) DebugVisitInfo(v Vertex, info string) {
+       vi := newVertexInfo(typeVisitInfo, v, info)
+       g.debug.Encode(vi)
+}
+
+// DebugOperation marks the start of a set of graph transformations in
+// the debug log, and returns a DebugOperationEnd func, which marks the end of
+// the operation in the log. Additional information can be added to the log via
+// the info parameter.
+//
+// The returned func's End method allows this method to be called from a single
+// defer statement:
+//     defer g.DebugOperation("OpName", "operating").End("")
+//
+// The returned function must be called to properly close the logical operation
+// in the logs.
+func (g *Graph) DebugOperation(operation string, info string) DebugOperationEnd {
+       return g.debug.BeginOperation(operation, info)
+}
+
+// VertexName returns the name of a vertex.
+func VertexName(raw Vertex) string {
+       switch v := raw.(type) {
+       case NamedVertex:
+               return v.Name()
+       case fmt.Stringer:
+               return fmt.Sprintf("%s", v)
+       default:
+               return fmt.Sprintf("%v", v)
+       }
+}
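A short sketch of Replace rewiring edges; the vertex values are arbitrary strings:

    package main

    import (
        "fmt"

        "github.com/hashicorp/terraform/dag"
    )

    func main() {
        var g dag.Graph
        g.Add("old")
        g.Add("dep")
        g.Connect(dag.BasicEdge("old", "dep"))

        g.Replace("old", "new") // rewires the edge to "dep" and drops "old"
        fmt.Println(g.String())
        // dep
        // new
        //   dep
    }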
diff --git a/vendor/github.com/hashicorp/terraform/dag/marshal.go b/vendor/github.com/hashicorp/terraform/dag/marshal.go
new file mode 100644 (file)
index 0000000..16d5dd6
--- /dev/null
@@ -0,0 +1,462 @@
+package dag
+
+import (
+       "encoding/json"
+       "fmt"
+       "io"
+       "log"
+       "reflect"
+       "sort"
+       "strconv"
+       "sync"
+)
+
+const (
+       typeOperation             = "Operation"
+       typeTransform             = "Transform"
+       typeWalk                  = "Walk"
+       typeDepthFirstWalk        = "DepthFirstWalk"
+       typeReverseDepthFirstWalk = "ReverseDepthFirstWalk"
+       typeTransitiveReduction   = "TransitiveReduction"
+       typeEdgeInfo              = "EdgeInfo"
+       typeVertexInfo            = "VertexInfo"
+       typeVisitInfo             = "VisitInfo"
+)
+
+// the marshal* structs are for serialization of the graph data.
+type marshalGraph struct {
+       // Type is always "Graph", for identification as a top level object in the
+       // JSON stream.
+       Type string
+
+       // Each marshal structure requires a unique ID so that it can be referenced
+       // by other structures.
+       ID string `json:",omitempty"`
+
+       // Human readable name for this graph.
+       Name string `json:",omitempty"`
+
+       // Arbitrary attributes that can be added to the output.
+       Attrs map[string]string `json:",omitempty"`
+
+       // List of graph vertices, sorted by ID.
+       Vertices []*marshalVertex `json:",omitempty"`
+
+       // List of edges, sorted by Source ID.
+       Edges []*marshalEdge `json:",omitempty"`
+
+       // Any number of subgraphs. A subgraph itself is considered a vertex, and
+       // may be referenced by either end of an edge.
+       Subgraphs []*marshalGraph `json:",omitempty"`
+
+       // Any lists of vertices that are included in cycles.
+       Cycles [][]*marshalVertex `json:",omitempty"`
+}
+
+// The add, remove, connect, removeEdge methods mirror the basic Graph
+// manipulations to reconstruct a marshalGraph from a debug log.
+func (g *marshalGraph) add(v *marshalVertex) {
+       g.Vertices = append(g.Vertices, v)
+       sort.Sort(vertices(g.Vertices))
+}
+
+func (g *marshalGraph) remove(v *marshalVertex) {
+       for i, existing := range g.Vertices {
+               if v.ID == existing.ID {
+                       g.Vertices = append(g.Vertices[:i], g.Vertices[i+1:]...)
+                       return
+               }
+       }
+}
+
+func (g *marshalGraph) connect(e *marshalEdge) {
+       g.Edges = append(g.Edges, e)
+       sort.Sort(edges(g.Edges))
+}
+
+func (g *marshalGraph) removeEdge(e *marshalEdge) {
+       for i, existing := range g.Edges {
+               if e.Source == existing.Source && e.Target == existing.Target {
+                       g.Edges = append(g.Edges[:i], g.Edges[i+1:]...)
+                       return
+               }
+       }
+}
+
+func (g *marshalGraph) vertexByID(id string) *marshalVertex {
+       for _, v := range g.Vertices {
+               if id == v.ID {
+                       return v
+               }
+       }
+       return nil
+}
+
+type marshalVertex struct {
+       // Unique ID, used to reference this vertex from other structures.
+       ID string
+
+       // Human readable name
+       Name string `json:",omitempty"`
+
+       Attrs map[string]string `json:",omitempty"`
+
+       // This is to help transition from the old Dot interfaces. We record if the
+       // node was a GraphNodeDotter here, so we can call it to get attributes.
+       graphNodeDotter GraphNodeDotter
+}
+
+func newMarshalVertex(v Vertex) *marshalVertex {
+       dn, ok := v.(GraphNodeDotter)
+       if !ok {
+               dn = nil
+       }
+
+       return &marshalVertex{
+               ID:              marshalVertexID(v),
+               Name:            VertexName(v),
+               Attrs:           make(map[string]string),
+               graphNodeDotter: dn,
+       }
+}
+
+// vertices is a sort.Interface implementation for sorting vertices by Name
+type vertices []*marshalVertex
+
+func (v vertices) Less(i, j int) bool { return v[i].Name < v[j].Name }
+func (v vertices) Len() int           { return len(v) }
+func (v vertices) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
+
+type marshalEdge struct {
+       // Human readable name
+       Name string
+
+       // Source and Target Vertices by ID
+       Source string
+       Target string
+
+       Attrs map[string]string `json:",omitempty"`
+}
+
+func newMarshalEdge(e Edge) *marshalEdge {
+       return &marshalEdge{
+               Name:   fmt.Sprintf("%s|%s", VertexName(e.Source()), VertexName(e.Target())),
+               Source: marshalVertexID(e.Source()),
+               Target: marshalVertexID(e.Target()),
+               Attrs:  make(map[string]string),
+       }
+}
+
+// edges is a sort.Interface implementation for sorting edges by Name
+type edges []*marshalEdge
+
+func (e edges) Less(i, j int) bool { return e[i].Name < e[j].Name }
+func (e edges) Len() int           { return len(e) }
+func (e edges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
+
+// build a marshalGraph structure from a *Graph
+func newMarshalGraph(name string, g *Graph) *marshalGraph {
+       mg := &marshalGraph{
+               Type:  "Graph",
+               Name:  name,
+               Attrs: make(map[string]string),
+       }
+
+       for _, v := range g.Vertices() {
+               id := marshalVertexID(v)
+               if sg, ok := marshalSubgrapher(v); ok {
+                       smg := newMarshalGraph(VertexName(v), sg)
+                       smg.ID = id
+                       mg.Subgraphs = append(mg.Subgraphs, smg)
+               }
+
+               mv := newMarshalVertex(v)
+               mg.Vertices = append(mg.Vertices, mv)
+       }
+
+       sort.Sort(vertices(mg.Vertices))
+
+       for _, e := range g.Edges() {
+               mg.Edges = append(mg.Edges, newMarshalEdge(e))
+       }
+
+       sort.Sort(edges(mg.Edges))
+
+       for _, c := range (&AcyclicGraph{*g}).Cycles() {
+               var cycle []*marshalVertex
+               for _, v := range c {
+                       mv := newMarshalVertex(v)
+                       cycle = append(cycle, mv)
+               }
+               mg.Cycles = append(mg.Cycles, cycle)
+       }
+
+       return mg
+}
+
+// marshalVertexID attempts to return a unique ID for any vertex.
+func marshalVertexID(v Vertex) string {
+       val := reflect.ValueOf(v)
+       switch val.Kind() {
+       case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+               return strconv.Itoa(int(val.Pointer()))
+       case reflect.Interface:
+               return strconv.Itoa(int(val.InterfaceData()[1]))
+       }
+
+       if v, ok := v.(Hashable); ok {
+               h := v.Hashcode()
+               if h, ok := h.(string); ok {
+                       return h
+               }
+       }
+
+       // fallback to a name, which we hope is unique.
+       return VertexName(v)
+
+       // we could try harder by attempting to read the arbitrary value from the
+       // interface, but we shouldn't get here from terraform right now.
+}
+
+// check for a Subgrapher, and return the underlying *Graph.
+func marshalSubgrapher(v Vertex) (*Graph, bool) {
+       sg, ok := v.(Subgrapher)
+       if !ok {
+               return nil, false
+       }
+
+       switch g := sg.Subgraph().DirectedGraph().(type) {
+       case *Graph:
+               return g, true
+       case *AcyclicGraph:
+               return &g.Graph, true
+       }
+
+       return nil, false
+}
+
+// The DebugOperationEnd func type provides a way to call an End function via a
+// method call, allowing for the chaining of methods in a defer statement.
+type DebugOperationEnd func(string)
+
+// End calls function e with the info parameter, marking the end of this
+// operation in the logs.
+func (e DebugOperationEnd) End(info string) { e(info) }
+
+// encoder provides methods to write debug data to an io.Writer, and is a noop
+// when no writer is present
+type encoder struct {
+       sync.Mutex
+       w io.Writer
+}
+
+// Encode is analogous to json.Encoder.Encode
+func (e *encoder) Encode(i interface{}) {
+       if e == nil || e.w == nil {
+               return
+       }
+       e.Lock()
+       defer e.Unlock()
+
+       js, err := json.Marshal(i)
+       if err != nil {
+               log.Println("[ERROR] dag:", err)
+               return
+       }
+       js = append(js, '\n')
+
+       _, err = e.w.Write(js)
+       if err != nil {
+               log.Println("[ERROR] dag:", err)
+               return
+       }
+}
+
+func (e *encoder) Add(v Vertex) {
+       e.Encode(marshalTransform{
+               Type:      typeTransform,
+               AddVertex: newMarshalVertex(v),
+       })
+}
+
+// Remove records the removal of Vertex v.
+func (e *encoder) Remove(v Vertex) {
+       e.Encode(marshalTransform{
+               Type:         typeTransform,
+               RemoveVertex: newMarshalVertex(v),
+       })
+}
+
+func (e *encoder) Connect(edge Edge) {
+       e.Encode(marshalTransform{
+               Type:    typeTransform,
+               AddEdge: newMarshalEdge(edge),
+       })
+}
+
+func (e *encoder) RemoveEdge(edge Edge) {
+       e.Encode(marshalTransform{
+               Type:       typeTransform,
+               RemoveEdge: newMarshalEdge(edge),
+       })
+}
+
+// BeginOperation marks the start of a set of graph transformations, and
+// returns a DebugOperationEnd func to be called once the operation is complete.
+func (e *encoder) BeginOperation(op string, info string) DebugOperationEnd {
+       if e == nil {
+               return func(string) {}
+       }
+
+       e.Encode(marshalOperation{
+               Type:  typeOperation,
+               Begin: op,
+               Info:  info,
+       })
+
+       return func(info string) {
+               e.Encode(marshalOperation{
+                       Type: typeOperation,
+                       End:  op,
+                       Info: info,
+               })
+       }
+}
+
+// structure for recording graph transformations
+type marshalTransform struct {
+       // Type: "Transform"
+       Type         string
+       AddEdge      *marshalEdge   `json:",omitempty"`
+       RemoveEdge   *marshalEdge   `json:",omitempty"`
+       AddVertex    *marshalVertex `json:",omitempty"`
+       RemoveVertex *marshalVertex `json:",omitempty"`
+}
+
+func (t marshalTransform) Transform(g *marshalGraph) {
+       switch {
+       case t.AddEdge != nil:
+               g.connect(t.AddEdge)
+       case t.RemoveEdge != nil:
+               g.removeEdge(t.RemoveEdge)
+       case t.AddVertex != nil:
+               g.add(t.AddVertex)
+       case t.RemoveVertex != nil:
+               g.remove(t.RemoveVertex)
+       }
+}
+
+// this structure allows us to decode any object in the json stream for
+// inspection, then re-decode it into a proper struct if needed.
+type streamDecode struct {
+       Type string
+       Map  map[string]interface{}
+       JSON []byte
+}
+
+func (s *streamDecode) UnmarshalJSON(d []byte) error {
+       s.JSON = d
+       err := json.Unmarshal(d, &s.Map)
+       if err != nil {
+               return err
+       }
+
+       if t, ok := s.Map["Type"]; ok {
+               s.Type, _ = t.(string)
+       }
+       return nil
+}
+
+// structure for recording the beginning and end of any multi-step
+// transformations. These are informational, and not required to reproduce the
+// graph state.
+type marshalOperation struct {
+       Type  string
+       Begin string `json:",omitempty"`
+       End   string `json:",omitempty"`
+       Info  string `json:",omitempty"`
+}
+
+// decodeGraph decodes a marshalGraph from an encoded graph stream.
+func decodeGraph(r io.Reader) (*marshalGraph, error) {
+       dec := json.NewDecoder(r)
+
+       // a stream should always start with a graph
+       g := &marshalGraph{}
+
+       err := dec.Decode(g)
+       if err != nil {
+               return nil, err
+       }
+
+       // now replay any operations that occurred on the original graph
+       for dec.More() {
+               s := &streamDecode{}
+               err := dec.Decode(s)
+               if err != nil {
+                       return g, err
+               }
+
+               // the only Type we're concerned with here is Transform, which we
+               // replay to complete the Graph
+               if s.Type != typeTransform {
+                       continue
+               }
+
+               t := &marshalTransform{}
+               err = json.Unmarshal(s.JSON, t)
+               if err != nil {
+                       return g, err
+               }
+               t.Transform(g)
+       }
+       return g, nil
+}
+
+// marshalVertexInfo allows encoding arbitrary information about a single
+// Vertex in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalVertexInfo struct {
+       Type   string
+       Vertex *marshalVertex
+       Info   string
+}
+
+func newVertexInfo(infoType string, v Vertex, info string) *marshalVertexInfo {
+       return &marshalVertexInfo{
+               Type:   infoType,
+               Vertex: newMarshalVertex(v),
+               Info:   info,
+       }
+}
+
+// marshalEdgeInfo allows encoding arbitrary information about a single
+// Edge in the logs. These are accumulated for informational display while
+// rebuilding the graph.
+type marshalEdgeInfo struct {
+       Type string
+       Edge *marshalEdge
+       Info string
+}
+
+func newEdgeInfo(infoType string, e Edge, info string) *marshalEdgeInfo {
+       return &marshalEdgeInfo{
+               Type: infoType,
+               Edge: newMarshalEdge(e),
+               Info: info,
+       }
+}
+
+// JSON2Dot reads a Graph debug log from an io.Reader and converts the final
+// graph to DOT format.
+//
+// TODO: Allow returning the output at a certain point during decode.
+//       Encode extra information from the json log into the Dot.
+func JSON2Dot(r io.Reader) ([]byte, error) {
+       g, err := decodeGraph(r)
+       if err != nil {
+               return nil, err
+       }
+
+       return g.Dot(nil), nil
+}
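A minimal usage sketch of the decoder above, assuming the vendored dag import path; the log file name is hypothetical:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	// graph-debug.json is a hypothetical log produced by the encoder above.
	f, err := os.Open("graph-debug.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// JSON2Dot replays the initial graph plus any Transform records and
	// renders the final graph in DOT format.
	dot, err := dag.JSON2Dot(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(dot))
}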
diff --git a/vendor/github.com/hashicorp/terraform/dag/set.go b/vendor/github.com/hashicorp/terraform/dag/set.go
new file mode 100644 (file)
index 0000000..3929c9d
--- /dev/null
@@ -0,0 +1,109 @@
+package dag
+
+import (
+       "sync"
+)
+
+// Set is a set data structure.
+type Set struct {
+       m    map[interface{}]interface{}
+       once sync.Once
+}
+
+// Hashable is the interface used by set to get the hash code of a value.
+// If this isn't given, then the value of the item being added to the set
+// itself is used as the comparison value.
+type Hashable interface {
+       Hashcode() interface{}
+}
+
+// hashcode returns the hashcode used for set elements.
+func hashcode(v interface{}) interface{} {
+       if h, ok := v.(Hashable); ok {
+               return h.Hashcode()
+       }
+
+       return v
+}
+
+// Add adds an item to the set
+func (s *Set) Add(v interface{}) {
+       s.once.Do(s.init)
+       s.m[hashcode(v)] = v
+}
+
+// Delete removes an item from the set.
+func (s *Set) Delete(v interface{}) {
+       s.once.Do(s.init)
+       delete(s.m, hashcode(v))
+}
+
+// Include returns true/false of whether a value is in the set.
+func (s *Set) Include(v interface{}) bool {
+       s.once.Do(s.init)
+       _, ok := s.m[hashcode(v)]
+       return ok
+}
+
+// Intersection computes the set intersection with other.
+func (s *Set) Intersection(other *Set) *Set {
+       result := new(Set)
+       if s == nil {
+               return result
+       }
+       if other != nil {
+               for _, v := range s.m {
+                       if other.Include(v) {
+                               result.Add(v)
+                       }
+               }
+       }
+
+       return result
+}
+
+// Difference returns a set with the elements that s has but
+// other doesn't.
+func (s *Set) Difference(other *Set) *Set {
+       result := new(Set)
+       if s != nil {
+               for k, v := range s.m {
+                       var ok bool
+                       if other != nil {
+                               _, ok = other.m[k]
+                       }
+                       if !ok {
+                               result.Add(v)
+                       }
+               }
+       }
+
+       return result
+}
+
+// Len is the number of items in the set.
+func (s *Set) Len() int {
+       if s == nil {
+               return 0
+       }
+
+       return len(s.m)
+}
+
+// List returns the list of set elements.
+func (s *Set) List() []interface{} {
+       if s == nil {
+               return nil
+       }
+
+       r := make([]interface{}, 0, len(s.m))
+       for _, v := range s.m {
+               r = append(r, v)
+       }
+
+       return r
+}
+
+func (s *Set) init() {
+       s.m = make(map[interface{}]interface{})
+}
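A short usage sketch of Set, assuming the vendored dag import path; the zero value is usable because init runs through sync.Once on first access:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var a, b dag.Set

	a.Add("x")
	a.Add("y")
	b.Add("y")
	b.Add("z")

	fmt.Println(a.Include("x"))           // true
	fmt.Println(a.Intersection(&b).Len()) // 1 (just "y")
	fmt.Println(a.Difference(&b).List())  // [x]
}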
diff --git a/vendor/github.com/hashicorp/terraform/dag/tarjan.go b/vendor/github.com/hashicorp/terraform/dag/tarjan.go
new file mode 100644 (file)
index 0000000..9d8b25c
--- /dev/null
@@ -0,0 +1,107 @@
+package dag
+
+// StronglyConnected returns the list of strongly connected components
+// within the Graph g. This information is primarily used by this package
+// for cycle detection, but strongly connected components have widespread
+// use.
+func StronglyConnected(g *Graph) [][]Vertex {
+       vs := g.Vertices()
+       acct := sccAcct{
+               NextIndex:   1,
+               VertexIndex: make(map[Vertex]int, len(vs)),
+       }
+       for _, v := range vs {
+               // Recurse on any non-visited nodes
+               if acct.VertexIndex[v] == 0 {
+                       stronglyConnected(&acct, g, v)
+               }
+       }
+       return acct.SCC
+}
+
+func stronglyConnected(acct *sccAcct, g *Graph, v Vertex) int {
+       // Initial vertex visit
+       index := acct.visit(v)
+       minIdx := index
+
+       for _, raw := range g.DownEdges(v).List() {
+               target := raw.(Vertex)
+               targetIdx := acct.VertexIndex[target]
+
+               // Recurse on successor if not yet visited
+               if targetIdx == 0 {
+                       minIdx = min(minIdx, stronglyConnected(acct, g, target))
+               } else if acct.inStack(target) {
+                       // Check if the vertex is in the stack
+                       minIdx = min(minIdx, targetIdx)
+               }
+       }
+
+       // Pop the strongly connected components off the stack if
+       // this is a root vertex
+       if index == minIdx {
+               var scc []Vertex
+               for {
+                       v2 := acct.pop()
+                       scc = append(scc, v2)
+                       if v2 == v {
+                               break
+                       }
+               }
+
+               acct.SCC = append(acct.SCC, scc)
+       }
+
+       return minIdx
+}
+
+func min(a, b int) int {
+       if a <= b {
+               return a
+       }
+       return b
+}
+
+// sccAcct is used to pass around accounting information for
+// the StronglyConnected algorithm
+type sccAcct struct {
+       NextIndex   int
+       VertexIndex map[Vertex]int
+       Stack       []Vertex
+       SCC         [][]Vertex
+}
+
+// visit assigns an index and pushes a vertex onto the stack
+func (s *sccAcct) visit(v Vertex) int {
+       idx := s.NextIndex
+       s.VertexIndex[v] = idx
+       s.NextIndex++
+       s.push(v)
+       return idx
+}
+
+// push adds a vertex to the stack
+func (s *sccAcct) push(n Vertex) {
+       s.Stack = append(s.Stack, n)
+}
+
+// pop removes a vertex from the stack
+func (s *sccAcct) pop() Vertex {
+       n := len(s.Stack)
+       if n == 0 {
+               return nil
+       }
+       vertex := s.Stack[n-1]
+       s.Stack = s.Stack[:n-1]
+       return vertex
+}
+
+// inStack checks if a vertex is in the stack
+func (s *sccAcct) inStack(needle Vertex) bool {
+       for _, n := range s.Stack {
+               if n == needle {
+                       return true
+               }
+       }
+       return false
+}
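A minimal sketch of cycle detection via StronglyConnected, assuming Graph.Add, Graph.Connect, and BasicEdge as defined elsewhere in this package; any component with more than one vertex indicates a cycle:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.Graph
	g.Add(1)
	g.Add(2)
	g.Add(3)
	g.Connect(dag.BasicEdge(1, 2))
	g.Connect(dag.BasicEdge(2, 1)) // 1 and 2 form a cycle
	g.Connect(dag.BasicEdge(2, 3))

	for _, scc := range dag.StronglyConnected(&g) {
		if len(scc) > 1 {
			fmt.Println("cycle:", scc)
		}
	}
}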
diff --git a/vendor/github.com/hashicorp/terraform/dag/walk.go b/vendor/github.com/hashicorp/terraform/dag/walk.go
new file mode 100644 (file)
index 0000000..23c87ad
--- /dev/null
@@ -0,0 +1,445 @@
+package dag
+
+import (
+       "errors"
+       "fmt"
+       "log"
+       "sync"
+       "time"
+
+       "github.com/hashicorp/go-multierror"
+)
+
+// Walker is used to walk every vertex of a graph in parallel.
+//
+// A vertex will only be walked when the dependencies of that vertex have
+// been walked. If two vertices can be walked at the same time, they will be.
+//
+// Update can be called to update the graph. This can be called even during
+// a walk, changing vertices/edges mid-walk. This should be done carefully.
+// If a vertex is removed but has already been executed, the result of that
+// execution (any error) is still returned by Wait. Changing or re-adding
+// a vertex that has already executed has no effect. Changing edges of
+// a vertex that has already executed has no effect.
+//
+// Non-parallelism can be enforced by introducing a lock in your callback
+// function. However, the goroutine overhead of a walk will remain.
+// Walker will create V*2 goroutines (one for each vertex, and a dependency
+// waiter for each vertex). In general this should be of no concern unless
+// there are a huge number of vertices.
+//
+// The walk is depth first by default. This can be changed with the Reverse
+// option.
+//
+// A single walker is only valid for one graph walk. After the walk is complete
+// you must construct a new walker to walk again. State for the walk is never
+// deleted in case vertices or edges are changed.
+type Walker struct {
+       // Callback is what is called for each vertex
+       Callback WalkFunc
+
+       // Reverse, if true, causes the source of an edge to depend on a target.
+       // When false (default), the target depends on the source.
+       Reverse bool
+
+       // changeLock must be held to modify any of the fields below. Only Update
+       // should modify these fields. Modifying them outside of Update can cause
+       // serious problems.
+       changeLock sync.Mutex
+       vertices   Set
+       edges      Set
+       vertexMap  map[Vertex]*walkerVertex
+
+       // wait is done when all vertices have executed. It may become "undone"
+       // if new vertices are added.
+       wait sync.WaitGroup
+
+       // errMap contains the errors recorded so far for execution. Reading
+       // and writing should hold errLock.
+       errMap  map[Vertex]error
+       errLock sync.Mutex
+}
+
+type walkerVertex struct {
+       // These should only be set once on initialization and never written again.
+       // They are not protected by a lock since they don't need to be since
+       // they are write-once.
+
+       // DoneCh is closed when this vertex has completed execution, regardless
+       // of success.
+       //
+       // CancelCh is closed when the vertex should cancel execution. If execution
+       // is already complete (DoneCh is closed), this has no effect. Otherwise,
+       // execution is cancelled as quickly as possible.
+       DoneCh   chan struct{}
+       CancelCh chan struct{}
+
+       // Dependency information. Any changes to any of these fields requires
+       // holding DepsLock.
+       //
+       // DepsCh is sent a single value that denotes whether the upstream deps
+       // were successful (no errors). Any value sent means that the upstream
+       // dependencies are complete. No other values will ever be sent again.
+       //
+       // DepsUpdateCh is closed when there is a new DepsCh set.
+       DepsCh       chan bool
+       DepsUpdateCh chan struct{}
+       DepsLock     sync.Mutex
+
+       // Below is not safe to read/write in parallel. This behavior is
+       // enforced by changes only happening in Update. Nothing else should
+       // ever modify these.
+       deps         map[Vertex]chan struct{}
+       depsCancelCh chan struct{}
+}
+
+// errWalkUpstream is used in the errMap of a walk to note that an upstream
+// dependency failed so this vertex wasn't run. This is not shown in the final
+// user-returned error.
+var errWalkUpstream = errors.New("upstream dependency failed")
+
+// Wait waits for the completion of the walk and returns any errors
+// (in the form of a multierror) that occurred. Update should be called
+// to populate the walk with vertices and edges prior to calling this.
+//
+// Wait will return as soon as all currently known vertices are complete.
+// If you plan on calling Update with more vertices in the future, you
+// should not call Wait until after this is done.
+func (w *Walker) Wait() error {
+       // Wait for completion
+       w.wait.Wait()
+
+       // Grab the error lock
+       w.errLock.Lock()
+       defer w.errLock.Unlock()
+
+       // Build the error
+       var result error
+       for v, err := range w.errMap {
+               if err != nil && err != errWalkUpstream {
+                       result = multierror.Append(result, fmt.Errorf(
+                               "%s: %s", VertexName(v), err))
+               }
+       }
+
+       return result
+}
+
+// Update updates the currently executing walk with the given graph.
+// This will perform a diff of the vertices and edges and update the walker.
+// Already completed vertices remain completed (including any errors during
+// their execution).
+//
+// This returns immediately once the walker is updated; it does not wait
+// for completion of the walk.
+//
+// Multiple Updates can be called in parallel. Update can be called at any
+// time during a walk.
+func (w *Walker) Update(g *AcyclicGraph) {
+       var v, e *Set
+       if g != nil {
+               v, e = g.vertices, g.edges
+       }
+
+       // Grab the change lock so no more updates happen but also so that
+       // no new vertices are executed during this time since we may be
+       // removing them.
+       w.changeLock.Lock()
+       defer w.changeLock.Unlock()
+
+       // Initialize fields
+       if w.vertexMap == nil {
+               w.vertexMap = make(map[Vertex]*walkerVertex)
+       }
+
+       // Calculate all our sets
+       newEdges := e.Difference(&w.edges)
+       oldEdges := w.edges.Difference(e)
+       newVerts := v.Difference(&w.vertices)
+       oldVerts := w.vertices.Difference(v)
+
+       // Add the new vertices
+       for _, raw := range newVerts.List() {
+               v := raw.(Vertex)
+
+               // Add to the waitgroup so our walk is not done until everything finishes
+               w.wait.Add(1)
+
+               // Add to our own set so we know about it already
+               log.Printf("[DEBUG] dag/walk: added new vertex: %q", VertexName(v))
+               w.vertices.Add(raw)
+
+               // Initialize the vertex info
+               info := &walkerVertex{
+                       DoneCh:   make(chan struct{}),
+                       CancelCh: make(chan struct{}),
+                       deps:     make(map[Vertex]chan struct{}),
+               }
+
+               // Add it to the map and kick off the walk
+               w.vertexMap[v] = info
+       }
+
+       // Remove the old vertices
+       for _, raw := range oldVerts.List() {
+               v := raw.(Vertex)
+
+               // Get the vertex info so we can cancel it
+               info, ok := w.vertexMap[v]
+               if !ok {
+                       // This vertex for some reason was never in our map. This
+                       // shouldn't be possible.
+                       continue
+               }
+
+               // Cancel the vertex
+               close(info.CancelCh)
+
+               // Delete it out of the map
+               delete(w.vertexMap, v)
+
+               log.Printf("[DEBUG] dag/walk: removed vertex: %q", VertexName(v))
+               w.vertices.Delete(raw)
+       }
+
+       // Add the new edges
+       var changedDeps Set
+       for _, raw := range newEdges.List() {
+               edge := raw.(Edge)
+               waiter, dep := w.edgeParts(edge)
+
+               // Get the info for the waiter
+               waiterInfo, ok := w.vertexMap[waiter]
+               if !ok {
+                       // Vertex doesn't exist... shouldn't be possible but ignore.
+                       continue
+               }
+
+               // Get the info for the dep
+               depInfo, ok := w.vertexMap[dep]
+               if !ok {
+                       // Vertex doesn't exist... shouldn't be possible but ignore.
+                       continue
+               }
+
+               // Add the dependency to our waiter
+               waiterInfo.deps[dep] = depInfo.DoneCh
+
+               // Record that the deps changed for this waiter
+               changedDeps.Add(waiter)
+
+               log.Printf(
+                       "[DEBUG] dag/walk: added edge: %q waiting on %q",
+                       VertexName(waiter), VertexName(dep))
+               w.edges.Add(raw)
+       }
+
+       // Process removed edges
+       for _, raw := range oldEdges.List() {
+               edge := raw.(Edge)
+               waiter, dep := w.edgeParts(edge)
+
+               // Get the info for the waiter
+               waiterInfo, ok := w.vertexMap[waiter]
+               if !ok {
+                       // Vertex doesn't exist... shouldn't be possible but ignore.
+                       continue
+               }
+
+               // Delete the dependency from the waiter
+               delete(waiterInfo.deps, dep)
+
+               // Record that the deps changed for this waiter
+               changedDeps.Add(waiter)
+
+               log.Printf(
+                       "[DEBUG] dag/walk: removed edge: %q waiting on %q",
+                       VertexName(waiter), VertexName(dep))
+               w.edges.Delete(raw)
+       }
+
+       // For each vertex with changed dependencies, we need to kick off
+       // a new waiter and notify the vertex of the changes.
+       for _, raw := range changedDeps.List() {
+               v := raw.(Vertex)
+               info, ok := w.vertexMap[v]
+               if !ok {
+                       // Vertex doesn't exist... shouldn't be possible but ignore.
+                       continue
+               }
+
+               // Create a new done channel
+               doneCh := make(chan bool, 1)
+
+               // Create the channel we close for cancellation
+               cancelCh := make(chan struct{})
+
+               // Build a new deps copy
+               deps := make(map[Vertex]<-chan struct{})
+               for k, v := range info.deps {
+                       deps[k] = v
+               }
+
+               // Update the update channel
+               info.DepsLock.Lock()
+               if info.DepsUpdateCh != nil {
+                       close(info.DepsUpdateCh)
+               }
+               info.DepsCh = doneCh
+               info.DepsUpdateCh = make(chan struct{})
+               info.DepsLock.Unlock()
+
+               // Cancel the older waiter
+               if info.depsCancelCh != nil {
+                       close(info.depsCancelCh)
+               }
+               info.depsCancelCh = cancelCh
+
+               log.Printf(
+                       "[DEBUG] dag/walk: dependencies changed for %q, sending new deps",
+                       VertexName(v))
+
+               // Start the waiter
+               go w.waitDeps(v, deps, doneCh, cancelCh)
+       }
+
+       // Start all the new vertices. We do this at the end so that all
+       // the edge waiters and changes are setup above.
+       for _, raw := range newVerts.List() {
+               v := raw.(Vertex)
+               go w.walkVertex(v, w.vertexMap[v])
+       }
+}
+
+// edgeParts returns the waiter and the dependency, in that order.
+// The waiter is waiting on the dependency.
+func (w *Walker) edgeParts(e Edge) (Vertex, Vertex) {
+       if w.Reverse {
+               return e.Source(), e.Target()
+       }
+
+       return e.Target(), e.Source()
+}
+
+// walkVertex walks a single vertex, waiting for any dependencies before
+// executing the callback.
+func (w *Walker) walkVertex(v Vertex, info *walkerVertex) {
+       // When we're done executing, lower the waitgroup count
+       defer w.wait.Done()
+
+       // When we're done, always close our done channel
+       defer close(info.DoneCh)
+
+       // Wait for our dependencies. We create a [closed] deps channel so
+       // that we can immediately fall through to load our actual DepsCh.
+       var depsSuccess bool
+       var depsUpdateCh chan struct{}
+       depsCh := make(chan bool, 1)
+       depsCh <- true
+       close(depsCh)
+       for {
+               select {
+               case <-info.CancelCh:
+                       // Cancel
+                       return
+
+               case depsSuccess = <-depsCh:
+                       // Deps complete! Mark as nil to trigger completion handling.
+                       depsCh = nil
+
+               case <-depsUpdateCh:
+                       // New deps, reloop
+               }
+
+               // Check if we have updated dependencies. This can happen if the
+               // dependencies were satisfied exactly prior to an Update occurring.
+               // In that case, we'd like to take into account new dependencies
+               // if possible.
+               info.DepsLock.Lock()
+               if info.DepsCh != nil {
+                       depsCh = info.DepsCh
+                       info.DepsCh = nil
+               }
+               if info.DepsUpdateCh != nil {
+                       depsUpdateCh = info.DepsUpdateCh
+               }
+               info.DepsLock.Unlock()
+
+               // If we still have no deps channel set, then we're done!
+               if depsCh == nil {
+                       break
+               }
+       }
+
+       // If we passed dependencies, we just want to check once more that
+       // we're not cancelled, since this can happen just as dependencies pass.
+       select {
+       case <-info.CancelCh:
+               // Cancelled during an update while dependencies completed.
+               return
+       default:
+       }
+
+       // Run our callback or note that our upstream failed
+       var err error
+       if depsSuccess {
+               log.Printf("[DEBUG] dag/walk: walking %q", VertexName(v))
+               err = w.Callback(v)
+       } else {
+               log.Printf("[DEBUG] dag/walk: upstream errored, not walking %q", VertexName(v))
+               err = errWalkUpstream
+       }
+
+       // Record the error
+       if err != nil {
+               w.errLock.Lock()
+               defer w.errLock.Unlock()
+
+               if w.errMap == nil {
+                       w.errMap = make(map[Vertex]error)
+               }
+               w.errMap[v] = err
+       }
+}
+
+func (w *Walker) waitDeps(
+       v Vertex,
+       deps map[Vertex]<-chan struct{},
+       doneCh chan<- bool,
+       cancelCh <-chan struct{}) {
+       // For each dependency given to us, wait for it to complete
+       for dep, depCh := range deps {
+       DepSatisfied:
+               for {
+                       select {
+                       case <-depCh:
+                               // Dependency satisfied!
+                               break DepSatisfied
+
+                       case <-cancelCh:
+                               // Wait cancelled. Note that we didn't satisfy dependencies
+                               // so that anything waiting on us also doesn't run.
+                               doneCh <- false
+                               return
+
+                       case <-time.After(time.Second * 5):
+                               log.Printf("[DEBUG] dag/walk: vertex %q, waiting for: %q",
+                                       VertexName(v), VertexName(dep))
+                       }
+               }
+       }
+
+       // Dependencies satisfied! We need to check if any errored
+       w.errLock.Lock()
+       defer w.errLock.Unlock()
+       for dep, _ := range deps {
+               if w.errMap[dep] != nil {
+                       // One of our dependencies failed, so return false
+                       doneCh <- false
+                       return
+               }
+       }
+
+       // All dependencies satisfied and successful
+       doneCh <- true
+}
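A minimal sketch of driving a Walker, assuming AcyclicGraph and BasicEdge from this package. Update populates the walk and Wait blocks until every known vertex has finished:

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/terraform/dag"
)

func main() {
	var g dag.AcyclicGraph
	g.Add("a")
	g.Add("b")
	g.Connect(dag.BasicEdge("a", "b")) // by default "b" (target) waits on "a"

	w := &dag.Walker{
		Callback: func(v dag.Vertex) error {
			fmt.Println("visiting", v)
			return nil
		},
	}

	w.Update(&g)
	if err := w.Wait(); err != nil {
		log.Fatal(err)
	}
}

Setting Reverse would flip which end of each edge does the waiting.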
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/expand.go b/vendor/github.com/hashicorp/terraform/flatmap/expand.go
new file mode 100644 (file)
index 0000000..e0b81b6
--- /dev/null
@@ -0,0 +1,147 @@
+package flatmap
+
+import (
+       "fmt"
+       "sort"
+       "strconv"
+       "strings"
+
+       "github.com/hashicorp/hil"
+)
+
+// Expand takes a map and a key (prefix) and expands that value into
+// a more complex structure. This is the reverse of the Flatten operation.
+func Expand(m map[string]string, key string) interface{} {
+       // If the key is exactly a key in the map, just return it
+       if v, ok := m[key]; ok {
+               if v == "true" {
+                       return true
+               } else if v == "false" {
+                       return false
+               }
+
+               return v
+       }
+
+       // Check if the key is an array, and if so, expand the array
+       if v, ok := m[key+".#"]; ok {
+               // If the count of the key is unknown, just return the unknown
+               // value directly. This will be detected by Terraform core later.
+               if v == hil.UnknownValue {
+                       return v
+               }
+
+               return expandArray(m, key)
+       }
+
+       // Check if this is a prefix in the map
+       prefix := key + "."
+       for k := range m {
+               if strings.HasPrefix(k, prefix) {
+                       return expandMap(m, prefix)
+               }
+       }
+
+       return nil
+}
+
+func expandArray(m map[string]string, prefix string) []interface{} {
+       num, err := strconv.ParseInt(m[prefix+".#"], 0, 0)
+       if err != nil {
+               panic(err)
+       }
+
+       // If the number of elements in this array is 0, then return an
+       // empty slice as there is nothing to expand. Trying to expand it
+       // anyway could lead to crashes as any child maps, arrays or sets
+       // that no longer exist are still shown as empty with a count of 0.
+       if num == 0 {
+               return []interface{}{}
+       }
+
+       // The Schema "Set" type stores its values in an array format, but
+       // using numeric hash values instead of ordinal keys. Take the set
+       // of keys regardless of value, and expand them in numeric order.
+       // See GH-11042 for more details.
+       keySet := map[int]bool{}
+       computed := map[string]bool{}
+       for k := range m {
+               if !strings.HasPrefix(k, prefix+".") {
+                       continue
+               }
+
+               key := k[len(prefix)+1:]
+               idx := strings.Index(key, ".")
+               if idx != -1 {
+                       key = key[:idx]
+               }
+
+               // skip the count value
+               if key == "#" {
+                       continue
+               }
+
+               // strip the computed flag if there is one
+               if strings.HasPrefix(key, "~") {
+                       key = key[1:]
+                       computed[key] = true
+               }
+
+               k, err := strconv.Atoi(key)
+               if err != nil {
+                       panic(err)
+               }
+               keySet[int(k)] = true
+       }
+
+       keysList := make([]int, 0, num)
+       for key := range keySet {
+               keysList = append(keysList, key)
+       }
+       sort.Ints(keysList)
+
+       result := make([]interface{}, num)
+       for i, key := range keysList {
+               keyString := strconv.Itoa(key)
+               if computed[keyString] {
+                       keyString = "~" + keyString
+               }
+               result[i] = Expand(m, fmt.Sprintf("%s.%s", prefix, keyString))
+       }
+
+       return result
+}
+
+func expandMap(m map[string]string, prefix string) map[string]interface{} {
+       // Submaps may not have a '%' key, so we can't count on this value being
+       // here. If we don't have a count, just proceed as if we have a map.
+       if count, ok := m[prefix+"%"]; ok && count == "0" {
+               return map[string]interface{}{}
+       }
+
+       result := make(map[string]interface{})
+       for k := range m {
+               if !strings.HasPrefix(k, prefix) {
+                       continue
+               }
+
+               key := k[len(prefix):]
+               idx := strings.Index(key, ".")
+               if idx != -1 {
+                       key = key[:idx]
+               }
+               if _, ok := result[key]; ok {
+                       continue
+               }
+
+               // skip the map count value
+               if key == "%" {
+                       continue
+               }
+
+               result[key] = Expand(m, k[:len(prefix)+len(key)])
+       }
+
+       return result
+}
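A short sketch of what Expand reconstructs from a flattened map; the keys are hypothetical:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	flat := map[string]string{
		"name":    "web",
		"ports.#": "2",
		"ports.0": "80",
		"ports.1": "443",
	}

	fmt.Println(flatmap.Expand(flat, "name"))  // web
	fmt.Println(flatmap.Expand(flat, "ports")) // [80 443]
}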
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/flatten.go b/vendor/github.com/hashicorp/terraform/flatmap/flatten.go
new file mode 100644 (file)
index 0000000..9ff6e42
--- /dev/null
@@ -0,0 +1,71 @@
+package flatmap
+
+import (
+       "fmt"
+       "reflect"
+)
+
+// Flatten takes a structure and turns it into a flat map[string]string.
+//
+// Within the "thing" parameter, only primitive values are allowed. Structs are
+// not supported. Therefore, it can only be slices, maps, primitives, and
+// any combination of those together.
+//
+// See the tests for examples of what inputs are turned into.
+func Flatten(thing map[string]interface{}) Map {
+       result := make(map[string]string)
+
+       for k, raw := range thing {
+               flatten(result, k, reflect.ValueOf(raw))
+       }
+
+       return Map(result)
+}
+
+func flatten(result map[string]string, prefix string, v reflect.Value) {
+       if v.Kind() == reflect.Interface {
+               v = v.Elem()
+       }
+
+       switch v.Kind() {
+       case reflect.Bool:
+               if v.Bool() {
+                       result[prefix] = "true"
+               } else {
+                       result[prefix] = "false"
+               }
+       case reflect.Int:
+               result[prefix] = fmt.Sprintf("%d", v.Int())
+       case reflect.Map:
+               flattenMap(result, prefix, v)
+       case reflect.Slice:
+               flattenSlice(result, prefix, v)
+       case reflect.String:
+               result[prefix] = v.String()
+       default:
+               panic(fmt.Sprintf("Unknown: %s", v))
+       }
+}
+
+func flattenMap(result map[string]string, prefix string, v reflect.Value) {
+       for _, k := range v.MapKeys() {
+               if k.Kind() == reflect.Interface {
+                       k = k.Elem()
+               }
+
+               if k.Kind() != reflect.String {
+                       panic(fmt.Sprintf("%s: map key is not string: %s", prefix, k))
+               }
+
+               flatten(result, fmt.Sprintf("%s.%s", prefix, k.String()), v.MapIndex(k))
+       }
+}
+
+func flattenSlice(result map[string]string, prefix string, v reflect.Value) {
+       prefix = prefix + "."
+
+       result[prefix+"#"] = fmt.Sprintf("%d", v.Len())
+       for i := 0; i < v.Len(); i++ {
+               flatten(result, fmt.Sprintf("%s%d", prefix, i), v.Index(i))
+       }
+}
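And the corresponding Flatten direction, a short sketch with a hypothetical input:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	flat := flatmap.Flatten(map[string]interface{}{
		"name":  "web",
		"ports": []interface{}{80, 443},
	})

	// Produces: name=web, ports.#=2, ports.0=80, ports.1=443
	for k, v := range flat {
		fmt.Printf("%s=%s\n", k, v)
	}
}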
diff --git a/vendor/github.com/hashicorp/terraform/flatmap/map.go b/vendor/github.com/hashicorp/terraform/flatmap/map.go
new file mode 100644 (file)
index 0000000..46b72c4
--- /dev/null
@@ -0,0 +1,82 @@
+package flatmap
+
+import (
+       "strings"
+)
+
+// Map is a wrapper around map[string]string that provides some helpers
+// above it that assume the map is in the format that flatmap expects
+// (the result of Flatten).
+//
+// All modifying functions such as Delete are done in-place unless
+// otherwise noted.
+type Map map[string]string
+
+// Contains returns true if the map contains the given key.
+func (m Map) Contains(key string) bool {
+       for _, k := range m.Keys() {
+               if k == key {
+                       return true
+               }
+       }
+
+       return false
+}
+
+// Delete deletes a key out of the map with the given prefix.
+func (m Map) Delete(prefix string) {
+       for k, _ := range m {
+               match := k == prefix
+               if !match {
+                       if !strings.HasPrefix(k, prefix) {
+                               continue
+                       }
+
+                       if k[len(prefix):len(prefix)+1] != "." {
+                               continue
+                       }
+               }
+
+               delete(m, k)
+       }
+}
+
+// Keys returns all of the top-level keys in this map
+func (m Map) Keys() []string {
+       ks := make(map[string]struct{})
+       for k, _ := range m {
+               idx := strings.Index(k, ".")
+               if idx == -1 {
+                       idx = len(k)
+               }
+
+               ks[k[:idx]] = struct{}{}
+       }
+
+       result := make([]string, 0, len(ks))
+       for k, _ := range ks {
+               result = append(result, k)
+       }
+
+       return result
+}
+
+// Merge merges the contents of the other Map into this one.
+//
+// This merge is smarter than a simple map iteration because it
+// will fully replace arrays and other complex structures that
+// are present in this map with the other map's. For example, if
+// this map has a 3 element "foo" list, and m2 has a 2 element "foo"
+// list, then the result will be that m has a 2 element "foo"
+// list.
+func (m Map) Merge(m2 Map) {
+       for _, prefix := range m2.Keys() {
+               m.Delete(prefix)
+
+               for k, v := range m2 {
+                       if strings.HasPrefix(k, prefix) {
+                               m[k] = v
+                       }
+               }
+       }
+}
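A sketch of the list-replacing behavior Merge describes above, with hypothetical keys; the stale foo.2 entry does not survive the merge:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/flatmap"
)

func main() {
	m := flatmap.Map{"foo.#": "3", "foo.0": "a", "foo.1": "b", "foo.2": "c"}
	m2 := flatmap.Map{"foo.#": "2", "foo.0": "x", "foo.1": "y"}

	m.Merge(m2)
	fmt.Println(len(m), m["foo.#"]) // 3 entries total, foo.# == "2"
}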
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go b/vendor/github.com/hashicorp/terraform/helper/acctest/acctest.go
new file mode 100644 (file)
index 0000000..9d31031
--- /dev/null
@@ -0,0 +1,2 @@
+// Package acctest contains helpers for Terraform Acceptance Tests
+package acctest
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/random.go b/vendor/github.com/hashicorp/terraform/helper/acctest/random.go
new file mode 100644 (file)
index 0000000..3ddc078
--- /dev/null
@@ -0,0 +1,93 @@
+package acctest
+
+import (
+       "bytes"
+       crand "crypto/rand"
+       "crypto/rsa"
+       "crypto/x509"
+       "encoding/pem"
+       "fmt"
+       "math/rand"
+       "strings"
+       "time"
+
+       "golang.org/x/crypto/ssh"
+)
+
+// Helpers for generating random tidbits for use in identifiers to prevent
+// collisions in acceptance tests.
+
+// RandInt generates a random integer
+func RandInt() int {
+       reseed()
+       return rand.New(rand.NewSource(time.Now().UnixNano())).Int()
+}
+
+// RandomWithPrefix is used to generate a unique name with a prefix, for
+// randomizing names in acceptance tests
+func RandomWithPrefix(name string) string {
+       reseed()
+       return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+}
+
+// RandIntRange returns a random integer in the half-open range [min, max)
+func RandIntRange(min int, max int) int {
+       reseed()
+       source := rand.New(rand.NewSource(time.Now().UnixNano()))
+       rangeMax := max - min
+
+       return min + int(source.Int31n(int32(rangeMax)))
+}
+
+// RandString generates a random alphanumeric string of the length specified
+func RandString(strlen int) string {
+       return RandStringFromCharSet(strlen, CharSetAlphaNum)
+}
+
+// RandStringFromCharSet generates a random string by selecting characters from
+// the charset provided
+func RandStringFromCharSet(strlen int, charSet string) string {
+       reseed()
+       result := make([]byte, strlen)
+       for i := 0; i < strlen; i++ {
+               result[i] = charSet[rand.Intn(len(charSet))]
+       }
+       return string(result)
+}
+
+// RandSSHKeyPair generates a public and private SSH key pair. The public key is
+// returned in OpenSSH format, and the private key is PEM encoded.
+func RandSSHKeyPair(comment string) (string, string, error) {
+       privateKey, err := rsa.GenerateKey(crand.Reader, 1024)
+       if err != nil {
+               return "", "", err
+       }
+
+       var privateKeyBuffer bytes.Buffer
+       privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)}
+       if err := pem.Encode(&privateKeyBuffer, privateKeyPEM); err != nil {
+               return "", "", err
+       }
+
+       publicKey, err := ssh.NewPublicKey(&privateKey.PublicKey)
+       if err != nil {
+               return "", "", err
+       }
+       keyMaterial := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(publicKey)))
+       return fmt.Sprintf("%s %s", keyMaterial, comment), privateKeyBuffer.String(), nil
+}
+
+// Seeds random with current timestamp
+func reseed() {
+       rand.Seed(time.Now().UTC().UnixNano())
+}
+
+const (
+       // CharSetAlphaNum is the alphanumeric character set for use with
+       // RandStringFromCharSet
+       CharSetAlphaNum = "abcdefghijklmnopqrstuvwxyz0123456789"
+
+       // CharSetAlpha is the alphabetical character set for use with
+       // RandStringFromCharSet
+       CharSetAlpha = "abcdefghijklmnopqrstuvwxyz"
+)
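A hypothetical acceptance test showing the typical use of these helpers; the test and resource names are made up:

package example

import (
	"testing"

	"github.com/hashicorp/terraform/helper/acctest"
)

func TestAccWidget_basic(t *testing.T) {
	// Random names keep parallel test runs from colliding on real resources.
	name := acctest.RandomWithPrefix("tf-acc-test")
	token := acctest.RandStringFromCharSet(12, acctest.CharSetAlpha)

	t.Logf("using name %s and token %s", name, token)
}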
diff --git a/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go b/vendor/github.com/hashicorp/terraform/helper/acctest/remotetests.go
new file mode 100644 (file)
index 0000000..87c60b8
--- /dev/null
@@ -0,0 +1,27 @@
+package acctest
+
+import (
+       "net/http"
+       "os"
+       "testing"
+)
+
+// SkipRemoteTestsEnvVar is an environment variable that can be set by a user
+// running the tests in an environment with limited network connectivity. By
+// default, tests requiring internet connectivity make an effort to skip if no
+// internet is available, but in some cases the smoke test will pass even
+// though the test should still be skipped.
+const SkipRemoteTestsEnvVar = "TF_SKIP_REMOTE_TESTS"
+
+// RemoteTestPrecheck is meant to be run by any unit test that requires
+// outbound internet connectivity. The test will be skipped if it's
+// unavailable.
+func RemoteTestPrecheck(t *testing.T) {
+       if os.Getenv(SkipRemoteTestsEnvVar) != "" {
+               t.Skipf("skipping test, %s was set", SkipRemoteTestsEnvVar)
+       }
+
+       if _, err := http.Get("http://google.com"); err != nil {
+               t.Skipf("skipping, internet seems to not be available: %s", err)
+       }
+}
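A hypothetical caller, showing the intended placement of the precheck at the top of a network-dependent test:

package example

import (
	"testing"

	"github.com/hashicorp/terraform/helper/acctest"
)

func TestFetchRemoteModule(t *testing.T) {
	// Skips when TF_SKIP_REMOTE_TESTS is set, or when the smoke check fails.
	acctest.RemoteTestPrecheck(t)

	// ... test body that needs outbound network access ...
}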
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/decode.go b/vendor/github.com/hashicorp/terraform/helper/config/decode.go
new file mode 100644 (file)
index 0000000..f470c9b
--- /dev/null
@@ -0,0 +1,28 @@
+package config
+
+import (
+       "github.com/mitchellh/mapstructure"
+)
+
+func Decode(target interface{}, raws ...interface{}) (*mapstructure.Metadata, error) {
+       var md mapstructure.Metadata
+       decoderConfig := &mapstructure.DecoderConfig{
+               Metadata:         &md,
+               Result:           target,
+               WeaklyTypedInput: true,
+       }
+
+       decoder, err := mapstructure.NewDecoder(decoderConfig)
+       if err != nil {
+               return nil, err
+       }
+
+       for _, raw := range raws {
+               err := decoder.Decode(raw)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return &md, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/config/validator.go b/vendor/github.com/hashicorp/terraform/helper/config/validator.go
new file mode 100644 (file)
index 0000000..1a6e023
--- /dev/null
@@ -0,0 +1,214 @@
+package config
+
+import (
+       "fmt"
+       "strconv"
+       "strings"
+
+       "github.com/hashicorp/terraform/flatmap"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// Validator is a helper that helps you validate the configuration
+// of your resource, resource provider, etc.
+//
+// At the most basic level, set the Required and Optional lists to be
+// specifiers of keys that are required or optional. If a key shows up
+// that isn't in one of these two lists, then an error is generated.
+//
+// The "specifiers" allowed here form a fairly rich syntax to help
+// describe the format of your configuration:
+//
+//   * Basic keys are just strings. For example: "foo" will match the
+//       "foo" key.
+//
+//   * Nested structure keys can be matched by doing
+//       "listener.*.foo". This will verify that there is at least one
+//       listener element that has the "foo" key set.
+//
+//   * The existence of a nested structure can be checked by simply
+//       doing "listener.*" which will verify that there is at least
+//       one element in the "listener" structure. This is NOT
+//       validating that "listener" is an array. It is validating
+//       that it is a nested structure in the configuration.
+//
+type Validator struct {
+       Required []string
+       Optional []string
+}
+
+func (v *Validator) Validate(
+       c *terraform.ResourceConfig) (ws []string, es []error) {
+       // Flatten the configuration so it is easier to reason about
+       flat := flatmap.Flatten(c.Raw)
+
+       keySet := make(map[string]validatorKey)
+       for i, vs := range [][]string{v.Required, v.Optional} {
+               req := i == 0
+               for _, k := range vs {
+                       vk, err := newValidatorKey(k, req)
+                       if err != nil {
+                               es = append(es, err)
+                               continue
+                       }
+
+                       keySet[k] = vk
+               }
+       }
+
+       purged := make([]string, 0)
+       for _, kv := range keySet {
+               p, w, e := kv.Validate(flat)
+               if len(w) > 0 {
+                       ws = append(ws, w...)
+               }
+               if len(e) > 0 {
+                       es = append(es, e...)
+               }
+
+               purged = append(purged, p...)
+       }
+
+       // Delete all the keys we processed in order to find
+       // the unknown keys.
+       for _, p := range purged {
+               delete(flat, p)
+       }
+
+       // The rest are unknown
+       for k, _ := range flat {
+               es = append(es, fmt.Errorf("Unknown configuration: %s", k))
+       }
+
+       return
+}
+
+type validatorKey interface {
+       // Validate validates the given configuration and returns viewed keys,
+       // warnings, and errors.
+       Validate(map[string]string) ([]string, []string, []error)
+}
+
+func newValidatorKey(k string, req bool) (validatorKey, error) {
+       var result validatorKey
+
+       parts := strings.Split(k, ".")
+       if len(parts) > 1 && parts[1] == "*" {
+               result = &nestedValidatorKey{
+                       Parts:    parts,
+                       Required: req,
+               }
+       } else {
+               result = &basicValidatorKey{
+                       Key:      k,
+                       Required: req,
+               }
+       }
+
+       return result, nil
+}
+
+// basicValidatorKey validates keys that are basic such as "foo"
+type basicValidatorKey struct {
+       Key      string
+       Required bool
+}
+
+func (v *basicValidatorKey) Validate(
+       m map[string]string) ([]string, []string, []error) {
+       for k, _ := range m {
+               // If we have the exact key, it's a match
+               if k == v.Key {
+                       return []string{k}, nil, nil
+               }
+       }
+
+       if !v.Required {
+               return nil, nil, nil
+       }
+
+       return nil, nil, []error{fmt.Errorf(
+               "Key not found: %s", v.Key)}
+}
+
+type nestedValidatorKey struct {
+       Parts    []string
+       Required bool
+}
+
+func (v *nestedValidatorKey) validate(
+       m map[string]string,
+       prefix string,
+       offset int) ([]string, []string, []error) {
+       if offset >= len(v.Parts) {
+               // We're at the end. Look for a specific key.
+               v2 := &basicValidatorKey{Key: prefix, Required: v.Required}
+               return v2.Validate(m)
+       }
+
+       current := v.Parts[offset]
+
+       // If we're at offset 0, special case to start at the next one.
+       if offset == 0 {
+               return v.validate(m, current, offset+1)
+       }
+
+       // Determine if we're doing a "for all" or a specific key
+       if current != "*" {
+               // We're looking at a specific key, continue on.
+               return v.validate(m, prefix+"."+current, offset+1)
+       }
+
+       // We're doing a "for all", so we loop over.
+       countStr, ok := m[prefix+".#"]
+       if !ok {
+               if !v.Required {
+                       // It wasn't required, so it's no problem.
+                       return nil, nil, nil
+               }
+
+               return nil, nil, []error{fmt.Errorf(
+                       "Key not found: %s", prefix)}
+       }
+
+       count, err := strconv.ParseInt(countStr, 0, 0)
+       if err != nil {
+               // This shouldn't happen if flatmap works properly
+               panic("invalid flatmap array")
+       }
+
+       var e []error
+       var w []string
+       u := make([]string, 1, count+1)
+       u[0] = prefix + ".#"
+       for i := 0; i < int(count); i++ {
+               prefix := fmt.Sprintf("%s.%d", prefix, i)
+
+               // Mark that we saw this specific key
+               u = append(u, prefix)
+
+               // Mark all prefixes of this
+               for k, _ := range m {
+                       if !strings.HasPrefix(k, prefix+".") {
+                               continue
+                       }
+                       u = append(u, k)
+               }
+
+               // If we have more parts, then validate deeper
+               if offset+1 < len(v.Parts) {
+                       u2, w2, e2 := v.validate(m, prefix, offset+1)
+
+                       u = append(u, u2...)
+                       w = append(w, w2...)
+                       e = append(e, e2...)
+               }
+       }
+
+       return u, w, e
+}
+
+func (v *nestedValidatorKey) Validate(
+       m map[string]string) ([]string, []string, []error) {
+       return v.validate(m, "", 0)
+}
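A minimal sketch of the Validator in use. It assumes ResourceConfig's Raw field can be populated directly for illustration; the keys and specifiers are made up:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/config"
	"github.com/hashicorp/terraform/terraform"
)

func main() {
	v := &config.Validator{
		Required: []string{"name", "listener.*.port"},
		Optional: []string{"tags"},
	}

	rc := &terraform.ResourceConfig{
		Raw: map[string]interface{}{
			"name": "web",
			"listener": []interface{}{
				map[string]interface{}{"port": 80},
			},
		},
	}

	ws, es := v.Validate(rc)
	fmt.Println(ws, es) // expect no warnings and no errors
}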
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go b/vendor/github.com/hashicorp/terraform/helper/experiment/experiment.go
new file mode 100644 (file)
index 0000000..18b8837
--- /dev/null
@@ -0,0 +1,154 @@
+// Package experiment contains helper functions for tracking experimental
+// features throughout Terraform.
+//
+// This package should be used for creating, enabling, querying, and deleting
+// experimental features. By unifying all of that onto a single interface,
+// we can have the Go compiler help us by enforcing every place we touch
+// an experimental feature.
+//
+// To create a new experiment:
+//
+//   1. Add the experiment to the global vars list below, prefixed with X_
+//
+//   2. Add the experiment variable to the All list in the init() function
+//
+//   3. Use it!
+//
+// To remove an experiment:
+//
+//   1. Delete the experiment global var.
+//
+//   2. Try to compile and fix all the places where the var was referenced.
+//
+// To use an experiment:
+//
+//   1. Use Flag() if you want the experiment to be available from the CLI.
+//
+//   2. Use Enabled() to check whether it is enabled.
+//
+// As a general user:
+//
+//   1. The `-Xexperiment-name` flag
+//   2. The `TF_X_<experiment-name>` env var.
+//   3. The `TF_X_FORCE` env var can be set to force an experimental feature
+//      without human verification.
+//
+package experiment
+
+import (
+       "flag"
+       "fmt"
+       "os"
+       "strconv"
+       "strings"
+       "sync"
+)
+
+// The experiments that are available are listed below. Any package in
+// Terraform defining an experiment should define the experiments below.
+// By keeping them all within the experiment package we force a single point
+// of definition and use. This allows the compiler to enforce references
+// so it becomes easy to remove the features.
+var (
+       // Shadow graph. This is already on by default. Disabling it will be
+       // allowed for awhile in order for it to not block operations.
+       X_shadow = newBasicID("shadow", "SHADOW", false)
+)
+
+// Global variables this package uses because we are a package
+// with global state.
+var (
+       // All is the list of all experiments. Do not modify this.
+       All []ID
+
+       // enabled keeps track of what flags have been enabled
+       enabled     map[string]bool
+       enabledLock sync.Mutex
+
+       // Hidden "experiment" that forces all others to be on without verification
+       x_force = newBasicID("force", "FORCE", false)
+)
+
+func init() {
+       // The list of all experiments, update this when an experiment is added.
+       All = []ID{
+               X_shadow,
+               x_force,
+       }
+
+       // Load
+       reload()
+}
+
+// reload is used by tests to reload the global state. It is also called
+// by init().
+func reload() {
+       // Initialize
+       enabledLock.Lock()
+       enabled = make(map[string]bool)
+       enabledLock.Unlock()
+
+       // Set defaults and check env vars
+       for _, id := range All {
+               // Get the default value
+               def := id.Default()
+
+               // If the env var is set, it overrides the default
+               // (any value other than "0" enables)
+               key := fmt.Sprintf("TF_X_%s", strings.ToUpper(id.Env()))
+               if v := os.Getenv(key); v != "" {
+                       def = v != "0"
+               }
+
+               // Set the default
+               SetEnabled(id, def)
+       }
+}
+
+// Enabled returns whether an experiment has been enabled or not.
+func Enabled(id ID) bool {
+       enabledLock.Lock()
+       defer enabledLock.Unlock()
+       return enabled[id.Flag()]
+}
+
+// SetEnabled sets an experiment to enabled/disabled. Please check with
+// the experiment docs for when calling this actually affects the experiment.
+func SetEnabled(id ID, v bool) {
+       enabledLock.Lock()
+       defer enabledLock.Unlock()
+       enabled[id.Flag()] = v
+}
+
+// Force returns true if the -Xforce or TF_X_FORCE flag is present, which
+// advises users of this package to not verify with the user that they want
+// experimental behavior and to just continue with it.
+func Force() bool {
+       return Enabled(x_force)
+}
+
+// Flag configures the given FlagSet with the flags to configure
+// all active experiments.
+func Flag(fs *flag.FlagSet) {
+       for _, id := range All {
+               desc := id.Flag()
+               key := fmt.Sprintf("X%s", id.Flag())
+               fs.Var(&idValue{X: id}, key, desc)
+       }
+}
+
+// idValue implements flag.Value for setting the enabled/disabled state
+// of an experiment from the CLI.
+type idValue struct {
+       X ID
+}
+
+func (v *idValue) IsBoolFlag() bool { return true }
+func (v *idValue) String() string   { return strconv.FormatBool(Enabled(v.X)) }
+func (v *idValue) Set(raw string) error {
+       b, err := strconv.ParseBool(raw)
+       if err == nil {
+               SetEnabled(v.X, b)
+       }
+
+       return err
+}
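A minimal sketch of wiring the experiment flags into a CLI and querying one, using the exported X_shadow experiment above:

package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/hashicorp/terraform/helper/experiment"
)

func main() {
	// Flag registers one -X<name> boolean flag per experiment, e.g. -Xshadow.
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	experiment.Flag(fs)
	fs.Parse(os.Args[1:])

	if experiment.Enabled(experiment.X_shadow) {
		fmt.Println("shadow graph experiment enabled")
	}
}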
diff --git a/vendor/github.com/hashicorp/terraform/helper/experiment/id.go b/vendor/github.com/hashicorp/terraform/helper/experiment/id.go
new file mode 100644 (file)
index 0000000..8e2f707
--- /dev/null
@@ -0,0 +1,34 @@
+package experiment
+
+// ID represents an experimental feature.
+//
+// The global vars defined on this package should be used as ID values.
+// This interface is purposely not implement-able outside of this package
+// so that we can rely on the Go compiler to enforce all experiment references.
+type ID interface {
+       Env() string
+       Flag() string
+       Default() bool
+
+       unexported() // So the ID can't be implemented externally.
+}
+
+// basicID implements ID.
+type basicID struct {
+       EnvValue     string
+       FlagValue    string
+       DefaultValue bool
+}
+
+func newBasicID(flag, env string, def bool) ID {
+       return &basicID{
+               EnvValue:     env,
+               FlagValue:    flag,
+               DefaultValue: def,
+       }
+}
+
+func (id *basicID) Env() string   { return id.EnvValue }
+func (id *basicID) Flag() string  { return id.FlagValue }
+func (id *basicID) Default() bool { return id.DefaultValue }
+func (id *basicID) unexported()   {}
diff --git a/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go b/vendor/github.com/hashicorp/terraform/helper/hashcode/hashcode.go
new file mode 100644 (file)
index 0000000..64d8263
--- /dev/null
@@ -0,0 +1,22 @@
+package hashcode
+
+import (
+       "hash/crc32"
+)
+
+// String hashes a string to a unique hashcode.
+//
+// crc32 returns a uint32, but for our use we need
+// a non-negative integer. Here we cast to an integer
+// and negate it if the result is negative.
+func String(s string) int {
+       v := int(crc32.ChecksumIEEE([]byte(s)))
+       if v >= 0 {
+               return v
+       }
+       if -v >= 0 {
+               return -v
+       }
+       // v == MinInt
+       return 0
+}
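A short sketch of the contract: the same input always yields the same non-negative code, which is what set implementations need for element indexing:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hashcode"
)

func main() {
	fmt.Println(hashcode.String("us-east-1a"))
	fmt.Println(hashcode.String("us-east-1a")) // identical to the line above
	fmt.Println(hashcode.String("us-east-1b")) // a different code
}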
diff --git a/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go b/vendor/github.com/hashicorp/terraform/helper/hilmapstructure/hilmapstructure.go
new file mode 100644 (file)
index 0000000..67be1df
--- /dev/null
@@ -0,0 +1,41 @@
+package hilmapstructure
+
+import (
+       "fmt"
+       "reflect"
+
+       "github.com/mitchellh/mapstructure"
+)
+
+var hilMapstructureDecodeHookEmptySlice []interface{}
+var hilMapstructureDecodeHookStringSlice []string
+var hilMapstructureDecodeHookEmptyMap map[string]interface{}
+
+// WeakDecode behaves in the same way as mapstructure.WeakDecode but has a
+// DecodeHook which defeats the backward compatibility mode of mapstructure
+// which WeakDecodes []interface{}{} into an empty map[string]interface{}. This
+// allows us to use WeakDecode (desirable), but not fail on empty lists.
+func WeakDecode(m interface{}, rawVal interface{}) error {
+       config := &mapstructure.DecoderConfig{
+               DecodeHook: func(source reflect.Type, target reflect.Type, val interface{}) (interface{}, error) {
+                       sliceType := reflect.TypeOf(hilMapstructureDecodeHookEmptySlice)
+                       stringSliceType := reflect.TypeOf(hilMapstructureDecodeHookStringSlice)
+                       mapType := reflect.TypeOf(hilMapstructureDecodeHookEmptyMap)
+
+                       if (source == sliceType || source == stringSliceType) && target == mapType {
+                               return nil, fmt.Errorf("Cannot convert a []interface{} into a map[string]interface{}")
+                       }
+
+                       return val, nil
+               },
+               WeaklyTypedInput: true,
+               Result:           rawVal,
+       }
+
+       decoder, err := mapstructure.NewDecoder(config)
+       if err != nil {
+               return err
+       }
+
+       return decoder.Decode(m)
+}
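A short sketch contrasting normal weak decoding with the list-to-map case the hook rejects:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hilmapstructure"
)

func main() {
	// Weak typing still applies, as with mapstructure.WeakDecode.
	var n int
	fmt.Println(hilmapstructure.WeakDecode("42", &n), n) // <nil> 42

	// But an empty list is no longer silently accepted as an empty map.
	var m map[string]interface{}
	fmt.Println(hilmapstructure.WeakDecode([]interface{}{}, &m)) // error
}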
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/logging.go b/vendor/github.com/hashicorp/terraform/helper/logging/logging.go
new file mode 100644 (file)
index 0000000..433cd77
--- /dev/null
@@ -0,0 +1,100 @@
+package logging
+
+import (
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "strings"
+       "syscall"
+
+       "github.com/hashicorp/logutils"
+)
+
+// These are the environmental variables that determine if we log, and if
+// we log whether or not the log should go to a file.
+const (
+       EnvLog     = "TF_LOG"      // Set to True
+       EnvLogFile = "TF_LOG_PATH" // Set to a file
+)
+
+var validLevels = []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}
+
+// LogOutput determines where we should send logs (if anywhere) and the log level.
+func LogOutput() (logOutput io.Writer, err error) {
+       logOutput = ioutil.Discard
+
+       logLevel := LogLevel()
+       if logLevel == "" {
+               return
+       }
+
+       logOutput = os.Stderr
+       if logPath := os.Getenv(EnvLogFile); logPath != "" {
+               var err error
+               logOutput, err = os.OpenFile(logPath, syscall.O_CREAT|syscall.O_RDWR|syscall.O_APPEND, 0666)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // This was the default since the beginning
+       logOutput = &logutils.LevelFilter{
+               Levels:   validLevels,
+               MinLevel: logutils.LogLevel(logLevel),
+               Writer:   logOutput,
+       }
+
+       return
+}
+
+// SetOutput checks for a log destination with LogOutput, and calls
+// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses
+// ioutil.Discard. Any error from LogOutput is fatal.
+func SetOutput() {
+       out, err := LogOutput()
+       if err != nil {
+               log.Fatal(err)
+       }
+
+       if out == nil {
+               out = ioutil.Discard
+       }
+
+       log.SetOutput(out)
+}
+
+// LogLevel returns the current log level string based on the environment vars.
+func LogLevel() string {
+       envLevel := os.Getenv(EnvLog)
+       if envLevel == "" {
+               return ""
+       }
+
+       logLevel := "TRACE"
+       if isValidLogLevel(envLevel) {
+               // allow following for better ux: info, Info or INFO
+               logLevel = strings.ToUpper(envLevel)
+       } else {
+               log.Printf("[WARN] Invalid log level: %q. Defaulting to level: TRACE. Valid levels are: %+v",
+                       envLevel, validLevels)
+       }
+
+       return logLevel
+}
+
+// IsDebugOrHigher returns whether the current log level is DEBUG or TRACE.
+func IsDebugOrHigher() bool {
+       level := LogLevel()
+       return level == "DEBUG" || level == "TRACE"
+}
+
+func isValidLogLevel(level string) bool {
+       for _, l := range validLevels {
+               if strings.ToUpper(level) == string(l) {
+                       return true
+               }
+       }
+
+       return false
+}
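+
+// Usage sketch (illustrative): a plugin binary would typically wire this up
+// once at startup; TF_LOG selects the level and TF_LOG_PATH an optional file:
+//
+//        // TF_LOG=DEBUG TF_LOG_PATH=/tmp/tf.log terraform-provider-statuscake
+//        logging.SetOutput()
+//        log.Printf("[DEBUG] provider configured")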
diff --git a/vendor/github.com/hashicorp/terraform/helper/logging/transport.go b/vendor/github.com/hashicorp/terraform/helper/logging/transport.go
new file mode 100644 (file)
index 0000000..4477924
--- /dev/null
@@ -0,0 +1,53 @@
+package logging
+
+import (
+       "log"
+       "net/http"
+       "net/http/httputil"
+)
+
+type transport struct {
+       name      string
+       transport http.RoundTripper
+}
+
+func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
+       if IsDebugOrHigher() {
+               reqData, err := httputil.DumpRequestOut(req, true)
+               if err == nil {
+                       log.Printf("[DEBUG] "+logReqMsg, t.name, string(reqData))
+               } else {
+                       log.Printf("[ERROR] %s API Request error: %#v", t.name, err)
+               }
+       }
+
+       resp, err := t.transport.RoundTrip(req)
+       if err != nil {
+               return resp, err
+       }
+
+       if IsDebugOrHigher() {
+               respData, err := httputil.DumpResponse(resp, true)
+               if err == nil {
+                       log.Printf("[DEBUG] "+logRespMsg, t.name, string(respData))
+               } else {
+                       log.Printf("[ERROR] %s API Response error: %#v", t.name, err)
+               }
+       }
+
+       return resp, nil
+}
+
+func NewTransport(name string, t http.RoundTripper) *transport {
+       return &transport{name, t}
+}
+
+const logReqMsg = `%s API Request Details:
+---[ REQUEST ]---------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespMsg = `%s API Response Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
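+
+// Usage sketch (illustrative; the client name is arbitrary): wrap an API
+// client's RoundTripper so request/response dumps appear when the log level
+// is DEBUG or TRACE:
+//
+//        httpClient := &http.Client{
+//                Transport: logging.NewTransport("StatusCake", http.DefaultTransport),
+//        }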
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/error.go b/vendor/github.com/hashicorp/terraform/helper/resource/error.go
new file mode 100644 (file)
index 0000000..7ee2161
--- /dev/null
@@ -0,0 +1,79 @@
+package resource
+
+import (
+       "fmt"
+       "strings"
+       "time"
+)
+
+type NotFoundError struct {
+       LastError    error
+       LastRequest  interface{}
+       LastResponse interface{}
+       Message      string
+       Retries      int
+}
+
+func (e *NotFoundError) Error() string {
+       if e.Message != "" {
+               return e.Message
+       }
+
+       if e.Retries > 0 {
+               return fmt.Sprintf("couldn't find resource (%d retries)", e.Retries)
+       }
+
+       return "couldn't find resource"
+}
+
+// UnexpectedStateError is returned when Refresh returns a state that's neither in Target nor Pending
+type UnexpectedStateError struct {
+       LastError     error
+       State         string
+       ExpectedState []string
+}
+
+func (e *UnexpectedStateError) Error() string {
+       return fmt.Sprintf(
+               "unexpected state '%s', wanted target '%s'. last error: %s",
+               e.State,
+               strings.Join(e.ExpectedState, ", "),
+               e.LastError,
+       )
+}
+
+// TimeoutError is returned when WaitForState times out
+type TimeoutError struct {
+       LastError     error
+       LastState     string
+       Timeout       time.Duration
+       ExpectedState []string
+}
+
+func (e *TimeoutError) Error() string {
+       expectedState := "resource to be gone"
+       if len(e.ExpectedState) > 0 {
+               expectedState = fmt.Sprintf("state to become '%s'", strings.Join(e.ExpectedState, ", "))
+       }
+
+       extraInfo := make([]string, 0)
+       if e.LastState != "" {
+               extraInfo = append(extraInfo, fmt.Sprintf("last state: '%s'", e.LastState))
+       }
+       if e.Timeout > 0 {
+               extraInfo = append(extraInfo, fmt.Sprintf("timeout: %s", e.Timeout.String()))
+       }
+
+       suffix := ""
+       if len(extraInfo) > 0 {
+               suffix = fmt.Sprintf(" (%s)", strings.Join(extraInfo, ", "))
+       }
+
+       if e.LastError != nil {
+               return fmt.Sprintf("timeout while waiting for %s%s: %s",
+                       expectedState, suffix, e.LastError)
+       }
+
+       return fmt.Sprintf("timeout while waiting for %s%s",
+               expectedState, suffix)
+}
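+
+// Usage sketch (illustrative; conf is a hypothetical StateChangeConf):
+// callers of WaitForState can type-switch on these errors to distinguish a
+// timeout from an unexpected state:
+//
+//        if _, err := conf.WaitForState(); err != nil {
+//                switch err.(type) {
+//                case *resource.TimeoutError:
+//                        // the target state was never reached in time
+//                case *resource.UnexpectedStateError:
+//                        // Refresh returned a state outside Pending/Target
+//                }
+//        }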
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/id.go b/vendor/github.com/hashicorp/terraform/helper/resource/id.go
new file mode 100644 (file)
index 0000000..629582b
--- /dev/null
@@ -0,0 +1,39 @@
+package resource
+
+import (
+       "crypto/rand"
+       "fmt"
+       "math/big"
+       "sync"
+)
+
+const UniqueIdPrefix = `terraform-`
+
+// idCounter is a randomly seeded monotonic counter for generating ordered
+// unique ids.  It uses a big.Int so we can easily increment a long numeric
+// string.  The max possible hex value here with 12 random bytes is
+// "01000000000000000000000000", so there's no chance of rollover during
+// operation.
+var idMutex sync.Mutex
+var idCounter = big.NewInt(0).SetBytes(randomBytes(12))
+
+// Helper for a resource to generate a unique identifier w/ default prefix
+func UniqueId() string {
+       return PrefixedUniqueId(UniqueIdPrefix)
+}
+
+// Helper for a resource to generate a unique identifier w/ given prefix
+//
+// After the prefix, the ID consists of an incrementing 26-digit hex value (to match
+// previous timestamp output).
+func PrefixedUniqueId(prefix string) string {
+       idMutex.Lock()
+       defer idMutex.Unlock()
+       return fmt.Sprintf("%s%026x", prefix, idCounter.Add(idCounter, big.NewInt(1)))
+}
+
+func randomBytes(n int) []byte {
+       b := make([]byte, n)
+       rand.Read(b)
+       return b
+}
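+
+// Usage sketch (illustrative; d is a hypothetical *schema.ResourceData and
+// the prefix is arbitrary): generating an ID for a resource whose API does
+// not supply one:
+//
+//        d.SetId(resource.PrefixedUniqueId("statuscake-"))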
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/map.go b/vendor/github.com/hashicorp/terraform/helper/resource/map.go
new file mode 100644 (file)
index 0000000..a465136
--- /dev/null
@@ -0,0 +1,140 @@
+package resource
+
+import (
+       "fmt"
+       "sort"
+
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// Map is a map of resources that are supported, and provides helpers for
+// more easily implementing a ResourceProvider.
+type Map struct {
+       Mapping map[string]Resource
+}
+
+func (m *Map) Validate(
+       t string, c *terraform.ResourceConfig) ([]string, []error) {
+       r, ok := m.Mapping[t]
+       if !ok {
+               return nil, []error{fmt.Errorf("Unknown resource type: %s", t)}
+       }
+
+       // If there is no validator set, then it is valid
+       if r.ConfigValidator == nil {
+               return nil, nil
+       }
+
+       return r.ConfigValidator.Validate(c)
+}
+
+// Apply performs a create or update depending on the diff, and calls
+// the proper function on the matching Resource.
+func (m *Map) Apply(
+       info *terraform.InstanceInfo,
+       s *terraform.InstanceState,
+       d *terraform.InstanceDiff,
+       meta interface{}) (*terraform.InstanceState, error) {
+       r, ok := m.Mapping[info.Type]
+       if !ok {
+               return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+       }
+
+       if d.Destroy || d.RequiresNew() {
+               if s.ID != "" {
+                       // Destroy the resource if it is created
+                       err := r.Destroy(s, meta)
+                       if err != nil {
+                               return s, err
+                       }
+
+                       s.ID = ""
+               }
+
+               // If we're only destroying, and not creating, then return now.
+               // Otherwise, we continue so that we can create a new resource.
+               if !d.RequiresNew() {
+                       return nil, nil
+               }
+       }
+
+       var result *terraform.InstanceState
+       var err error
+       if s.ID == "" {
+               result, err = r.Create(s, d, meta)
+       } else {
+               if r.Update == nil {
+                       return s, fmt.Errorf(
+                               "Resource type '%s' doesn't support update",
+                               info.Type)
+               }
+
+               result, err = r.Update(s, d, meta)
+       }
+       if result != nil {
+               if result.Attributes == nil {
+                       result.Attributes = make(map[string]string)
+               }
+
+               result.Attributes["id"] = result.ID
+       }
+
+       return result, err
+}
+
+// Diff performs a diff on the proper resource type.
+func (m *Map) Diff(
+       info *terraform.InstanceInfo,
+       s *terraform.InstanceState,
+       c *terraform.ResourceConfig,
+       meta interface{}) (*terraform.InstanceDiff, error) {
+       r, ok := m.Mapping[info.Type]
+       if !ok {
+               return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+       }
+
+       return r.Diff(s, c, meta)
+}
+
+// Refresh performs a Refresh on the proper resource type.
+//
+// Refresh on the Resource won't be called if the state represents a
+// non-created resource (ID is blank).
+//
+// An error is returned if the resource isn't registered.
+func (m *Map) Refresh(
+       info *terraform.InstanceInfo,
+       s *terraform.InstanceState,
+       meta interface{}) (*terraform.InstanceState, error) {
+       // If the resource isn't created, don't refresh.
+       if s.ID == "" {
+               return s, nil
+       }
+
+       r, ok := m.Mapping[info.Type]
+       if !ok {
+               return nil, fmt.Errorf("Unknown resource type: %s", info.Type)
+       }
+
+       return r.Refresh(s, meta)
+}
+
+// Resources returns all the resources that are supported by this
+// resource map and can be used to satisfy the Resources method of
+// a ResourceProvider.
+func (m *Map) Resources() []terraform.ResourceType {
+       ks := make([]string, 0, len(m.Mapping))
+       for k := range m.Mapping {
+               ks = append(ks, k)
+       }
+       sort.Strings(ks)
+
+       rs := make([]terraform.ResourceType, 0, len(m.Mapping))
+       for _, k := range ks {
+               rs = append(rs, terraform.ResourceType{
+                       Name: k,
+               })
+       }
+
+       return rs
+}
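+
+// Usage sketch (illustrative; the resource name and lifecycle functions are
+// hypothetical): a provider built on Map registers each resource type once,
+// and Map dispatches Validate/Apply/Diff/Refresh to it:
+//
+//        m := &resource.Map{
+//                Mapping: map[string]resource.Resource{
+//                        "example_widget": resource.Resource{
+//                                Create:  createWidget,
+//                                Destroy: destroyWidget,
+//                                Diff:    diffWidget,
+//                                Refresh: refreshWidget,
+//                        },
+//                },
+//        }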
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/resource.go b/vendor/github.com/hashicorp/terraform/helper/resource/resource.go
new file mode 100644 (file)
index 0000000..0d9c831
--- /dev/null
@@ -0,0 +1,49 @@
+package resource
+
+import (
+       "github.com/hashicorp/terraform/helper/config"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+type Resource struct {
+       ConfigValidator *config.Validator
+       Create          CreateFunc
+       Destroy         DestroyFunc
+       Diff            DiffFunc
+       Refresh         RefreshFunc
+       Update          UpdateFunc
+}
+
+// CreateFunc is a function that creates a resource that didn't previously
+// exist.
+type CreateFunc func(
+       *terraform.InstanceState,
+       *terraform.InstanceDiff,
+       interface{}) (*terraform.InstanceState, error)
+
+// DestroyFunc is a function that destroys a previously existing resource,
+// using its state.
+type DestroyFunc func(
+       *terraform.InstanceState,
+       interface{}) error
+
+// DiffFunc is a function that performs a diff of a resource.
+type DiffFunc func(
+       *terraform.InstanceState,
+       *terraform.ResourceConfig,
+       interface{}) (*terraform.InstanceDiff, error)
+
+// RefreshFunc is a function that performs a refresh of a specific type
+// of resource.
+type RefreshFunc func(
+       *terraform.InstanceState,
+       interface{}) (*terraform.InstanceState, error)
+
+// UpdateFunc is a function that is called to update a resource that
+// previously existed. The difference between this and CreateFunc is that
+// the diff is guaranteed to only contain attributes that don't require
+// a new resource.
+type UpdateFunc func(
+       *terraform.InstanceState,
+       *terraform.InstanceDiff,
+       interface{}) (*terraform.InstanceState, error)
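+
+// Usage sketch (illustrative; Client and its method are hypothetical): a
+// minimal CreateFunc that records the remote ID on the instance state:
+//
+//        func createWidget(
+//                s *terraform.InstanceState,
+//                d *terraform.InstanceDiff,
+//                meta interface{}) (*terraform.InstanceState, error) {
+//                client := meta.(*Client)
+//                id, err := client.CreateWidget()
+//                if err != nil {
+//                        return nil, err
+//                }
+//                s.ID = id
+//                return s, nil
+//        }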
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/state.go b/vendor/github.com/hashicorp/terraform/helper/resource/state.go
new file mode 100644 (file)
index 0000000..37c586a
--- /dev/null
@@ -0,0 +1,259 @@
+package resource
+
+import (
+       "log"
+       "time"
+)
+
+var refreshGracePeriod = 30 * time.Second
+
+// StateRefreshFunc is a function type used for StateChangeConf that is
+// responsible for refreshing the item being watched for a state change.
+//
+// It returns three results. `result` is any object that will be returned
+// as the final object after waiting for state change. This allows you to
+// return the final updated object, for example an EC2 instance after refreshing
+// it.
+//
+// `state` is the latest state of that object. And `err` is any error that
+// may have happened while refreshing the state.
+type StateRefreshFunc func() (result interface{}, state string, err error)
+
+// StateChangeConf is the configuration struct used for `WaitForState`.
+type StateChangeConf struct {
+       Delay          time.Duration    // Wait this time before starting checks
+       Pending        []string         // States that are "allowed" and will continue trying
+       Refresh        StateRefreshFunc // Refreshes the current state
+       Target         []string         // Target state
+       Timeout        time.Duration    // The amount of time to wait before timeout
+       MinTimeout     time.Duration    // Smallest time to wait before refreshes
+       PollInterval   time.Duration    // Override MinTimeout/backoff and only poll this often
+       NotFoundChecks int              // Number of times to allow not found
+
+       // This is to work around inconsistent APIs
+       ContinuousTargetOccurence int // Number of times the Target state has to occur continuously
+}
+
+// WaitForState watches an object and waits for it to achieve the state
+// specified in the configuration using the specified Refresh() func,
+// waiting the number of seconds specified in the timeout configuration.
+//
+// If the Refresh function returns an error, exit immediately with that error.
+//
+// If the Refresh function returns a state other than the Target state or one
+// listed in Pending, return immediately with an error.
+//
+// If the Timeout is exceeded before reaching the Target state, return an
+// error.
+//
+// Otherwise, return the result of the first call to the Refresh function
+// that reaches the target state.
+func (conf *StateChangeConf) WaitForState() (interface{}, error) {
+       log.Printf("[DEBUG] Waiting for state to become: %s", conf.Target)
+
+       notfoundTick := 0
+       targetOccurence := 0
+
+       // Set a default for times to check for not found
+       if conf.NotFoundChecks == 0 {
+               conf.NotFoundChecks = 20
+       }
+
+       if conf.ContinuousTargetOccurence == 0 {
+               conf.ContinuousTargetOccurence = 1
+       }
+
+       type Result struct {
+               Result interface{}
+               State  string
+               Error  error
+               Done   bool
+       }
+
+       // Read every result from the refresh loop, waiting for a positive result.Done.
+       resCh := make(chan Result, 1)
+       // cancellation channel for the refresh loop
+       cancelCh := make(chan struct{})
+
+       result := Result{}
+
+       go func() {
+               defer close(resCh)
+
+               time.Sleep(conf.Delay)
+
+               // start with 0 delay for the first loop
+               var wait time.Duration
+
+               for {
+                       // store the last result
+                       resCh <- result
+
+                       // wait and watch for cancellation
+                       select {
+                       case <-cancelCh:
+                               return
+                       case <-time.After(wait):
+                               // first round had no wait
+                               if wait == 0 {
+                                       wait = 100 * time.Millisecond
+                               }
+                       }
+
+                       res, currentState, err := conf.Refresh()
+                       result = Result{
+                               Result: res,
+                               State:  currentState,
+                               Error:  err,
+                       }
+
+                       if err != nil {
+                               resCh <- result
+                               return
+                       }
+
+                       // If we're waiting for the absence of a thing, then return
+                       if res == nil && len(conf.Target) == 0 {
+                               targetOccurence++
+                               if conf.ContinuousTargetOccurence == targetOccurence {
+                                       result.Done = true
+                                       resCh <- result
+                                       return
+                               }
+                               continue
+                       }
+
+                       if res == nil {
+                       // If we didn't find the resource, check whether it has
+                       // been missing for a while, and if so, report an error.
+                               notfoundTick++
+                               if notfoundTick > conf.NotFoundChecks {
+                                       result.Error = &NotFoundError{
+                                               LastError: err,
+                                               Retries:   notfoundTick,
+                                       }
+                                       resCh <- result
+                                       return
+                               }
+                       } else {
+                               // Reset the counter for when a resource isn't found
+                               notfoundTick = 0
+                               found := false
+
+                               for _, allowed := range conf.Target {
+                                       if currentState == allowed {
+                                               found = true
+                                               targetOccurence++
+                                               if conf.ContinuousTargetOccurence == targetOccurence {
+                                                       result.Done = true
+                                                       resCh <- result
+                                                       return
+                                               }
+                                               continue
+                                       }
+                               }
+
+                               for _, allowed := range conf.Pending {
+                                       if currentState == allowed {
+                                               found = true
+                                               targetOccurence = 0
+                                               break
+                                       }
+                               }
+
+                               if !found && len(conf.Pending) > 0 {
+                                       result.Error = &UnexpectedStateError{
+                                               LastError:     err,
+                                               State:         result.State,
+                                               ExpectedState: conf.Target,
+                                       }
+                                       resCh <- result
+                                       return
+                               }
+                       }
+
+                       // Wait between refreshes using exponential backoff, except when
+                       // waiting for the target state to reoccur.
+                       if targetOccurence == 0 {
+                               wait *= 2
+                       }
+
+                       // If a poll interval has been specified, choose that interval.
+                       // Otherwise bound the default value.
+                       if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second {
+                               wait = conf.PollInterval
+                       } else {
+                               if wait < conf.MinTimeout {
+                                       wait = conf.MinTimeout
+                               } else if wait > 10*time.Second {
+                                       wait = 10 * time.Second
+                               }
+                       }
+
+                       log.Printf("[TRACE] Waiting %s before next try", wait)
+               }
+       }()
+
+       // store the last result value from the refresh loop
+       lastResult := Result{}
+
+       timeout := time.After(conf.Timeout)
+       for {
+               select {
+               case r, ok := <-resCh:
+                       // channel closed, so return the last result
+                       if !ok {
+                               return lastResult.Result, lastResult.Error
+                       }
+
+                       // we reached the intended state
+                       if r.Done {
+                               return r.Result, r.Error
+                       }
+
+                       // still waiting, store the last result
+                       lastResult = r
+
+               case <-timeout:
+                       log.Printf("[WARN] WaitForState timeout after %s", conf.Timeout)
+                       log.Printf("[WARN] WaitForState starting %s refresh grace period", refreshGracePeriod)
+
+                       // cancel the goroutine and start our grace period timer
+                       close(cancelCh)
+                       timeout := time.After(refreshGracePeriod)
+
+                       // we need a for loop and a label to break on, because we may have
+                       // an extra response value to read, but still want to wait for the
+                       // channel to close.
+               forSelect:
+                       for {
+                               select {
+                               case r, ok := <-resCh:
+                                       if r.Done {
+                                               // the last refresh loop reached the desired state
+                                               return r.Result, r.Error
+                                       }
+
+                                       if !ok {
+                                               // the goroutine returned
+                                               break forSelect
+                                       }
+
+                                       // target state not reached, save the result for the
+                                       // TimeoutError and wait for the channel to close
+                                       lastResult = r
+                               case <-timeout:
+                                       log.Println("[ERROR] WaitForState exceeded refresh grace period")
+                                       break forSelect
+                               }
+                       }
+
+                       return nil, &TimeoutError{
+                               LastError:     lastResult.Error,
+                               LastState:     lastResult.State,
+                               Timeout:       conf.Timeout,
+                               ExpectedState: conf.Target,
+                       }
+               }
+       }
+}
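+
+// Usage sketch (illustrative; client, id, and the states are hypothetical):
+// waiting for a server to leave its provisioning states:
+//
+//        conf := &resource.StateChangeConf{
+//                Pending:    []string{"pending", "building"},
+//                Target:     []string{"active"},
+//                Timeout:    10 * time.Minute,
+//                MinTimeout: 3 * time.Second,
+//                Refresh: func() (interface{}, string, error) {
+//                        srv, err := client.GetServer(id)
+//                        if err != nil {
+//                                return nil, "", err
+//                        }
+//                        return srv, srv.Status, nil
+//                },
+//        }
+//        raw, err := conf.WaitForState()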
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing.go
new file mode 100644 (file)
index 0000000..04367c5
--- /dev/null
@@ -0,0 +1,790 @@
+package resource
+
+import (
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "os"
+       "path/filepath"
+       "reflect"
+       "regexp"
+       "strings"
+       "testing"
+
+       "github.com/davecgh/go-spew/spew"
+       "github.com/hashicorp/go-getter"
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/helper/logging"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+const TestEnvVar = "TF_ACC"
+
+// TestProvider can be implemented by any ResourceProvider to provide custom
+// reset functionality at the start of an acceptance test.
+// The helper/schema Provider implements this interface.
+type TestProvider interface {
+       TestReset() error
+}
+
+// TestCheckFunc is the callback type used with acceptance tests to check
+// the state of a resource. The state passed in is the latest state known,
+// or in the case of being after a destroy, it is the last known state when
+// it was created.
+type TestCheckFunc func(*terraform.State) error
+
+// ImportStateCheckFunc is the check function for ImportState tests
+type ImportStateCheckFunc func([]*terraform.InstanceState) error
+
+// TestCase is a single acceptance test case used to test the apply/destroy
+// lifecycle of a resource in a specific configuration.
+//
+// When the destroy plan is executed, the config from the last TestStep
+// is used to plan it.
+type TestCase struct {
+       // IsUnitTest allows a test to run regardless of the TF_ACC
+       // environment variable. This should be used with care - only for
+       // fast tests on local resources (e.g. remote state with a local
+       // backend) but can be used to increase confidence in correct
+       // operation of Terraform without waiting for a full acctest run.
+       IsUnitTest bool
+
+       // PreCheck, if non-nil, will be called before any test steps are
+       // executed. It will only be executed in the case that the steps
+       // would run, so it can be used for some validation before running
+       // acceptance tests, such as verifying that keys are set up.
+       PreCheck func()
+
+       // Providers is the ResourceProvider that will be under test.
+       //
+       // Alternately, ProviderFactories can be specified for the providers
+       // that are valid. This takes priority over Providers.
+       //
+       // The end effect of each is the same: specifying the providers that
+       // are used within the tests.
+       Providers         map[string]terraform.ResourceProvider
+       ProviderFactories map[string]terraform.ResourceProviderFactory
+
+       // PreventPostDestroyRefresh can be set to true for cases where data sources
+       // are tested alongside real resources
+       PreventPostDestroyRefresh bool
+
+       // CheckDestroy is called after the resource is finally destroyed
+       // to allow the tester to test that the resource is truly gone.
+       CheckDestroy TestCheckFunc
+
+       // Steps are the apply sequences done within the context of the
+       // same state. Each step can have its own check to verify correctness.
+       Steps []TestStep
+
+       // The settings below control the "ID-only refresh test." This is
+       // an enabled-by-default test that verifies a resource can be
+       // refreshed from only its ID and still end up with the same
+       // attributes. This validates the completeness of Refresh.
+       //
+       // IDRefreshName is the name of the resource to check. This will
+       // default to the first non-nil primary resource in the state.
+       //
+       // IDRefreshIgnore is a list of configuration keys that will be ignored.
+       IDRefreshName   string
+       IDRefreshIgnore []string
+}
+
+// TestStep is a single apply sequence of a test, done within the
+// context of a state.
+//
+// Multiple TestSteps can be sequenced in a Test to allow testing
+// potentially complex update logic. In general, simply create/destroy
+// tests will only need one step.
+type TestStep struct {
+       // ResourceName should be set to the name of the resource
+       // that is being tested. Example: "aws_instance.foo". Various test
+       // modes use this to auto-detect state information.
+       //
+       // This is only required if the test mode settings below say it is
+       // for the mode you're using.
+       ResourceName string
+
+       // PreConfig is called before the Config is applied to perform any per-step
+       // setup that needs to happen. This is called regardless of "test mode"
+       // below.
+       PreConfig func()
+
+       //---------------------------------------------------------------
+       // Test modes. One of the following groups of settings must be
+       // set to determine what the test step will do. Ideally we would've
+       // used Go interfaces here, but there are now hundreds of tests we
+       // don't want to re-type, so instead we determine which step logic
+       // to run based on which of the settings below are set.
+       //---------------------------------------------------------------
+
+       //---------------------------------------------------------------
+       // Plan, Apply testing
+       //---------------------------------------------------------------
+
+       // Config a string of the configuration to give to Terraform. If this
+       // is set, then the TestCase will execute this step with the same logic
+       // as a `terraform apply`.
+       Config string
+
+       // Check is called after the Config is applied. Use this step to
+       // make your own API calls to check the status of things, and to
+       // inspect the format of the ResourceState itself.
+       //
+       // If an error is returned, the test will fail. In this case, a
+       // destroy plan will still be attempted.
+       //
+       // If this is nil, no check is done on this step.
+       Check TestCheckFunc
+
+       // Destroy will create a destroy plan if set to true.
+       Destroy bool
+
+       // ExpectNonEmptyPlan can be set to true for specific types of tests that are
+       // looking to verify that a diff occurs
+       ExpectNonEmptyPlan bool
+
+       // ExpectError allows the construction of test cases that we expect to fail
+       // with an error. The specified regexp must match against the error for the
+       // test to pass.
+       ExpectError *regexp.Regexp
+
+       // PlanOnly can be set to only run `plan` with this configuration, and not
+       // actually apply it. This is useful for ensuring config changes result in
+       // no-op plans
+       PlanOnly bool
+
+       // PreventPostDestroyRefresh can be set to true for cases where data sources
+       // are tested alongside real resources
+       PreventPostDestroyRefresh bool
+
+       //---------------------------------------------------------------
+       // ImportState testing
+       //---------------------------------------------------------------
+
+       // ImportState, if true, will test the functionality of ImportState
+       // by importing the resource with ResourceName (must be set) and the
+       // ID of that resource.
+       ImportState bool
+
+       // ImportStateId is the ID to perform an ImportState operation with.
+       // This is optional. If it isn't set, then the resource ID is automatically
+       // determined by inspecting the state for ResourceName's ID.
+       ImportStateId string
+
+       // ImportStateIdPrefix is the prefix added in front of ImportStateId.
+       // This can be useful in complex import cases, where more than one
+       // attribute needs to be passed on as the Import ID. Mainly in cases
+       // where the ID is not known, and a known prefix needs to be added to
+       // the unset ImportStateId field.
+       ImportStateIdPrefix string
+
+       // ImportStateCheck checks the results of ImportState. It should be
+       // used to verify that the resulting value of ImportState has the
+       // proper resources, IDs, and attributes.
+       ImportStateCheck ImportStateCheckFunc
+
+       // ImportStateVerify, if true, will also check that the state values
+       // that are finally put into the state after import match for all the
+       // IDs returned by the Import.
+       //
+       // ImportStateVerifyIgnore are fields that should not be verified to
+       // be equal. These can be set to ephemeral fields or fields that can't
+       // be refreshed and don't matter.
+       ImportStateVerify       bool
+       ImportStateVerifyIgnore []string
+}
+
+// Test performs an acceptance test on a resource.
+//
+// Tests are not run unless an environment variable "TF_ACC" is
+// set to some non-empty value. This is to avoid test cases surprising
+// a user by creating real resources.
+//
+// Tests will fail unless the verbose flag (`go test -v`, or explicitly
+// the "-test.v" flag) is set. Because some acceptance tests take quite
+// long, we require the verbose flag so users are able to see progress
+// output.
+func Test(t TestT, c TestCase) {
+       // We only run acceptance tests if an env var is set because they're
+       // slow and generally require some outside configuration. You can opt out
+       // of this by setting IsUnitTest on individual TestCases.
+       if os.Getenv(TestEnvVar) == "" && !c.IsUnitTest {
+               t.Skip(fmt.Sprintf(
+                       "Acceptance tests skipped unless env '%s' set",
+                       TestEnvVar))
+               return
+       }
+
+       logWriter, err := logging.LogOutput()
+       if err != nil {
+               t.Error(fmt.Errorf("error setting up logging: %s", err))
+       }
+       log.SetOutput(logWriter)
+
+       // We require verbose mode so that the user knows what is going on.
+       if !testTesting && !testing.Verbose() && !c.IsUnitTest {
+               t.Fatal("Acceptance tests must be run with the -v flag on tests")
+               return
+       }
+
+       // Run the PreCheck if we have it
+       if c.PreCheck != nil {
+               c.PreCheck()
+       }
+
+       ctxProviders, err := testProviderFactories(c)
+       if err != nil {
+               t.Fatal(err)
+       }
+       opts := terraform.ContextOpts{Providers: ctxProviders}
+
+       // A single state variable to track the lifecycle, starting with no state
+       var state *terraform.State
+
+       // Go through each step and run it
+       var idRefreshCheck *terraform.ResourceState
+       idRefresh := c.IDRefreshName != ""
+       errored := false
+       for i, step := range c.Steps {
+               var err error
+               log.Printf("[WARN] Test: Executing step %d", i)
+
+               // Determine the test mode to execute
+               if step.Config != "" {
+                       state, err = testStepConfig(opts, state, step)
+               } else if step.ImportState {
+                       state, err = testStepImportState(opts, state, step)
+               } else {
+                       err = fmt.Errorf(
+                               "unknown test mode for step. Please see TestStep docs\n\n%#v",
+                               step)
+               }
+
+               // If there was an error, exit
+               if err != nil {
+                       // Perhaps we expected an error? Check if it matches
+                       if step.ExpectError != nil {
+                               if !step.ExpectError.MatchString(err.Error()) {
+                                       errored = true
+                                       t.Error(fmt.Sprintf(
+                                               "Step %d, expected error:\n\n%s\n\nTo match:\n\n%s\n\n",
+                                               i, err, step.ExpectError))
+                                       break
+                               }
+                       } else {
+                               errored = true
+                               t.Error(fmt.Sprintf(
+                                       "Step %d error: %s", i, err))
+                               break
+                       }
+               }
+
+               // If we've never checked an id-only refresh and our state isn't
+               // empty, find the first resource and test it.
+               if idRefresh && idRefreshCheck == nil && !state.Empty() {
+                       // Find the first non-nil resource in the state
+                       for _, m := range state.Modules {
+                               if len(m.Resources) > 0 {
+                                       if v, ok := m.Resources[c.IDRefreshName]; ok {
+                                               idRefreshCheck = v
+                                       }
+
+                                       break
+                               }
+                       }
+
+                       // If we have an instance to check for refreshes, do it
+                       // immediately. We do it in the middle of another test
+                       // because it shouldn't affect the overall state (refresh
+                       // is read-only semantically) and we want to fail early if
+                       // this fails. If refresh isn't read-only, then this will have
+                       // caught a different bug.
+                       if idRefreshCheck != nil {
+                               log.Printf(
+                                       "[WARN] Test: Running ID-only refresh check on %s",
+                                       idRefreshCheck.Primary.ID)
+                               if err := testIDOnlyRefresh(c, opts, step, idRefreshCheck); err != nil {
+                                       log.Printf("[ERROR] Test: ID-only test failed: %s", err)
+                                       t.Error(fmt.Sprintf(
+                                               "[ERROR] Test: ID-only test failed: %s", err))
+                                       break
+                               }
+                       }
+               }
+       }
+
+       // If we never checked an id-only refresh, it is a failure.
+       if idRefresh {
+               if !errored && len(c.Steps) > 0 && idRefreshCheck == nil {
+                       t.Error("ID-only refresh check never ran.")
+               }
+       }
+
+       // If we have a state, then run the destroy
+       if state != nil {
+               lastStep := c.Steps[len(c.Steps)-1]
+               destroyStep := TestStep{
+                       Config:                    lastStep.Config,
+                       Check:                     c.CheckDestroy,
+                       Destroy:                   true,
+                       PreventPostDestroyRefresh: c.PreventPostDestroyRefresh,
+               }
+
+               log.Printf("[WARN] Test: Executing destroy step")
+               state, err := testStep(opts, state, destroyStep)
+               if err != nil {
+                       t.Error(fmt.Sprintf(
+                               "Error destroying resource! WARNING: Dangling resources\n"+
+                                       "may exist. The full state and error is shown below.\n\n"+
+                                       "Error: %s\n\nState: %s",
+                               err,
+                               state))
+               }
+       } else {
+               log.Printf("[WARN] Skipping destroy test since there is no state.")
+       }
+}
+
+// testProviderFactories is a helper to build the ResourceProviderFactory map
+// with pre instantiated ResourceProviders, so that we can reset them for the
+// test, while only calling the factory function once.
+// Any errors are stored so that they can be returned by the factory in
+// terraform to match non-test behavior.
+func testProviderFactories(c TestCase) (map[string]terraform.ResourceProviderFactory, error) {
+       ctxProviders := c.ProviderFactories
+       if ctxProviders == nil {
+               ctxProviders = make(map[string]terraform.ResourceProviderFactory)
+       }
+       // add any fixed providers
+       for k, p := range c.Providers {
+               ctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)
+       }
+
+       // reset the providers if needed
+       for k, pf := range ctxProviders {
+               // instantiate each provider so it can be reset; any error is
+               // returned so behavior matches the non-test factories
+               p, err := pf()
+               if err != nil {
+                       return nil, err
+               }
+               if p, ok := p.(TestProvider); ok {
+                       err := p.TestReset()
+                       if err != nil {
+                               return nil, fmt.Errorf("[ERROR] failed to reset provider %q: %s", k, err)
+                       }
+               }
+       }
+
+       return ctxProviders, nil
+}
+
+// UnitTest is a helper to force the acceptance testing harness to run in the
+// normal unit test suite. This should only be used for resources that don't
+// have any external dependencies.
+func UnitTest(t TestT, c TestCase) {
+       c.IsUnitTest = true
+       Test(t, c)
+}
+
+func testIDOnlyRefresh(c TestCase, opts terraform.ContextOpts, step TestStep, r *terraform.ResourceState) error {
+       // TODO: We guard by this right now so master doesn't explode. We
+       // need to remove this eventually to make this part of the normal tests.
+       if os.Getenv("TF_ACC_IDONLY") == "" {
+               return nil
+       }
+
+       name := fmt.Sprintf("%s.foo", r.Type)
+
+       // Build the state. The state is just the resource with an ID. There
+       // are no attributes. We only set what is needed to perform a refresh.
+       state := terraform.NewState()
+       state.RootModule().Resources[name] = &terraform.ResourceState{
+               Type: r.Type,
+               Primary: &terraform.InstanceState{
+                       ID: r.Primary.ID,
+               },
+       }
+
+       // Create the config module. We use the full config because Refresh
+       // doesn't have access to it and we may need things like provider
+       // configurations. The initial implementation of id-only checks used
+       // an empty config module, but that caused the aforementioned problems.
+       mod, err := testModule(opts, step)
+       if err != nil {
+               return err
+       }
+
+       // Initialize the context
+       opts.Module = mod
+       opts.State = state
+       ctx, err := terraform.NewContext(&opts)
+       if err != nil {
+               return err
+       }
+       if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
+               if len(es) > 0 {
+                       estrs := make([]string, len(es))
+                       for i, e := range es {
+                               estrs[i] = e.Error()
+                       }
+                       return fmt.Errorf(
+                               "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
+                               ws, estrs)
+               }
+
+               log.Printf("[WARN] Config warnings: %#v", ws)
+       }
+
+       // Refresh!
+       state, err = ctx.Refresh()
+       if err != nil {
+               return fmt.Errorf("Error refreshing: %s", err)
+       }
+
+       // Verify attribute equivalence.
+       actualR := state.RootModule().Resources[name]
+       if actualR == nil {
+               return fmt.Errorf("Resource gone!")
+       }
+       if actualR.Primary == nil {
+               return fmt.Errorf("Resource has no primary instance")
+       }
+       actual := actualR.Primary.Attributes
+       expected := r.Primary.Attributes
+       // Remove fields we're ignoring
+       for _, v := range c.IDRefreshIgnore {
+               for k := range actual {
+                       if strings.HasPrefix(k, v) {
+                               delete(actual, k)
+                       }
+               }
+               for k := range expected {
+                       if strings.HasPrefix(k, v) {
+                               delete(expected, k)
+                       }
+               }
+       }
+
+       if !reflect.DeepEqual(actual, expected) {
+               // Determine only the different attributes
+               for k, v := range expected {
+                       if av, ok := actual[k]; ok && v == av {
+                               delete(expected, k)
+                               delete(actual, k)
+                       }
+               }
+
+               spewConf := spew.NewDefaultConfig()
+               spewConf.SortKeys = true
+               return fmt.Errorf(
+                       "Attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
+                               "\n\n%s\n\n%s",
+                       spewConf.Sdump(actual), spewConf.Sdump(expected))
+       }
+
+       return nil
+}
+
+func testModule(
+       opts terraform.ContextOpts,
+       step TestStep) (*module.Tree, error) {
+       if step.PreConfig != nil {
+               step.PreConfig()
+       }
+
+       cfgPath, err := ioutil.TempDir("", "tf-test")
+       if err != nil {
+               return nil, fmt.Errorf(
+                       "Error creating temporary directory for config: %s", err)
+       }
+       defer os.RemoveAll(cfgPath)
+
+       // Write the configuration
+       cfgF, err := os.Create(filepath.Join(cfgPath, "main.tf"))
+       if err != nil {
+               return nil, fmt.Errorf(
+                       "Error creating temporary file for config: %s", err)
+       }
+
+       _, err = io.Copy(cfgF, strings.NewReader(step.Config))
+       cfgF.Close()
+       if err != nil {
+               return nil, fmt.Errorf(
+                       "Error creating temporary file for config: %s", err)
+       }
+
+       // Parse the configuration
+       mod, err := module.NewTreeModule("", cfgPath)
+       if err != nil {
+               return nil, fmt.Errorf(
+                       "Error loading configuration: %s", err)
+       }
+
+       // Load the modules
+       modStorage := &getter.FolderStorage{
+               StorageDir: filepath.Join(cfgPath, ".tfmodules"),
+       }
+       err = mod.Load(modStorage, module.GetModeGet)
+       if err != nil {
+               return nil, fmt.Errorf("Error downloading modules: %s", err)
+       }
+
+       return mod, nil
+}
+
+func testResource(c TestStep, state *terraform.State) (*terraform.ResourceState, error) {
+       if c.ResourceName == "" {
+               return nil, fmt.Errorf("ResourceName must be set in TestStep")
+       }
+
+       for _, m := range state.Modules {
+               if len(m.Resources) > 0 {
+                       if v, ok := m.Resources[c.ResourceName]; ok {
+                               return v, nil
+                       }
+               }
+       }
+
+       return nil, fmt.Errorf(
+               "Resource specified by ResourceName couldn't be found: %s", c.ResourceName)
+}
+
+// ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+func ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+       return func(s *terraform.State) error {
+               for i, f := range fs {
+                       if err := f(s); err != nil {
+                               return fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err)
+                       }
+               }
+
+               return nil
+       }
+}
+
+// ComposeAggregateTestCheckFunc lets you compose multiple TestCheckFuncs into
+// a single TestCheckFunc.
+//
+// As a user testing their provider, this lets you decompose your checks
+// into smaller pieces more easily.
+//
+// Unlike ComposeTestCheckFunc, ComposeAggregateTestCheckFunc runs _all_ of the
+// TestCheckFuncs and aggregates failures.
+func ComposeAggregateTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {
+       return func(s *terraform.State) error {
+               var result *multierror.Error
+
+               for i, f := range fs {
+                       if err := f(s); err != nil {
+                               result = multierror.Append(result, fmt.Errorf("Check %d/%d error: %s", i+1, len(fs), err))
+                       }
+               }
+
+               return result.ErrorOrNil()
+       }
+}
+
+// TestCheckResourceAttrSet is a TestCheckFunc which ensures a value
+// exists in state for the given name/key combination. It is useful when
+// testing that computed values were set, when it is not possible to
+// know ahead of time what the values will be.
+func TestCheckResourceAttrSet(name, key string) TestCheckFunc {
+       return func(s *terraform.State) error {
+               is, err := primaryInstanceState(s, name)
+               if err != nil {
+                       return err
+               }
+
+               if val, ok := is.Attributes[key]; ok && val != "" {
+                       return nil
+               }
+
+               return fmt.Errorf("%s: Attribute '%s' expected to be set", name, key)
+       }
+}
+
+// TestCheckResourceAttr is a TestCheckFunc which validates
+// the value in state for the given name/key combination.
+func TestCheckResourceAttr(name, key, value string) TestCheckFunc {
+       return func(s *terraform.State) error {
+               is, err := primaryInstanceState(s, name)
+               if err != nil {
+                       return err
+               }
+
+               if v, ok := is.Attributes[key]; !ok || v != value {
+                       if !ok {
+                               return fmt.Errorf("%s: Attribute '%s' not found", name, key)
+                       }
+
+                       return fmt.Errorf(
+                               "%s: Attribute '%s' expected %#v, got %#v",
+                               name,
+                               key,
+                               value,
+                               v)
+               }
+
+               return nil
+       }
+}
+
+// TestCheckNoResourceAttr is a TestCheckFunc which ensures that
+// NO value exists in state for the given name/key combination.
+func TestCheckNoResourceAttr(name, key string) TestCheckFunc {
+       return func(s *terraform.State) error {
+               is, err := primaryInstanceState(s, name)
+               if err != nil {
+                       return err
+               }
+
+               if _, ok := is.Attributes[key]; ok {
+                       return fmt.Errorf("%s: Attribute '%s' found when not expected", name, key)
+               }
+
+               return nil
+       }
+}
+
+// TestMatchResourceAttr is a TestCheckFunc which checks that the value
+// in state for the given name/key combination matches the given regex.
+func TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {
+       return func(s *terraform.State) error {
+               is, err := primaryInstanceState(s, name)
+               if err != nil {
+                       return err
+               }
+
+               if !r.MatchString(is.Attributes[key]) {
+                       return fmt.Errorf(
+                               "%s: Attribute '%s' didn't match %q, got %#v",
+                               name,
+                               key,
+                               r.String(),
+                               is.Attributes[key])
+               }
+
+               return nil
+       }
+}
+
+// TestCheckResourceAttrPtr is like TestCheckResourceAttr except the
+// value is a pointer so that it can be updated while the test is running.
+// It will only be dereferenced at the point this step is run.
+func TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {
+       return func(s *terraform.State) error {
+               return TestCheckResourceAttr(name, key, *value)(s)
+       }
+}
+
+// TestCheckResourceAttrPair is a TestCheckFunc which validates that the values
+// in state for a pair of name/key combinations are equal.
+func TestCheckResourceAttrPair(nameFirst, keyFirst, nameSecond, keySecond string) TestCheckFunc {
+       return func(s *terraform.State) error {
+               isFirst, err := primaryInstanceState(s, nameFirst)
+               if err != nil {
+                       return err
+               }
+               vFirst, ok := isFirst.Attributes[keyFirst]
+               if !ok {
+                       return fmt.Errorf("%s: Attribute '%s' not found", nameFirst, keyFirst)
+               }
+
+               isSecond, err := primaryInstanceState(s, nameSecond)
+               if err != nil {
+                       return err
+               }
+               vSecond, ok := isSecond.Attributes[keySecond]
+               if !ok {
+                       return fmt.Errorf("%s: Attribute '%s' not found", nameSecond, keySecond)
+               }
+
+               if vFirst != vSecond {
+                       return fmt.Errorf(
+                               "%s: Attribute '%s' expected %#v, got %#v",
+                               nameFirst,
+                               keyFirst,
+                               vSecond,
+                               vFirst)
+               }
+
+               return nil
+       }
+}
+
+// TestCheckOutput checks an output in the Terraform configuration
+func TestCheckOutput(name, value string) TestCheckFunc {
+       return func(s *terraform.State) error {
+               ms := s.RootModule()
+               rs, ok := ms.Outputs[name]
+               if !ok {
+                       return fmt.Errorf("Not found: %s", name)
+               }
+
+               if rs.Value != value {
+                       return fmt.Errorf(
+                               "Output '%s': expected %#v, got %#v",
+                               name,
+                               value,
+                               rs)
+               }
+
+               return nil
+       }
+}
+
+func TestMatchOutput(name string, r *regexp.Regexp) TestCheckFunc {
+       return func(s *terraform.State) error {
+               ms := s.RootModule()
+               rs, ok := ms.Outputs[name]
+               if !ok {
+                       return fmt.Errorf("Not found: %s", name)
+               }
+
+               if !r.MatchString(rs.Value.(string)) {
+                       return fmt.Errorf(
+                               "Output '%s': %#v didn't match %q",
+                               name,
+                               rs,
+                               r.String())
+               }
+
+               return nil
+       }
+}
+
+// TestT is the interface used to handle the test lifecycle of a test.
+//
+// Users should just use a *testing.T object, which implements this.
+type TestT interface {
+       Error(args ...interface{})
+       Fatal(args ...interface{})
+       Skip(args ...interface{})
+}
+
+// This is set to true by unit tests to alter some behavior
+var testTesting = false
+
+// primaryInstanceState returns the primary instance state for the given resource name.
+func primaryInstanceState(s *terraform.State, name string) (*terraform.InstanceState, error) {
+       ms := s.RootModule()
+       rs, ok := ms.Resources[name]
+       if !ok {
+               return nil, fmt.Errorf("Not found: %s", name)
+       }
+
+       is := rs.Primary
+       if is == nil {
+               return nil, fmt.Errorf("No primary instance: %s", name)
+       }
+
+       return is, nil
+}
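
For illustration, a sketch (not part of this commit) of how these check helpers compose inside an acceptance test; the provider map, config fixture, and resource/output addresses are all hypothetical:

    package example

    import (
    	"testing"

    	"github.com/hashicorp/terraform/helper/resource"
    	"github.com/hashicorp/terraform/terraform"
    )

    // Hypothetical fixtures, normally defined alongside the provider.
    var testAccProviders map[string]terraform.ResourceProvider

    const testAccThingConfig = `
    resource "example_thing" "foo" { name = "foo" }
    data "example_thing" "foo" { name = "${example_thing.foo.name}" }
    output "thing_name" { value = "${example_thing.foo.name}" }
    `

    func TestAccThing_basic(t *testing.T) {
    	resource.Test(t, resource.TestCase{
    		Providers: testAccProviders,
    		Steps: []resource.TestStep{
    			{
    				Config: testAccThingConfig,
    				Check: resource.ComposeTestCheckFunc(
    					// Exact attribute match in the primary instance state.
    					resource.TestCheckResourceAttr(
    						"example_thing.foo", "name", "foo"),
    					// The same value must appear under both addresses.
    					resource.TestCheckResourceAttrPair(
    						"example_thing.foo", "id",
    						"data.example_thing.foo", "id"),
    					// The root-module output must equal the literal value.
    					resource.TestCheckOutput("thing_name", "foo"),
    				),
    			},
    		},
    	})
    }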
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_config.go
new file mode 100644 (file)
index 0000000..537a11c
--- /dev/null
@@ -0,0 +1,160 @@
+package resource
+
+import (
+       "fmt"
+       "log"
+       "strings"
+
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// testStepConfig runs a config-mode test step
+func testStepConfig(
+       opts terraform.ContextOpts,
+       state *terraform.State,
+       step TestStep) (*terraform.State, error) {
+       return testStep(opts, state, step)
+}
+
+func testStep(
+       opts terraform.ContextOpts,
+       state *terraform.State,
+       step TestStep) (*terraform.State, error) {
+       mod, err := testModule(opts, step)
+       if err != nil {
+               return state, err
+       }
+
+       // Build the context
+       opts.Module = mod
+       opts.State = state
+       opts.Destroy = step.Destroy
+       ctx, err := terraform.NewContext(&opts)
+       if err != nil {
+               return state, fmt.Errorf("Error initializing context: %s", err)
+       }
+       if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {
+               if len(es) > 0 {
+                       estrs := make([]string, len(es))
+                       for i, e := range es {
+                               estrs[i] = e.Error()
+                       }
+                       return state, fmt.Errorf(
+                               "Configuration is invalid.\n\nWarnings: %#v\n\nErrors: %#v",
+                               ws, estrs)
+               }
+               log.Printf("[WARN] Config warnings: %#v", ws)
+       }
+
+       // Refresh!
+       state, err = ctx.Refresh()
+       if err != nil {
+               return state, fmt.Errorf(
+                       "Error refreshing: %s", err)
+       }
+
+       // If this step is a PlanOnly step, skip over this first Plan and subsequent
+       // Apply, and use the follow-up Plan that checks for perpetual diffs
+       if !step.PlanOnly {
+               // Plan!
+               if p, err := ctx.Plan(); err != nil {
+                       return state, fmt.Errorf(
+                               "Error planning: %s", err)
+               } else {
+                       log.Printf("[WARN] Test: Step plan: %s", p)
+               }
+
+               // We need to keep a copy of the state prior to destroying
+               // such that destroy steps can verify their behaviour in the check
+               // function
+               stateBeforeApplication := state.DeepCopy()
+
+               // Apply!
+               state, err = ctx.Apply()
+               if err != nil {
+                       return state, fmt.Errorf("Error applying: %s", err)
+               }
+
+               // Check! Excitement!
+               if step.Check != nil {
+                       if step.Destroy {
+                               if err := step.Check(stateBeforeApplication); err != nil {
+                                       return state, fmt.Errorf("Check failed: %s", err)
+                               }
+                       } else {
+                               if err := step.Check(state); err != nil {
+                                       return state, fmt.Errorf("Check failed: %s", err)
+                               }
+                       }
+               }
+       }
+
+       // Now, verify that Plan is now empty and we don't have a perpetual diff issue
+       // We do this with TWO plans. One without a refresh.
+       var p *terraform.Plan
+       if p, err = ctx.Plan(); err != nil {
+               return state, fmt.Errorf("Error on follow-up plan: %s", err)
+       }
+       if p.Diff != nil && !p.Diff.Empty() {
+               if step.ExpectNonEmptyPlan {
+                       log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
+               } else {
+                       return state, fmt.Errorf(
+                               "After applying this step, the plan was not empty:\n\n%s", p)
+               }
+       }
+
+       // And another after a Refresh.
+       if !step.Destroy || (step.Destroy && !step.PreventPostDestroyRefresh) {
+               state, err = ctx.Refresh()
+               if err != nil {
+                       return state, fmt.Errorf(
+                               "Error on follow-up refresh: %s", err)
+               }
+       }
+       if p, err = ctx.Plan(); err != nil {
+               return state, fmt.Errorf("Error on second follow-up plan: %s", err)
+       }
+       empty := p.Diff == nil || p.Diff.Empty()
+
+       // Data resources are tricky because they legitimately get instantiated
+       // during refresh so that they will be already populated during the
+       // plan walk. Because of this, if we have any data resources in the
+       // config we'll end up wanting to destroy them again here. This is
+       // acceptable and expected, and we'll treat it as "empty" for the
+       // sake of this testing.
+       if step.Destroy {
+               empty = true
+
+               for _, moduleDiff := range p.Diff.Modules {
+                       for k, instanceDiff := range moduleDiff.Resources {
+                               if !strings.HasPrefix(k, "data.") {
+                                       empty = false
+                                       break
+                               }
+
+                               if !instanceDiff.Destroy {
+                                       empty = false
+                               }
+                       }
+               }
+       }
+
+       if !empty {
+               if step.ExpectNonEmptyPlan {
+                       log.Printf("[INFO] Got non-empty plan, as expected:\n\n%s", p)
+               } else {
+                       return state, fmt.Errorf(
+                               "After applying this step and refreshing, "+
+                                       "the plan was not empty:\n\n%s", p)
+               }
+       }
+
+       // Made it here, but expected a non-empty plan, fail!
+       if step.ExpectNonEmptyPlan && (p.Diff == nil || p.Diff.Empty()) {
+               return state, fmt.Errorf("Expected a non-empty plan, but got an empty plan!")
+       }
+
+       // Made it here? Good job test step!
+       return state, nil
+}
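
As a sketch of the two plan-related knobs this step runner honors (config fixture hypothetical): PlanOnly skips the apply and relies on the follow-up plans staying empty, while ExpectNonEmptyPlan tolerates a known perpetual diff.

    package example

    import "github.com/hashicorp/terraform/helper/resource"

    // planSteps returns one step that only plans (and must plan clean)
    // and one that is allowed to leave a non-empty plan behind.
    func planSteps(config string) []resource.TestStep {
    	return []resource.TestStep{
    		{Config: config, PlanOnly: true},
    		{Config: config, ExpectNonEmptyPlan: true},
    	}
    }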
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go b/vendor/github.com/hashicorp/terraform/helper/resource/testing_import_state.go
new file mode 100644 (file)
index 0000000..28ad105
--- /dev/null
@@ -0,0 +1,141 @@
+package resource
+
+import (
+       "fmt"
+       "log"
+       "reflect"
+       "strings"
+
+       "github.com/davecgh/go-spew/spew"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// testStepImportState runs an import state test step
+func testStepImportState(
+       opts terraform.ContextOpts,
+       state *terraform.State,
+       step TestStep) (*terraform.State, error) {
+       // Determine the ID to import
+       importId := step.ImportStateId
+       if importId == "" {
+               resource, err := testResource(step, state)
+               if err != nil {
+                       return state, err
+               }
+
+               importId = resource.Primary.ID
+       }
+       importPrefix := step.ImportStateIdPrefix
+       if importPrefix != "" {
+               importId = fmt.Sprintf("%s%s", importPrefix, importId)
+       }
+
+       // Set up the context. We initialize with an empty state. We use the
+       // full config for provider configurations.
+       mod, err := testModule(opts, step)
+       if err != nil {
+               return state, err
+       }
+
+       opts.Module = mod
+       opts.State = terraform.NewState()
+       ctx, err := terraform.NewContext(&opts)
+       if err != nil {
+               return state, err
+       }
+
+       // Do the import!
+       newState, err := ctx.Import(&terraform.ImportOpts{
+               // Set the module so that any provider config is loaded
+               Module: mod,
+
+               Targets: []*terraform.ImportTarget{
+                       &terraform.ImportTarget{
+                               Addr: step.ResourceName,
+                               ID:   importId,
+                       },
+               },
+       })
+       if err != nil {
+               log.Printf("[ERROR] Test: ImportState failure: %s", err)
+               return state, err
+       }
+
+       // Go through the new state and verify
+       if step.ImportStateCheck != nil {
+               var states []*terraform.InstanceState
+               for _, r := range newState.RootModule().Resources {
+                       if r.Primary != nil {
+                               states = append(states, r.Primary)
+                       }
+               }
+               if err := step.ImportStateCheck(states); err != nil {
+                       return state, err
+               }
+       }
+
+       // Verify that all the states match
+       if step.ImportStateVerify {
+               new := newState.RootModule().Resources
+               old := state.RootModule().Resources
+               for _, r := range new {
+                       // Find the existing resource
+                       var oldR *terraform.ResourceState
+                       for _, r2 := range old {
+                               if r2.Primary != nil && r2.Primary.ID == r.Primary.ID && r2.Type == r.Type {
+                                       oldR = r2
+                                       break
+                               }
+                       }
+                       if oldR == nil {
+                               return state, fmt.Errorf(
+                                       "Failed state verification, resource with ID %s not found",
+                                       r.Primary.ID)
+                       }
+
+                       // Compare their attributes
+                       actual := make(map[string]string)
+                       for k, v := range r.Primary.Attributes {
+                               actual[k] = v
+                       }
+                       expected := make(map[string]string)
+                       for k, v := range oldR.Primary.Attributes {
+                               expected[k] = v
+                       }
+
+                       // Remove fields we're ignoring
+                       for _, v := range step.ImportStateVerifyIgnore {
+                               for k := range actual {
+                                       if strings.HasPrefix(k, v) {
+                                               delete(actual, k)
+                                       }
+                               }
+                               for k := range expected {
+                                       if strings.HasPrefix(k, v) {
+                                               delete(expected, k)
+                                       }
+                               }
+                       }
+
+                       if !reflect.DeepEqual(actual, expected) {
+                               // Determine only the different attributes
+                               for k, v := range expected {
+                                       if av, ok := actual[k]; ok && v == av {
+                                               delete(expected, k)
+                                               delete(actual, k)
+                                       }
+                               }
+
+                               spewConf := spew.NewDefaultConfig()
+                               spewConf.SortKeys = true
+                               return state, fmt.Errorf(
+                                       "ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected."+
+                                               "\n\n%s\n\n%s",
+                                       spewConf.Sdump(actual), spewConf.Sdump(expected))
+                       }
+               }
+       }
+
+       // Return the old state (non-imported) so we don't change anything.
+       return state, nil
+}
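
For illustration, a hypothetical step exercising this import path; the resource address and the ignored field are assumptions:

    package example

    import "github.com/hashicorp/terraform/helper/resource"

    // importStep re-imports the named resource and verifies the imported
    // attributes match the prior state, ignoring write-only fields that
    // never round-trip through the API.
    func importStep(config string) resource.TestStep {
    	return resource.TestStep{
    		Config:                  config,
    		ResourceName:            "example_thing.foo",
    		ImportState:             true,
    		ImportStateVerify:       true,
    		ImportStateVerifyIgnore: []string{"password"},
    	}
    }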
diff --git a/vendor/github.com/hashicorp/terraform/helper/resource/wait.go b/vendor/github.com/hashicorp/terraform/helper/resource/wait.go
new file mode 100644 (file)
index 0000000..ca50e29
--- /dev/null
@@ -0,0 +1,84 @@
+package resource
+
+import (
+       "sync"
+       "time"
+)
+
+// Retry is a basic wrapper around StateChangeConf that will just retry
+// a function until it no longer returns an error.
+func Retry(timeout time.Duration, f RetryFunc) error {
+       // These are used to pull the error out of the function; need a mutex to
+       // avoid a data race.
+       var resultErr error
+       var resultErrMu sync.Mutex
+
+       c := &StateChangeConf{
+               Pending:    []string{"retryableerror"},
+               Target:     []string{"success"},
+               Timeout:    timeout,
+               MinTimeout: 500 * time.Millisecond,
+               Refresh: func() (interface{}, string, error) {
+                       rerr := f()
+
+                       resultErrMu.Lock()
+                       defer resultErrMu.Unlock()
+
+                       if rerr == nil {
+                               resultErr = nil
+                               return 42, "success", nil
+                       }
+
+                       resultErr = rerr.Err
+
+                       if rerr.Retryable {
+                               return 42, "retryableerror", nil
+                       }
+                       return nil, "quit", rerr.Err
+               },
+       }
+
+       _, waitErr := c.WaitForState()
+
+       // Need to acquire the lock here to be able to avoid race using resultErr as
+       // the return value
+       resultErrMu.Lock()
+       defer resultErrMu.Unlock()
+
+       // resultErr may be nil because the wait timed out and resultErr was never
+       // set; this is still an error
+       if resultErr == nil {
+               return waitErr
+       }
+       // resultErr takes precedence over waitErr if both are set because it is
+       // more likely to be useful
+       return resultErr
+}
+
+// RetryFunc is the function retried until it succeeds.
+type RetryFunc func() *RetryError
+
+// RetryError is the required return type of RetryFunc. It forces client code
+// to choose whether or not a given error is retryable.
+type RetryError struct {
+       Err       error
+       Retryable bool
+}
+
+// RetryableError is a helper to create a RetryError that's retryable from a
+// given error.
+func RetryableError(err error) *RetryError {
+       if err == nil {
+               return nil
+       }
+       return &RetryError{Err: err, Retryable: true}
+}
+
+// NonRetryableError is a helper to create a RetryError that's _not_ retryable
+// from a given error.
+func NonRetryableError(err error) *RetryError {
+       if err == nil {
+               return nil
+       }
+       return &RetryError{Err: err, Retryable: false}
+}
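
A minimal sketch of using Retry with the two helpers above; the state-fetching callback and the transient-error type are hypothetical:

    package example

    import (
    	"fmt"
    	"time"

    	"github.com/hashicorp/terraform/helper/resource"
    )

    // apiError is a hypothetical error type whose transient flag marks
    // failures worth retrying.
    type apiError struct {
    	transient bool
    	msg       string
    }

    func (e *apiError) Error() string { return e.msg }

    // waitReady polls the given state callback until it reports "ready",
    // retrying transient errors for up to two minutes.
    func waitReady(getState func() (string, error)) error {
    	return resource.Retry(2*time.Minute, func() *resource.RetryError {
    		state, err := getState()
    		if err != nil {
    			if ae, ok := err.(*apiError); ok && ae.transient {
    				return resource.RetryableError(err)
    			}
    			return resource.NonRetryableError(err)
    		}
    		if state != "ready" {
    			return resource.RetryableError(
    				fmt.Errorf("still %q, waiting for ready", state))
    		}
    		return nil
    	})
    }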
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/README.md b/vendor/github.com/hashicorp/terraform/helper/schema/README.md
new file mode 100644 (file)
index 0000000..28c8362
--- /dev/null
@@ -0,0 +1,11 @@
+# Terraform Helper Lib: schema
+
+The `schema` package provides a high-level interface for writing resource
+providers for Terraform.
+
+If you're writing a resource provider, we recommend you use this package.
+
+The interface exposed by this package is much friendlier than trying to
+write to the Terraform API directly. The core Terraform API is low-level
+and built for maximum flexibility and control, whereas this library is built
+as a framework around that to more easily write common providers.
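
For example, a minimal hypothetical resource built on this package looks like the following; the framework supplies diffing, validation, and state plumbing around the schema and CRUD functions:

    package example

    import "github.com/hashicorp/terraform/helper/schema"

    // resourceThing declares a hypothetical resource with one required,
    // immutable attribute; Create stores state, Delete clears it.
    func resourceThing() *schema.Resource {
    	return &schema.Resource{
    		Create: func(d *schema.ResourceData, meta interface{}) error {
    			d.SetId(d.Get("name").(string)) // hypothetical: name doubles as ID
    			return nil
    		},
    		Read:   func(d *schema.ResourceData, meta interface{}) error { return nil },
    		Delete: func(d *schema.ResourceData, meta interface{}) error { return nil },
    		Schema: map[string]*schema.Schema{
    			"name": {
    				Type:     schema.TypeString,
    				Required: true,
    				ForceNew: true,
    			},
    		},
    	}
    }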
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/backend.go b/vendor/github.com/hashicorp/terraform/helper/schema/backend.go
new file mode 100644 (file)
index 0000000..a0729c0
--- /dev/null
@@ -0,0 +1,94 @@
+package schema
+
+import (
+       "context"
+
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// Backend represents a partial backend.Backend implementation and simplifies
+// the creation of configuration loading and validation.
+//
+// Unlike other schema structs such as Provider, this struct is meant to be
+// embedded within your actual implementation. It provides implementations
+// only for Input and Configure and gives you a method for accessing the
+// configuration in the form of a ResourceData that you're expected to call
+// from the other implementation funcs.
+type Backend struct {
+       // Schema is the schema for the configuration of this backend. If this
+       // Backend has no configuration this can be omitted.
+       Schema map[string]*Schema
+
+       // ConfigureFunc is called to configure the backend. Use the
+       // FromContext* methods to extract information from the context.
+       // This can be nil, in which case nothing will be called but the
+       // config will still be stored.
+       ConfigureFunc func(context.Context) error
+
+       config *ResourceData
+}
+
+var (
+       backendConfigKey = contextKey("backend config")
+)
+
+// FromContextBackendConfig extracts a ResourceData with the configuration
+// from the context. This should only be called by Backend functions.
+func FromContextBackendConfig(ctx context.Context) *ResourceData {
+       return ctx.Value(backendConfigKey).(*ResourceData)
+}
+
+func (b *Backend) Input(
+       input terraform.UIInput,
+       c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+       if b == nil {
+               return c, nil
+       }
+
+       return schemaMap(b.Schema).Input(input, c)
+}
+
+func (b *Backend) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+       if b == nil {
+               return nil, nil
+       }
+
+       return schemaMap(b.Schema).Validate(c)
+}
+
+func (b *Backend) Configure(c *terraform.ResourceConfig) error {
+       if b == nil {
+               return nil
+       }
+
+       sm := schemaMap(b.Schema)
+
+       // Get a ResourceData for this configuration. To do this, we actually
+       // generate an intermediary "diff" although that is never exposed.
+       diff, err := sm.Diff(nil, c)
+       if err != nil {
+               return err
+       }
+
+       data, err := sm.Data(nil, diff)
+       if err != nil {
+               return err
+       }
+       b.config = data
+
+       if b.ConfigureFunc != nil {
+               err = b.ConfigureFunc(context.WithValue(
+                       context.Background(), backendConfigKey, data))
+               if err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+// Config returns the configuration. This is available after Configure is
+// called.
+func (b *Backend) Config() *ResourceData {
+       return b.config
+}
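
A sketch of the intended embedding, assuming a hypothetical backend with a single "address" setting:

    package example

    import (
    	"context"

    	"github.com/hashicorp/terraform/helper/schema"
    )

    // exampleBackend embeds schema.Backend so Input, Validate, and
    // Configure come for free; ConfigureFunc pulls the decoded config
    // back out of the context.
    type exampleBackend struct {
    	*schema.Backend

    	address string
    }

    func newExampleBackend() *exampleBackend {
    	b := &exampleBackend{}
    	b.Backend = &schema.Backend{
    		Schema: map[string]*schema.Schema{
    			"address": {
    				Type:     schema.TypeString,
    				Required: true,
    			},
    		},
    		ConfigureFunc: func(ctx context.Context) error {
    			data := schema.FromContextBackendConfig(ctx)
    			b.address = data.Get("address").(string)
    			return nil
    		},
    	}
    	return b
    }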
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go b/vendor/github.com/hashicorp/terraform/helper/schema/data_source_resource_shim.go
new file mode 100644 (file)
index 0000000..5a03d2d
--- /dev/null
@@ -0,0 +1,59 @@
+package schema
+
+import (
+       "fmt"
+)
+
+// DataSourceResourceShim takes a Resource instance describing a data source
+// (with a Read implementation and a Schema, at least) and returns a new
+// Resource instance with additional Create and Delete implementations that
+// allow the data source to be used as a resource.
+//
+// This is a backward-compatibility layer for data sources that were formerly
+// read-only resources before the data source concept was added. It should not
+// be used for any *new* data sources.
+//
+// The Read function for the data source *must* call d.SetId with a non-empty
+// id in order for this shim to function as expected.
+//
+// The provided Resource instance, and its schema, will be modified in-place
+// to make it suitable for use as a full resource.
+func DataSourceResourceShim(name string, dataSource *Resource) *Resource {
+       // Recursively, in-place adjust the schema so that it has ForceNew
+       // on any user-settable attribute.
+       dataSourceResourceShimAdjustSchema(dataSource.Schema)
+
+       dataSource.Create = CreateFunc(dataSource.Read)
+       dataSource.Delete = func(d *ResourceData, meta interface{}) error {
+               d.SetId("")
+               return nil
+       }
+       dataSource.Update = nil // should already be nil, but let's make sure
+
+       // FIXME: Link to some further docs either on the website or in the
+       // changelog, once such a thing exists.
+       dataSource.deprecationMessage = fmt.Sprintf(
+               "using %s as a resource is deprecated; consider using the data source instead",
+               name,
+       )
+
+       return dataSource
+}
+
+func dataSourceResourceShimAdjustSchema(schema map[string]*Schema) {
+       for _, s := range schema {
+               // If the attribute is configurable then it must be ForceNew,
+               // since we have no Update implementation.
+               if s.Required || s.Optional {
+                       s.ForceNew = true
+               }
+
+               // If the attribute is a nested resource, we need to recursively
+               // apply these same adjustments to it.
+               if s.Elem != nil {
+                       if r, ok := s.Elem.(*Resource); ok {
+                               dataSourceResourceShimAdjustSchema(r.Schema)
+                       }
+               }
+       }
+}
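
Usage is a one-liner in a provider's resource map; the data source constructor passed in here is hypothetical:

    package example

    import "github.com/hashicorp/terraform/helper/schema"

    // providerResources registers a hypothetical legacy read-only
    // resource as a shim over its data source, so old configurations
    // keep working behind a deprecation warning.
    func providerResources(dataSourceThing *schema.Resource) map[string]*schema.Resource {
    	return map[string]*schema.Resource{
    		"example_thing": schema.DataSourceResourceShim(
    			"example_thing", dataSourceThing),
    	}
    }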
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/equal.go b/vendor/github.com/hashicorp/terraform/helper/schema/equal.go
new file mode 100644 (file)
index 0000000..d5e20e0
--- /dev/null
@@ -0,0 +1,6 @@
+package schema
+
+// Equal is an interface that checks for deep equality between two objects.
+type Equal interface {
+       Equal(interface{}) bool
+}
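
A hypothetical value type satisfying this interface, so container logic can compare values by content:

    package example

    // endpoint is a hypothetical value type implementing Equal.
    type endpoint struct {
    	Host string
    	Port int
    }

    func (e endpoint) Equal(other interface{}) bool {
    	o, ok := other.(endpoint)
    	return ok && e == o
    }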
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader.go
new file mode 100644 (file)
index 0000000..1660a67
--- /dev/null
@@ -0,0 +1,334 @@
+package schema
+
+import (
+       "fmt"
+       "strconv"
+)
+
+// FieldReaders are responsible for decoding fields out of data into
+// the proper typed representation. ResourceData uses this to query data
+// out of multiple sources: config, state, diffs, etc.
+type FieldReader interface {
+       ReadField([]string) (FieldReadResult, error)
+}
+
+// FieldReadResult encapsulates all the resulting data from reading
+// a field.
+type FieldReadResult struct {
+       // Value is the actual read value. ValueProcessed, when set, holds
+       // the raw value before any post-processing (for example, when a
+       // diff's NewExtra data replaces the decoded value).
+       Value          interface{}
+       ValueProcessed interface{}
+
+       // Exists is true if the field was found in the data. False means
+       // it wasn't found if there was no error.
+       Exists bool
+
+       // Computed is true if the field was found but the value
+       // is computed.
+       Computed bool
+}
+
+// ValueOrZero returns the value of this result or the zero value of the
+// schema type, ensuring a consistent non-nil return value.
+func (r *FieldReadResult) ValueOrZero(s *Schema) interface{} {
+       if r.Value != nil {
+               return r.Value
+       }
+
+       return s.ZeroValue()
+}
+
+// addrToSchema finds the final element schema for the given address
+// and the given schema. It returns all the schemas that led to the final
+// schema. These are in order of the address (out to in).
+func addrToSchema(addr []string, schemaMap map[string]*Schema) []*Schema {
+       current := &Schema{
+               Type: typeObject,
+               Elem: schemaMap,
+       }
+
+       // If we aren't given an address, then the user is requesting the
+       // full object, so we return the special value which is the full object.
+       if len(addr) == 0 {
+               return []*Schema{current}
+       }
+
+       result := make([]*Schema, 0, len(addr))
+       for len(addr) > 0 {
+               k := addr[0]
+               addr = addr[1:]
+
+       REPEAT:
+               // We want to trim off the first "typeObject" since it's not a
+               // real lookup that people do. i.e. []string{"foo"} in a structure
+               // isn't {typeObject, typeString}, it's just a {typeString}.
+               if len(result) > 0 || current.Type != typeObject {
+                       result = append(result, current)
+               }
+
+               switch t := current.Type; t {
+               case TypeBool, TypeInt, TypeFloat, TypeString:
+                       if len(addr) > 0 {
+                               return nil
+                       }
+               case TypeList, TypeSet:
+                       isIndex := len(addr) > 0 && addr[0] == "#"
+
+                       switch v := current.Elem.(type) {
+                       case *Resource:
+                               current = &Schema{
+                                       Type: typeObject,
+                                       Elem: v.Schema,
+                               }
+                       case *Schema:
+                               current = v
+                       case ValueType:
+                               current = &Schema{Type: v}
+                       default:
+                               // we may not know the Elem type and are just looking for the
+                               // index
+                               if isIndex {
+                                       break
+                               }
+
+                               if len(addr) == 0 {
+                                       // we've processed the address, so return what we've
+                                       // collected
+                                       return result
+                               }
+
+                               if len(addr) == 1 {
+                                       if _, err := strconv.Atoi(addr[0]); err == nil {
+                                               // we're indexing a value without a schema. This can
+                                               // happen if the list is nested in another schema type.
+                                               // Default to a TypeString like we do with a map
+                                               current = &Schema{Type: TypeString}
+                                               break
+                                       }
+                               }
+
+                               return nil
+                       }
+
+                       // If we only have one more thing and the next thing
+                       // is a #, then we're accessing the index which is always
+                       // an int.
+                       if isIndex {
+                               current = &Schema{Type: TypeInt}
+                               break
+                       }
+
+               case TypeMap:
+                       if len(addr) > 0 {
+                               switch v := current.Elem.(type) {
+                               case ValueType:
+                                       current = &Schema{Type: v}
+                               default:
+                                       // maps default to string values. This is all we can have
+                                       // if this is nested in another list or map.
+                                       current = &Schema{Type: TypeString}
+                               }
+                       }
+               case typeObject:
+                       // If we're already in the object, then we want to handle Sets
+                       // and Lists specially. Basically, their next key is the lookup
+                       // key (the set value or the list element). For these scenarios,
+                       // we just want to skip it and move to the next element if there
+                       // is one.
+                       if len(result) > 0 {
+                               lastType := result[len(result)-2].Type
+                               if lastType == TypeSet || lastType == TypeList {
+                                       if len(addr) == 0 {
+                                               break
+                                       }
+
+                                       k = addr[0]
+                                       addr = addr[1:]
+                               }
+                       }
+
+                       m := current.Elem.(map[string]*Schema)
+                       val, ok := m[k]
+                       if !ok {
+                               return nil
+                       }
+
+                       current = val
+                       goto REPEAT
+               }
+       }
+
+       return result
+}
+
+// readListField is a generic method for reading a list field out of a
+// FieldReader. It does this based on the assumption that there is a key
+// "foo.#" for a list "foo" and that the indexes are "foo.0", "foo.1", etc.
+// after that point.
+func readListField(
+       r FieldReader, addr []string, schema *Schema) (FieldReadResult, error) {
+       addrPadded := make([]string, len(addr)+1)
+       copy(addrPadded, addr)
+       addrPadded[len(addrPadded)-1] = "#"
+
+       // Get the number of elements in the list
+       countResult, err := r.ReadField(addrPadded)
+       if err != nil {
+               return FieldReadResult{}, err
+       }
+       if !countResult.Exists {
+               // No count, means we have no list
+               countResult.Value = 0
+       }
+
+       // If we have an empty list, then return an empty list
+       if countResult.Computed || countResult.Value.(int) == 0 {
+               return FieldReadResult{
+                       Value:    []interface{}{},
+                       Exists:   countResult.Exists,
+                       Computed: countResult.Computed,
+               }, nil
+       }
+
+       // Go through each count, and get the item value out of it
+       result := make([]interface{}, countResult.Value.(int))
+       for i := range result {
+               is := strconv.FormatInt(int64(i), 10)
+               addrPadded[len(addrPadded)-1] = is
+               rawResult, err := r.ReadField(addrPadded)
+               if err != nil {
+                       return FieldReadResult{}, err
+               }
+               if !rawResult.Exists {
+                       // This should never happen, because by the time the data
+                       // gets to the FieldReaders, all the defaults should be set by
+                       // Schema.
+                       rawResult.Value = nil
+               }
+
+               result[i] = rawResult.Value
+       }
+
+       return FieldReadResult{
+               Value:  result,
+               Exists: true,
+       }, nil
+}
+
+// readObjectField is a generic method for reading objects out of FieldReaders
+// based on the assumption that building an address of []string{k, FIELD}
+// will result in the proper field data.
+func readObjectField(
+       r FieldReader,
+       addr []string,
+       schema map[string]*Schema) (FieldReadResult, error) {
+       result := make(map[string]interface{})
+       exists := false
+       for field, s := range schema {
+               addrRead := make([]string, len(addr), len(addr)+1)
+               copy(addrRead, addr)
+               addrRead = append(addrRead, field)
+               rawResult, err := r.ReadField(addrRead)
+               if err != nil {
+                       return FieldReadResult{}, err
+               }
+               if rawResult.Exists {
+                       exists = true
+               }
+
+               result[field] = rawResult.ValueOrZero(s)
+       }
+
+       return FieldReadResult{
+               Value:  result,
+               Exists: exists,
+       }, nil
+}
+
+// convert map values to the proper primitive type based on schema.Elem
+func mapValuesToPrimitive(m map[string]interface{}, schema *Schema) error {
+
+       elemType := TypeString
+       if et, ok := schema.Elem.(ValueType); ok {
+               elemType = et
+       }
+
+       switch elemType {
+       case TypeInt, TypeFloat, TypeBool:
+               for k, v := range m {
+                       vs, ok := v.(string)
+                       if !ok {
+                               continue
+                       }
+
+                       v, err := stringToPrimitive(vs, false, &Schema{Type: elemType})
+                       if err != nil {
+                               return err
+                       }
+
+                       m[k] = v
+               }
+       }
+       return nil
+}
+
+func stringToPrimitive(
+       value string, computed bool, schema *Schema) (interface{}, error) {
+       var returnVal interface{}
+       switch schema.Type {
+       case TypeBool:
+               if value == "" {
+                       returnVal = false
+                       break
+               }
+               if computed {
+                       break
+               }
+
+               v, err := strconv.ParseBool(value)
+               if err != nil {
+                       return nil, err
+               }
+
+               returnVal = v
+       case TypeFloat:
+               if value == "" {
+                       returnVal = 0.0
+                       break
+               }
+               if computed {
+                       break
+               }
+
+               v, err := strconv.ParseFloat(value, 64)
+               if err != nil {
+                       return nil, err
+               }
+
+               returnVal = v
+       case TypeInt:
+               if value == "" {
+                       returnVal = 0
+                       break
+               }
+               if computed {
+                       break
+               }
+
+               v, err := strconv.ParseInt(value, 0, 0)
+               if err != nil {
+                       return nil, err
+               }
+
+               returnVal = int(v)
+       case TypeString:
+               returnVal = value
+       default:
+               panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+       }
+
+       return returnVal, nil
+}
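
A sketch of the address convention these helpers rely on, assuming the BasicMapReader helper from field_reader_map.go later in this commit: a list "ports" is flattened to a "ports.#" count plus one key per index, and elements are decoded per the Elem schema.

    package example

    import "github.com/hashicorp/terraform/helper/schema"

    // readPorts reads a flattened TypeInt list back out of plain
    // map[string]string data.
    func readPorts() ([]interface{}, error) {
    	r := &schema.MapFieldReader{
    		Schema: map[string]*schema.Schema{
    			"ports": {
    				Type: schema.TypeList,
    				Elem: &schema.Schema{Type: schema.TypeInt},
    			},
    		},
    		Map: schema.BasicMapReader(map[string]string{
    			"ports.#": "2",
    			"ports.0": "80",
    			"ports.1": "443",
    		}),
    	}

    	res, err := r.ReadField([]string{"ports"})
    	if err != nil {
    		return nil, err
    	}
    	return res.Value.([]interface{}), nil // []interface{}{80, 443}
    }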
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_config.go
new file mode 100644 (file)
index 0000000..f958bbc
--- /dev/null
@@ -0,0 +1,333 @@
+package schema
+
+import (
+       "fmt"
+       "strconv"
+       "strings"
+       "sync"
+
+       "github.com/hashicorp/terraform/terraform"
+       "github.com/mitchellh/mapstructure"
+)
+
+// ConfigFieldReader reads fields out of a terraform.ResourceConfig to the
+// best of its ability. It also applies defaults from the Schema. (The other
+// field readers do not need default handling because they source fully
+// populated data structures.)
+type ConfigFieldReader struct {
+       Config *terraform.ResourceConfig
+       Schema map[string]*Schema
+
+       indexMaps map[string]map[string]int
+       once      sync.Once
+}
+
+func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) {
+       r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) })
+       return r.readField(address, false)
+}
+
+func (r *ConfigFieldReader) readField(
+       address []string, nested bool) (FieldReadResult, error) {
+       schemaList := addrToSchema(address, r.Schema)
+       if len(schemaList) == 0 {
+               return FieldReadResult{}, nil
+       }
+
+       if !nested {
+               // If we have a set anywhere in the address, then we need to
+               // read that set out in order and actually replace that part of
+               // the address with the real list index. i.e. set.50 might actually
+               // map to set.12 in the config, since it is in list order in the
+               // config, not indexed by set value.
+               for i, v := range schemaList {
+                       // Sets are the only thing that causes this issue.
+                       if v.Type != TypeSet {
+                               continue
+                       }
+
+                       // If we're at the end of the list, then we don't have to worry
+                       // about this because we're just requesting the whole set.
+                       if i == len(schemaList)-1 {
+                               continue
+                       }
+
+                       // If we're looking for the count, then ignore...
+                       if address[i+1] == "#" {
+                               continue
+                       }
+
+                       indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")]
+                       if !ok {
+                               // Get the set so we can get the index map that tells us the
+                               // mapping of the hash code to the list index
+                               _, err := r.readSet(address[:i+1], v)
+                               if err != nil {
+                                       return FieldReadResult{}, err
+                               }
+                               indexMap = r.indexMaps[strings.Join(address[:i+1], ".")]
+                       }
+
+                       index, ok := indexMap[address[i+1]]
+                       if !ok {
+                               return FieldReadResult{}, nil
+                       }
+
+                       address[i+1] = strconv.FormatInt(int64(index), 10)
+               }
+       }
+
+       k := strings.Join(address, ".")
+       schema := schemaList[len(schemaList)-1]
+
+       // If we're getting the single element of a promoted list, then
+       // check to see if we have a single element we need to promote.
+       if address[len(address)-1] == "0" && len(schemaList) > 1 {
+               lastSchema := schemaList[len(schemaList)-2]
+               if lastSchema.Type == TypeList && lastSchema.PromoteSingle {
+                       k := strings.Join(address[:len(address)-1], ".")
+                       result, err := r.readPrimitive(k, schema)
+                       if err == nil {
+                               return result, nil
+                       }
+               }
+       }
+
+       switch schema.Type {
+       case TypeBool, TypeFloat, TypeInt, TypeString:
+               return r.readPrimitive(k, schema)
+       case TypeList:
+               // If we support promotion then we first check if we have a lone
+               // value that we must promote.
+               if schema.PromoteSingle {
+                       result, err := r.readPrimitive(k, schema.Elem.(*Schema))
+                       if err == nil && result.Exists {
+                               result.Value = []interface{}{result.Value}
+                               return result, nil
+                       }
+               }
+
+               return readListField(&nestedConfigFieldReader{r}, address, schema)
+       case TypeMap:
+               return r.readMap(k, schema)
+       case TypeSet:
+               return r.readSet(address, schema)
+       case typeObject:
+               return readObjectField(
+                       &nestedConfigFieldReader{r},
+                       address, schema.Elem.(map[string]*Schema))
+       default:
+               panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+       }
+}
+
+func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
+       // We want both the raw value and the interpolated. We use the interpolated
+       // to store actual values and we use the raw one to check for
+       // computed keys. Actual values are obtained in the switch, depending on
+       // the type of the raw value.
+       mraw, ok := r.Config.GetRaw(k)
+       if !ok {
+               // check if this is from an interpolated field by seeing if it exists
+               // in the config
+               _, ok := r.Config.Get(k)
+               if !ok {
+                       // this really doesn't exist
+                       return FieldReadResult{}, nil
+               }
+
+               // We couldn't fetch the value from a nested data structure, so treat the
+               // raw value as an interpolation string. The mraw value is only used
+               // for the type switch below.
+               mraw = "${INTERPOLATED}"
+       }
+
+       result := make(map[string]interface{})
+       computed := false
+       switch m := mraw.(type) {
+       case string:
+               // This is a map which has come out of an interpolated variable, so we
+               // can just get the value directly from config. Values cannot be computed
+               // currently.
+               v, _ := r.Config.Get(k)
+
+               // If this isn't a map[string]interface{}, it must be computed.
+               mapV, ok := v.(map[string]interface{})
+               if !ok {
+                       return FieldReadResult{
+                               Exists:   true,
+                               Computed: true,
+                       }, nil
+               }
+
+               // Otherwise we can proceed as usual.
+               for i, iv := range mapV {
+                       result[i] = iv
+               }
+       case []interface{}:
+               for i, innerRaw := range m {
+                       for ik := range innerRaw.(map[string]interface{}) {
+                               key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+                               if r.Config.IsComputed(key) {
+                                       computed = true
+                                       break
+                               }
+
+                               v, _ := r.Config.Get(key)
+                               result[ik] = v
+                       }
+               }
+       case []map[string]interface{}:
+               for i, innerRaw := range m {
+                       for ik := range innerRaw {
+                               key := fmt.Sprintf("%s.%d.%s", k, i, ik)
+                               if r.Config.IsComputed(key) {
+                                       computed = true
+                                       break
+                               }
+
+                               v, _ := r.Config.Get(key)
+                               result[ik] = v
+                       }
+               }
+       case map[string]interface{}:
+               for ik := range m {
+                       key := fmt.Sprintf("%s.%s", k, ik)
+                       if r.Config.IsComputed(key) {
+                               computed = true
+                               break
+                       }
+
+                       v, _ := r.Config.Get(key)
+                       result[ik] = v
+               }
+       default:
+               panic(fmt.Sprintf("unknown type: %#v", mraw))
+       }
+
+       err := mapValuesToPrimitive(result, schema)
+       if err != nil {
+               return FieldReadResult{}, nil
+       }
+
+       var value interface{}
+       if !computed {
+               value = result
+       }
+
+       return FieldReadResult{
+               Value:    value,
+               Exists:   true,
+               Computed: computed,
+       }, nil
+}
+
+func (r *ConfigFieldReader) readPrimitive(
+       k string, schema *Schema) (FieldReadResult, error) {
+       raw, ok := r.Config.Get(k)
+       if !ok {
+               // Nothing in config, but we might still have a default from the schema
+               var err error
+               raw, err = schema.DefaultValue()
+               if err != nil {
+                       return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err)
+               }
+
+               if raw == nil {
+                       return FieldReadResult{}, nil
+               }
+       }
+
+       var result string
+       if err := mapstructure.WeakDecode(raw, &result); err != nil {
+               return FieldReadResult{}, err
+       }
+
+       computed := r.Config.IsComputed(k)
+       returnVal, err := stringToPrimitive(result, computed, schema)
+       if err != nil {
+               return FieldReadResult{}, err
+       }
+
+       return FieldReadResult{
+               Value:    returnVal,
+               Exists:   true,
+               Computed: computed,
+       }, nil
+}
+
+func (r *ConfigFieldReader) readSet(
+       address []string, schema *Schema) (FieldReadResult, error) {
+       indexMap := make(map[string]int)
+       // Create the set that will be our result
+       set := schema.ZeroValue().(*Set)
+
+       raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
+       if err != nil {
+               return FieldReadResult{}, err
+       }
+       if !raw.Exists {
+               return FieldReadResult{Value: set}, nil
+       }
+
+       // If the list is computed, the set is necessarily computed
+       if raw.Computed {
+               return FieldReadResult{
+                       Value:    set,
+                       Exists:   true,
+                       Computed: raw.Computed,
+               }, nil
+       }
+
+       // Build up the set from the list elements
+       for i, v := range raw.Value.([]interface{}) {
+               // Check if any of the keys in this item are computed
+               computed := r.hasComputedSubKeys(
+                       fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)
+
+               code := set.add(v, computed)
+               indexMap[code] = i
+       }
+
+       r.indexMaps[strings.Join(address, ".")] = indexMap
+
+       return FieldReadResult{
+               Value:  set,
+               Exists: true,
+       }, nil
+}
+
+// hasComputedSubKeys walks through a schema and returns whether or not the
+// given key contains any subkeys that are computed.
+func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool {
+       prefix := key + "."
+
+       switch t := schema.Elem.(type) {
+       case *Resource:
+               for k, schema := range t.Schema {
+                       if r.Config.IsComputed(prefix + k) {
+                               return true
+                       }
+
+                       if r.hasComputedSubKeys(prefix+k, schema) {
+                               return true
+                       }
+               }
+       }
+
+       return false
+}
+
+// nestedConfigFieldReader is a funny little thing that just wraps a
+// ConfigFieldReader to call readField when ReadField is called so that
+// we don't recalculate the set rewrites in the address, which leads to
+// an infinite loop.
+type nestedConfigFieldReader struct {
+       Reader *ConfigFieldReader
+}
+
+func (r *nestedConfigFieldReader) ReadField(
+       address []string) (FieldReadResult, error) {
+       return r.Reader.readField(address, true)
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_diff.go
new file mode 100644 (file)
index 0000000..16bbae2
--- /dev/null
@@ -0,0 +1,208 @@
+package schema
+
+import (
+       "fmt"
+       "strings"
+
+       "github.com/hashicorp/terraform/terraform"
+       "github.com/mitchellh/mapstructure"
+)
+
+// DiffFieldReader reads fields out of a diff structure.
+//
+// It also requires access to a Reader that reads fields from the structure
+// that the diff was derived from. This is usually the state. This is required
+// because a diff on its own doesn't have complete data about full objects
+// such as maps.
+//
+// The Source MUST be the data that the diff was derived from. If it isn't,
+// the behavior of this struct is undefined.
+//
+// Reading fields from a DiffFieldReader is identical to reading from
+// Source except the diff will be applied to the end result.
+//
+// The "Exists" field on the result will be set to true if the complete
+// field exists, whether it's from the source, the diff, or a combination of both.
+// It cannot be determined whether a retrieved value is composed of
+// diff elements.
+type DiffFieldReader struct {
+       Diff   *terraform.InstanceDiff
+       Source FieldReader
+       Schema map[string]*Schema
+}
+
+func (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {
+       schemaList := addrToSchema(address, r.Schema)
+       if len(schemaList) == 0 {
+               return FieldReadResult{}, nil
+       }
+
+       schema := schemaList[len(schemaList)-1]
+       switch schema.Type {
+       case TypeBool, TypeInt, TypeFloat, TypeString:
+               return r.readPrimitive(address, schema)
+       case TypeList:
+               return readListField(r, address, schema)
+       case TypeMap:
+               return r.readMap(address, schema)
+       case TypeSet:
+               return r.readSet(address, schema)
+       case typeObject:
+               return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+       default:
+               panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+       }
+}
+
+func (r *DiffFieldReader) readMap(
+       address []string, schema *Schema) (FieldReadResult, error) {
+       result := make(map[string]interface{})
+       resultSet := false
+
+       // First read the map from the underlying source
+       source, err := r.Source.ReadField(address)
+       if err != nil {
+               return FieldReadResult{}, err
+       }
+       if source.Exists {
+               result = source.Value.(map[string]interface{})
+               resultSet = true
+       }
+
+       // Next, read all the elements we have in our diff, and apply
+       // the diff to our result.
+       prefix := strings.Join(address, ".") + "."
+       for k, v := range r.Diff.Attributes {
+               if !strings.HasPrefix(k, prefix) {
+                       continue
+               }
+               if strings.HasPrefix(k, prefix+"%") {
+                       // Ignore the count field
+                       continue
+               }
+
+               resultSet = true
+
+               k = k[len(prefix):]
+               if v.NewRemoved {
+                       delete(result, k)
+                       continue
+               }
+
+               result[k] = v.New
+       }
+
+       err = mapValuesToPrimitive(result, schema)
+       if err != nil {
+               return FieldReadResult{}, nil
+       }
+
+       var resultVal interface{}
+       if resultSet {
+               resultVal = result
+       }
+
+       return FieldReadResult{
+               Value:  resultVal,
+               Exists: resultSet,
+       }, nil
+}
+
+func (r *DiffFieldReader) readPrimitive(
+       address []string, schema *Schema) (FieldReadResult, error) {
+       result, err := r.Source.ReadField(address)
+       if err != nil {
+               return FieldReadResult{}, err
+       }
+
+       attrD, ok := r.Diff.Attributes[strings.Join(address, ".")]
+       if !ok {
+               return result, nil
+       }
+
+       var resultVal string
+       if !attrD.NewComputed {
+               resultVal = attrD.New
+               if attrD.NewExtra != nil {
+                       result.ValueProcessed = resultVal
+                       if err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil {
+                               return FieldReadResult{}, err
+                       }
+               }
+       }
+
+       result.Computed = attrD.NewComputed
+       result.Exists = true
+       result.Value, err = stringToPrimitive(resultVal, false, schema)
+       if err != nil {
+               return FieldReadResult{}, err
+       }
+
+       return result, nil
+}
+
+func (r *DiffFieldReader) readSet(
+       address []string, schema *Schema) (FieldReadResult, error) {
+       prefix := strings.Join(address, ".") + "."
+
+       // Create the set that will be our result
+       set := schema.ZeroValue().(*Set)
+
+       // Go through the map and find all the set items
+       for k, d := range r.Diff.Attributes {
+               if d.NewRemoved {
+                       // If the field is removed, we always ignore it
+                       continue
+               }
+               if !strings.HasPrefix(k, prefix) {
+                       continue
+               }
+               if strings.HasSuffix(k, "#") {
+                       // Ignore any count field
+                       continue
+               }
+
+               // Split the key, since it might be a sub-object like "idx.field"
+               parts := strings.Split(k[len(prefix):], ".")
+               idx := parts[0]
+
+               raw, err := r.ReadField(append(address, idx))
+               if err != nil {
+                       return FieldReadResult{}, err
+               }
+               if !raw.Exists {
+                       // This shouldn't happen because we just verified it does exist
+                       panic("missing field in set: " + k + "." + idx)
+               }
+
+               set.Add(raw.Value)
+       }
+
+       // Determine if the set "exists". It exists if there are items or if
+       // the diff explicitly wanted it empty.
+       exists := set.Len() > 0
+       if !exists {
+               // We could check if the diff value is "0" here but I think the
+               // existence of "#" on its own is enough to show it existed. This
+               // protects us in the future from the zero value changing from
+               // "0" to "" breaking us (if that were to happen).
+               if _, ok := r.Diff.Attributes[prefix+"#"]; ok {
+                       exists = true
+               }
+       }
+
+       if !exists {
+               result, err := r.Source.ReadField(address)
+               if err != nil {
+                       return FieldReadResult{}, err
+               }
+               if result.Exists {
+                       return result, nil
+               }
+       }
+
+       return FieldReadResult{
+               Value:  set,
+               Exists: exists,
+       }, nil
+}
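
A sketch of the intended layering, assuming flattened state in a plain map: the DiffFieldReader's Source reads the data the diff was derived from, and reads through the DiffFieldReader see that data with the diff applied on top.

    package example

    import (
    	"github.com/hashicorp/terraform/helper/schema"
    	"github.com/hashicorp/terraform/terraform"
    )

    // diffReader layers a DiffFieldReader over the flattened state the
    // diff was computed from.
    func diffReader(
    	state map[string]string,
    	diff *terraform.InstanceDiff,
    	sm map[string]*schema.Schema) schema.FieldReader {
    	return &schema.DiffFieldReader{
    		Diff:   diff,
    		Schema: sm,
    		Source: &schema.MapFieldReader{
    			Map:    schema.BasicMapReader(state),
    			Schema: sm,
    		},
    	}
    }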
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_map.go
new file mode 100644 (file)
index 0000000..9533981
--- /dev/null
@@ -0,0 +1,232 @@
+package schema
+
+import (
+       "fmt"
+       "strings"
+)
+
+// MapFieldReader reads fields out of an untyped map[string]string to
+// the best of its ability.
+type MapFieldReader struct {
+       Map    MapReader
+       Schema map[string]*Schema
+}
+
+func (r *MapFieldReader) ReadField(address []string) (FieldReadResult, error) {
+       k := strings.Join(address, ".")
+       schemaList := addrToSchema(address, r.Schema)
+       if len(schemaList) == 0 {
+               return FieldReadResult{}, nil
+       }
+
+       schema := schemaList[len(schemaList)-1]
+       switch schema.Type {
+       case TypeBool, TypeInt, TypeFloat, TypeString:
+               return r.readPrimitive(address, schema)
+       case TypeList:
+               return readListField(r, address, schema)
+       case TypeMap:
+               return r.readMap(k, schema)
+       case TypeSet:
+               return r.readSet(address, schema)
+       case typeObject:
+               return readObjectField(r, address, schema.Elem.(map[string]*Schema))
+       default:
+               panic(fmt.Sprintf("Unknown type: %s", schema.Type))
+       }
+}
+
+func (r *MapFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
+       result := make(map[string]interface{})
+       resultSet := false
+
+       // If the name of the map field is directly in the map with an
+       // empty string, it means that the map is being deleted, so mark
+       // that it is set.
+       if v, ok := r.Map.Access(k); ok && v == "" {
+               resultSet = true
+       }
+
+       prefix := k + "."
+       r.Map.Range(func(k, v string) bool {
+               if strings.HasPrefix(k, prefix) {
+                       resultSet = true
+
+                       key := k[len(prefix):]
+                       if key != "%" && key != "#" {
+                               result[key] = v
+                       }
+               }
+
+               return true
+       })
+
+       err := mapValuesToPrimitive(result, schema)
+       if err != nil {
+               return FieldReadResult{}, err
+       }
+
+       var resultVal interface{}
+       if resultSet {
+               resultVal = result
+       }
+
+       return FieldReadResult{
+               Value:  resultVal,
+               Exists: resultSet,
+       }, nil
+}
+
+func (r *MapFieldReader) readPrimitive(
+       address []string, schema *Schema) (FieldReadResult, error) {
+       k := strings.Join(address, ".")
+       result, ok := r.Map.Access(k)
+       if !ok {
+               return FieldReadResult{}, nil
+       }
+
+       returnVal, err := stringToPrimitive(result, false, schema)
+       if err != nil {
+               return FieldReadResult{}, err
+       }
+
+       return FieldReadResult{
+               Value:  returnVal,
+               Exists: true,
+       }, nil
+}
+
+func (r *MapFieldReader) readSet(
+       address []string, schema *Schema) (FieldReadResult, error) {
+       // Get the number of elements in the list
+       countRaw, err := r.readPrimitive(
+               append(address, "#"), &Schema{Type: TypeInt})
+       if err != nil {
+               return FieldReadResult{}, err
+       }
+       if !countRaw.Exists {
+               // No count means we have no list
+               countRaw.Value = 0
+       }
+
+       // Create the set that will be our result
+       set := schema.ZeroValue().(*Set)
+
+       // If we have an empty list, then return an empty list
+       if countRaw.Computed || countRaw.Value.(int) == 0 {
+               return FieldReadResult{
+                       Value:    set,
+                       Exists:   countRaw.Exists,
+                       Computed: countRaw.Computed,
+               }, nil
+       }
+
+       // Go through the map and find all the set items
+       prefix := strings.Join(address, ".") + "."
+       countExpected := countRaw.Value.(int)
+       countActual := make(map[string]struct{})
+       completed := r.Map.Range(func(k, _ string) bool {
+               if !strings.HasPrefix(k, prefix) {
+                       return true
+               }
+               if strings.HasPrefix(k, prefix+"#") {
+                       // Ignore the count field
+                       return true
+               }
+
+               // Split the key, since it might be a sub-object like "idx.field"
+               parts := strings.Split(k[len(prefix):], ".")
+               idx := parts[0]
+
+               var raw FieldReadResult
+               raw, err = r.ReadField(append(address, idx))
+               if err != nil {
+                       return false
+               }
+               if !raw.Exists {
+                       // This shouldn't happen because we just verified it does exist
+                       panic("missing field in set: " + k + "." + idx)
+               }
+
+               set.Add(raw.Value)
+
+               // Due to the way multimap readers work, if we've seen the number
+               // of fields we expect, then exit so that we don't read later values.
+               // For example: the "set" map might have "ports.#", "ports.0", and
+               // "ports.1", but the "state" map might have those plus "ports.2".
+               // We don't want "ports.2"
+               countActual[idx] = struct{}{}
+               if len(countActual) >= countExpected {
+                       return false
+               }
+
+               return true
+       })
+       if !completed && err != nil {
+               return FieldReadResult{}, err
+       }
+
+       return FieldReadResult{
+               Value:  set,
+               Exists: true,
+       }, nil
+}
+
+// MapReader is an interface that is given to MapFieldReader for accessing
+// a "map". This can be used to have alternate implementations. For a basic
+// map[string]string, use BasicMapReader.
+type MapReader interface {
+       Access(string) (string, bool)
+       Range(func(string, string) bool) bool
+}
+
+// BasicMapReader implements MapReader for a single map.
+type BasicMapReader map[string]string
+
+func (r BasicMapReader) Access(k string) (string, bool) {
+       v, ok := r[k]
+       return v, ok
+}
+
+func (r BasicMapReader) Range(f func(string, string) bool) bool {
+       for k, v := range r {
+               if cont := f(k, v); !cont {
+                       return false
+               }
+       }
+
+       return true
+}
+
+// MultiMapReader reads over multiple maps, preferring keys that are
+// found earlier (lower index) over those found later (higher index).
+type MultiMapReader []map[string]string
+
+func (r MultiMapReader) Access(k string) (string, bool) {
+       for _, m := range r {
+               if v, ok := m[k]; ok {
+                       return v, ok
+               }
+       }
+
+       return "", false
+}
+
+func (r MultiMapReader) Range(f func(string, string) bool) bool {
+       done := make(map[string]struct{})
+       for _, m := range r {
+               for k, v := range m {
+                       if _, ok := done[k]; ok {
+                               continue
+                       }
+
+                       if cont := f(k, v); !cont {
+                               return false
+                       }
+
+                       done[k] = struct{}{}
+               }
+       }
+
+       return true
+}
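The two MapReader implementations differ only in lookup order. A small sketch of the precedence rule, with illustrative field names: when several maps carry the same key, the earliest map wins for both Access and Range.

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
)

func main() {
        s := map[string]*schema.Schema{
                "name": {Type: schema.TypeString},
        }
        r := &schema.MapFieldReader{
                Schema: s,
                Map: schema.MultiMapReader{
                        {"name": "from-diff"},  // earlier map: preferred
                        {"name": "from-state"}, // later map: shadowed
                },
        }
        result, err := r.ReadField([]string{"name"})
        if err != nil {
                panic(err)
        }
        fmt.Println(result.Value) // from-diff
}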
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_reader_multi.go
new file mode 100644 (file)
index 0000000..89ad3a8
--- /dev/null
@@ -0,0 +1,63 @@
+package schema
+
+import (
+       "fmt"
+)
+
+// MultiLevelFieldReader reads from other field readers,
+// merging their results along the way in a specific order. You can specify
+// "levels" and name them in order to read only an exact level or up to
+// a specific level.
+//
+// This is useful for saying things such as "read the field from the state
+// and config and merge them" or "read the latest value of the field".
+type MultiLevelFieldReader struct {
+       Readers map[string]FieldReader
+       Levels  []string
+}
+
+func (r *MultiLevelFieldReader) ReadField(address []string) (FieldReadResult, error) {
+       return r.ReadFieldMerge(address, r.Levels[len(r.Levels)-1])
+}
+
+func (r *MultiLevelFieldReader) ReadFieldExact(
+       address []string, level string) (FieldReadResult, error) {
+       reader, ok := r.Readers[level]
+       if !ok {
+               return FieldReadResult{}, fmt.Errorf(
+                       "Unknown reader level: %s", level)
+       }
+
+       result, err := reader.ReadField(address)
+       if err != nil {
+               return FieldReadResult{}, fmt.Errorf(
+                       "Error reading level %s: %s", level, err)
+       }
+
+       return result, nil
+}
+
+func (r *MultiLevelFieldReader) ReadFieldMerge(
+       address []string, level string) (FieldReadResult, error) {
+       var result FieldReadResult
+       for _, l := range r.Levels {
+               if r, ok := r.Readers[l]; ok {
+                       out, err := r.ReadField(address)
+                       if err != nil {
+                               return FieldReadResult{}, fmt.Errorf(
+                                       "Error reading level %s: %s", l, err)
+                       }
+
+                       // TODO: computed
+                       if out.Exists {
+                               result = out
+                       }
+               }
+
+               if l == level {
+                       break
+               }
+       }
+
+       return result, nil
+}
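A sketch of the level semantics, assuming two BasicMapReader-backed levels named "state" and "config" (names illustrative). ReadFieldMerge walks the levels in order up to the requested one, so later levels override earlier ones; ReadFieldExact consults exactly one level.

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
)

func main() {
        s := map[string]*schema.Schema{
                "name": {Type: schema.TypeString},
        }
        r := &schema.MultiLevelFieldReader{
                Levels: []string{"state", "config"},
                Readers: map[string]schema.FieldReader{
                        "state":  &schema.MapFieldReader{Schema: s, Map: schema.BasicMapReader{"name": "old"}},
                        "config": &schema.MapFieldReader{Schema: s, Map: schema.BasicMapReader{"name": "new"}},
                },
        }
        merged, _ := r.ReadFieldMerge([]string{"name"}, "config") // state first, config on top
        exact, _ := r.ReadFieldExact([]string{"name"}, "state")   // single level only
        fmt.Println(merged.Value, exact.Value)                    // new old
}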
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer.go
new file mode 100644 (file)
index 0000000..9abc41b
--- /dev/null
@@ -0,0 +1,8 @@
+package schema
+
+// FieldWriters are responsible for writing fields by address into
+// a proper typed representation. ResourceData uses this to write new data
+// into existing sources.
+type FieldWriter interface {
+       WriteField([]string, interface{}) error
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go b/vendor/github.com/hashicorp/terraform/helper/schema/field_writer_map.go
new file mode 100644 (file)
index 0000000..689ed8d
--- /dev/null
@@ -0,0 +1,319 @@
+package schema
+
+import (
+       "fmt"
+       "reflect"
+       "strconv"
+       "strings"
+       "sync"
+
+       "github.com/mitchellh/mapstructure"
+)
+
+// MapFieldWriter writes data into a single map[string]string structure.
+type MapFieldWriter struct {
+       Schema map[string]*Schema
+
+       lock   sync.Mutex
+       result map[string]string
+}
+
+// Map returns the underlying map that is being written to.
+func (w *MapFieldWriter) Map() map[string]string {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+       if w.result == nil {
+               w.result = make(map[string]string)
+       }
+
+       return w.result
+}
+
+func (w *MapFieldWriter) unsafeWriteField(addr string, value string) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+       if w.result == nil {
+               w.result = make(map[string]string)
+       }
+
+       w.result[addr] = value
+}
+
+func (w *MapFieldWriter) WriteField(addr []string, value interface{}) error {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+       if w.result == nil {
+               w.result = make(map[string]string)
+       }
+
+       schemaList := addrToSchema(addr, w.Schema)
+       if len(schemaList) == 0 {
+               return fmt.Errorf("Invalid address to set: %#v", addr)
+       }
+
+       // If we're setting anything other than a list root or set root,
+       // then disallow it.
+       for _, schema := range schemaList[:len(schemaList)-1] {
+               if schema.Type == TypeList {
+                       return fmt.Errorf(
+                               "%s: can only set full list",
+                               strings.Join(addr, "."))
+               }
+
+               if schema.Type == TypeMap {
+                       return fmt.Errorf(
+                               "%s: can only set full map",
+                               strings.Join(addr, "."))
+               }
+
+               if schema.Type == TypeSet {
+                       return fmt.Errorf(
+                               "%s: can only set full set",
+                               strings.Join(addr, "."))
+               }
+       }
+
+       return w.set(addr, value)
+}
+
+func (w *MapFieldWriter) set(addr []string, value interface{}) error {
+       schemaList := addrToSchema(addr, w.Schema)
+       if len(schemaList) == 0 {
+               return fmt.Errorf("Invalid address to set: %#v", addr)
+       }
+
+       schema := schemaList[len(schemaList)-1]
+       switch schema.Type {
+       case TypeBool, TypeInt, TypeFloat, TypeString:
+               return w.setPrimitive(addr, value, schema)
+       case TypeList:
+               return w.setList(addr, value, schema)
+       case TypeMap:
+               return w.setMap(addr, value, schema)
+       case TypeSet:
+               return w.setSet(addr, value, schema)
+       case typeObject:
+               return w.setObject(addr, value, schema)
+       default:
+               panic(fmt.Sprintf("Unknown type: %#v", schema.Type))
+       }
+}
+
+func (w *MapFieldWriter) setList(
+       addr []string,
+       v interface{},
+       schema *Schema) error {
+       k := strings.Join(addr, ".")
+       setElement := func(idx string, value interface{}) error {
+               addrCopy := make([]string, len(addr), len(addr)+1)
+               copy(addrCopy, addr)
+               return w.set(append(addrCopy, idx), value)
+       }
+
+       var vs []interface{}
+       if err := mapstructure.Decode(v, &vs); err != nil {
+               return fmt.Errorf("%s: %s", k, err)
+       }
+
+       // Set the entire list.
+       var err error
+       for i, elem := range vs {
+               is := strconv.FormatInt(int64(i), 10)
+               err = setElement(is, elem)
+               if err != nil {
+                       break
+               }
+       }
+       if err != nil {
+               for i := range vs {
+                       is := strconv.FormatInt(int64(i), 10)
+                       setElement(is, nil)
+               }
+
+               return err
+       }
+
+       w.result[k+".#"] = strconv.FormatInt(int64(len(vs)), 10)
+       return nil
+}
+
+func (w *MapFieldWriter) setMap(
+       addr []string,
+       value interface{},
+       schema *Schema) error {
+       k := strings.Join(addr, ".")
+       v := reflect.ValueOf(value)
+       vs := make(map[string]interface{})
+
+       if value == nil {
+               // The empty string here means the map is removed.
+               w.result[k] = ""
+               return nil
+       }
+
+       if v.Kind() != reflect.Map {
+               return fmt.Errorf("%s: must be a map", k)
+       }
+       if v.Type().Key().Kind() != reflect.String {
+               return fmt.Errorf("%s: keys must strings", k)
+       }
+       for _, mk := range v.MapKeys() {
+               mv := v.MapIndex(mk)
+               vs[mk.String()] = mv.Interface()
+       }
+
+       // Remove the pure key since we're setting the full map value
+       delete(w.result, k)
+
+       // Set each subkey
+       addrCopy := make([]string, len(addr), len(addr)+1)
+       copy(addrCopy, addr)
+       for subKey, v := range vs {
+               if err := w.set(append(addrCopy, subKey), v); err != nil {
+                       return err
+               }
+       }
+
+       // Set the count
+       w.result[k+".%"] = strconv.Itoa(len(vs))
+
+       return nil
+}
+
+func (w *MapFieldWriter) setObject(
+       addr []string,
+       value interface{},
+       schema *Schema) error {
+       // Set the entire object. First decode into a proper structure
+       var v map[string]interface{}
+       if err := mapstructure.Decode(value, &v); err != nil {
+               return fmt.Errorf("%s: %s", strings.Join(addr, "."), err)
+       }
+
+       // Make space for additional elements in the address
+       addrCopy := make([]string, len(addr), len(addr)+1)
+       copy(addrCopy, addr)
+
+       // Set each element in turn
+       var err error
+       for k1, v1 := range v {
+               if err = w.set(append(addrCopy, k1), v1); err != nil {
+                       break
+               }
+       }
+       if err != nil {
+               for k1 := range v {
+                       w.set(append(addrCopy, k1), nil)
+               }
+       }
+
+       return err
+}
+
+func (w *MapFieldWriter) setPrimitive(
+       addr []string,
+       v interface{},
+       schema *Schema) error {
+       k := strings.Join(addr, ".")
+
+       if v == nil {
+               // The empty string here means the value is removed.
+               w.result[k] = ""
+               return nil
+       }
+
+       var set string
+       switch schema.Type {
+       case TypeBool:
+               var b bool
+               if err := mapstructure.Decode(v, &b); err != nil {
+                       return fmt.Errorf("%s: %s", k, err)
+               }
+
+               set = strconv.FormatBool(b)
+       case TypeString:
+               if err := mapstructure.Decode(v, &set); err != nil {
+                       return fmt.Errorf("%s: %s", k, err)
+               }
+       case TypeInt:
+               var n int
+               if err := mapstructure.Decode(v, &n); err != nil {
+                       return fmt.Errorf("%s: %s", k, err)
+               }
+               set = strconv.FormatInt(int64(n), 10)
+       case TypeFloat:
+               var n float64
+               if err := mapstructure.Decode(v, &n); err != nil {
+                       return fmt.Errorf("%s: %s", k, err)
+               }
+               set = strconv.FormatFloat(n, 'G', -1, 64)
+       default:
+               return fmt.Errorf("Unknown type: %#v", schema.Type)
+       }
+
+       w.result[k] = set
+       return nil
+}
+
+func (w *MapFieldWriter) setSet(
+       addr []string,
+       value interface{},
+       schema *Schema) error {
+       addrCopy := make([]string, len(addr), len(addr)+1)
+       copy(addrCopy, addr)
+       k := strings.Join(addr, ".")
+
+       if value == nil {
+               w.result[k+".#"] = "0"
+               return nil
+       }
+
+       // If it is a slice, then we have to turn it into a *Set so that
+       // we get the proper order back based on the hash code.
+       if v := reflect.ValueOf(value); v.Kind() == reflect.Slice {
+               // Build a temp *ResourceData to use for the conversion
+               tempSchema := *schema
+               tempSchema.Type = TypeList
+               tempSchemaMap := map[string]*Schema{addr[0]: &tempSchema}
+               tempW := &MapFieldWriter{Schema: tempSchemaMap}
+
+               // Set the entire list; this lets us get sane values out of it.
+               if err := tempW.WriteField(addr, value); err != nil {
+                       return err
+               }
+
+               // Build the set by going over the list items in order and
+               // hashing them into the set. The reason we go over the list and
+               // not the `value` directly is because this forces all types
+               // to become []interface{} (generic) instead of []string, which
+               // most hash functions are expecting.
+               s := schema.ZeroValue().(*Set)
+               tempR := &MapFieldReader{
+                       Map:    BasicMapReader(tempW.Map()),
+                       Schema: tempSchemaMap,
+               }
+               for i := 0; i < v.Len(); i++ {
+                       is := strconv.FormatInt(int64(i), 10)
+                       result, err := tempR.ReadField(append(addrCopy, is))
+                       if err != nil {
+                               return err
+                       }
+                       if !result.Exists {
+                               panic("set item just set doesn't exist")
+                       }
+
+                       s.Add(result.Value)
+               }
+
+               value = s
+       }
+
+       for code, elem := range value.(*Set).m {
+               if err := w.set(append(addrCopy, code), elem); err != nil {
+                       return err
+               }
+       }
+
+       w.result[k+".#"] = strconv.Itoa(value.(*Set).Len())
+       return nil
+}
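A sketch of the writer side, assuming one list field and one map field (names illustrative). It shows the conventions mirrored by the readers above: compound values are written whole at their root address, lists and sets record their length under ".#", and maps under ".%".

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
)

func main() {
        s := map[string]*schema.Schema{
                "ports": {Type: schema.TypeList, Elem: &schema.Schema{Type: schema.TypeInt}},
                "tags":  {Type: schema.TypeMap},
        }
        w := &schema.MapFieldWriter{Schema: s}

        // Lists (and sets) must be written whole at their root address.
        if err := w.WriteField([]string{"ports"}, []interface{}{80, 443}); err != nil {
                panic(err)
        }
        if err := w.WriteField([]string{"tags"}, map[string]interface{}{"env": "dev"}); err != nil {
                panic(err)
        }

        // Flattened contents: ports.#=2, ports.0=80, ports.1=443, tags.%=1, tags.env=dev
        fmt.Println(w.Map())
}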
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go
new file mode 100644 (file)
index 0000000..3a97629
--- /dev/null
@@ -0,0 +1,36 @@
+// Code generated by "stringer -type=getSource resource_data_get_source.go"; DO NOT EDIT.
+
+package schema
+
+import "fmt"
+
+const (
+       _getSource_name_0 = "getSourceStategetSourceConfig"
+       _getSource_name_1 = "getSourceDiff"
+       _getSource_name_2 = "getSourceSet"
+       _getSource_name_3 = "getSourceLevelMaskgetSourceExact"
+)
+
+var (
+       _getSource_index_0 = [...]uint8{0, 14, 29}
+       _getSource_index_1 = [...]uint8{0, 13}
+       _getSource_index_2 = [...]uint8{0, 12}
+       _getSource_index_3 = [...]uint8{0, 18, 32}
+)
+
+func (i getSource) String() string {
+       switch {
+       case 1 <= i && i <= 2:
+               i -= 1
+               return _getSource_name_0[_getSource_index_0[i]:_getSource_index_0[i+1]]
+       case i == 4:
+               return _getSource_name_1
+       case i == 8:
+               return _getSource_name_2
+       case 15 <= i && i <= 16:
+               i -= 15
+               return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]]
+       default:
+               return fmt.Sprintf("getSource(%d)", i)
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provider.go b/vendor/github.com/hashicorp/terraform/helper/schema/provider.go
new file mode 100644 (file)
index 0000000..d52d2f5
--- /dev/null
@@ -0,0 +1,400 @@
+package schema
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "sort"
+       "sync"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// Provider represents a resource provider in Terraform, and properly
+// implements all of the ResourceProvider API.
+//
+// By defining a schema for the configuration of the provider, the
+// map of supporting resources, and a configuration function, the schema
+// framework takes over and handles all the provider operations for you.
+//
+// After defining the provider structure, it is unlikely that you'll require any
+// of the methods on Provider itself.
+type Provider struct {
+       // Schema is the schema for the configuration of this provider. If this
+       // provider has no configuration, this can be omitted.
+       //
+       // The keys of this map are the configuration keys, and the value is
+       // the schema describing the value of the configuration.
+       Schema map[string]*Schema
+
+       // ResourcesMap is the list of available resources that this provider
+       // can manage, along with their Resource structure defining their
+       // own schemas and CRUD operations.
+       //
+       // Provider automatically handles routing operations such as Apply,
+       // Diff, etc. to the proper resource.
+       ResourcesMap map[string]*Resource
+
+       // DataSourcesMap is the collection of available data sources that
+       // this provider implements, with a Resource instance defining
+       // the schema and Read operation of each.
+       //
+       // Resource instances for data sources must have a Read function
+       // and must *not* implement Create, Update or Delete.
+       DataSourcesMap map[string]*Resource
+
+       // ConfigureFunc is a function for configuring the provider. If the
+       // provider doesn't need to be configured, this can be omitted.
+       //
+       // See the ConfigureFunc documentation for more information.
+       ConfigureFunc ConfigureFunc
+
+       // MetaReset is called by TestReset to reset any state stored in the meta
+       // interface.  This is especially important if the StopContext is stored by
+       // the provider.
+       MetaReset func() error
+
+       meta interface{}
+
+       // a mutex is required because TestReset can directly replace the stopCtx
+       stopMu        sync.Mutex
+       stopCtx       context.Context
+       stopCtxCancel context.CancelFunc
+       stopOnce      sync.Once
+}
+
+// ConfigureFunc is the function used to configure a Provider.
+//
+// The interface{} value returned by this function is stored and passed into
+// the subsequent resources as the meta parameter. This return value is
+// usually used to pass along a configured API client, a configuration
+// structure, etc.
+type ConfigureFunc func(*ResourceData) (interface{}, error)
+
+// InternalValidate should be called to validate the structure
+// of the provider.
+//
+// This should be called in a unit test for any provider to verify
+// before release that a provider is properly configured for use with
+// this library.
+func (p *Provider) InternalValidate() error {
+       if p == nil {
+               return errors.New("provider is nil")
+       }
+
+       var validationErrors error
+       sm := schemaMap(p.Schema)
+       if err := sm.InternalValidate(sm); err != nil {
+               validationErrors = multierror.Append(validationErrors, err)
+       }
+
+       for k, r := range p.ResourcesMap {
+               if err := r.InternalValidate(nil, true); err != nil {
+                       validationErrors = multierror.Append(validationErrors, fmt.Errorf("resource %s: %s", k, err))
+               }
+       }
+
+       for k, r := range p.DataSourcesMap {
+               if err := r.InternalValidate(nil, false); err != nil {
+                       validationErrors = multierror.Append(validationErrors, fmt.Errorf("data source %s: %s", k, err))
+               }
+       }
+
+       return validationErrors
+}
+
+// Meta returns the metadata associated with this provider that was
+// returned by the Configure call. It will be nil until Configure is called.
+func (p *Provider) Meta() interface{} {
+       return p.meta
+}
+
+// SetMeta can be used to forcefully set the Meta object of the provider.
+// Note that if Configure is called the return value will override anything
+// set here.
+func (p *Provider) SetMeta(v interface{}) {
+       p.meta = v
+}
+
+// Stopped reports whether the provider has been stopped or not.
+func (p *Provider) Stopped() bool {
+       ctx := p.StopContext()
+       select {
+       case <-ctx.Done():
+               return true
+       default:
+               return false
+       }
+}
+
+// StopContext returns a context that is canceled once the provider is stopped.
+func (p *Provider) StopContext() context.Context {
+       p.stopOnce.Do(p.stopInit)
+
+       p.stopMu.Lock()
+       defer p.stopMu.Unlock()
+
+       return p.stopCtx
+}
+
+func (p *Provider) stopInit() {
+       p.stopMu.Lock()
+       defer p.stopMu.Unlock()
+
+       p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
+}
+
+// Stop implementation of terraform.ResourceProvider interface.
+func (p *Provider) Stop() error {
+       p.stopOnce.Do(p.stopInit)
+
+       p.stopMu.Lock()
+       defer p.stopMu.Unlock()
+
+       p.stopCtxCancel()
+       return nil
+}
+
+// TestReset resets any state stored in the Provider, and will call TestReset
+// on Meta if it implements the TestProvider interface.
+// This may be used to reset the schema.Provider at the start of a test, and is
+// automatically called by resource.Test.
+func (p *Provider) TestReset() error {
+       p.stopInit()
+       if p.MetaReset != nil {
+               return p.MetaReset()
+       }
+       return nil
+}
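The stop machinery above can be exercised on its own; a minimal sketch with an unconfigured provider:

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
)

func main() {
        p := &schema.Provider{}
        fmt.Println(p.Stopped()) // false: the stop context is not yet canceled
        p.Stop()
        fmt.Println(p.Stopped()) // true: StopContext's Done channel is now closed
}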
+
+// Input implementation of terraform.ResourceProvider interface.
+func (p *Provider) Input(
+       input terraform.UIInput,
+       c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+       return schemaMap(p.Schema).Input(input, c)
+}
+
+// Validate implementation of terraform.ResourceProvider interface.
+func (p *Provider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+       if err := p.InternalValidate(); err != nil {
+               return nil, []error{fmt.Errorf(
+                       "Internal validation of the provider failed! This is always a bug\n"+
+                               "with the provider itself, and not a user issue. Please report\n"+
+                               "this bug:\n\n%s", err)}
+       }
+
+       return schemaMap(p.Schema).Validate(c)
+}
+
+// ValidateResource implementation of terraform.ResourceProvider interface.
+func (p *Provider) ValidateResource(
+       t string, c *terraform.ResourceConfig) ([]string, []error) {
+       r, ok := p.ResourcesMap[t]
+       if !ok {
+               return nil, []error{fmt.Errorf(
+                       "Provider doesn't support resource: %s", t)}
+       }
+
+       return r.Validate(c)
+}
+
+// Configure implementation of terraform.ResourceProvider interface.
+func (p *Provider) Configure(c *terraform.ResourceConfig) error {
+       // No configuration
+       if p.ConfigureFunc == nil {
+               return nil
+       }
+
+       sm := schemaMap(p.Schema)
+
+       // Get a ResourceData for this configuration. To do this, we actually
+       // generate an intermediary "diff" although that is never exposed.
+       diff, err := sm.Diff(nil, c)
+       if err != nil {
+               return err
+       }
+
+       data, err := sm.Data(nil, diff)
+       if err != nil {
+               return err
+       }
+
+       meta, err := p.ConfigureFunc(data)
+       if err != nil {
+               return err
+       }
+
+       p.meta = meta
+       return nil
+}
+
+// Apply implementation of terraform.ResourceProvider interface.
+func (p *Provider) Apply(
+       info *terraform.InstanceInfo,
+       s *terraform.InstanceState,
+       d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+       r, ok := p.ResourcesMap[info.Type]
+       if !ok {
+               return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+       }
+
+       return r.Apply(s, d, p.meta)
+}
+
+// Diff implementation of terraform.ResourceProvider interface.
+func (p *Provider) Diff(
+       info *terraform.InstanceInfo,
+       s *terraform.InstanceState,
+       c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+       r, ok := p.ResourcesMap[info.Type]
+       if !ok {
+               return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+       }
+
+       return r.Diff(s, c)
+}
+
+// Refresh implementation of terraform.ResourceProvider interface.
+func (p *Provider) Refresh(
+       info *terraform.InstanceInfo,
+       s *terraform.InstanceState) (*terraform.InstanceState, error) {
+       r, ok := p.ResourcesMap[info.Type]
+       if !ok {
+               return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+       }
+
+       return r.Refresh(s, p.meta)
+}
+
+// Resources implementation of terraform.ResourceProvider interface.
+func (p *Provider) Resources() []terraform.ResourceType {
+       keys := make([]string, 0, len(p.ResourcesMap))
+       for k := range p.ResourcesMap {
+               keys = append(keys, k)
+       }
+       sort.Strings(keys)
+
+       result := make([]terraform.ResourceType, 0, len(keys))
+       for _, k := range keys {
+               resource := p.ResourcesMap[k]
+
+               // This isn't really possible (it'd fail InternalValidate), but
+               // we do it anyway to avoid a panic.
+               if resource == nil {
+                       resource = &Resource{}
+               }
+
+               result = append(result, terraform.ResourceType{
+                       Name:       k,
+                       Importable: resource.Importer != nil,
+               })
+       }
+
+       return result
+}
+
+func (p *Provider) ImportState(
+       info *terraform.InstanceInfo,
+       id string) ([]*terraform.InstanceState, error) {
+       // Find the resource
+       r, ok := p.ResourcesMap[info.Type]
+       if !ok {
+               return nil, fmt.Errorf("unknown resource type: %s", info.Type)
+       }
+
+       // If it doesn't support import, error
+       if r.Importer == nil {
+               return nil, fmt.Errorf("resource %s doesn't support import", info.Type)
+       }
+
+       // Create the data
+       data := r.Data(nil)
+       data.SetId(id)
+       data.SetType(info.Type)
+
+       // Call the import function
+       results := []*ResourceData{data}
+       if r.Importer.State != nil {
+               var err error
+               results, err = r.Importer.State(data, p.meta)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Convert the results to InstanceState values and return it
+       states := make([]*terraform.InstanceState, len(results))
+       for i, r := range results {
+               states[i] = r.State()
+       }
+
+       // Verify that all are non-nil. If there are any nil the error
+       // isn't obvious so we circumvent that with a friendlier error.
+       for _, s := range states {
+               if s == nil {
+                       return nil, fmt.Errorf(
+                               "nil entry in ImportState results. This is always a bug with\n" +
+                                       "the resource that is being imported. Please report this as\n" +
+                                       "a bug to Terraform.")
+               }
+       }
+
+       return states, nil
+}
+
+// ValidateDataSource implementation of terraform.ResourceProvider interface.
+func (p *Provider) ValidateDataSource(
+       t string, c *terraform.ResourceConfig) ([]string, []error) {
+       r, ok := p.DataSourcesMap[t]
+       if !ok {
+               return nil, []error{fmt.Errorf(
+                       "Provider doesn't support data source: %s", t)}
+       }
+
+       return r.Validate(c)
+}
+
+// ReadDataDiff implementation of terraform.ResourceProvider interface.
+func (p *Provider) ReadDataDiff(
+       info *terraform.InstanceInfo,
+       c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+
+       r, ok := p.DataSourcesMap[info.Type]
+       if !ok {
+               return nil, fmt.Errorf("unknown data source: %s", info.Type)
+       }
+
+       return r.Diff(nil, c)
+}
+
+// RefreshData implementation of terraform.ResourceProvider interface.
+func (p *Provider) ReadDataApply(
+       info *terraform.InstanceInfo,
+       d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+
+       r, ok := p.DataSourcesMap[info.Type]
+       if !ok {
+               return nil, fmt.Errorf("unknown data source: %s", info.Type)
+       }
+
+       return r.ReadDataApply(d, p.meta)
+}
+
+// DataSources implementation of terraform.ResourceProvider interface.
+func (p *Provider) DataSources() []terraform.DataSource {
+       keys := make([]string, 0, len(p.DataSourcesMap))
+       for k := range p.DataSourcesMap {
+               keys = append(keys, k)
+       }
+       sort.Strings(keys)
+
+       result := make([]terraform.DataSource, 0, len(keys))
+       for _, k := range keys {
+               result = append(result, terraform.DataSource{
+                       Name: k,
+               })
+       }
+
+       return result
+}
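A sketch of wiring a provider together, assuming a hypothetical "example_thing" resource and an api_key credential; buildClient is an illustrative stand-in for constructing a real API client, and its return value becomes the meta argument handed to every CRUD function:

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/schema"
)

// buildClient stands in for constructing a real API client (illustrative).
func buildClient(apiKey string) (interface{}, error) {
        return fmt.Sprintf("client[%s]", apiKey), nil
}

func main() {
        p := &schema.Provider{
                Schema: map[string]*schema.Schema{
                        "api_key": {Type: schema.TypeString, Required: true},
                },
                ResourcesMap: map[string]*schema.Resource{
                        "example_thing": {
                                Schema: map[string]*schema.Schema{
                                        "name": {Type: schema.TypeString, Required: true, ForceNew: true},
                                },
                                Create: func(d *schema.ResourceData, meta interface{}) error {
                                        d.SetId(d.Get("name").(string)) // meta is buildClient's return value
                                        return nil
                                },
                                Read:   schema.Noop,
                                Delete: schema.RemoveFromState,
                        },
                },
                ConfigureFunc: func(d *schema.ResourceData) (interface{}, error) {
                        return buildClient(d.Get("api_key").(string))
                },
        }
        if err := p.InternalValidate(); err != nil {
                panic(err)
        }
}

With no Update defined, every non-computed attribute must be ForceNew, which InternalValidate enforces.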
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go b/vendor/github.com/hashicorp/terraform/helper/schema/provisioner.go
new file mode 100644 (file)
index 0000000..c1564a2
--- /dev/null
@@ -0,0 +1,180 @@
+package schema
+
+import (
+       "context"
+       "errors"
+       "fmt"
+       "sync"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// Provisioner represents a resource provisioner in Terraform and properly
+// implements all of the ResourceProvisioner API.
+//
+// This higher level structure makes it much easier to implement a new or
+// custom provisioner for Terraform.
+//
+// The function callbacks for this structure are all passed a context object.
+// This context object has a number of pre-defined values that can be accessed
+// via the global functions defined in context.go.
+type Provisioner struct {
+       // ConnSchema is the schema for the connection settings for this
+       // provisioner.
+       //
+       // The keys of this map are the configuration keys, and the value is
+       // the schema describing the value of the configuration.
+       //
+       // NOTE: The value of connection keys can only be strings for now.
+       ConnSchema map[string]*Schema
+
+       // Schema is the schema for the usage of this provisioner.
+       //
+       // The keys of this map are the configuration keys, and the value is
+       // the schema describing the value of the configuration.
+       Schema map[string]*Schema
+
+       // ApplyFunc is the function for executing the provisioner. This is required.
+       // It is given a context. See the Provisioner struct docs for more
+       // information.
+       ApplyFunc func(ctx context.Context) error
+
+       stopCtx       context.Context
+       stopCtxCancel context.CancelFunc
+       stopOnce      sync.Once
+}
+
+// Keys that can be used to access data in the context parameters for
+// Provisioners.
+var (
+       connDataInvalid = contextKey("data invalid")
+
+       // This returns a *ResourceData for the connection information.
+       // Guaranteed to never be nil.
+       ProvConnDataKey = contextKey("provider conn data")
+
+       // This returns a *ResourceData for the config information.
+       // Guaranteed to never be nil.
+       ProvConfigDataKey = contextKey("provider config data")
+
+       // This returns a terraform.UIOutput. Guaranteed to never be nil.
+       ProvOutputKey = contextKey("provider output")
+
+       // This returns the raw InstanceState passed to Apply. Guaranteed to
+       // be set, but may be nil.
+       ProvRawStateKey = contextKey("provider raw state")
+)
+
+// InternalValidate should be called to validate the structure
+// of the provisioner.
+//
+// This should be called in a unit test to verify before release that this
+// structure is properly configured for use.
+func (p *Provisioner) InternalValidate() error {
+       if p == nil {
+               return errors.New("provisioner is nil")
+       }
+
+       var validationErrors error
+       {
+               sm := schemaMap(p.ConnSchema)
+               if err := sm.InternalValidate(sm); err != nil {
+                       validationErrors = multierror.Append(validationErrors, err)
+               }
+       }
+
+       {
+               sm := schemaMap(p.Schema)
+               if err := sm.InternalValidate(sm); err != nil {
+                       validationErrors = multierror.Append(validationErrors, err)
+               }
+       }
+
+       if p.ApplyFunc == nil {
+               validationErrors = multierror.Append(validationErrors, fmt.Errorf(
+                       "ApplyFunc must not be nil"))
+       }
+
+       return validationErrors
+}
+
+// StopContext returns a context that checks whether a provisioner is stopped.
+func (p *Provisioner) StopContext() context.Context {
+       p.stopOnce.Do(p.stopInit)
+       return p.stopCtx
+}
+
+func (p *Provisioner) stopInit() {
+       p.stopCtx, p.stopCtxCancel = context.WithCancel(context.Background())
+}
+
+// Stop implementation of terraform.ResourceProvisioner interface.
+func (p *Provisioner) Stop() error {
+       p.stopOnce.Do(p.stopInit)
+       p.stopCtxCancel()
+       return nil
+}
+
+func (p *Provisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+       return schemaMap(p.Schema).Validate(c)
+}
+
+// Apply implementation of terraform.ResourceProvisioner interface.
+func (p *Provisioner) Apply(
+       o terraform.UIOutput,
+       s *terraform.InstanceState,
+       c *terraform.ResourceConfig) error {
+       var connData, configData *ResourceData
+
+       {
+               // We first need to turn the connection information into a
+               // terraform.ResourceConfig so that we can use that type to more
+               // easily build a ResourceData structure. We do this by simply treating
+               // the conn info as configuration input.
+               raw := make(map[string]interface{})
+               if s != nil {
+                       for k, v := range s.Ephemeral.ConnInfo {
+                               raw[k] = v
+                       }
+               }
+
+               c, err := config.NewRawConfig(raw)
+               if err != nil {
+                       return err
+               }
+
+               sm := schemaMap(p.ConnSchema)
+               diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
+               if err != nil {
+                       return err
+               }
+               connData, err = sm.Data(nil, diff)
+               if err != nil {
+                       return err
+               }
+       }
+
+       {
+               // Build the configuration data. Doing this requires making a "diff"
+               // even though that's never used. We use that just to get the correct types.
+               configMap := schemaMap(p.Schema)
+               diff, err := configMap.Diff(nil, c)
+               if err != nil {
+                       return err
+               }
+               configData, err = configMap.Data(nil, diff)
+               if err != nil {
+                       return err
+               }
+       }
+
+       // Build the context and call the function
+       ctx := p.StopContext()
+       ctx = context.WithValue(ctx, ProvConnDataKey, connData)
+       ctx = context.WithValue(ctx, ProvConfigDataKey, configData)
+       ctx = context.WithValue(ctx, ProvOutputKey, o)
+       ctx = context.WithValue(ctx, ProvRawStateKey, s)
+       return p.ApplyFunc(ctx)
+}
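A sketch of a provisioner that only echoes its configuration, assuming a single "command" argument (illustrative). The config ResourceData and the UIOutput come out of the context via the keys defined above:

package main

import (
        "context"

        "github.com/hashicorp/terraform/helper/schema"
        "github.com/hashicorp/terraform/terraform"
)

func main() {
        p := &schema.Provisioner{
                Schema: map[string]*schema.Schema{
                        "command": {Type: schema.TypeString, Required: true},
                },
                ApplyFunc: func(ctx context.Context) error {
                        data := ctx.Value(schema.ProvConfigDataKey).(*schema.ResourceData)
                        out := ctx.Value(schema.ProvOutputKey).(terraform.UIOutput)
                        out.Output("would run: " + data.Get("command").(string))
                        return nil
                },
        }
        if err := p.InternalValidate(); err != nil {
                panic(err)
        }
}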
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go
new file mode 100644 (file)
index 0000000..c810558
--- /dev/null
@@ -0,0 +1,478 @@
+package schema
+
+import (
+       "errors"
+       "fmt"
+       "log"
+       "strconv"
+
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// Resource represents a thing in Terraform that has a set of configurable
+// attributes and a lifecycle (create, read, update, delete).
+//
+// The Resource schema is an abstraction that allows provider writers to
+// worry only about CRUD operations while off-loading validation, diff
+// generation, etc. to this higher level library.
+//
+// In spite of the name, this struct is not used only for Terraform resources,
+// but also for data sources. In the case of data sources, the Create,
+// Update and Delete functions must not be provided.
+type Resource struct {
+       // Schema is the schema for the configuration of this resource.
+       //
+       // The keys of this map are the configuration keys, and the values
+       // describe the schema of the configuration value.
+       //
+       // The schema is used to represent both configurable data as well
+       // as data that might be computed in the process of creating this
+       // resource.
+       Schema map[string]*Schema
+
+       // SchemaVersion is the version number for this resource's Schema
+       // definition. The current SchemaVersion is stored in the state for each
+       // resource. Provider authors can increment this version number
+       // when Schema semantics change. If the State's SchemaVersion is less than
+       // the current SchemaVersion, the InstanceState is yielded to the
+       // MigrateState callback, where the provider can make whatever changes it
+       // needs to update the state to be compatible with the latest version of the
+       // Schema.
+       //
+       // When unset, SchemaVersion defaults to 0, so provider authors can start
+       // their versioning at any integer >= 1.
+       SchemaVersion int
+
+       // MigrateState is responsible for updating an InstanceState with an old
+       // version to the format expected by the current version of the Schema.
+       //
+       // It is called during Refresh if the State's stored SchemaVersion is less
+       // than the current SchemaVersion of the Resource.
+       //
+       // The function is yielded the state's stored SchemaVersion and a pointer to
+       // the InstanceState that needs updating, as well as the configured
+       // provider's configured meta interface{}, in case the migration process
+       // needs to make any remote API calls.
+       MigrateState StateMigrateFunc
+
+       // The functions below are the CRUD operations for this resource.
+       //
+       // The only optional operation is Update. If Update is not implemented,
+       // then updates will not be supported for this resource.
+       //
+       // The ResourceData parameter in the functions below are used to
+       // query configuration and changes for the resource as well as to set
+       // the ID, computed data, etc.
+       //
+       // The interface{} parameter is the result of the ConfigureFunc in
+       // the provider for this resource. If the provider does not define
+       // a ConfigureFunc, this will be nil. This parameter should be used
+       // to store API clients, configuration structures, etc.
+       //
+       // If any errors occur during any of the operations, an error should be
+       // returned. If a resource was partially updated, be careful to enable
+       // partial state mode for ResourceData and use it accordingly.
+       //
+       // Exists is a function that is called to check if a resource still
+       // exists. If this returns false, then this will affect the diff
+       // accordingly. If this function isn't set, it will not be called. It
+       // is highly recommended to set it. The *ResourceData passed to Exists
+       // should _not_ be modified.
+       Create CreateFunc
+       Read   ReadFunc
+       Update UpdateFunc
+       Delete DeleteFunc
+       Exists ExistsFunc
+
+       // Importer is the ResourceImporter implementation for this resource.
+       // If this is nil, then this resource does not support importing. If
+       // this is non-nil, then it supports importing and ResourceImporter
+       // must be validated. The validity of ResourceImporter is verified
+       // by InternalValidate on Resource.
+       Importer *ResourceImporter
+
+       // If non-empty, this string is emitted as a warning during Validate.
+       // This is a private interface for now, for use by DataSourceResourceShim,
+       // and not for general use. (But maybe later...)
+       deprecationMessage string
+
+       // Timeouts allow users to specify specific time durations in which an
+       // operation should time out, to allow them to extend an action to suit their
+       // usage. For example, a user may specify a large Creation timeout for their
+       // AWS RDS Instance due to its size, or restoring from a snapshot.
+       // Resource implementors must enable Timeout support by adding the allowed
+       // actions (Create, Read, Update, Delete, Default) to the Resource struct, and
+       // accessing them in the matching methods.
+       Timeouts *ResourceTimeout
+}
+
+// See Resource documentation.
+type CreateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ReadFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type UpdateFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type DeleteFunc func(*ResourceData, interface{}) error
+
+// See Resource documentation.
+type ExistsFunc func(*ResourceData, interface{}) (bool, error)
+
+// See Resource documentation.
+type StateMigrateFunc func(
+       int, *terraform.InstanceState, interface{}) (*terraform.InstanceState, error)
+
+// Apply creates, updates, and/or deletes a resource.
+func (r *Resource) Apply(
+       s *terraform.InstanceState,
+       d *terraform.InstanceDiff,
+       meta interface{}) (*terraform.InstanceState, error) {
+       data, err := schemaMap(r.Schema).Data(s, d)
+       if err != nil {
+               return s, err
+       }
+
+       // The InstanceDiff should have the timeout info; we need to copy it over
+       // to the ResourceData meta.
+       rt := ResourceTimeout{}
+       if _, ok := d.Meta[TimeoutKey]; ok {
+               if err := rt.DiffDecode(d); err != nil {
+                       log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
+               }
+       } else {
+               log.Printf("[DEBUG] No meta timeoutkey found in Apply()")
+       }
+       data.timeouts = &rt
+
+       if s == nil {
+               // The Terraform API dictates that this should never happen, but
+               // it doesn't hurt to be safe in this case.
+               s = new(terraform.InstanceState)
+       }
+
+       if d.Destroy || d.RequiresNew() {
+               if s.ID != "" {
+                       // Destroy the resource since it is created
+                       if err := r.Delete(data, meta); err != nil {
+                               return r.recordCurrentSchemaVersion(data.State()), err
+                       }
+
+                       // Make sure the ID is gone.
+                       data.SetId("")
+               }
+
+               // If we're only destroying, and not creating, then return
+               // now since we're done!
+               if !d.RequiresNew() {
+                       return nil, nil
+               }
+
+               // Reset the data to be stateless since we just destroyed,
+               // checking the error before re-applying the parsed timeouts.
+               data, err = schemaMap(r.Schema).Data(nil, d)
+               if err != nil {
+                       return nil, err
+               }
+               data.timeouts = &rt
+       }
+
+       err = nil
+       if data.Id() == "" {
+               // We're creating, it is a new resource.
+               data.MarkNewResource()
+               err = r.Create(data, meta)
+       } else {
+               if r.Update == nil {
+                       return s, fmt.Errorf("doesn't support update")
+               }
+
+               err = r.Update(data, meta)
+       }
+
+       return r.recordCurrentSchemaVersion(data.State()), err
+}
+
+// Diff returns a diff of this resource and is API compatible with the
+// ResourceProvider interface.
+func (r *Resource) Diff(
+       s *terraform.InstanceState,
+       c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+
+       t := &ResourceTimeout{}
+       err := t.ConfigDecode(r, c)
+
+       if err != nil {
+               return nil, fmt.Errorf("[ERR] Error decoding timeout: %s", err)
+       }
+
+       instanceDiff, err := schemaMap(r.Schema).Diff(s, c)
+       if err != nil {
+               return instanceDiff, err
+       }
+
+       if instanceDiff != nil {
+               if err := t.DiffEncode(instanceDiff); err != nil {
+                       log.Printf("[ERR] Error encoding timeout to instance diff: %s", err)
+               }
+       } else {
+               log.Printf("[DEBUG] Instance Diff is nil in Diff()")
+       }
+
+       return instanceDiff, err
+}
+
+// Validate validates the resource configuration against the schema.
+func (r *Resource) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+       warns, errs := schemaMap(r.Schema).Validate(c)
+
+       if r.deprecationMessage != "" {
+               warns = append(warns, r.deprecationMessage)
+       }
+
+       return warns, errs
+}
+
+// ReadDataApply loads the data for a data source, given a diff that
+// describes the configuration arguments and desired computed attributes.
+func (r *Resource) ReadDataApply(
+       d *terraform.InstanceDiff,
+       meta interface{},
+) (*terraform.InstanceState, error) {
+
+       // Data sources are always built completely from scratch
+       // on each read, so the source state is always nil.
+       data, err := schemaMap(r.Schema).Data(nil, d)
+       if err != nil {
+               return nil, err
+       }
+
+       err = r.Read(data, meta)
+       state := data.State()
+       if state != nil && state.ID == "" {
+               // Data sources can set an ID if they want, but they aren't
+               // required to; we'll provide a placeholder if they don't,
+               // to preserve the invariant that all resources have non-empty
+               // ids.
+               state.ID = "-"
+       }
+
+       return r.recordCurrentSchemaVersion(state), err
+}
+
+// Refresh refreshes the state of the resource.
+func (r *Resource) Refresh(
+       s *terraform.InstanceState,
+       meta interface{}) (*terraform.InstanceState, error) {
+       // If the ID is already somehow blank, it doesn't exist
+       if s.ID == "" {
+               return nil, nil
+       }
+
+       rt := ResourceTimeout{}
+       if _, ok := s.Meta[TimeoutKey]; ok {
+               if err := rt.StateDecode(s); err != nil {
+                       log.Printf("[ERR] Error decoding ResourceTimeout: %s", err)
+               }
+       }
+
+       if r.Exists != nil {
+               // Make a copy of data so that if it is modified it doesn't
+               // affect our Read later.
+               data, err := schemaMap(r.Schema).Data(s, nil)
+               if err != nil {
+                       return s, err
+               }
+
+               data.timeouts = &rt
+
+               exists, err := r.Exists(data, meta)
+               if err != nil {
+                       return s, err
+               }
+               if !exists {
+                       return nil, nil
+               }
+       }
+
+       needsMigration, stateSchemaVersion := r.checkSchemaVersion(s)
+       if needsMigration && r.MigrateState != nil {
+               // Assign to the outer s so the migrated state is actually used.
+               var err error
+               s, err = r.MigrateState(stateSchemaVersion, s, meta)
+               if err != nil {
+                       return s, err
+               }
+       }
+
+       data, err := schemaMap(r.Schema).Data(s, nil)
+       if err != nil {
+               return s, err
+       }
+       data.timeouts = &rt
+
+       err = r.Read(data, meta)
+       state := data.State()
+       if state != nil && state.ID == "" {
+               state = nil
+       }
+
+       return r.recordCurrentSchemaVersion(state), err
+}
+
+// InternalValidate should be called to validate the structure
+// of the resource.
+//
+// This should be called in a unit test for any resource to verify
+// before release that a resource is properly configured for use with
+// this library.
+//
+// Provider.InternalValidate() will automatically call this for all of
+// the resources it manages, so you don't need to call this manually if it
+// is part of a Provider.
+func (r *Resource) InternalValidate(topSchemaMap schemaMap, writable bool) error {
+       if r == nil {
+               return errors.New("resource is nil")
+       }
+
+       if !writable {
+               if r.Create != nil || r.Update != nil || r.Delete != nil {
+                       return fmt.Errorf("must not implement Create, Update or Delete")
+               }
+       }
+
+       tsm := topSchemaMap
+
+       if r.isTopLevel() && writable {
+               // All non-Computed attributes must be ForceNew if Update is not defined
+               if r.Update == nil {
+                       nonForceNewAttrs := make([]string, 0)
+                       for k, v := range r.Schema {
+                               if !v.ForceNew && !v.Computed {
+                                       nonForceNewAttrs = append(nonForceNewAttrs, k)
+                               }
+                       }
+                       if len(nonForceNewAttrs) > 0 {
+                               return fmt.Errorf(
+                                       "No Update defined, must set ForceNew on: %#v", nonForceNewAttrs)
+                       }
+               } else {
+                       nonUpdateableAttrs := make([]string, 0)
+                       for k, v := range r.Schema {
+                               if v.ForceNew || v.Computed && !v.Optional {
+                                       nonUpdateableAttrs = append(nonUpdateableAttrs, k)
+                               }
+                       }
+                       updateableAttrs := len(r.Schema) - len(nonUpdateableAttrs)
+                       if updateableAttrs == 0 {
+                               return fmt.Errorf(
+                                       "All fields are ForceNew or Computed w/out Optional, Update is superfluous")
+                       }
+               }
+
+               tsm = schemaMap(r.Schema)
+
+               // Read and Delete are required
+               if r.Read == nil {
+                       return fmt.Errorf("Read must be implemented")
+               }
+               if r.Delete == nil {
+                       return fmt.Errorf("Delete must be implemented")
+               }
+
+               // If we have an importer, we need to verify the importer.
+               if r.Importer != nil {
+                       if err := r.Importer.InternalValidate(); err != nil {
+                               return err
+                       }
+               }
+       }
+
+       return schemaMap(r.Schema).InternalValidate(tsm)
+}
+
+// Data returns a ResourceData struct for this Resource. Each return value
+// is a separate copy and can be safely modified differently.
+//
+// The data returned from this function has no actual effect on the Resource
+// itself (including the state given to this function).
+//
+// This function is useful for unit tests and ResourceImporter functions.
+func (r *Resource) Data(s *terraform.InstanceState) *ResourceData {
+       result, err := schemaMap(r.Schema).Data(s, nil)
+       if err != nil {
+               // At the time of writing, this isn't possible (Data never returns
+               // non-nil errors). We panic so that if this ever changes, we find
+               // out about it. There is no obvious reason for Data to ever error.
+               panic(err)
+       }
+
+       // Set the schema version to latest by default
+       result.meta = map[string]interface{}{
+               "schema_version": strconv.Itoa(r.SchemaVersion),
+       }
+
+       return result
+}
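
A hedged sketch of the unit-test usage mentioned above: build a ResourceData from a synthetic InstanceState and read an attribute back through the schema. resourceExample and its "name" attribute are hypothetical:

package example

import (
	"testing"

	"github.com/hashicorp/terraform/terraform"
)

func TestResourceExample_Data(t *testing.T) {
	r := resourceExample() // hypothetical constructor with a "name" attribute
	d := r.Data(&terraform.InstanceState{
		ID:         "abc123",
		Attributes: map[string]string{"name": "demo"},
	})
	if got := d.Get("name").(string); got != "demo" {
		t.Fatalf("expected %q, got %q", "demo", got)
	}
}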
+
+// TestResourceData yields a ResourceData filled with this resource's schema
+// for use in unit testing.
+//
+// TODO: May be removable in favor of the Data function above.
+func (r *Resource) TestResourceData() *ResourceData {
+       return &ResourceData{
+               schema: r.Schema,
+       }
+}
+
+// Returns true if the resource is "top level" i.e. not a sub-resource.
+func (r *Resource) isTopLevel() bool {
+       // TODO: This is a heuristic; replace with a definitive attribute?
+       return r.Create != nil
+}
+
+// checkSchemaVersion determines whether a given InstanceState needs to be
+// migrated by comparing the stored version number against the current
+// SchemaVersion.
+func (r *Resource) checkSchemaVersion(is *terraform.InstanceState) (bool, int) {
+       // Get the raw interface{} value for the schema version. If it doesn't
+       // exist or is nil then set it to zero.
+       raw := is.Meta["schema_version"]
+       if raw == nil {
+               raw = "0"
+       }
+
+       // Try to convert it to a string. If it isn't a string then we pretend
+       // that it isn't set at all. It should always be a string unless the
+       // state was manually tampered with.
+       rawString, ok := raw.(string)
+       if !ok {
+               rawString = "0"
+       }
+
+       stateSchemaVersion, _ := strconv.Atoi(rawString)
+       return stateSchemaVersion < r.SchemaVersion, stateSchemaVersion
+}
+
+func (r *Resource) recordCurrentSchemaVersion(
+       state *terraform.InstanceState) *terraform.InstanceState {
+       if state != nil && r.SchemaVersion > 0 {
+               if state.Meta == nil {
+                       state.Meta = make(map[string]interface{})
+               }
+               state.Meta["schema_version"] = strconv.Itoa(r.SchemaVersion)
+       }
+       return state
+}
+
+// Noop is a convenience implementation of a resource function which takes
+// no action and returns no error.
+func Noop(*ResourceData, interface{}) error {
+       return nil
+}
+
+// RemoveFromState is a convenience implementation of a resource function
+// which sets the resource ID to empty string (to remove it from state)
+// and returns no error.
+func RemoveFromState(d *ResourceData, _ interface{}) error {
+       d.SetId("")
+       return nil
+}
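
Both helpers are meant to be plugged directly into a Resource definition. A minimal sketch, assuming a resource whose deletion only needs to forget the record (all names here are hypothetical):

package example

import "github.com/hashicorp/terraform/helper/schema"

func resourceExample() *schema.Resource {
	return &schema.Resource{
		Create: resourceExampleCreate,
		Read:   schema.Noop,            // nothing to refresh
		Delete: schema.RemoveFromState, // no API call, just drop from state
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true, ForceNew: true},
		},
	}
}

func resourceExampleCreate(d *schema.ResourceData, meta interface{}) error {
	d.SetId(d.Get("name").(string)) // use the configured name as a stable ID
	return nil
}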
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go
new file mode 100644 (file)
index 0000000..b2bc8f6
--- /dev/null
@@ -0,0 +1,502 @@
+package schema
+
+import (
+       "log"
+       "reflect"
+       "strings"
+       "sync"
+       "time"
+
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceData is used to query and set the attributes of a resource.
+//
+// ResourceData is the primary argument received for CRUD operations on
+// a resource as well as configuration of a provider. It is a powerful
+// structure that can be used to not only query data, but check for changes,
+// define partial state updates, etc.
+//
+// The most relevant methods to take a look at are Get, Set, and Partial.
+type ResourceData struct {
+       // Settable (internally)
+       schema   map[string]*Schema
+       config   *terraform.ResourceConfig
+       state    *terraform.InstanceState
+       diff     *terraform.InstanceDiff
+       meta     map[string]interface{}
+       timeouts *ResourceTimeout
+
+       // Don't set
+       multiReader *MultiLevelFieldReader
+       setWriter   *MapFieldWriter
+       newState    *terraform.InstanceState
+       partial     bool
+       partialMap  map[string]struct{}
+       once        sync.Once
+       isNew       bool
+}
+
+// getResult is the internal structure generated by a Get call; it carries
+// some extra data that callers might use.
+type getResult struct {
+       Value          interface{}
+       ValueProcessed interface{}
+       Computed       bool
+       Exists         bool
+       Schema         *Schema
+}
+
+// UnsafeSetFieldRaw allows setting arbitrary values in state to arbitrary
+// values, bypassing schema. This MUST NOT be used in normal circumstances -
+// it exists only to support the remote_state data source.
+func (d *ResourceData) UnsafeSetFieldRaw(key string, value string) {
+       d.once.Do(d.init)
+
+       d.setWriter.unsafeWriteField(key, value)
+}
+
+// Get returns the data for the given key, or nil if the key doesn't exist
+// in the schema.
+//
+// If the key does exist in the schema but doesn't exist in the configuration,
+// then the default value for that type will be returned. For strings, this is
+// "", for numbers it is 0, etc.
+//
+// If you want to test if something is set at all in the configuration,
+// use GetOk.
+func (d *ResourceData) Get(key string) interface{} {
+       v, _ := d.GetOk(key)
+       return v
+}
+
+// GetChange returns the old and new value for a given key.
+//
+// HasChange should be used to check if a change exists. It is possible
+// that both the old and new value are the same if the old value was not
+// set and the new value is. This is common, for example, for boolean
+// fields which have a zero value of false.
+func (d *ResourceData) GetChange(key string) (interface{}, interface{}) {
+       o, n := d.getChange(key, getSourceState, getSourceDiff)
+       return o.Value, n.Value
+}
+
+// GetOk returns the data for the given key and whether or not the key
+// has been set to a non-zero value at some point.
+//
+// The first result will not necessarily be nil if the value doesn't exist.
+// The second result should be checked to determine this information.
+func (d *ResourceData) GetOk(key string) (interface{}, bool) {
+       r := d.getRaw(key, getSourceSet)
+       exists := r.Exists && !r.Computed
+       if exists {
+               // If it exists, we also want to verify it is not the zero-value.
+               value := r.Value
+               zero := r.Schema.Type.Zero()
+
+               if eq, ok := value.(Equal); ok {
+                       exists = !eq.Equal(zero)
+               } else {
+                       exists = !reflect.DeepEqual(value, zero)
+               }
+       }
+
+       return r.Value, exists
+}
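
The practical difference between Get and GetOk shows up when building API requests: Get yields the type's zero value for unset keys, while GetOk reports whether a non-zero value was actually configured. A hedged sketch; "name" and "description" are hypothetical attributes:

package example

import "github.com/hashicorp/terraform/helper/schema"

func expandExampleRequest(d *schema.ResourceData) map[string]interface{} {
	req := map[string]interface{}{
		// Get returns the zero value ("" here) when the key is unset.
		"name": d.Get("name").(string),
	}
	// GetOk distinguishes "unset or zero" from "explicitly configured".
	if v, ok := d.GetOk("description"); ok {
		req["description"] = v.(string)
	}
	return req
}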
+
+func (d *ResourceData) getRaw(key string, level getSource) getResult {
+       var parts []string
+       if key != "" {
+               parts = strings.Split(key, ".")
+       }
+
+       return d.get(parts, level)
+}
+
+// HasChange returns whether or not the given key has been changed.
+func (d *ResourceData) HasChange(key string) bool {
+       o, n := d.GetChange(key)
+
+       // If the type implements the Equal interface, then call that
+       // instead of just doing a reflect.DeepEqual. An example where this is
+       // needed is *Set
+       if eq, ok := o.(Equal); ok {
+               return !eq.Equal(n)
+       }
+
+       return !reflect.DeepEqual(o, n)
+}
+
+// Partial turns partial state mode on/off.
+//
+// When partial state mode is enabled, then only key prefixes specified
+// by SetPartial will be in the final state. This allows providers to return
+// partial states for partially applied resources (when errors occur).
+func (d *ResourceData) Partial(on bool) {
+       d.partial = on
+       if on {
+               if d.partialMap == nil {
+                       d.partialMap = make(map[string]struct{})
+               }
+       } else {
+               d.partialMap = nil
+       }
+}
+
+// Set sets the value for the given key.
+//
+// If the key is invalid or the value is not a correct type, an error
+// will be returned.
+func (d *ResourceData) Set(key string, value interface{}) error {
+       d.once.Do(d.init)
+
+       // If the value is a pointer to a non-struct, get its value and
+       // use that. This allows Set to take a pointer to primitives to
+       // simplify the interface.
+       reflectVal := reflect.ValueOf(value)
+       if reflectVal.Kind() == reflect.Ptr {
+               if reflectVal.IsNil() {
+                       // If the pointer is nil, then the value is just nil
+                       value = nil
+               } else {
+                       // Otherwise, we dereference the pointer as long as it's not
+                       // a pointer to a struct, since struct pointers are allowed.
+                       reflectVal = reflect.Indirect(reflectVal)
+                       if reflectVal.Kind() != reflect.Struct {
+                               value = reflectVal.Interface()
+                       }
+               }
+       }
+
+       return d.setWriter.WriteField(strings.Split(key, "."), value)
+}
+
+// SetPartial adds the key to the final state output while
+// in partial state mode. The key must be a root key in the schema (i.e.
+// it cannot be "list.0").
+//
+// If partial state mode is disabled, then this has no effect. Additionally,
+// whenever partial state mode is toggled, the partial data is cleared.
+func (d *ResourceData) SetPartial(k string) {
+       if d.partial {
+               d.partialMap[k] = struct{}{}
+       }
+}
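
The intended pattern is to turn partial mode on at the top of an apply function, call SetPartial after each step known to have succeeded, and turn it off once everything has been applied. A hedged sketch; updateTags and resize are hypothetical stand-ins for real API calls:

package example

import "github.com/hashicorp/terraform/helper/schema"

func updateTags(id string, tags interface{}) error { return nil }
func resize(id string, size int) error             { return nil }

func resourceExampleUpdate(d *schema.ResourceData, meta interface{}) error {
	d.Partial(true)

	if d.HasChange("tags") {
		if err := updateTags(d.Id(), d.Get("tags")); err != nil {
			// Only keys recorded via SetPartial survive into state on error.
			return err
		}
		d.SetPartial("tags")
	}

	if d.HasChange("size") {
		if err := resize(d.Id(), d.Get("size").(int)); err != nil {
			return err
		}
		d.SetPartial("size")
	}

	// Everything succeeded; leave partial mode so the full state is saved.
	d.Partial(false)
	return nil
}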
+
+func (d *ResourceData) MarkNewResource() {
+       d.isNew = true
+}
+
+func (d *ResourceData) IsNewResource() bool {
+       return d.isNew
+}
+
+// Id returns the ID of the resource.
+func (d *ResourceData) Id() string {
+       var result string
+
+       if d.state != nil {
+               result = d.state.ID
+       }
+
+       if d.newState != nil {
+               result = d.newState.ID
+       }
+
+       return result
+}
+
+// ConnInfo returns the connection info for this resource.
+func (d *ResourceData) ConnInfo() map[string]string {
+       if d.newState != nil {
+               return d.newState.Ephemeral.ConnInfo
+       }
+
+       if d.state != nil {
+               return d.state.Ephemeral.ConnInfo
+       }
+
+       return nil
+}
+
+// SetId sets the ID of the resource. If the value is blank, then the
+// resource is destroyed.
+func (d *ResourceData) SetId(v string) {
+       d.once.Do(d.init)
+       d.newState.ID = v
+}
+
+// SetConnInfo sets the connection info for a resource.
+func (d *ResourceData) SetConnInfo(v map[string]string) {
+       d.once.Do(d.init)
+       d.newState.Ephemeral.ConnInfo = v
+}
+
+// SetType sets the ephemeral type for the data. This is only required
+// for importing.
+func (d *ResourceData) SetType(t string) {
+       d.once.Do(d.init)
+       d.newState.Ephemeral.Type = t
+}
+
+// State returns the new InstanceState after the diff and any Set
+// calls.
+func (d *ResourceData) State() *terraform.InstanceState {
+       var result terraform.InstanceState
+       result.ID = d.Id()
+       result.Meta = d.meta
+
+       // If we have no ID, then this resource doesn't exist and we just
+       // return nil.
+       if result.ID == "" {
+               return nil
+       }
+
+       if d.timeouts != nil {
+               if err := d.timeouts.StateEncode(&result); err != nil {
+                       log.Printf("[ERR] Error encoding Timeout meta to Instance State: %s", err)
+               }
+       }
+
+       // Look for a magic key in the schema that tells us to skip the
+       // integrity check of fields existing in the schema, allowing dynamic
+       // keys to be created.
+       hasDynamicAttributes := false
+       for k := range d.schema {
+               if k == "__has_dynamic_attributes" {
+                       hasDynamicAttributes = true
+                       log.Printf("[INFO] Resource %s has dynamic attributes", result.ID)
+               }
+       }
+
+       // In order to build the final state attributes, we read the full
+       // attribute set as a map[string]interface{}, write it to a MapFieldWriter,
+       // and then use that map.
+       rawMap := make(map[string]interface{})
+       for k := range d.schema {
+               source := getSourceSet
+               if d.partial {
+                       source = getSourceState
+                       if _, ok := d.partialMap[k]; ok {
+                               source = getSourceSet
+                       }
+               }
+
+               raw := d.get([]string{k}, source)
+               if raw.Exists && !raw.Computed {
+                       rawMap[k] = raw.Value
+                       if raw.ValueProcessed != nil {
+                               rawMap[k] = raw.ValueProcessed
+                       }
+               }
+       }
+
+       mapW := &MapFieldWriter{Schema: d.schema}
+       if err := mapW.WriteField(nil, rawMap); err != nil {
+               return nil
+       }
+
+       result.Attributes = mapW.Map()
+
+       if hasDynamicAttributes {
+               // If we have dynamic attributes, just copy the attributes map
+               // one for one into the result attributes.
+               for k, v := range d.setWriter.Map() {
+                       // Don't clobber schema values. This limits usage of dynamic
+                       // attributes to names which _do not_ conflict with schema
+                       // keys!
+                       if _, ok := result.Attributes[k]; !ok {
+                               result.Attributes[k] = v
+                       }
+               }
+       }
+
+       if d.newState != nil {
+               result.Ephemeral = d.newState.Ephemeral
+       }
+
+       // TODO: This is hacky and we can remove this when we have a proper
+       // state writer. We should instead have a proper StateFieldWriter
+       // and use that.
+       for k, schema := range d.schema {
+               if schema.Type != TypeMap {
+                       continue
+               }
+
+               if result.Attributes[k] == "" {
+                       delete(result.Attributes, k)
+               }
+       }
+
+       if v := d.Id(); v != "" {
+               result.Attributes["id"] = d.Id()
+       }
+
+       if d.state != nil {
+               result.Tainted = d.state.Tainted
+       }
+
+       return &result
+}
+
+// Timeout returns the duration for the given timeout key.
+// If the key is not set and no Default is configured, it returns the
+// system default of 20 minutes.
+func (d *ResourceData) Timeout(key string) time.Duration {
+       key = strings.ToLower(key)
+
+       var timeout *time.Duration
+       switch key {
+       case TimeoutCreate:
+               timeout = d.timeouts.Create
+       case TimeoutRead:
+               timeout = d.timeouts.Read
+       case TimeoutUpdate:
+               timeout = d.timeouts.Update
+       case TimeoutDelete:
+               timeout = d.timeouts.Delete
+       }
+
+       if timeout != nil {
+               return *timeout
+       }
+
+       if d.timeouts.Default != nil {
+               return *d.timeouts.Default
+       }
+
+       // Return system default of 20 minutes
+       return 20 * time.Minute
+}
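
In a CRUD function the returned duration is typically used as a polling deadline. A hedged sketch; checkReady is a hypothetical stand-in for probing the remote API:

package example

import (
	"fmt"
	"time"

	"github.com/hashicorp/terraform/helper/schema"
)

func checkReady(id string) (bool, error) { return true, nil }

func waitForExampleReady(d *schema.ResourceData) error {
	// Honors a user-configured create timeout, the resource default, or
	// the 20-minute system default, in that order.
	deadline := time.Now().Add(d.Timeout(schema.TimeoutCreate))
	for time.Now().Before(deadline) {
		ready, err := checkReady(d.Id())
		if err != nil {
			return err
		}
		if ready {
			return nil
		}
		time.Sleep(10 * time.Second)
	}
	return fmt.Errorf("timed out waiting for %s to become ready", d.Id())
}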
+
+func (d *ResourceData) init() {
+       // Initialize the field that will store our new state
+       var copyState terraform.InstanceState
+       if d.state != nil {
+               copyState = *d.state.DeepCopy()
+       }
+       d.newState = &copyState
+
+       // Initialize the map for storing set data
+       d.setWriter = &MapFieldWriter{Schema: d.schema}
+
+       // Initialize the reader for getting data from the
+       // underlying sources (config, diff, etc.)
+       readers := make(map[string]FieldReader)
+       var stateAttributes map[string]string
+       if d.state != nil {
+               stateAttributes = d.state.Attributes
+               readers["state"] = &MapFieldReader{
+                       Schema: d.schema,
+                       Map:    BasicMapReader(stateAttributes),
+               }
+       }
+       if d.config != nil {
+               readers["config"] = &ConfigFieldReader{
+                       Schema: d.schema,
+                       Config: d.config,
+               }
+       }
+       if d.diff != nil {
+               readers["diff"] = &DiffFieldReader{
+                       Schema: d.schema,
+                       Diff:   d.diff,
+                       Source: &MultiLevelFieldReader{
+                               Levels:  []string{"state", "config"},
+                               Readers: readers,
+                       },
+               }
+       }
+       readers["set"] = &MapFieldReader{
+               Schema: d.schema,
+               Map:    BasicMapReader(d.setWriter.Map()),
+       }
+       d.multiReader = &MultiLevelFieldReader{
+               Levels: []string{
+                       "state",
+                       "config",
+                       "diff",
+                       "set",
+               },
+
+               Readers: readers,
+       }
+}
+
+func (d *ResourceData) diffChange(
+       k string) (interface{}, interface{}, bool, bool) {
+       // Get the change between the state and the config.
+       o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
+       if !o.Exists {
+               o.Value = nil
+       }
+       if !n.Exists {
+               n.Value = nil
+       }
+
+       // Return the old, new, and whether there is a change
+       return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed
+}
+
+func (d *ResourceData) getChange(
+       k string,
+       oldLevel getSource,
+       newLevel getSource) (getResult, getResult) {
+       var parts, parts2 []string
+       if k != "" {
+               parts = strings.Split(k, ".")
+               parts2 = strings.Split(k, ".")
+       }
+
+       o := d.get(parts, oldLevel)
+       n := d.get(parts2, newLevel)
+       return o, n
+}
+
+func (d *ResourceData) get(addr []string, source getSource) getResult {
+       d.once.Do(d.init)
+
+       level := "set"
+       flags := source & ^getSourceLevelMask
+       exact := flags&getSourceExact != 0
+       source = source & getSourceLevelMask
+       if source >= getSourceSet {
+               level = "set"
+       } else if source >= getSourceDiff {
+               level = "diff"
+       } else if source >= getSourceConfig {
+               level = "config"
+       } else {
+               level = "state"
+       }
+
+       var result FieldReadResult
+       var err error
+       if exact {
+               result, err = d.multiReader.ReadFieldExact(addr, level)
+       } else {
+               result, err = d.multiReader.ReadFieldMerge(addr, level)
+       }
+       if err != nil {
+               panic(err)
+       }
+
+       // If the result doesn't exist, then we set the value to the zero value
+       var schema *Schema
+       if schemaL := addrToSchema(addr, d.schema); len(schemaL) > 0 {
+               schema = schemaL[len(schemaL)-1]
+       }
+
+       if result.Value == nil && schema != nil {
+               result.Value = result.ValueOrZero(schema)
+       }
+
+       // Transform the FieldReadResult into a getResult. It might be worth
+       // merging these two structures one day.
+       return getResult{
+               Value:          result.Value,
+               ValueProcessed: result.ValueProcessed,
+               Computed:       result.Computed,
+               Exists:         result.Exists,
+               Schema:         schema,
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data_get_source.go
new file mode 100644 (file)
index 0000000..7dd655d
--- /dev/null
@@ -0,0 +1,17 @@
+package schema
+
+//go:generate stringer -type=getSource resource_data_get_source.go
+
+// getSource represents the level we want to get for a value (internally).
+// Any source less than or equal to the level will be loaded (whichever
+// has a value first).
+type getSource byte
+
+const (
+       getSourceState getSource = 1 << iota
+       getSourceConfig
+       getSourceDiff
+       getSourceSet
+       getSourceExact               // Only get from the _exact_ level
+       getSourceLevelMask getSource = getSourceState | getSourceConfig | getSourceDiff | getSourceSet
+)
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_importer.go
new file mode 100644 (file)
index 0000000..5dada3c
--- /dev/null
@@ -0,0 +1,52 @@
+package schema
+
+// ResourceImporter defines how a resource is imported in Terraform. This
+// can be set onto a Resource struct to make it Importable. Not all resources
+// have to be importable; if a Resource doesn't have a ResourceImporter then
+// it won't be importable.
+//
+// "Importing" in Terraform is the process of taking an already-created
+// resource and bringing it under Terraform management. This can include
+// updating Terraform state, generating Terraform configuration, etc.
+type ResourceImporter struct {
+       // The functions below must all be implemented for importing to work.
+
+       // State is called to convert an ID to one or more InstanceState to
+       // insert into the Terraform state. If this isn't specified, then
+       // the ID is passed straight through.
+       State StateFunc
+}
+
+// StateFunc is the function called to import a resource into the
+// Terraform state. It is given a ResourceData with only ID set. This
+// ID is going to be an arbitrary value given by the user and may not map
+// directly to the ID format that the resource expects, so that should
+// be validated.
+//
+// This should return a slice of ResourceData that turn into the state
+// that was imported. This might be as simple as returning only the argument
+// that was given to the function. In other cases (such as AWS security groups),
+// an import may fan out to multiple resources and this will have to return
+// multiple.
+//
+// To create the ResourceData structures for other resource types (if
+// you have to), instantiate your resource and call the Data function.
+type StateFunc func(*ResourceData, interface{}) ([]*ResourceData, error)
+
+// InternalValidate should be called to validate the structure of this
+// importer. This should be called in a unit test.
+//
+// Resource.InternalValidate() will automatically call this, so this doesn't
+// need to be called manually. Further, Resource.InternalValidate() is
+// automatically called by Provider.InternalValidate(), so you only need
+// to internal validate the provider.
+func (r *ResourceImporter) InternalValidate() error {
+       return nil
+}
+
+// ImportStatePassthrough is an implementation of StateFunc that can be
+// used to simply pass the ID directly through. This should be used only
+// in the case that an ID-only refresh is possible.
+func ImportStatePassthrough(d *ResourceData, m interface{}) ([]*ResourceData, error) {
+       return []*ResourceData{d}, nil
+}
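
Wiring this into a resource is a one-liner when an ID-only refresh is possible. A minimal sketch, with CRUD functions and schema elided:

package example

import "github.com/hashicorp/terraform/helper/schema"

func resourceExampleImportable() *schema.Resource {
	return &schema.Resource{
		// CRUD functions and Schema elided for brevity.
		Importer: &schema.ResourceImporter{
			// The ID given to `terraform import` is passed straight through;
			// the resource's Read function then fills in the remaining state.
			State: schema.ImportStatePassthrough,
		},
	}
}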
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_timeout.go
new file mode 100644 (file)
index 0000000..445819f
--- /dev/null
@@ -0,0 +1,237 @@
+package schema
+
+import (
+       "fmt"
+       "log"
+       "time"
+
+       "github.com/hashicorp/terraform/terraform"
+       "github.com/mitchellh/copystructure"
+)
+
+const TimeoutKey = "e2bfb730-ecaa-11e6-8f88-34363bc7c4c0"
+const TimeoutsConfigKey = "timeouts"
+
+const (
+       TimeoutCreate  = "create"
+       TimeoutRead    = "read"
+       TimeoutUpdate  = "update"
+       TimeoutDelete  = "delete"
+       TimeoutDefault = "default"
+)
+
+func timeoutKeys() []string {
+       return []string{
+               TimeoutCreate,
+               TimeoutRead,
+               TimeoutUpdate,
+               TimeoutDelete,
+               TimeoutDefault,
+       }
+}
+
+// DefaultTimeout accepts a time.Duration, an int64, or a float64 and
+// normalizes it to a *time.Duration.
+func DefaultTimeout(tx interface{}) *time.Duration {
+       var td time.Duration
+       switch raw := tx.(type) {
+       case time.Duration:
+               return &raw
+       case int64:
+               td = time.Duration(raw)
+       case float64:
+               td = time.Duration(int64(raw))
+       default:
+               log.Printf("[WARN] Unknown type in DefaultTimeout: %#v", tx)
+       }
+       return &td
+}
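
Together with the ResourceTimeout struct below, DefaultTimeout is how a resource declares which timeouts it supports and their default values. A minimal sketch:

package example

import (
	"time"

	"github.com/hashicorp/terraform/helper/schema"
)

func resourceExampleWithTimeouts() *schema.Resource {
	return &schema.Resource{
		// CRUD functions and Schema elided for brevity.
		Timeouts: &schema.ResourceTimeout{
			Create: schema.DefaultTimeout(10 * time.Minute),
			Delete: schema.DefaultTimeout(30 * time.Minute),
		},
	}
}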
+
+type ResourceTimeout struct {
+       Create, Read, Update, Delete, Default *time.Duration
+}
+
+// ConfigDecode takes a resource and its configuration (available in the diff)
+// and validates and parses the timeouts into `t`.
+func (t *ResourceTimeout) ConfigDecode(s *Resource, c *terraform.ResourceConfig) error {
+       if s.Timeouts != nil {
+               raw, err := copystructure.Copy(s.Timeouts)
+               if err != nil {
+                       log.Printf("[DEBUG] Error with deep copy: %s", err)
+               }
+               *t = *raw.(*ResourceTimeout)
+       }
+
+       if raw, ok := c.Config[TimeoutsConfigKey]; ok {
+               if configTimeouts, ok := raw.([]map[string]interface{}); ok {
+                       for _, timeoutValues := range configTimeouts {
+                               // loop through each Timeout given in the configuration and validate it
+                               // against the Timeouts defined in the resource
+                               for timeKey, timeValue := range timeoutValues {
+                                       // validate that we're dealing with the normal CRUD actions
+                                       var found bool
+                                       for _, key := range timeoutKeys() {
+                                               if timeKey == key {
+                                                       found = true
+                                                       break
+                                               }
+                                       }
+
+                                       if !found {
+                                               return fmt.Errorf("Unsupported Timeout configuration key found (%s)", timeKey)
+                                       }
+
+                                       // Get timeout
+                                       rt, err := time.ParseDuration(timeValue.(string))
+                                       if err != nil {
+                                               return fmt.Errorf("Error parsing Timeout for (%s): %s", timeKey, err)
+                                       }
+
+                                       var timeout *time.Duration
+                                       switch timeKey {
+                                       case TimeoutCreate:
+                                               timeout = t.Create
+                                       case TimeoutUpdate:
+                                               timeout = t.Update
+                                       case TimeoutRead:
+                                               timeout = t.Read
+                                       case TimeoutDelete:
+                                               timeout = t.Delete
+                                       case TimeoutDefault:
+                                               timeout = t.Default
+                                       }
+
+                                       // If the resource has not declared this in the definition, then error
+                                       // with an unsupported message
+                                       if timeout == nil {
+                                               return unsupportedTimeoutKeyError(timeKey)
+                                       }
+
+                                       *timeout = rt
+                               }
+                       }
+               } else {
+                       log.Printf("[WARN] Invalid Timeout structure found, skipping timeouts")
+               }
+       }
+
+       return nil
+}
+
+func unsupportedTimeoutKeyError(key string) error {
+       return fmt.Errorf("Timeout Key (%s) is not supported", key)
+}
+
+// DiffEncode, StateEncode, and metaDecode are analogous to the Go stdlib
+// JSON Encoder/Decoder types: they encode/decode a timeouts struct to and from
+// an instance diff, which is where the timeout data is stored after a diff so
+// it can be passed into Apply.
+//
+// StateEncode encodes the timeouts into the ResourceData's InstanceState for
+// saving to state.
+//
+func (t *ResourceTimeout) DiffEncode(id *terraform.InstanceDiff) error {
+       return t.metaEncode(id)
+}
+
+func (t *ResourceTimeout) StateEncode(is *terraform.InstanceState) error {
+       return t.metaEncode(is)
+}
+
+// metaEncode encodes the ResourceTimeout into a map[string]interface{} format
+// and stores it in the Meta field of the interface it's given.
+// Assumes the interface is either *terraform.InstanceState or
+// *terraform.InstanceDiff, returns an error otherwise
+func (t *ResourceTimeout) metaEncode(ids interface{}) error {
+       m := make(map[string]interface{})
+
+       if t.Create != nil {
+               m[TimeoutCreate] = t.Create.Nanoseconds()
+       }
+       if t.Read != nil {
+               m[TimeoutRead] = t.Read.Nanoseconds()
+       }
+       if t.Update != nil {
+               m[TimeoutUpdate] = t.Update.Nanoseconds()
+       }
+       if t.Delete != nil {
+               m[TimeoutDelete] = t.Delete.Nanoseconds()
+       }
+       if t.Default != nil {
+               m[TimeoutDefault] = t.Default.Nanoseconds()
+               // for any key above that is nil, if default is specified, we need to
+               // populate it with the default
+               for _, k := range timeoutKeys() {
+                       if _, ok := m[k]; !ok {
+                               m[k] = t.Default.Nanoseconds()
+                       }
+               }
+       }
+
+       // only add the Timeout to the Meta if we have values
+       if len(m) > 0 {
+               switch instance := ids.(type) {
+               case *terraform.InstanceDiff:
+                       if instance.Meta == nil {
+                               instance.Meta = make(map[string]interface{})
+                       }
+                       instance.Meta[TimeoutKey] = m
+               case *terraform.InstanceState:
+                       if instance.Meta == nil {
+                               instance.Meta = make(map[string]interface{})
+                       }
+                       instance.Meta[TimeoutKey] = m
+               default:
+                       return fmt.Errorf("Error matching type for Diff Encode")
+               }
+       }
+
+       return nil
+}
+
+func (t *ResourceTimeout) StateDecode(id *terraform.InstanceState) error {
+       return t.metaDecode(id)
+}
+func (t *ResourceTimeout) DiffDecode(is *terraform.InstanceDiff) error {
+       return t.metaDecode(is)
+}
+
+func (t *ResourceTimeout) metaDecode(ids interface{}) error {
+       var rawMeta interface{}
+       var ok bool
+       switch rawInstance := ids.(type) {
+       case *terraform.InstanceDiff:
+               rawMeta, ok = rawInstance.Meta[TimeoutKey]
+               if !ok {
+                       return nil
+               }
+       case *terraform.InstanceState:
+               rawMeta, ok = rawInstance.Meta[TimeoutKey]
+               if !ok {
+                       return nil
+               }
+       default:
+               return fmt.Errorf("Unknown or unsupported type in metaDecode: %#v", ids)
+       }
+
+       times := rawMeta.(map[string]interface{})
+       if len(times) == 0 {
+               return nil
+       }
+
+       if v, ok := times[TimeoutCreate]; ok {
+               t.Create = DefaultTimeout(v)
+       }
+       if v, ok := times[TimeoutRead]; ok {
+               t.Read = DefaultTimeout(v)
+       }
+       if v, ok := times[TimeoutUpdate]; ok {
+               t.Update = DefaultTimeout(v)
+       }
+       if v, ok := times[TimeoutDelete]; ok {
+               t.Delete = DefaultTimeout(v)
+       }
+       if v, ok := times[TimeoutDefault]; ok {
+               t.Default = DefaultTimeout(v)
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go
new file mode 100644 (file)
index 0000000..32d1721
--- /dev/null
@@ -0,0 +1,1537 @@
+// schema is a high-level framework for easily writing new providers
+// for Terraform. Usage of schema is recommended over attempting to write
+// to the low-level plugin interfaces manually.
+//
+// schema breaks down provider creation into simple CRUD operations for
+// resources. The logic of diffing, destroying before creating, updating
+// or creating, etc. is all handled by the framework. The plugin author
+// only needs to implement a configuration schema and the CRUD operations and
+// everything else is meant to just work.
+//
+// A good starting point is to view the Provider structure.
+package schema
+
+import (
+       "fmt"
+       "os"
+       "reflect"
+       "sort"
+       "strconv"
+       "strings"
+
+       "github.com/hashicorp/terraform/terraform"
+       "github.com/mitchellh/mapstructure"
+)
+
+// type used for schema package context keys
+type contextKey string
+
+// Schema is used to describe the structure of a value.
+//
+// Read the documentation of the struct elements for important details.
+type Schema struct {
+       // Type is the type of the value and must be one of the ValueType values.
+       //
+       // This type not only determines what type is expected/valid in configuring
+       // this value, but also what type is returned when ResourceData.Get is
+       // called. The types returned by Get are:
+       //
+       //   TypeBool - bool
+       //   TypeInt - int
+       //   TypeFloat - float64
+       //   TypeString - string
+       //   TypeList - []interface{}
+       //   TypeMap - map[string]interface{}
+       //   TypeSet - *schema.Set
+       //
+       Type ValueType
+
+       // If one of these is set, then this item can come from the configuration.
+       // Both cannot be set. If Optional is set, the value is optional. If
+       // Required is set, the value is required.
+       //
+       // One of these must be set if the value is not computed. That is:
+       // value either comes from the config, is computed, or is both.
+       Optional bool
+       Required bool
+
+       // If this is non-nil, the provided function will be used during diff
+       // of this field. If this is nil, a default diff for the type of the
+       // schema will be used.
+       //
+       // This allows comparison based on something other than primitive, list
+       // or map equality - for example SSH public keys may be considered
+       // equivalent regardless of trailing whitespace.
+       DiffSuppressFunc SchemaDiffSuppressFunc
+
+       // If this is non-nil, then this will be a default value that is used
+       // when this item is not set in the configuration.
+       //
+       // DefaultFunc can be specified to compute a dynamic default.
+       // Only one of Default or DefaultFunc can be set. If DefaultFunc is
+       // used then its return value should be stable to avoid generating
+       // confusing/perpetual diffs.
+       //
+       // Changing either Default or the return value of DefaultFunc can be
+       // a breaking change, especially if the attribute in question has
+       // ForceNew set. If a default needs to change to align with changing
+       // assumptions in an upstream API then it may be necessary to also use
+       // the MigrateState function on the resource to change the state to match,
+       // or have the Read function adjust the state value to align with the
+       // new default.
+       //
+       // If Required is true above, then Default cannot be set. DefaultFunc
+       // can be set with Required. If the DefaultFunc returns nil, then there
+       // will be no default and the user will be asked to fill it in.
+       //
+       // If either of these is set, then the user won't be asked for input
+       // for this key if the default is not nil.
+       Default     interface{}
+       DefaultFunc SchemaDefaultFunc
+
+       // Description is used as the description for docs or asking for user
+       // input. It should be relatively short (a few sentences max) and should
+       // be formatted to fit a CLI.
+       Description string
+
+       // InputDefault is the default value to use for when inputs are requested.
+       // This differs from Default in that if Default is set, no input is
+       // asked for. If Input is asked, this will be the default value offered.
+       InputDefault string
+
+       // The fields below relate to diffs.
+       //
+       // If Computed is true, then the result of this value is computed
+       // (unless specified by config) on creation.
+       //
+       // If ForceNew is true, then a change in this resource necessitates
+       // the creation of a new resource.
+       //
+       // StateFunc is a function called to change the value of this before
+       // storing it in the state (and likewise before comparing for diffs).
+       // The use for this is for example with large strings, you may want
+       // to simply store the hash of it.
+       Computed  bool
+       ForceNew  bool
+       StateFunc SchemaStateFunc
+
+       // The following fields are only set for a TypeList or TypeSet Type.
+       //
+       // Elem must be either a *Schema or a *Resource only if the Type is
+       // TypeList, and represents what the element type is. If it is *Schema,
+       // the element type is just a simple value. If it is *Resource, the
+       // element type is a complex structure, potentially with its own lifecycle.
+       //
+       // MaxItems defines the maximum number of items that can exist within a
+       // TypeSet or TypeList. A specific use case would be a TypeSet that wraps
+       // a complex structure where more than one instance would cause
+       // instability.
+       //
+       // MinItems defines the minimum number of items that can exist within a
+       // TypeSet or TypeList. A specific use case would be a TypeSet that wraps
+       // a complex structure where fewer than one instance would cause
+       // instability.
+       //
+       // PromoteSingle, if true, will allow single elements to be standalone
+       // and promote them to a list. For example "foo" would be promoted to
+       // ["foo"] automatically. This is primarily for legacy reasons and the
+       // ambiguity is not recommended for new usage. Promotion is only allowed
+       // for primitive element types.
+       Elem          interface{}
+       MaxItems      int
+       MinItems      int
+       PromoteSingle bool
+
+       // The following fields are only valid for a TypeSet type.
+       //
+       // Set defines a function to determine the unique ID of an item so that
+       // a proper set can be built.
+       Set SchemaSetFunc
+
+       // ComputedWhen is a set of queries on the configuration. Whenever any
+       // of these things is changed, it will require a recompute (this requires
+       // that Computed is set to true).
+       //
+       // NOTE: This currently does not work.
+       ComputedWhen []string
+
+       // ConflictsWith is a set of schema keys that conflict with this schema.
+       // This will only check that they're set in the _config_. This will not
+       // raise an error for a malfunctioning resource that sets a conflicting
+       // key.
+       ConflictsWith []string
+
+       // When Deprecated is set, this attribute is deprecated.
+       //
+       // A deprecated field still works, but will probably stop working in the near
+       // future. This string is the message shown to the user with instructions on
+       // how to address the deprecation.
+       Deprecated string
+
+       // When Removed is set, this attribute has been removed from the schema
+       //
+       // Removed attributes can be left in the Schema to generate informative error
+       // messages for the user when they show up in resource configurations.
+       // This string is the message shown to the user with instructions on
+       // what to do about the removed attribute.
+       Removed string
+
+       // ValidateFunc allows individual fields to define arbitrary validation
+       // logic. It is given the configured value as an interface{} that is
+       // guaranteed to be of the proper Schema type, and it can return warnings
+       // or errors based on inspection of that value.
+       //
+       // ValidateFunc currently only works for primitive types.
+       ValidateFunc SchemaValidateFunc
+
+       // Sensitive ensures that the attribute's value does not get displayed in
+       // logs or regular output. It should be used for passwords or other
+       // secret fields. Future versions of Terraform may encrypt these
+       // values.
+       Sensitive bool
+}
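
A concrete declaration exercising several of the fields documented above; a hedged sketch not tied to any real provider:

package example

import "github.com/hashicorp/terraform/helper/schema"

var exampleSchema = map[string]*schema.Schema{
	"name": {
		Type:     schema.TypeString,
		Required: true,
		ForceNew: true, // changing the name replaces the resource
	},
	"retries": {
		Type:     schema.TypeInt,
		Optional: true,
		Default:  3,
	},
	"endpoints": {
		Type:     schema.TypeList,
		Optional: true,
		MaxItems: 5,
		Elem:     &schema.Schema{Type: schema.TypeString},
	},
	"fingerprint": {
		Type:     schema.TypeString,
		Computed: true, // filled in by the provider after creation
	},
}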
+
+// SchemaDiffSuppressFunc is a function which can be used to determine
+// whether a detected diff on a schema element is "valid" or not, and
+// suppress it from the plan if necessary.
+//
+// Return true if the diff should be suppressed, false to retain it.
+type SchemaDiffSuppressFunc func(k, old, new string, d *ResourceData) bool
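
A typical suppression function normalizes both values before comparing. A minimal sketch that treats strings differing only in case as equal:

package example

import (
	"strings"

	"github.com/hashicorp/terraform/helper/schema"
)

func suppressCaseDiff(k, old, new string, d *schema.ResourceData) bool {
	// Returning true suppresses the diff for this attribute.
	return strings.EqualFold(old, new)
}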
+
+// SchemaDefaultFunc is a function called to return a default value for
+// a field.
+type SchemaDefaultFunc func() (interface{}, error)
+
+// EnvDefaultFunc is a helper function that returns the value of the
+// given environment variable, if one exists, or the default value
+// otherwise.
+func EnvDefaultFunc(k string, dv interface{}) SchemaDefaultFunc {
+       return func() (interface{}, error) {
+               if v := os.Getenv(k); v != "" {
+                       return v, nil
+               }
+
+               return dv, nil
+       }
+}
+
+// MultiEnvDefaultFunc is a helper function that returns the value of the first
+// environment variable in the given list that returns a non-empty value. If
+// none of the environment variables return a value, the default value is
+// returned.
+func MultiEnvDefaultFunc(ks []string, dv interface{}) SchemaDefaultFunc {
+       return func() (interface{}, error) {
+               for _, k := range ks {
+                       if v := os.Getenv(k); v != "" {
+                               return v, nil
+                       }
+               }
+               return dv, nil
+       }
+}
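
These helpers are most often used for provider-level settings such as credentials. A hedged sketch; the environment variable names are illustrative:

package example

import "github.com/hashicorp/terraform/helper/schema"

var providerSchema = map[string]*schema.Schema{
	"api_key": {
		Type:        schema.TypeString,
		Required:    true,
		DefaultFunc: schema.EnvDefaultFunc("EXAMPLE_API_KEY", nil),
	},
	"region": {
		Type:     schema.TypeString,
		Optional: true,
		DefaultFunc: schema.MultiEnvDefaultFunc(
			[]string{"EXAMPLE_REGION", "EXAMPLE_DEFAULT_REGION"}, "us-east-1"),
	},
}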
+
+// SchemaSetFunc is a function that must return a unique ID for the given
+// element. This unique ID is used to store the element in a hash.
+type SchemaSetFunc func(interface{}) int
+
+// SchemaStateFunc is a function used to convert some type to a string
+// to be stored in the state.
+type SchemaStateFunc func(interface{}) string
+
+// SchemaValidateFunc is a function used to validate a single field in the
+// schema.
+type SchemaValidateFunc func(interface{}, string) ([]string, []error)
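
A validation function receives the decoded value and the attribute path, and returns warnings and errors. A minimal sketch validating a port number:

package example

import "fmt"

func validatePort(v interface{}, k string) ([]string, []error) {
	port := v.(int) // the schema guarantees the declared type
	if port < 1 || port > 65535 {
		return nil, []error{fmt.Errorf("%q must be between 1 and 65535, got %d", k, port)}
	}
	return nil, nil
}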
+
+func (s *Schema) GoString() string {
+       return fmt.Sprintf("*%#v", *s)
+}
+
+// Returns a default value for this schema by either reading Default or
+// evaluating DefaultFunc. If neither of these are defined, returns nil.
+func (s *Schema) DefaultValue() (interface{}, error) {
+       if s.Default != nil {
+               return s.Default, nil
+       }
+
+       if s.DefaultFunc != nil {
+               defaultValue, err := s.DefaultFunc()
+               if err != nil {
+                       return nil, fmt.Errorf("error loading default: %s", err)
+               }
+               return defaultValue, nil
+       }
+
+       return nil, nil
+}
+
+// Returns a zero value for the schema.
+func (s *Schema) ZeroValue() interface{} {
+       // If it's a set then we'll do a bit of extra work to provide the
+       // right hashing function in our empty value.
+       if s.Type == TypeSet {
+               setFunc := s.Set
+               if setFunc == nil {
+                       // Default set function uses the schema to hash the whole value
+                       elem := s.Elem
+                       switch t := elem.(type) {
+                       case *Schema:
+                               setFunc = HashSchema(t)
+                       case *Resource:
+                               setFunc = HashResource(t)
+                       default:
+                               panic("invalid set element type")
+                       }
+               }
+               return &Set{F: setFunc}
+       } else {
+               return s.Type.Zero()
+       }
+}
+
+func (s *Schema) finalizeDiff(
+       d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff {
+       if d == nil {
+               return d
+       }
+
+       if s.Type == TypeBool {
+               normalizeBoolString := func(s string) string {
+                       switch s {
+                       case "0":
+                               return "false"
+                       case "1":
+                               return "true"
+                       }
+                       return s
+               }
+               d.Old = normalizeBoolString(d.Old)
+               d.New = normalizeBoolString(d.New)
+       }
+
+       if s.Computed && !d.NewRemoved && d.New == "" {
+               // Computed attribute without a new value set
+               d.NewComputed = true
+       }
+
+       if s.ForceNew {
+               // ForceNew, mark that this field is requiring new under the
+               // following conditions, explained below:
+               //
+               //   * Old != New - There is a change in value. This field
+               //       is therefore causing a new resource.
+               //
+               //   * NewComputed - This field is being computed, hence a
+               //       potential change in value, mark as causing a new resource.
+               d.RequiresNew = d.Old != d.New || d.NewComputed
+       }
+
+       if d.NewRemoved {
+               return d
+       }
+
+       if s.Computed {
+               if d.Old != "" && d.New == "" {
+                       // This is a computed value with an old value set already,
+                       // just let it go.
+                       return nil
+               }
+
+               if d.New == "" {
+                       // Computed attribute without a new value set
+                       d.NewComputed = true
+               }
+       }
+
+       if s.Sensitive {
+               // Set the Sensitive flag so output is hidden in the UI
+               d.Sensitive = true
+       }
+
+       return d
+}
+
+// schemaMap is a wrapper that adds nice functions on top of schemas.
+type schemaMap map[string]*Schema
+
+// Data returns a ResourceData for the given schema, state, and diff.
+//
+// The diff is optional.
+func (m schemaMap) Data(
+       s *terraform.InstanceState,
+       d *terraform.InstanceDiff) (*ResourceData, error) {
+       return &ResourceData{
+               schema: m,
+               state:  s,
+               diff:   d,
+       }, nil
+}
+
+// Diff returns the diff for a resource given the schema map,
+// state, and configuration.
+func (m schemaMap) Diff(
+       s *terraform.InstanceState,
+       c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+       result := new(terraform.InstanceDiff)
+       result.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+       // Make sure to mark if the resource is tainted
+       if s != nil {
+               result.DestroyTainted = s.Tainted
+       }
+
+       d := &ResourceData{
+               schema: m,
+               state:  s,
+               config: c,
+       }
+
+       for k, schema := range m {
+               err := m.diff(k, schema, result, d, false)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // If the diff requires a new resource, then we recompute the diff
+       // so we have the complete new resource diff, and preserve the
+       // RequiresNew fields where necessary so the user knows exactly what
+       // caused that.
+       if result.RequiresNew() {
+               // Create the new diff
+               result2 := new(terraform.InstanceDiff)
+               result2.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+               // Preserve the DestroyTainted flag
+               result2.DestroyTainted = result.DestroyTainted
+
+               // Reset the data to not contain state. We have to call init()
+               // again in order to reset the FieldReaders.
+               d.state = nil
+               d.init()
+
+               // Perform the diff again
+               for k, schema := range m {
+                       err := m.diff(k, schema, result2, d, false)
+                       if err != nil {
+                               return nil, err
+                       }
+               }
+
+               // Force all the fields to not force a new since we know what we
+               // want to force new.
+               for k, attr := range result2.Attributes {
+                       if attr == nil {
+                               continue
+                       }
+
+                       if attr.RequiresNew {
+                               attr.RequiresNew = false
+                       }
+
+                       if s != nil {
+                               attr.Old = s.Attributes[k]
+                       }
+               }
+
+               // Now copy in all the requires new diffs...
+               for k, attr := range result.Attributes {
+                       if attr == nil {
+                               continue
+                       }
+
+                       newAttr, ok := result2.Attributes[k]
+                       if !ok {
+                               newAttr = attr
+                       }
+
+                       if attr.RequiresNew {
+                               newAttr.RequiresNew = true
+                       }
+
+                       result2.Attributes[k] = newAttr
+               }
+
+               // And set the diff!
+               result = result2
+       }
+
+       // Remove any nil diffs just to keep things clean
+       for k, v := range result.Attributes {
+               if v == nil {
+                       delete(result.Attributes, k)
+               }
+       }
+
+       // Go through and detect all of the ComputedWhens now that we've
+       // finished the diff.
+       // TODO
+
+       if result.Empty() {
+               // If we don't have any diff elements, just return nil
+               return nil, nil
+       }
+
+       return result, nil
+}
+
+// Input implements the terraform.ResourceProvider method by asking
+// for input for required configuration keys that don't have a value.
+func (m schemaMap) Input(
+       input terraform.UIInput,
+       c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+       keys := make([]string, 0, len(m))
+       for k := range m {
+               keys = append(keys, k)
+       }
+       sort.Strings(keys)
+
+       for _, k := range keys {
+               v := m[k]
+
+               // Skip things that don't require config, if that is even valid
+               // for a provider schema.
+               // Required XOR Optional must always be true to validate, so we only
+               // need to check one.
+               if v.Optional {
+                       continue
+               }
+
+               // Deprecated fields should never prompt
+               if v.Deprecated != "" {
+                       continue
+               }
+
+               // Skip things that have a value of some sort already
+               if _, ok := c.Raw[k]; ok {
+                       continue
+               }
+
+               // Skip if it has a default value
+               defaultValue, err := v.DefaultValue()
+               if err != nil {
+                       return nil, fmt.Errorf("%s: error loading default: %s", k, err)
+               }
+               if defaultValue != nil {
+                       continue
+               }
+
+               var value interface{}
+               switch v.Type {
+               case TypeBool, TypeInt, TypeFloat, TypeSet, TypeList:
+                       continue
+               case TypeString:
+                       value, err = m.inputString(input, k, v)
+               default:
+                       panic(fmt.Sprintf("Unknown type for input: %#v", v.Type))
+               }
+
+               if err != nil {
+                       return nil, fmt.Errorf(
+                               "%s: %s", k, err)
+               }
+
+               c.Config[k] = value
+       }
+
+       return c, nil
+}
+
+// Validate validates the configuration against this schema mapping.
+func (m schemaMap) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+       return m.validateObject("", m, c)
+}
+
+// InternalValidate validates the format of this schema. This should be called
+// from a unit test (and not in user-path code) to verify that a schema
+// is properly built.
+func (m schemaMap) InternalValidate(topSchemaMap schemaMap) error {
+       if topSchemaMap == nil {
+               topSchemaMap = m
+       }
+       for k, v := range m {
+               if v.Type == TypeInvalid {
+                       return fmt.Errorf("%s: Type must be specified", k)
+               }
+
+               if v.Optional && v.Required {
+                       return fmt.Errorf("%s: Optional or Required must be set, not both", k)
+               }
+
+               if v.Required && v.Computed {
+                       return fmt.Errorf("%s: Cannot be both Required and Computed", k)
+               }
+
+               if !v.Required && !v.Optional && !v.Computed {
+                       return fmt.Errorf("%s: One of optional, required, or computed must be set", k)
+               }
+
+               if v.Computed && v.Default != nil {
+                       return fmt.Errorf("%s: Default must be nil if computed", k)
+               }
+
+               if v.Required && v.Default != nil {
+                       return fmt.Errorf("%s: Default cannot be set with Required", k)
+               }
+
+               if len(v.ComputedWhen) > 0 && !v.Computed {
+                       return fmt.Errorf("%s: ComputedWhen can only be set with Computed", k)
+               }
+
+               if len(v.ConflictsWith) > 0 && v.Required {
+                       return fmt.Errorf("%s: ConflictsWith cannot be set with Required", k)
+               }
+
+               if len(v.ConflictsWith) > 0 {
+                       for _, key := range v.ConflictsWith {
+                               parts := strings.Split(key, ".")
+                               sm := topSchemaMap
+                               var target *Schema
+                               for _, part := range parts {
+                                       // Skip index fields
+                                       if _, err := strconv.Atoi(part); err == nil {
+                                               continue
+                                       }
+
+                                       var ok bool
+                                       if target, ok = sm[part]; !ok {
+                                               return fmt.Errorf("%s: ConflictsWith references unknown attribute (%s)", k, key)
+                                       }
+
+                                       if subResource, ok := target.Elem.(*Resource); ok {
+                                               sm = schemaMap(subResource.Schema)
+                                       }
+                               }
+                               if target == nil {
+                                       return fmt.Errorf("%s: ConflictsWith cannot find target attribute (%s), sm: %#v", k, key, sm)
+                               }
+                               if target.Required {
+                                       return fmt.Errorf("%s: ConflictsWith cannot contain Required attribute (%s)", k, key)
+                               }
+
+                               if len(target.ComputedWhen) > 0 {
+                                       return fmt.Errorf("%s: ConflictsWith cannot contain Computed(When) attribute (%s)", k, key)
+                               }
+                       }
+               }
+
+               if v.Type == TypeList || v.Type == TypeSet {
+                       if v.Elem == nil {
+                               return fmt.Errorf("%s: Elem must be set for lists", k)
+                       }
+
+                       if v.Default != nil {
+                               return fmt.Errorf("%s: Default is not valid for lists or sets", k)
+                       }
+
+                       if v.Type != TypeSet && v.Set != nil {
+                               return fmt.Errorf("%s: Set can only be set for TypeSet", k)
+                       }
+
+                       switch t := v.Elem.(type) {
+                       case *Resource:
+                               if err := t.InternalValidate(topSchemaMap, true); err != nil {
+                                       return err
+                               }
+                       case *Schema:
+                               bad := t.Computed || t.Optional || t.Required
+                               if bad {
+                                       return fmt.Errorf(
+                                               "%s: Elem must have only Type set", k)
+                               }
+                       }
+               } else {
+                       if v.MaxItems > 0 || v.MinItems > 0 {
+                               return fmt.Errorf("%s: MaxItems and MinItems are only supported on lists or sets", k)
+                       }
+               }
+
+               // Computed-only field
+               if v.Computed && !v.Optional {
+                       if v.ValidateFunc != nil {
+                               return fmt.Errorf("%s: ValidateFunc is for validating user input, "+
+                                       "there's nothing to validate on computed-only field", k)
+                       }
+                       if v.DiffSuppressFunc != nil {
+                               return fmt.Errorf("%s: DiffSuppressFunc is for suppressing differences"+
+                                       " between config and state representation. "+
+                                       "There is no config for computed-only field, nothing to compare.", k)
+                       }
+               }
+
+               if v.ValidateFunc != nil {
+                       switch v.Type {
+                       case TypeList, TypeSet:
+                               return fmt.Errorf("ValidateFunc is not yet supported on lists or sets.")
+                       }
+               }
+       }
+
+       return nil
+}
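
A minimal sketch of what InternalValidate rejects (a hypothetical in-package test, since schemaMap is unexported):

    func TestInternalValidate_optionalAndRequired(t *testing.T) {
        m := schemaMap{
            "name": &Schema{
                Type:     TypeString,
                Optional: true,
                Required: true, // invalid: Optional and Required are mutually exclusive
            },
        }
        if err := m.InternalValidate(nil); err == nil {
            t.Fatal("expected an Optional/Required conflict error")
        }
    }
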
+
+func (m schemaMap) diff(
+       k string,
+       schema *Schema,
+       diff *terraform.InstanceDiff,
+       d *ResourceData,
+       all bool) error {
+
+       unsuppressedDiff := new(terraform.InstanceDiff)
+       unsuppressedDiff.Attributes = make(map[string]*terraform.ResourceAttrDiff)
+
+       var err error
+       switch schema.Type {
+       case TypeBool, TypeInt, TypeFloat, TypeString:
+               err = m.diffString(k, schema, unsuppressedDiff, d, all)
+       case TypeList:
+               err = m.diffList(k, schema, unsuppressedDiff, d, all)
+       case TypeMap:
+               err = m.diffMap(k, schema, unsuppressedDiff, d, all)
+       case TypeSet:
+               err = m.diffSet(k, schema, unsuppressedDiff, d, all)
+       default:
+               err = fmt.Errorf("%s: unknown type %#v", k, schema.Type)
+       }
+
+       for attrK, attrV := range unsuppressedDiff.Attributes {
+               if schema.DiffSuppressFunc != nil &&
+                       attrV != nil &&
+                       schema.DiffSuppressFunc(attrK, attrV.Old, attrV.New, d) {
+                       continue
+               }
+
+               diff.Attributes[attrK] = attrV
+       }
+
+       return err
+}
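
For context, the DiffSuppressFunc consulted above is supplied per schema. A hypothetical field that ignores case-only changes might look like this (strings import assumed):

    "dns_name": &Schema{
        Type:     TypeString,
        Optional: true,
        // Suppress the diff when old and new differ only by case.
        DiffSuppressFunc: func(k, old, new string, d *ResourceData) bool {
            return strings.EqualFold(old, new)
        },
    },
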
+
+func (m schemaMap) diffList(
+       k string,
+       schema *Schema,
+       diff *terraform.InstanceDiff,
+       d *ResourceData,
+       all bool) error {
+       o, n, _, computedList := d.diffChange(k)
+       if computedList {
+               n = nil
+       }
+       nSet := n != nil
+
+       // If we have an old value, no new value is set (and none will be
+       // computed once all variables are interpolated), and the schema is
+       // computed, then nothing has changed.
+       if o != nil && n == nil && !computedList && schema.Computed {
+               return nil
+       }
+
+       if o == nil {
+               o = []interface{}{}
+       }
+       if n == nil {
+               n = []interface{}{}
+       }
+       if s, ok := o.(*Set); ok {
+               o = s.List()
+       }
+       if s, ok := n.(*Set); ok {
+               n = s.List()
+       }
+       os := o.([]interface{})
+       vs := n.([]interface{})
+
+       // If the new value was set and the two are equal, then we're done.
+       // We have to do this check here because sets may not compare as equal
+       // via reflect.DeepEqual until converted to []interface{}.
+       if !all && nSet && reflect.DeepEqual(os, vs) {
+               return nil
+       }
+
+       // Get the counts
+       oldLen := len(os)
+       newLen := len(vs)
+       oldStr := strconv.FormatInt(int64(oldLen), 10)
+
+       // If the whole list is computed, then say that the # is computed
+       if computedList {
+               diff.Attributes[k+".#"] = &terraform.ResourceAttrDiff{
+                       Old:         oldStr,
+                       NewComputed: true,
+                       RequiresNew: schema.ForceNew,
+               }
+               return nil
+       }
+
+       // If the counts are not the same, then record that diff
+       changed := oldLen != newLen
+       computed := oldLen == 0 && newLen == 0 && schema.Computed
+       if changed || computed || all {
+               countSchema := &Schema{
+                       Type:     TypeInt,
+                       Computed: schema.Computed,
+                       ForceNew: schema.ForceNew,
+               }
+
+               newStr := ""
+               if !computed {
+                       newStr = strconv.FormatInt(int64(newLen), 10)
+               } else {
+                       oldStr = ""
+               }
+
+               diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+                       Old: oldStr,
+                       New: newStr,
+               })
+       }
+
+       // Figure out the maximum
+       maxLen := oldLen
+       if newLen > maxLen {
+               maxLen = newLen
+       }
+
+       switch t := schema.Elem.(type) {
+       case *Resource:
+               // This is a complex resource
+               for i := 0; i < maxLen; i++ {
+                       for k2, schema := range t.Schema {
+                               subK := fmt.Sprintf("%s.%d.%s", k, i, k2)
+                               err := m.diff(subK, schema, diff, d, all)
+                               if err != nil {
+                                       return err
+                               }
+                       }
+               }
+       case *Schema:
+               // Copy the schema so that we can set Computed/ForceNew from
+               // the parent schema (the TypeList).
+               t2 := *t
+               t2.ForceNew = schema.ForceNew
+
+               // This is just a primitive element, so go through each and
+               // just diff each.
+               for i := 0; i < maxLen; i++ {
+                       subK := fmt.Sprintf("%s.%d", k, i)
+                       err := m.diff(subK, &t2, diff, d, all)
+                       if err != nil {
+                               return err
+                       }
+               }
+       default:
+               return fmt.Errorf("%s: unknown element type (internal)", k)
+       }
+
+       return nil
+}
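
A sketch of the flattened attributes diffList produces for a hypothetical TypeList "ports" going from [80] to [80, 443] (illustrative only; flags like ForceNew alter the entries):

    _ = map[string]*terraform.ResourceAttrDiff{
        "ports.#": {Old: "1", New: "2"},  // element count
        "ports.1": {Old: "", New: "443"}, // new element at index 1
    }
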
+
+func (m schemaMap) diffMap(
+       k string,
+       schema *Schema,
+       diff *terraform.InstanceDiff,
+       d *ResourceData,
+       all bool) error {
+       prefix := k + "."
+
+       // First get all the values from the state
+       var stateMap, configMap map[string]string
+       o, n, _, nComputed := d.diffChange(k)
+       if err := mapstructure.WeakDecode(o, &stateMap); err != nil {
+               return fmt.Errorf("%s: %s", k, err)
+       }
+       if err := mapstructure.WeakDecode(n, &configMap); err != nil {
+               return fmt.Errorf("%s: %s", k, err)
+       }
+
+       // Keep track of whether the state _exists_ at all prior to clearing it
+       stateExists := o != nil
+
+       // Delete any count values, since we don't use those
+       delete(configMap, "%")
+       delete(stateMap, "%")
+
+       // Check if the number of elements has changed.
+       oldLen, newLen := len(stateMap), len(configMap)
+       changed := oldLen != newLen
+       if oldLen != 0 && newLen == 0 && schema.Computed {
+               changed = false
+       }
+
+       // It is computed if we have no old value, no new value, the schema
+       // says it is computed, and it didn't exist in the state before. The
+       // last point means: if it existed in the state, even empty, then it
+       // has already been computed.
+       computed := oldLen == 0 && newLen == 0 && schema.Computed && !stateExists
+
+       // If the count has changed or we're computed, then add a diff for the
+       // count. "nComputed" means that the new value _contains_ a value that
+       // is computed. We don't do granular diffs for this yet, so we mark the
+       // whole map as computed.
+       if changed || computed || nComputed {
+               countSchema := &Schema{
+                       Type:     TypeInt,
+                       Computed: schema.Computed || nComputed,
+                       ForceNew: schema.ForceNew,
+               }
+
+               oldStr := strconv.FormatInt(int64(oldLen), 10)
+               newStr := ""
+               if !computed && !nComputed {
+                       newStr = strconv.FormatInt(int64(newLen), 10)
+               } else {
+                       oldStr = ""
+               }
+
+               diff.Attributes[k+".%"] = countSchema.finalizeDiff(
+                       &terraform.ResourceAttrDiff{
+                               Old: oldStr,
+                               New: newStr,
+                       },
+               )
+       }
+
+       // If the new map is nil and we're computed, then ignore it.
+       if n == nil && schema.Computed {
+               return nil
+       }
+
+       // Now we compare, preferring values from the config map
+       for k, v := range configMap {
+               old, ok := stateMap[k]
+               delete(stateMap, k)
+
+               if old == v && ok && !all {
+                       continue
+               }
+
+               diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+                       Old: old,
+                       New: v,
+               })
+       }
+       for k, v := range stateMap {
+               diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+                       Old:        v,
+                       NewRemoved: true,
+               })
+       }
+
+       return nil
+}
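
Similarly, a sketch of what diffMap records for a hypothetical TypeMap "tags" changing from {env: dev} to {env: prod, team: ops}:

    _ = map[string]*terraform.ResourceAttrDiff{
        "tags.%":    {Old: "1", New: "2"},      // key count
        "tags.env":  {Old: "dev", New: "prod"}, // changed value
        "tags.team": {Old: "", New: "ops"},     // added key
    }
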
+
+func (m schemaMap) diffSet(
+       k string,
+       schema *Schema,
+       diff *terraform.InstanceDiff,
+       d *ResourceData,
+       all bool) error {
+
+       o, n, _, computedSet := d.diffChange(k)
+       if computedSet {
+               n = nil
+       }
+       nSet := n != nil
+
+       // If we have an old value, no new value is set (and none will be
+       // computed once all variables are interpolated), and the schema is
+       // computed, then nothing has changed.
+       if o != nil && n == nil && !computedSet && schema.Computed {
+               return nil
+       }
+
+       if o == nil {
+               o = schema.ZeroValue().(*Set)
+       }
+       if n == nil {
+               n = schema.ZeroValue().(*Set)
+       }
+       os := o.(*Set)
+       ns := n.(*Set)
+
+       // If the new value was set, compare the listCodes to determine if
+       // the two are equal. Comparing listCodes instead of the actual values
+       // is needed because there could be computed values in the set which
+       // would result in false positives while comparing.
+       if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) {
+               return nil
+       }
+
+       // Get the counts
+       oldLen := os.Len()
+       newLen := ns.Len()
+       oldStr := strconv.Itoa(oldLen)
+       newStr := strconv.Itoa(newLen)
+
+       // Build a schema for our count
+       countSchema := &Schema{
+               Type:     TypeInt,
+               Computed: schema.Computed,
+               ForceNew: schema.ForceNew,
+       }
+
+       // If the set is computed then say that the # is computed
+       if computedSet || schema.Computed && !nSet {
+               // If # already exists, equals 0 and no new set is supplied, there
+               // is nothing to record in the diff
+               count, ok := d.GetOk(k + ".#")
+               if ok && count.(int) == 0 && !nSet && !computedSet {
+                       return nil
+               }
+
+               // Set the count but make sure that if # does not exist, we don't
+               // use the zeroed value
+               countStr := strconv.Itoa(count.(int))
+               if !ok {
+                       countStr = ""
+               }
+
+               diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+                       Old:         countStr,
+                       NewComputed: true,
+               })
+               return nil
+       }
+
+       // If the counts are not the same, then record that diff
+       changed := oldLen != newLen
+       if changed || all {
+               diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
+                       Old: oldStr,
+                       New: newStr,
+               })
+       }
+
+       // Build the list of codes that will make up our set. This is the
+       // codes of the removed elements as well as all the codes in the new set.
+       codes := make([][]string, 2)
+       codes[0] = os.Difference(ns).listCode()
+       codes[1] = ns.listCode()
+       for _, list := range codes {
+               for _, code := range list {
+                       switch t := schema.Elem.(type) {
+                       case *Resource:
+                               // This is a complex resource
+                               for k2, schema := range t.Schema {
+                                       subK := fmt.Sprintf("%s.%s.%s", k, code, k2)
+                                       err := m.diff(subK, schema, diff, d, true)
+                                       if err != nil {
+                                               return err
+                                       }
+                               }
+                       case *Schema:
+                               // Copy the schema so that we can set Computed/ForceNew from
+                               // the parent schema (the TypeSet).
+                               t2 := *t
+                               t2.ForceNew = schema.ForceNew
+
+                               // This is just a primitive element, so go through each and
+                               // just diff each.
+                               subK := fmt.Sprintf("%s.%s", k, code)
+                               err := m.diff(subK, &t2, diff, d, true)
+                               if err != nil {
+                                       return err
+                               }
+                       default:
+                               return fmt.Errorf("%s: unknown element type (internal)", k)
+                       }
+               }
+       }
+
+       return nil
+}
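
Unlike lists, set elements are keyed by their hash code rather than by position. A sketch for a hypothetical TypeSet "security_groups" (the numeric index below is a made-up hash code):

    _ = map[string]*terraform.ResourceAttrDiff{
        "security_groups.#":          {Old: "1", New: "2"},
        "security_groups.1234567890": {Old: "", New: "sg-abc"},
    }
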
+
+func (m schemaMap) diffString(
+       k string,
+       schema *Schema,
+       diff *terraform.InstanceDiff,
+       d *ResourceData,
+       all bool) error {
+       var originalN interface{}
+       var os, ns string
+       o, n, _, computed := d.diffChange(k)
+       if schema.StateFunc != nil && n != nil {
+               originalN = n
+               n = schema.StateFunc(n)
+       }
+       nraw := n
+       if nraw == nil && o != nil {
+               nraw = schema.Type.Zero()
+       }
+       if err := mapstructure.WeakDecode(o, &os); err != nil {
+               return fmt.Errorf("%s: %s", k, err)
+       }
+       if err := mapstructure.WeakDecode(nraw, &ns); err != nil {
+               return fmt.Errorf("%s: %s", k, err)
+       }
+
+       if os == ns && !all {
+               // They're the same value. If the old value is not blank or we
+               // have an ID, then return right away since we're already set up.
+               if os != "" || d.Id() != "" {
+                       return nil
+               }
+
+               // Otherwise, only continue if we're computed
+               if !schema.Computed && !computed {
+                       return nil
+               }
+       }
+
+       removed := false
+       if o != nil && n == nil {
+               removed = true
+       }
+       if removed && schema.Computed {
+               return nil
+       }
+
+       diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
+               Old:         os,
+               New:         ns,
+               NewExtra:    originalN,
+               NewRemoved:  removed,
+               NewComputed: computed,
+       })
+
+       return nil
+}
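
A hypothetical StateFunc, as applied by diffString above; the normalized form is diffed and stored while the raw input is preserved in NewExtra (strings import assumed):

    "fingerprint": &Schema{
        Type:     TypeString,
        Optional: true,
        // Compare and store the lower-cased form.
        StateFunc: func(v interface{}) string {
            return strings.ToLower(v.(string))
        },
    },
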
+
+func (m schemaMap) inputString(
+       input terraform.UIInput,
+       k string,
+       schema *Schema) (interface{}, error) {
+       result, err := input.Input(&terraform.InputOpts{
+               Id:          k,
+               Query:       k,
+               Description: schema.Description,
+               Default:     schema.InputDefault,
+       })
+
+       return result, err
+}
+
+func (m schemaMap) validate(
+       k string,
+       schema *Schema,
+       c *terraform.ResourceConfig) ([]string, []error) {
+       raw, ok := c.Get(k)
+       if !ok && schema.DefaultFunc != nil {
+               // We have a dynamic default. Check if we have a value.
+               var err error
+               raw, err = schema.DefaultFunc()
+               if err != nil {
+                       return nil, []error{fmt.Errorf(
+                               "%q, error loading default: %s", k, err)}
+               }
+
+               // We're okay as long as we had a value set
+               ok = raw != nil
+       }
+       if !ok {
+               if schema.Required {
+                       return nil, []error{fmt.Errorf(
+                               "%q: required field is not set", k)}
+               }
+
+               return nil, nil
+       }
+
+       if !schema.Required && !schema.Optional {
+               // This is a computed-only field
+               return nil, []error{fmt.Errorf(
+                       "%q: this field cannot be set", k)}
+       }
+
+       err := m.validateConflictingAttributes(k, schema, c)
+       if err != nil {
+               return nil, []error{err}
+       }
+
+       return m.validateType(k, raw, schema, c)
+}
+
+func (m schemaMap) validateConflictingAttributes(
+       k string,
+       schema *Schema,
+       c *terraform.ResourceConfig) error {
+
+       if len(schema.ConflictsWith) == 0 {
+               return nil
+       }
+
+       for _, conflicting_key := range schema.ConflictsWith {
+               if value, ok := c.Get(conflicting_key); ok {
+                       return fmt.Errorf(
+                               "%q: conflicts with %s (%#v)", k, conflicting_key, value)
+               }
+       }
+
+       return nil
+}
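
A sketch of ConflictsWith in use (hypothetical attribute names); setting both fields in one config trips the check above:

    "password": &Schema{
        Type:          TypeString,
        Optional:      true,
        ConflictsWith: []string{"token"},
    },
    "token": &Schema{
        Type:     TypeString,
        Optional: true,
    },
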
+
+func (m schemaMap) validateList(
+       k string,
+       raw interface{},
+       schema *Schema,
+       c *terraform.ResourceConfig) ([]string, []error) {
+       // We use reflection to verify the slice because you can't
+       // cast to []interface{} unless the slice is exactly that type.
+       rawV := reflect.ValueOf(raw)
+
+       // If we support promotion and the raw value isn't a slice, wrap
+       // it in []interface{} and check again.
+       if schema.PromoteSingle && rawV.Kind() != reflect.Slice {
+               raw = []interface{}{raw}
+               rawV = reflect.ValueOf(raw)
+       }
+
+       if rawV.Kind() != reflect.Slice {
+               return nil, []error{fmt.Errorf(
+                       "%s: should be a list", k)}
+       }
+
+       // Validate length
+       if schema.MaxItems > 0 && rawV.Len() > schema.MaxItems {
+               return nil, []error{fmt.Errorf(
+                       "%s: attribute supports %d item maximum, config has %d declared", k, schema.MaxItems, rawV.Len())}
+       }
+
+       if schema.MinItems > 0 && rawV.Len() < schema.MinItems {
+               return nil, []error{fmt.Errorf(
+                       "%s: attribute supports %d item as a minimum, config has %d declared", k, schema.MinItems, rawV.Len())}
+       }
+
+       // Now build the []interface{}
+       raws := make([]interface{}, rawV.Len())
+       for i := range raws {
+               raws[i] = rawV.Index(i).Interface()
+       }
+
+       var ws []string
+       var es []error
+       for i, raw := range raws {
+               key := fmt.Sprintf("%s.%d", k, i)
+
+               // Reify the key value from the ResourceConfig.
+               // If the list was computed we have all raw values, but some of these
+               // may be known in the config, and aren't individually marked as Computed.
+               if r, ok := c.Get(key); ok {
+                       raw = r
+               }
+
+               var ws2 []string
+               var es2 []error
+               switch t := schema.Elem.(type) {
+               case *Resource:
+                       // This is a sub-resource
+                       ws2, es2 = m.validateObject(key, t.Schema, c)
+               case *Schema:
+                       ws2, es2 = m.validateType(key, raw, t, c)
+               }
+
+               if len(ws2) > 0 {
+                       ws = append(ws, ws2...)
+               }
+               if len(es2) > 0 {
+                       es = append(es, es2...)
+               }
+       }
+
+       return ws, es
+}
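
A sketch of the length limits validateList enforces (hypothetical attribute):

    "subnet_ids": &Schema{
        Type:     TypeList,
        Optional: true,
        MinItems: 1, // at least one element required when set
        MaxItems: 4,
        Elem:     &Schema{Type: TypeString},
    },
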
+
+func (m schemaMap) validateMap(
+       k string,
+       raw interface{},
+       schema *Schema,
+       c *terraform.ResourceConfig) ([]string, []error) {
+       // We use reflection to verify the value because you can't
+       // cast to []interface{} unless the slice is exactly that type.
+       rawV := reflect.ValueOf(raw)
+       switch rawV.Kind() {
+       case reflect.String:
+               // If raw and reified are equal, this is a string and should
+               // be rejected.
+               reified, reifiedOk := c.Get(k)
+               if reifiedOk && raw == reified && !c.IsComputed(k) {
+                       return nil, []error{fmt.Errorf("%s: should be a map", k)}
+               }
+               // Otherwise it's likely raw is an interpolation.
+               return nil, nil
+       case reflect.Map:
+       case reflect.Slice:
+       default:
+               return nil, []error{fmt.Errorf("%s: should be a map", k)}
+       }
+
+       // If it is not a slice, validate directly
+       if rawV.Kind() != reflect.Slice {
+               mapIface := rawV.Interface()
+               if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
+                       return nil, errs
+               }
+               if schema.ValidateFunc != nil {
+                       return schema.ValidateFunc(mapIface, k)
+               }
+               return nil, nil
+       }
+
+       // It is a slice, verify that all the elements are maps
+       raws := make([]interface{}, rawV.Len())
+       for i := range raws {
+               raws[i] = rawV.Index(i).Interface()
+       }
+
+       for _, raw := range raws {
+               v := reflect.ValueOf(raw)
+               if v.Kind() != reflect.Map {
+                       return nil, []error{fmt.Errorf(
+                               "%s: should be a map", k)}
+               }
+               mapIface := v.Interface()
+               if _, errs := validateMapValues(k, mapIface.(map[string]interface{}), schema); len(errs) > 0 {
+                       return nil, errs
+               }
+       }
+
+       if schema.ValidateFunc != nil {
+               validatableMap := make(map[string]interface{})
+               for _, raw := range raws {
+                       for k, v := range raw.(map[string]interface{}) {
+                               validatableMap[k] = v
+                       }
+               }
+
+               return schema.ValidateFunc(validatableMap, k)
+       }
+
+       return nil, nil
+}
+
+func validateMapValues(k string, m map[string]interface{}, schema *Schema) ([]string, []error) {
+       for key, raw := range m {
+               valueType, err := getValueType(k, schema)
+               if err != nil {
+                       return nil, []error{err}
+               }
+
+               switch valueType {
+               case TypeBool:
+                       var n bool
+                       if err := mapstructure.WeakDecode(raw, &n); err != nil {
+                               return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+                       }
+               case TypeInt:
+                       var n int
+                       if err := mapstructure.WeakDecode(raw, &n); err != nil {
+                               return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+                       }
+               case TypeFloat:
+                       var n float64
+                       if err := mapstructure.WeakDecode(raw, &n); err != nil {
+                               return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+                       }
+               case TypeString:
+                       var n string
+                       if err := mapstructure.WeakDecode(raw, &n); err != nil {
+                               return nil, []error{fmt.Errorf("%s (%s): %s", k, key, err)}
+                       }
+               default:
+                       panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
+               }
+       }
+       return nil, nil
+}
+
+func getValueType(k string, schema *Schema) (ValueType, error) {
+       if schema.Elem == nil {
+               return TypeString, nil
+       }
+       if vt, ok := schema.Elem.(ValueType); ok {
+               return vt, nil
+       }
+
+       if s, ok := schema.Elem.(*Schema); ok {
+               if s.Elem == nil {
+                       return TypeString, nil
+               }
+               if vt, ok := s.Elem.(ValueType); ok {
+                       return vt, nil
+               }
+       }
+
+       if _, ok := schema.Elem.(*Resource); ok {
+               // TODO: We don't actually support this (yet)
+               // but silently pass the validation, until we decide
+               // how to handle nested structures in maps
+               return TypeString, nil
+       }
+       return 0, fmt.Errorf("%s: unexpected map value type: %#v", k, schema.Elem)
+}
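
A sketch of how Elem drives map value typing (hypothetical attribute); validateMapValues will then WeakDecode every map value as an int:

    "port_map": &Schema{
        Type:     TypeMap,
        Optional: true,
        Elem:     &Schema{Type: TypeInt},
    },
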
+
+func (m schemaMap) validateObject(
+       k string,
+       schema map[string]*Schema,
+       c *terraform.ResourceConfig) ([]string, []error) {
+       raw, _ := c.GetRaw(k)
+       if _, ok := raw.(map[string]interface{}); !ok {
+               return nil, []error{fmt.Errorf(
+                       "%s: expected object, got %s",
+                       k, reflect.ValueOf(raw).Kind())}
+       }
+
+       var ws []string
+       var es []error
+       for subK, s := range schema {
+               key := subK
+               if k != "" {
+                       key = fmt.Sprintf("%s.%s", k, subK)
+               }
+
+               ws2, es2 := m.validate(key, s, c)
+               if len(ws2) > 0 {
+                       ws = append(ws, ws2...)
+               }
+               if len(es2) > 0 {
+                       es = append(es, es2...)
+               }
+       }
+
+       // Detect any extra/unknown keys and report those as errors.
+       if m, ok := raw.(map[string]interface{}); ok {
+               for subk := range m {
+                       if _, ok := schema[subk]; !ok {
+                               if subk == TimeoutsConfigKey {
+                                       continue
+                               }
+                               es = append(es, fmt.Errorf(
+                                       "%s: invalid or unknown key: %s", k, subk))
+                       }
+               }
+       }
+
+       return ws, es
+}
+
+func (m schemaMap) validatePrimitive(
+       k string,
+       raw interface{},
+       schema *Schema,
+       c *terraform.ResourceConfig) ([]string, []error) {
+
+       // Catch if the user gave a complex type where a primitive was
+       // expected, so we can return a friendly error message that
+       // doesn't contain Go type system terminology.
+       switch reflect.ValueOf(raw).Type().Kind() {
+       case reflect.Slice:
+               return nil, []error{
+                       fmt.Errorf("%s must be a single value, not a list", k),
+               }
+       case reflect.Map:
+               return nil, []error{
+                       fmt.Errorf("%s must be a single value, not a map", k),
+               }
+       default: // ok
+       }
+
+       if c.IsComputed(k) {
+               // If the key is being computed, then it is not an error as
+               // long as it's not a slice or map.
+               return nil, nil
+       }
+
+       var decoded interface{}
+       switch schema.Type {
+       case TypeBool:
+               // Verify that we can parse this as a bool
+               var n bool
+               if err := mapstructure.WeakDecode(raw, &n); err != nil {
+                       return nil, []error{fmt.Errorf("%s: %s", k, err)}
+               }
+               decoded = n
+       case TypeInt:
+               // Verify that we can parse this as an int
+               var n int
+               if err := mapstructure.WeakDecode(raw, &n); err != nil {
+                       return nil, []error{fmt.Errorf("%s: %s", k, err)}
+               }
+               decoded = n
+       case TypeFloat:
+               // Verify that we can parse this as a float
+               var n float64
+               if err := mapstructure.WeakDecode(raw, &n); err != nil {
+                       return nil, []error{fmt.Errorf("%s: %s", k, err)}
+               }
+               decoded = n
+       case TypeString:
+               // Verify that we can parse this as a string
+               var n string
+               if err := mapstructure.WeakDecode(raw, &n); err != nil {
+                       return nil, []error{fmt.Errorf("%s: %s", k, err)}
+               }
+               decoded = n
+       default:
+               panic(fmt.Sprintf("Unknown validation type: %#v", schema.Type))
+       }
+
+       if schema.ValidateFunc != nil {
+               return schema.ValidateFunc(decoded, k)
+       }
+
+       return nil, nil
+}
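
A hypothetical ValidateFunc for a TypeInt attribute; validatePrimitive passes the decoded int through to it:

    "port": &Schema{
        Type:     TypeInt,
        Required: true,
        ValidateFunc: func(v interface{}, k string) ([]string, []error) {
            if p := v.(int); p < 1 || p > 65535 {
                return nil, []error{fmt.Errorf("%s must be between 1 and 65535, got %d", k, p)}
            }
            return nil, nil
        },
    },
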
+
+func (m schemaMap) validateType(
+       k string,
+       raw interface{},
+       schema *Schema,
+       c *terraform.ResourceConfig) ([]string, []error) {
+       var ws []string
+       var es []error
+       switch schema.Type {
+       case TypeSet, TypeList:
+               ws, es = m.validateList(k, raw, schema, c)
+       case TypeMap:
+               ws, es = m.validateMap(k, raw, schema, c)
+       default:
+               ws, es = m.validatePrimitive(k, raw, schema, c)
+       }
+
+       if schema.Deprecated != "" {
+               ws = append(ws, fmt.Sprintf(
+                       "%q: [DEPRECATED] %s", k, schema.Deprecated))
+       }
+
+       if schema.Removed != "" {
+               es = append(es, fmt.Errorf(
+                       "%q: [REMOVED] %s", k, schema.Removed))
+       }
+
+       return ws, es
+}
+
+// Zero returns the zero value for a type.
+func (t ValueType) Zero() interface{} {
+       switch t {
+       case TypeInvalid:
+               return nil
+       case TypeBool:
+               return false
+       case TypeInt:
+               return 0
+       case TypeFloat:
+               return 0.0
+       case TypeString:
+               return ""
+       case TypeList:
+               return []interface{}{}
+       case TypeMap:
+               return map[string]interface{}{}
+       case TypeSet:
+               return new(Set)
+       case typeObject:
+               return map[string]interface{}{}
+       default:
+               panic(fmt.Sprintf("unknown type %s", t))
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go b/vendor/github.com/hashicorp/terraform/helper/schema/serialize.go
new file mode 100644 (file)
index 0000000..fe6d750
--- /dev/null
@@ -0,0 +1,125 @@
+package schema
+
+import (
+       "bytes"
+       "fmt"
+       "sort"
+       "strconv"
+)
+
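+// SerializeValueForHash appends a deterministic serialization of the given
+// value to buf according to the given schema. Like the resource variant
+// below, it is not reversible; it exists to feed set hashing.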
+func SerializeValueForHash(buf *bytes.Buffer, val interface{}, schema *Schema) {
+       if val == nil {
+               buf.WriteRune(';')
+               return
+       }
+
+       switch schema.Type {
+       case TypeBool:
+               if val.(bool) {
+                       buf.WriteRune('1')
+               } else {
+                       buf.WriteRune('0')
+               }
+       case TypeInt:
+               buf.WriteString(strconv.Itoa(val.(int)))
+       case TypeFloat:
+               buf.WriteString(strconv.FormatFloat(val.(float64), 'g', -1, 64))
+       case TypeString:
+               buf.WriteString(val.(string))
+       case TypeList:
+               buf.WriteRune('(')
+               l := val.([]interface{})
+               for _, innerVal := range l {
+                       serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+               }
+               buf.WriteRune(')')
+       case TypeMap:
+
+               m := val.(map[string]interface{})
+               var keys []string
+               for k := range m {
+                       keys = append(keys, k)
+               }
+               sort.Strings(keys)
+               buf.WriteRune('[')
+               for _, k := range keys {
+                       innerVal := m[k]
+                       if innerVal == nil {
+                               continue
+                       }
+                       buf.WriteString(k)
+                       buf.WriteRune(':')
+
+                       switch innerVal := innerVal.(type) {
+                       case int:
+                               buf.WriteString(strconv.Itoa(innerVal))
+                       case float64:
+                               buf.WriteString(strconv.FormatFloat(innerVal, 'g', -1, 64))
+                       case string:
+                               buf.WriteString(innerVal)
+                       default:
+                               panic(fmt.Sprintf("unknown value type in TypeMap %T", innerVal))
+                       }
+
+                       buf.WriteRune(';')
+               }
+               buf.WriteRune(']')
+       case TypeSet:
+               buf.WriteRune('{')
+               s := val.(*Set)
+               for _, innerVal := range s.List() {
+                       serializeCollectionMemberForHash(buf, innerVal, schema.Elem)
+               }
+               buf.WriteRune('}')
+       default:
+               panic("unknown schema type to serialize")
+       }
+       buf.WriteRune(';')
+}
+
+// SerializeResourceForHash appends a serialization of the given resource
+// config to the given buffer, guaranteeing deterministic results given the
+// same value and resource schema.
+//
+// Its primary purpose is as input into a hashing function in order
+// to hash complex substructures when used in sets, and so the serialization
+// is not reversible.
+func SerializeResourceForHash(buf *bytes.Buffer, val interface{}, resource *Resource) {
+       if val == nil {
+               return
+       }
+       sm := resource.Schema
+       m := val.(map[string]interface{})
+       var keys []string
+       for k := range sm {
+               keys = append(keys, k)
+       }
+       sort.Strings(keys)
+       for _, k := range keys {
+               innerSchema := sm[k]
+               // Skip attributes that are not user-provided. Computed attributes
+               // do not contribute to the hash since their ultimate value cannot
+               // be known at plan/diff time.
+               if !(innerSchema.Required || innerSchema.Optional) {
+                       continue
+               }
+
+               buf.WriteString(k)
+               buf.WriteRune(':')
+               innerVal := m[k]
+               SerializeValueForHash(buf, innerVal, innerSchema)
+       }
+}
+
+func serializeCollectionMemberForHash(buf *bytes.Buffer, val interface{}, elem interface{}) {
+       switch tElem := elem.(type) {
+       case *Schema:
+               SerializeValueForHash(buf, val, tElem)
+       case *Resource:
+               buf.WriteRune('<')
+               SerializeResourceForHash(buf, val, tElem)
+               buf.WriteString(">;")
+       default:
+               panic(fmt.Sprintf("invalid element type: %T", tElem))
+       }
+}
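
A sketch of the serialization format, assuming a list-of-strings schema: every value is terminated with ';' and lists are wrapped in parentheses:

    var buf bytes.Buffer
    SerializeValueForHash(&buf, []interface{}{"a", "b"},
        &Schema{Type: TypeList, Elem: &Schema{Type: TypeString}})
    // buf.String() == "(a;b;);"
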
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/set.go b/vendor/github.com/hashicorp/terraform/helper/schema/set.go
new file mode 100644 (file)
index 0000000..de05f40
--- /dev/null
@@ -0,0 +1,209 @@
+package schema
+
+import (
+       "bytes"
+       "fmt"
+       "reflect"
+       "sort"
+       "strconv"
+       "sync"
+
+       "github.com/hashicorp/terraform/helper/hashcode"
+)
+
+// HashString hashes strings. If you want a Set of strings, this is the
+// SchemaSetFunc you want.
+func HashString(v interface{}) int {
+       return hashcode.String(v.(string))
+}
+
+// HashResource hashes complex structures that are described using
+// a *Resource. This is the default set implementation used when a set's
+// element type is a full resource.
+func HashResource(resource *Resource) SchemaSetFunc {
+       return func(v interface{}) int {
+               var buf bytes.Buffer
+               SerializeResourceForHash(&buf, v, resource)
+               return hashcode.String(buf.String())
+       }
+}
+
+// HashSchema hashes values that are described using a *Schema. This is the
+// default set implementation used when a set's element type is a single
+// schema.
+func HashSchema(schema *Schema) SchemaSetFunc {
+       return func(v interface{}) int {
+               var buf bytes.Buffer
+               SerializeValueForHash(&buf, v, schema)
+               return hashcode.String(buf.String())
+       }
+}
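
A quick sketch of set construction with HashString; duplicates collapse, and ordering follows sorted hash codes rather than insertion order (fmt import assumed):

    s := NewSet(HashString, []interface{}{"b", "a", "b"})
    fmt.Println(s.Len()) // 2
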
+
+// Set is a set data structure that is returned for elements of type
+// TypeSet.
+type Set struct {
+       F SchemaSetFunc
+
+       m    map[string]interface{}
+       once sync.Once
+}
+
+// NewSet is a convenience method for creating a new set with the given
+// items.
+func NewSet(f SchemaSetFunc, items []interface{}) *Set {
+       s := &Set{F: f}
+       for _, i := range items {
+               s.Add(i)
+       }
+
+       return s
+}
+
+// CopySet returns a copy of another set.
+func CopySet(otherSet *Set) *Set {
+       return NewSet(otherSet.F, otherSet.List())
+}
+
+// Add adds an item to the set if it isn't already in the set.
+func (s *Set) Add(item interface{}) {
+       s.add(item, false)
+}
+
+// Remove removes an item if it's already in the set. Idempotent.
+func (s *Set) Remove(item interface{}) {
+       s.remove(item)
+}
+
+// Contains checks if the set has the given item.
+func (s *Set) Contains(item interface{}) bool {
+       _, ok := s.m[s.hash(item)]
+       return ok
+}
+
+// Len returns the number of items in the set.
+func (s *Set) Len() int {
+       return len(s.m)
+}
+
+// List returns the elements of this set in slice format.
+//
+// The order of the returned elements is deterministic. Given the same
+// set, the order of this will always be the same.
+func (s *Set) List() []interface{} {
+       result := make([]interface{}, len(s.m))
+       for i, k := range s.listCode() {
+               result[i] = s.m[k]
+       }
+
+       return result
+}
+
+// Difference performs a set difference of the two sets, returning
+// a new third set that has only the elements unique to this set.
+func (s *Set) Difference(other *Set) *Set {
+       result := &Set{F: s.F}
+       result.once.Do(result.init)
+
+       for k, v := range s.m {
+               if _, ok := other.m[k]; !ok {
+                       result.m[k] = v
+               }
+       }
+
+       return result
+}
+
+// Intersection performs the set intersection of the two sets
+// and returns a new third set.
+func (s *Set) Intersection(other *Set) *Set {
+       result := &Set{F: s.F}
+       result.once.Do(result.init)
+
+       for k, v := range s.m {
+               if _, ok := other.m[k]; ok {
+                       result.m[k] = v
+               }
+       }
+
+       return result
+}
+
+// Union performs the set union of the two sets and returns a new third
+// set.
+func (s *Set) Union(other *Set) *Set {
+       result := &Set{F: s.F}
+       result.once.Do(result.init)
+
+       for k, v := range s.m {
+               result.m[k] = v
+       }
+       for k, v := range other.m {
+               result.m[k] = v
+       }
+
+       return result
+}
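
A sketch of the three set operations on string sets:

    a := NewSet(HashString, []interface{}{"x", "y"})
    b := NewSet(HashString, []interface{}{"y", "z"})
    a.Difference(b)   // contains only "x"
    a.Intersection(b) // contains only "y"
    a.Union(b)        // contains "x", "y", and "z"
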
+
+func (s *Set) Equal(raw interface{}) bool {
+       other, ok := raw.(*Set)
+       if !ok {
+               return false
+       }
+
+       return reflect.DeepEqual(s.m, other.m)
+}
+
+func (s *Set) GoString() string {
+       return fmt.Sprintf("*Set(%#v)", s.m)
+}
+
+func (s *Set) init() {
+       s.m = make(map[string]interface{})
+}
+
+func (s *Set) add(item interface{}, computed bool) string {
+       s.once.Do(s.init)
+
+       code := s.hash(item)
+       if computed {
+               code = "~" + code
+       }
+
+       if _, ok := s.m[code]; !ok {
+               s.m[code] = item
+       }
+
+       return code
+}
+
+func (s *Set) hash(item interface{}) string {
+       code := s.F(item)
+       // Always return a nonnegative hashcode.
+       if code < 0 {
+               code = -code
+       }
+       return strconv.Itoa(code)
+}
+
+func (s *Set) remove(item interface{}) string {
+       s.once.Do(s.init)
+
+       code := s.hash(item)
+       delete(s.m, code)
+
+       return code
+}
+
+func (s *Set) index(item interface{}) int {
+       return sort.SearchStrings(s.listCode(), s.hash(item))
+}
+
+func (s *Set) listCode() []string {
+       // Sort the hash codes so the order of the list is deterministic
+       keys := make([]string, 0, len(s.m))
+       for k := range s.m {
+               keys = append(keys, k)
+       }
+       sort.Sort(sort.StringSlice(keys))
+       return keys
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/testing.go b/vendor/github.com/hashicorp/terraform/helper/schema/testing.go
new file mode 100644 (file)
index 0000000..9765bdb
--- /dev/null
@@ -0,0 +1,30 @@
+package schema
+
+import (
+       "testing"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// TestResourceDataRaw creates a ResourceData from a raw configuration map.
+func TestResourceDataRaw(
+       t *testing.T, schema map[string]*Schema, raw map[string]interface{}) *ResourceData {
+       c, err := config.NewRawConfig(raw)
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+
+       sm := schemaMap(schema)
+       diff, err := sm.Diff(nil, terraform.NewResourceConfig(c))
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+
+       result, err := sm.Data(nil, diff)
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+
+       return result
+}
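
A sketch of how a provider test might use this helper (hypothetical schema and test name):

    func TestWidgetName(t *testing.T) {
        s := map[string]*Schema{
            "name": {Type: TypeString, Optional: true},
        }
        d := TestResourceDataRaw(t, s, map[string]interface{}{"name": "web"})
        if got := d.Get("name").(string); got != "web" {
            t.Fatalf("unexpected name: %s", got)
        }
    }
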
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype.go
new file mode 100644 (file)
index 0000000..9286987
--- /dev/null
@@ -0,0 +1,21 @@
+package schema
+
+//go:generate stringer -type=ValueType valuetype.go
+
+// ValueType is an enum of the type that can be represented by a schema.
+type ValueType int
+
+const (
+       TypeInvalid ValueType = iota
+       TypeBool
+       TypeInt
+       TypeFloat
+       TypeString
+       TypeList
+       TypeMap
+       TypeSet
+       typeObject
+)
+
+// NOTE: ValueType has more functions defined on it in schema.go. We can't
+// put them here because we reference other files.
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go
new file mode 100644 (file)
index 0000000..1610cec
--- /dev/null
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=ValueType valuetype.go"; DO NOT EDIT.
+
+package schema
+
+import "fmt"
+
+const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
+
+var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
+
+func (i ValueType) String() string {
+       if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
+               return fmt.Sprintf("ValueType(%d)", i)
+       }
+       return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go b/vendor/github.com/hashicorp/terraform/helper/shadow/closer.go
new file mode 100644 (file)
index 0000000..7edd5e7
--- /dev/null
@@ -0,0 +1,80 @@
+package shadow
+
+import (
+       "fmt"
+       "io"
+       "reflect"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/mitchellh/reflectwalk"
+)
+
+// Close will close all shadow values within the given structure.
+//
+// This uses reflection to walk the structure, find all shadow elements,
+// and close them. Currently this will only find struct fields that are
+// shadow values, and not slice elements, etc.
+func Close(v interface{}) error {
+       // We require a pointer so we can address the internal fields
+       val := reflect.ValueOf(v)
+       if val.Kind() != reflect.Ptr {
+               return fmt.Errorf("value must be a pointer")
+       }
+
+       // Walk and close
+       var w closeWalker
+       if err := reflectwalk.Walk(v, &w); err != nil {
+               return err
+       }
+
+       return w.Err
+}
+
+type closeWalker struct {
+       Err error
+}
+
+func (w *closeWalker) Struct(reflect.Value) error {
+       // Do nothing. We implement this for reflectwalk.StructWalker
+       return nil
+}
+
+func (w *closeWalker) StructField(f reflect.StructField, v reflect.Value) error {
+       // Not sure why this would happen, but let's avoid some panics
+       if !v.IsValid() {
+               return nil
+       }
+
+       // PkgPath is empty for exported fields, so skip unexported fields
+       if f.PkgPath != "" {
+               return nil
+       }
+
+       // Only consider types declared in this package before checking for io.Closer
+       typ := v.Type()
+       if typ.PkgPath() != "github.com/hashicorp/terraform/helper/shadow" {
+               return nil
+       }
+
+       // We're looking for an io.Closer
+       raw := v.Interface()
+       if raw == nil {
+               return nil
+       }
+
+       closer, ok := raw.(io.Closer)
+       if !ok && v.CanAddr() {
+               closer, ok = v.Addr().Interface().(io.Closer)
+       }
+       if !ok {
+               return reflectwalk.SkipEntry
+       }
+
+       // Close it
+       if err := closer.Close(); err != nil {
+               w.Err = multierror.Append(w.Err, err)
+       }
+
+       // Don't go into the struct field
+       return reflectwalk.SkipEntry
+}
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/compared_value.go
new file mode 100644 (file)
index 0000000..4223e92
--- /dev/null
@@ -0,0 +1,128 @@
+package shadow
+
+import (
+       "sync"
+)
+
+// ComparedValue is a struct that finds a value by comparing some key
+// to the list of stored values. This is useful when there is no easy
+// uniquely identifying key that works in a map (for that, use KeyedValue).
+//
+// ComparedValue is very expensive, relative to other Value types. Try to
+// limit the number of values stored in a ComparedValue by potentially
+// nesting it within a KeyedValue (a keyed value points to a compared value,
+// for example).
+type ComparedValue struct {
+       // Func is a function that is given the lookup key and a single
+       // stored value. If it matches, it returns true.
+       Func func(k, v interface{}) bool
+
+       lock    sync.Mutex
+       once    sync.Once
+       closed  bool
+       values  []interface{}
+       waiters map[interface{}]*Value
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the ErrClosed docs.
+func (w *ComparedValue) Close() error {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       // Set closed to true always
+       w.closed = true
+
+       // For all waiters, complete with ErrClosed
+       for k, val := range w.waiters {
+               val.SetValue(ErrClosed)
+               delete(w.waiters, k)
+       }
+
+       return nil
+}
+
+// Value returns the value that was set for the given key, or blocks
+// until one is available.
+func (w *ComparedValue) Value(k interface{}) interface{} {
+       v, val := w.valueWaiter(k)
+       if val == nil {
+               return v
+       }
+
+       return val.Value()
+}
+
+// ValueOk gets the value for the given key, returning immediately if the
+// value doesn't exist. The second return argument is true if the value exists.
+func (w *ComparedValue) ValueOk(k interface{}) (interface{}, bool) {
+       v, val := w.valueWaiter(k)
+       return v, val == nil
+}
+
+func (w *ComparedValue) SetValue(v interface{}) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+       w.once.Do(w.init)
+
+       // Check if we already have this exact value (by simply comparing
+       // with == directly). If we do, then we don't insert it again.
+       found := false
+       for _, v2 := range w.values {
+               if v == v2 {
+                       found = true
+                       break
+               }
+       }
+
+       if !found {
+               // Set the value, always
+               w.values = append(w.values, v)
+       }
+
+       // Go through the waiters
+       for k, val := range w.waiters {
+               if w.Func(k, v) {
+                       val.SetValue(v)
+                       delete(w.waiters, k)
+               }
+       }
+}
+
+func (w *ComparedValue) valueWaiter(k interface{}) (interface{}, *Value) {
+       w.lock.Lock()
+       w.once.Do(w.init)
+
+       // Look for a pre-existing value
+       for _, v := range w.values {
+               if w.Func(k, v) {
+                       w.lock.Unlock()
+                       return v, nil
+               }
+       }
+
+       // If we're closed, return that
+       if w.closed {
+               w.lock.Unlock()
+               return ErrClosed, nil
+       }
+
+       // Pre-existing value doesn't exist, create a waiter
+       val := w.waiters[k]
+       if val == nil {
+               val = new(Value)
+               w.waiters[k] = val
+       }
+       w.lock.Unlock()
+
+       // Return the waiter
+       return nil, val
+}
+
+// Must be called with w.lock held.
+func (w *ComparedValue) init() {
+       w.waiters = make(map[interface{}]*Value)
+       if w.Func == nil {
+               w.Func = func(k, v interface{}) bool { return k == v }
+       }
+}
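
A sketch of looking up a value by a comparison function rather than a key; the box type is hypothetical, and the statements are assumed to run inside a function:

    type box struct{ id string }

    cv := &ComparedValue{
        Func: func(k, v interface{}) bool { return v.(*box).id == k.(string) },
    }
    go cv.SetValue(&box{id: "web"})
    got := cv.Value("web") // blocks until a value matching "web" is set
    _ = got
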
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/keyed_value.go
new file mode 100644 (file)
index 0000000..432b036
--- /dev/null
@@ -0,0 +1,151 @@
+package shadow
+
+import (
+       "sync"
+)
+
+// KeyedValue is a struct that coordinates a value by key. If a value is
+// not available for a given key, it'll block until it is available.
+type KeyedValue struct {
+       lock    sync.Mutex
+       once    sync.Once
+       values  map[string]interface{}
+       waiters map[string]*Value
+       closed  bool
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the ErrClosed docs.
+func (w *KeyedValue) Close() error {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       // Set closed to true always
+       w.closed = true
+
+       // For all waiters, complete with ErrClosed
+       for k, val := range w.waiters {
+               val.SetValue(ErrClosed)
+               delete(w.waiters, k)
+       }
+
+       return nil
+}
+
+// Value returns the value that was set for the given key, or blocks
+// until one is available.
+func (w *KeyedValue) Value(k string) interface{} {
+       w.lock.Lock()
+       v, val := w.valueWaiter(k)
+       w.lock.Unlock()
+
+       // If we have no waiter, then return the value
+       if val == nil {
+               return v
+       }
+
+       // We have a waiter, so wait
+       return val.Value()
+}
+
+// WaitForChange waits for the value with the given key to be set again.
+// If the key isn't set, it'll wait for an initial value. Note that while
+// it is called "WaitForChange", the value isn't guaranteed to _change_;
+// this will return when a SetValue is called for the given k.
+func (w *KeyedValue) WaitForChange(k string) interface{} {
+       w.lock.Lock()
+       w.once.Do(w.init)
+
+       // If we're closed, we're closed
+       if w.closed {
+               w.lock.Unlock()
+               return ErrClosed
+       }
+
+       // Check for an active waiter. If there isn't one, make it
+       val := w.waiters[k]
+       if val == nil {
+               val = new(Value)
+               w.waiters[k] = val
+       }
+       w.lock.Unlock()
+
+       // And wait
+       return val.Value()
+}
+
+// ValueOk gets the value for the given key, returning immediately if the
+// value doesn't exist. The second return argument is true if the value exists.
+func (w *KeyedValue) ValueOk(k string) (interface{}, bool) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       v, val := w.valueWaiter(k)
+       return v, val == nil
+}
+
+func (w *KeyedValue) SetValue(k string, v interface{}) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+       w.setValue(k, v)
+}
+
+// Init will initialize the key to a given value only if the key has
+// not been set before. This is safe to call multiple times and in parallel.
+func (w *KeyedValue) Init(k string, v interface{}) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       // If we have a waiter, set the value.
+       _, val := w.valueWaiter(k)
+       if val != nil {
+               w.setValue(k, v)
+       }
+}
+
+// Must be called with w.lock held.
+func (w *KeyedValue) init() {
+       w.values = make(map[string]interface{})
+       w.waiters = make(map[string]*Value)
+}
+
+// setValue is like SetValue but assumes the lock is held.
+func (w *KeyedValue) setValue(k string, v interface{}) {
+       w.once.Do(w.init)
+
+       // Set the value, always
+       w.values[k] = v
+
+       // If we have a waiter, set it
+       if val, ok := w.waiters[k]; ok {
+               val.SetValue(v)
+               delete(w.waiters, k)
+       }
+}
+
+// valueWaiter gets the value or the Value waiter for a given key.
+//
+// This must be called with lock held.
+func (w *KeyedValue) valueWaiter(k string) (interface{}, *Value) {
+       w.once.Do(w.init)
+
+       // If we have this value already, return it
+       if v, ok := w.values[k]; ok {
+               return v, nil
+       }
+
+       // If we're closed, return that
+       if w.closed {
+               return ErrClosed, nil
+       }
+
+       // No pending value, check for a waiter
+       val := w.waiters[k]
+       if val == nil {
+               val = new(Value)
+               w.waiters[k] = val
+       }
+
+       // Return the waiter
+       return nil, val
+}
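
A sketch of the blocking handoff between two goroutines (statements assumed inside a function; fmt import assumed):

    var kv KeyedValue
    go kv.SetValue("plan", 42)
    fmt.Println(kv.Value("plan")) // blocks until SetValue runs, then prints 42
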
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/ordered_value.go
new file mode 100644 (file)
index 0000000..0a43d4d
--- /dev/null
@@ -0,0 +1,66 @@
+package shadow
+
+import (
+       "container/list"
+       "sync"
+)
+
+// OrderedValue is a struct that keeps track of values in the order they
+// are set. Each time Value() is called, it returns the next value in that
+// order and then discards it.
+//
+// This is unlike Value that returns the same value once it is set.
+type OrderedValue struct {
+       lock    sync.Mutex
+       values  *list.List
+       waiters *list.List
+}
+
+// Value returns the next pending value in the order it was set, or
+// blocks until one is received.
+func (w *OrderedValue) Value() interface{} {
+       w.lock.Lock()
+
+       // If we have a pending value already, use it
+       if w.values != nil && w.values.Len() > 0 {
+               front := w.values.Front()
+               w.values.Remove(front)
+               w.lock.Unlock()
+               return front.Value
+       }
+
+       // No pending value, create a waiter
+       if w.waiters == nil {
+               w.waiters = list.New()
+       }
+
+       var val Value
+       w.waiters.PushBack(&val)
+       w.lock.Unlock()
+
+       // Return the value once we have it
+       return val.Value()
+}
+
+// SetValue sets the latest value.
+func (w *OrderedValue) SetValue(v interface{}) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       // If we have a waiter, notify it
+       if w.waiters != nil && w.waiters.Len() > 0 {
+               front := w.waiters.Front()
+               w.waiters.Remove(front)
+
+               val := front.Value.(*Value)
+               val.SetValue(v)
+               return
+       }
+
+       // Add it to the list of values
+       if w.values == nil {
+               w.values = list.New()
+       }
+
+       w.values.PushBack(v)
+}
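A small sketch (illustrative, not part of this commit) showing the FIFO behavior: values come back from Value in the order SetValue stored them.

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/shadow"
)

func main() {
        var ov shadow.OrderedValue

        ov.SetValue("first")
        ov.SetValue("second")

        fmt.Println(ov.Value()) // "first": oldest pending value is returned
        fmt.Println(ov.Value()) // "second"
}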
diff --git a/vendor/github.com/hashicorp/terraform/helper/shadow/value.go b/vendor/github.com/hashicorp/terraform/helper/shadow/value.go
new file mode 100644 (file)
index 0000000..2413335
--- /dev/null
@@ -0,0 +1,79 @@
+package shadow
+
+import (
+       "errors"
+       "sync"
+)
+
+// ErrClosed is returned by any closed values.
+//
+// A "closed value" is when the shadow has been notified that the real
+// side is complete and any blocking values will _never_ be satisfied
+// in the future. In this case, this error is returned. If a value is already
+// available, that is still returned.
+var ErrClosed = errors.New("shadow closed")
+
+// Value is a struct that coordinates a value between two
+// parallel routines. It is similar to atomic.Value except that a call
+// to Value blocks until a value has been set.
+//
+// The Value can be closed with Close, which will cause any future
+// blocking operations to return immediately with ErrClosed.
+type Value struct {
+       lock     sync.Mutex
+       cond     *sync.Cond
+       value    interface{}
+       valueSet bool
+}
+
+// Close closes the value. This can never fail. For a definition of
+// "close" see the struct docs.
+func (w *Value) Close() error {
+       w.lock.Lock()
+       set := w.valueSet
+       w.lock.Unlock()
+
+       // If we haven't set the value, set it
+       if !set {
+               w.SetValue(ErrClosed)
+       }
+
+       // Done
+       return nil
+}
+
+// Value returns the value that was set.
+func (w *Value) Value() interface{} {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       // Wait until a value has been set; if one already has, skip the loop
+       for !w.valueSet {
+               // No value yet; set up the condition variable if we have to
+               if w.cond == nil {
+                       w.cond = sync.NewCond(&w.lock)
+               }
+
+               // Wait on it
+               w.cond.Wait()
+       }
+
+       // Return the value
+       return w.value
+}
+
+// SetValue sets the value.
+func (w *Value) SetValue(v interface{}) {
+       w.lock.Lock()
+       defer w.lock.Unlock()
+
+       // Set the value
+       w.valueSet = true
+       w.value = v
+
+       // If we have a condition, clear it
+       if w.cond != nil {
+               w.cond.Broadcast()
+               w.cond = nil
+       }
+}
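For illustration (not part of this commit): a Value blocks readers until SetValue runs, and Close satisfies any future reads with ErrClosed when no value was ever set.

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/shadow"
)

func main() {
        var v shadow.Value
        go v.SetValue(42)
        fmt.Println(v.Value()) // blocks until SetValue runs, then prints 42

        var closed shadow.Value
        closed.Close()
        // Close on an unset Value stores ErrClosed, so reads return it.
        fmt.Println(closed.Value() == shadow.ErrClosed) // true
}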
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/expand_json.go
new file mode 100644 (file)
index 0000000..b3eb90f
--- /dev/null
@@ -0,0 +1,11 @@
+package structure
+
+import "encoding/json"
+
+func ExpandJsonFromString(jsonString string) (map[string]interface{}, error) {
+       var result map[string]interface{}
+
+       err := json.Unmarshal([]byte(jsonString), &result)
+
+       return result, err
+}
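A quick usage sketch (not part of this commit). Note that encoding/json decodes JSON numbers into float64 when the target is interface{}:

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/structure"
)

func main() {
        m, err := structure.ExpandJsonFromString(`{"port": 80, "tags": ["web"]}`)
        if err != nil {
                panic(err)
        }
        fmt.Println(m["port"].(float64)) // 80: JSON numbers decode as float64
        fmt.Println(m["tags"])           // [web]
}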
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/flatten_json.go
new file mode 100644 (file)
index 0000000..578ad2e
--- /dev/null
@@ -0,0 +1,16 @@
+package structure
+
+import "encoding/json"
+
+func FlattenJsonToString(input map[string]interface{}) (string, error) {
+       if len(input) == 0 {
+               return "", nil
+       }
+
+       result, err := json.Marshal(input)
+       if err != nil {
+               return "", err
+       }
+
+       return string(result), nil
+}
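Illustrative counterpart to the above (not part of this commit). One edge worth noting: an empty or nil map flattens to the empty string rather than "{}":

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/structure"
)

func main() {
        s, _ := structure.FlattenJsonToString(map[string]interface{}{"a": 1})
        fmt.Println(s) // {"a":1}

        empty, _ := structure.FlattenJsonToString(nil)
        fmt.Println(empty == "") // true: empty input flattens to "", not "{}"
}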
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go b/vendor/github.com/hashicorp/terraform/helper/structure/normalize_json.go
new file mode 100644 (file)
index 0000000..3256b47
--- /dev/null
@@ -0,0 +1,24 @@
+package structure
+
+import "encoding/json"
+
+// NormalizeJsonString takes a value containing a JSON string and passes
+// it through the JSON parser to normalize it. It returns either a parsing
+// error or the normalized JSON string.
+func NormalizeJsonString(jsonString interface{}) (string, error) {
+       var j interface{}
+
+       if jsonString == nil || jsonString.(string) == "" {
+               return "", nil
+       }
+
+       s := jsonString.(string)
+
+       err := json.Unmarshal([]byte(s), &j)
+       if err != nil {
+               return s, err
+       }
+
+       bytes, _ := json.Marshal(j)
+       return string(bytes[:]), nil
+}
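A sketch of the normalization (not part of this commit): whitespace is stripped and object keys come out sorted, because json.Marshal emits map keys in sorted order.

package main

import (
        "fmt"

        "github.com/hashicorp/terraform/helper/structure"
)

func main() {
        norm, err := structure.NormalizeJsonString(`{ "b": 2,  "a": 1 }`)
        if err != nil {
                panic(err)
        }
        fmt.Println(norm) // {"a":1,"b":2}
}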
diff --git a/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go b/vendor/github.com/hashicorp/terraform/helper/structure/suppress_json_diff.go
new file mode 100644 (file)
index 0000000..46f794a
--- /dev/null
@@ -0,0 +1,21 @@
+package structure
+
+import (
+       "reflect"
+
+       "github.com/hashicorp/terraform/helper/schema"
+)
+
+func SuppressJsonDiff(k, old, new string, d *schema.ResourceData) bool {
+       oldMap, err := ExpandJsonFromString(old)
+       if err != nil {
+               return false
+       }
+
+       newMap, err := ExpandJsonFromString(new)
+       if err != nil {
+               return false
+       }
+
+       return reflect.DeepEqual(oldMap, newMap)
+}
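SuppressJsonDiff matches the schema.SchemaDiffSuppressFunc signature, so it can be attached to a string attribute holding JSON to ignore purely cosmetic differences. A hypothetical attribute definition (the name "policy_json" is illustrative):

package example

import (
        "github.com/hashicorp/terraform/helper/schema"
        "github.com/hashicorp/terraform/helper/structure"
)

// Hypothetical attribute map demonstrating the wiring.
var resourceSchema = map[string]*schema.Schema{
        "policy_json": {
                Type:             schema.TypeString,
                Optional:         true,
                DiffSuppressFunc: structure.SuppressJsonDiff,
        },
}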
diff --git a/vendor/github.com/hashicorp/terraform/helper/validation/validation.go b/vendor/github.com/hashicorp/terraform/helper/validation/validation.go
new file mode 100644 (file)
index 0000000..7b894f5
--- /dev/null
@@ -0,0 +1,108 @@
+package validation
+
+import (
+       "fmt"
+       "net"
+       "strings"
+
+       "github.com/hashicorp/terraform/helper/schema"
+       "github.com/hashicorp/terraform/helper/structure"
+)
+
+// IntBetween returns a SchemaValidateFunc which tests if the provided value
+// is of type int and is between min and max (inclusive)
+func IntBetween(min, max int) schema.SchemaValidateFunc {
+       return func(i interface{}, k string) (s []string, es []error) {
+               v, ok := i.(int)
+               if !ok {
+                       es = append(es, fmt.Errorf("expected type of %s to be int", k))
+                       return
+               }
+
+               if v < min || v > max {
+                       es = append(es, fmt.Errorf("expected %s to be in the range (%d - %d), got %d", k, min, max, v))
+                       return
+               }
+
+               return
+       }
+}
+
+// StringInSlice returns a SchemaValidateFunc which tests if the provided value
+// is of type string and matches the value of an element in the valid slice.
+// The comparison is done in lower case when ignoreCase is true.
+func StringInSlice(valid []string, ignoreCase bool) schema.SchemaValidateFunc {
+       return func(i interface{}, k string) (s []string, es []error) {
+               v, ok := i.(string)
+               if !ok {
+                       es = append(es, fmt.Errorf("expected type of %s to be string", k))
+                       return
+               }
+
+               for _, str := range valid {
+                       if v == str || (ignoreCase && strings.ToLower(v) == strings.ToLower(str)) {
+                               return
+                       }
+               }
+
+               es = append(es, fmt.Errorf("expected %s to be one of %v, got %s", k, valid, v))
+               return
+       }
+}
+
+// StringLenBetween returns a SchemaValidateFunc which tests if the provided value
+// is of type string and has length between min and max (inclusive)
+func StringLenBetween(min, max int) schema.SchemaValidateFunc {
+       return func(i interface{}, k string) (s []string, es []error) {
+               v, ok := i.(string)
+               if !ok {
+                       es = append(es, fmt.Errorf("expected type of %s to be string", k))
+                       return
+               }
+               if len(v) < min || len(v) > max {
+                       es = append(es, fmt.Errorf("expected length of %s to be in the range (%d - %d), got %s", k, min, max, v))
+               }
+               return
+       }
+}
+
+// CIDRNetwork returns a SchemaValidateFunc which tests if the provided value
+// is of type string, is in valid CIDR network notation, and has significant bits between min and max (inclusive)
+func CIDRNetwork(min, max int) schema.SchemaValidateFunc {
+       return func(i interface{}, k string) (s []string, es []error) {
+               v, ok := i.(string)
+               if !ok {
+                       es = append(es, fmt.Errorf("expected type of %s to be string", k))
+                       return
+               }
+
+               _, ipnet, err := net.ParseCIDR(v)
+               if err != nil {
+                       es = append(es, fmt.Errorf(
+                               "expected %s to contain a valid CIDR, got: %s with err: %s", k, v, err))
+                       return
+               }
+
+               if ipnet == nil || v != ipnet.String() {
+                       es = append(es, fmt.Errorf(
+                               "expected %s to contain a valid network CIDR, expected %s, got %s",
+                               k, ipnet, v))
+               }
+
+               sigbits, _ := ipnet.Mask.Size()
+               if sigbits < min || sigbits > max {
+                       es = append(es, fmt.Errorf(
+                               "expected %q to contain a network CIDR with between %d and %d significant bits, got: %d",
+                               k, min, max, sigbits))
+               }
+
+               return
+       }
+}
+
+func ValidateJsonString(v interface{}, k string) (ws []string, errors []error) {
+       if _, err := structure.NormalizeJsonString(v); err != nil {
+               errors = append(errors, fmt.Errorf("%q contains invalid JSON: %s", k, err))
+       }
+       return
+}
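These validators plug into schema.Schema via ValidateFunc. A hypothetical sketch (attribute names and allowed values are illustrative):

package example

import (
        "github.com/hashicorp/terraform/helper/schema"
        "github.com/hashicorp/terraform/helper/validation"
)

var checkSchema = map[string]*schema.Schema{
        "check_type": {
                Type:         schema.TypeString,
                Required:     true,
                ValidateFunc: validation.StringInSlice([]string{"HTTP", "TCP", "PING"}, false),
        },
        "timeout": {
                Type:         schema.TypeInt,
                Optional:     true,
                ValidateFunc: validation.IntBetween(5, 100),
        },
        "config_json": {
                Type:         schema.TypeString,
                Optional:     true,
                ValidateFunc: validation.ValidateJsonString,
        },
}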
diff --git a/vendor/github.com/hashicorp/terraform/plugin/plugin.go b/vendor/github.com/hashicorp/terraform/plugin/plugin.go
new file mode 100644 (file)
index 0000000..00fa7b2
--- /dev/null
@@ -0,0 +1,13 @@
+package plugin
+
+import (
+       "github.com/hashicorp/go-plugin"
+)
+
+// See serve.go for serving plugins
+
+// PluginMap should be used by clients for the map of plugins.
+var PluginMap = map[string]plugin.Plugin{
+       "provider":    &ResourceProviderPlugin{},
+       "provisioner": &ResourceProvisionerPlugin{},
+}
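PluginMap is what a client passes to go-plugin when launching a provider binary; the nil F fields are fine on the client side because only Client() is invoked there. A minimal sketch, assuming a hypothetical binary name:

package main

import (
        "os/exec"

        goplugin "github.com/hashicorp/go-plugin"
        tfplugin "github.com/hashicorp/terraform/plugin"
        "github.com/hashicorp/terraform/terraform"
)

func main() {
        client := goplugin.NewClient(&goplugin.ClientConfig{
                HandshakeConfig: tfplugin.Handshake,
                Plugins:         tfplugin.PluginMap,
                Cmd:             exec.Command("terraform-provider-example"), // hypothetical binary
        })
        defer client.Kill()

        rpcClient, err := client.Client()
        if err != nil {
                panic(err)
        }
        raw, err := rpcClient.Dispense("provider")
        if err != nil {
                panic(err)
        }
        provider := raw.(terraform.ResourceProvider)
        _ = provider // ready for Configure, Diff, Apply, etc.
}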
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provider.go
new file mode 100644 (file)
index 0000000..473f786
--- /dev/null
@@ -0,0 +1,578 @@
+package plugin
+
+import (
+       "net/rpc"
+
+       "github.com/hashicorp/go-plugin"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceProviderPlugin is the plugin.Plugin implementation.
+type ResourceProviderPlugin struct {
+       F func() terraform.ResourceProvider
+}
+
+func (p *ResourceProviderPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
+       return &ResourceProviderServer{Broker: b, Provider: p.F()}, nil
+}
+
+func (p *ResourceProviderPlugin) Client(
+       b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+       return &ResourceProvider{Broker: b, Client: c}, nil
+}
+
+// ResourceProvider is an implementation of terraform.ResourceProvider
+// that communicates over RPC.
+type ResourceProvider struct {
+       Broker *plugin.MuxBroker
+       Client *rpc.Client
+}
+
+func (p *ResourceProvider) Stop() error {
+       var resp ResourceProviderStopResponse
+       err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
+       if err != nil {
+               return err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return err
+}
+
+func (p *ResourceProvider) Input(
+       input terraform.UIInput,
+       c *terraform.ResourceConfig) (*terraform.ResourceConfig, error) {
+       id := p.Broker.NextId()
+       go p.Broker.AcceptAndServe(id, &UIInputServer{
+               UIInput: input,
+       })
+
+       var resp ResourceProviderInputResponse
+       args := ResourceProviderInputArgs{
+               InputId: id,
+               Config:  c,
+       }
+
+       err := p.Client.Call("Plugin.Input", &args, &resp)
+       if err != nil {
+               return nil, err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+               return nil, err
+       }
+
+       return resp.Config, nil
+}
+
+func (p *ResourceProvider) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+       var resp ResourceProviderValidateResponse
+       args := ResourceProviderValidateArgs{
+               Config: c,
+       }
+
+       err := p.Client.Call("Plugin.Validate", &args, &resp)
+       if err != nil {
+               return nil, []error{err}
+       }
+
+       var errs []error
+       if len(resp.Errors) > 0 {
+               errs = make([]error, len(resp.Errors))
+               for i, err := range resp.Errors {
+                       errs[i] = err
+               }
+       }
+
+       return resp.Warnings, errs
+}
+
+func (p *ResourceProvider) ValidateResource(
+       t string, c *terraform.ResourceConfig) ([]string, []error) {
+       var resp ResourceProviderValidateResourceResponse
+       args := ResourceProviderValidateResourceArgs{
+               Config: c,
+               Type:   t,
+       }
+
+       err := p.Client.Call("Plugin.ValidateResource", &args, &resp)
+       if err != nil {
+               return nil, []error{err}
+       }
+
+       var errs []error
+       if len(resp.Errors) > 0 {
+               errs = make([]error, len(resp.Errors))
+               for i, err := range resp.Errors {
+                       errs[i] = err
+               }
+       }
+
+       return resp.Warnings, errs
+}
+
+func (p *ResourceProvider) Configure(c *terraform.ResourceConfig) error {
+       var resp ResourceProviderConfigureResponse
+       err := p.Client.Call("Plugin.Configure", c, &resp)
+       if err != nil {
+               return err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return err
+}
+
+func (p *ResourceProvider) Apply(
+       info *terraform.InstanceInfo,
+       s *terraform.InstanceState,
+       d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+       var resp ResourceProviderApplyResponse
+       args := &ResourceProviderApplyArgs{
+               Info:  info,
+               State: s,
+               Diff:  d,
+       }
+
+       err := p.Client.Call("Plugin.Apply", args, &resp)
+       if err != nil {
+               return nil, err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return resp.State, err
+}
+
+func (p *ResourceProvider) Diff(
+       info *terraform.InstanceInfo,
+       s *terraform.InstanceState,
+       c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+       var resp ResourceProviderDiffResponse
+       args := &ResourceProviderDiffArgs{
+               Info:   info,
+               State:  s,
+               Config: c,
+       }
+       err := p.Client.Call("Plugin.Diff", args, &resp)
+       if err != nil {
+               return nil, err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return resp.Diff, err
+}
+
+func (p *ResourceProvider) ValidateDataSource(
+       t string, c *terraform.ResourceConfig) ([]string, []error) {
+       var resp ResourceProviderValidateResourceResponse
+       args := ResourceProviderValidateResourceArgs{
+               Config: c,
+               Type:   t,
+       }
+
+       err := p.Client.Call("Plugin.ValidateDataSource", &args, &resp)
+       if err != nil {
+               return nil, []error{err}
+       }
+
+       var errs []error
+       if len(resp.Errors) > 0 {
+               errs = make([]error, len(resp.Errors))
+               for i, err := range resp.Errors {
+                       errs[i] = err
+               }
+       }
+
+       return resp.Warnings, errs
+}
+
+func (p *ResourceProvider) Refresh(
+       info *terraform.InstanceInfo,
+       s *terraform.InstanceState) (*terraform.InstanceState, error) {
+       var resp ResourceProviderRefreshResponse
+       args := &ResourceProviderRefreshArgs{
+               Info:  info,
+               State: s,
+       }
+
+       err := p.Client.Call("Plugin.Refresh", args, &resp)
+       if err != nil {
+               return nil, err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return resp.State, err
+}
+
+func (p *ResourceProvider) ImportState(
+       info *terraform.InstanceInfo,
+       id string) ([]*terraform.InstanceState, error) {
+       var resp ResourceProviderImportStateResponse
+       args := &ResourceProviderImportStateArgs{
+               Info: info,
+               Id:   id,
+       }
+
+       err := p.Client.Call("Plugin.ImportState", args, &resp)
+       if err != nil {
+               return nil, err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return resp.State, err
+}
+
+func (p *ResourceProvider) Resources() []terraform.ResourceType {
+       var result []terraform.ResourceType
+
+       err := p.Client.Call("Plugin.Resources", new(interface{}), &result)
+       if err != nil {
+               // TODO: panic, log, what?
+               return nil
+       }
+
+       return result
+}
+
+func (p *ResourceProvider) ReadDataDiff(
+       info *terraform.InstanceInfo,
+       c *terraform.ResourceConfig) (*terraform.InstanceDiff, error) {
+       var resp ResourceProviderReadDataDiffResponse
+       args := &ResourceProviderReadDataDiffArgs{
+               Info:   info,
+               Config: c,
+       }
+
+       err := p.Client.Call("Plugin.ReadDataDiff", args, &resp)
+       if err != nil {
+               return nil, err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return resp.Diff, err
+}
+
+func (p *ResourceProvider) ReadDataApply(
+       info *terraform.InstanceInfo,
+       d *terraform.InstanceDiff) (*terraform.InstanceState, error) {
+       var resp ResourceProviderReadDataApplyResponse
+       args := &ResourceProviderReadDataApplyArgs{
+               Info: info,
+               Diff: d,
+       }
+
+       err := p.Client.Call("Plugin.ReadDataApply", args, &resp)
+       if err != nil {
+               return nil, err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return resp.State, err
+}
+
+func (p *ResourceProvider) DataSources() []terraform.DataSource {
+       var result []terraform.DataSource
+
+       err := p.Client.Call("Plugin.DataSources", new(interface{}), &result)
+       if err != nil {
+               // TODO: panic, log, what?
+               return nil
+       }
+
+       return result
+}
+
+func (p *ResourceProvider) Close() error {
+       return p.Client.Close()
+}
+
+// ResourceProviderServer is a net/rpc compatible structure for serving
+// a ResourceProvider. This should not be used directly.
+type ResourceProviderServer struct {
+       Broker   *plugin.MuxBroker
+       Provider terraform.ResourceProvider
+}
+
+type ResourceProviderStopResponse struct {
+       Error *plugin.BasicError
+}
+
+type ResourceProviderConfigureResponse struct {
+       Error *plugin.BasicError
+}
+
+type ResourceProviderInputArgs struct {
+       InputId uint32
+       Config  *terraform.ResourceConfig
+}
+
+type ResourceProviderInputResponse struct {
+       Config *terraform.ResourceConfig
+       Error  *plugin.BasicError
+}
+
+type ResourceProviderApplyArgs struct {
+       Info  *terraform.InstanceInfo
+       State *terraform.InstanceState
+       Diff  *terraform.InstanceDiff
+}
+
+type ResourceProviderApplyResponse struct {
+       State *terraform.InstanceState
+       Error *plugin.BasicError
+}
+
+type ResourceProviderDiffArgs struct {
+       Info   *terraform.InstanceInfo
+       State  *terraform.InstanceState
+       Config *terraform.ResourceConfig
+}
+
+type ResourceProviderDiffResponse struct {
+       Diff  *terraform.InstanceDiff
+       Error *plugin.BasicError
+}
+
+type ResourceProviderRefreshArgs struct {
+       Info  *terraform.InstanceInfo
+       State *terraform.InstanceState
+}
+
+type ResourceProviderRefreshResponse struct {
+       State *terraform.InstanceState
+       Error *plugin.BasicError
+}
+
+type ResourceProviderImportStateArgs struct {
+       Info *terraform.InstanceInfo
+       Id   string
+}
+
+type ResourceProviderImportStateResponse struct {
+       State []*terraform.InstanceState
+       Error *plugin.BasicError
+}
+
+type ResourceProviderReadDataApplyArgs struct {
+       Info *terraform.InstanceInfo
+       Diff *terraform.InstanceDiff
+}
+
+type ResourceProviderReadDataApplyResponse struct {
+       State *terraform.InstanceState
+       Error *plugin.BasicError
+}
+
+type ResourceProviderReadDataDiffArgs struct {
+       Info   *terraform.InstanceInfo
+       Config *terraform.ResourceConfig
+}
+
+type ResourceProviderReadDataDiffResponse struct {
+       Diff  *terraform.InstanceDiff
+       Error *plugin.BasicError
+}
+
+type ResourceProviderValidateArgs struct {
+       Config *terraform.ResourceConfig
+}
+
+type ResourceProviderValidateResponse struct {
+       Warnings []string
+       Errors   []*plugin.BasicError
+}
+
+type ResourceProviderValidateResourceArgs struct {
+       Config *terraform.ResourceConfig
+       Type   string
+}
+
+type ResourceProviderValidateResourceResponse struct {
+       Warnings []string
+       Errors   []*plugin.BasicError
+}
+
+func (s *ResourceProviderServer) Stop(
+       _ interface{},
+       reply *ResourceProviderStopResponse) error {
+       err := s.Provider.Stop()
+       *reply = ResourceProviderStopResponse{
+               Error: plugin.NewBasicError(err),
+       }
+
+       return nil
+}
+
+func (s *ResourceProviderServer) Input(
+       args *ResourceProviderInputArgs,
+       reply *ResourceProviderInputResponse) error {
+       conn, err := s.Broker.Dial(args.InputId)
+       if err != nil {
+               *reply = ResourceProviderInputResponse{
+                       Error: plugin.NewBasicError(err),
+               }
+               return nil
+       }
+       client := rpc.NewClient(conn)
+       defer client.Close()
+
+       input := &UIInput{Client: client}
+
+       config, err := s.Provider.Input(input, args.Config)
+       *reply = ResourceProviderInputResponse{
+               Config: config,
+               Error:  plugin.NewBasicError(err),
+       }
+
+       return nil
+}
+
+func (s *ResourceProviderServer) Validate(
+       args *ResourceProviderValidateArgs,
+       reply *ResourceProviderValidateResponse) error {
+       warns, errs := s.Provider.Validate(args.Config)
+       berrs := make([]*plugin.BasicError, len(errs))
+       for i, err := range errs {
+               berrs[i] = plugin.NewBasicError(err)
+       }
+       *reply = ResourceProviderValidateResponse{
+               Warnings: warns,
+               Errors:   berrs,
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) ValidateResource(
+       args *ResourceProviderValidateResourceArgs,
+       reply *ResourceProviderValidateResourceResponse) error {
+       warns, errs := s.Provider.ValidateResource(args.Type, args.Config)
+       berrs := make([]*plugin.BasicError, len(errs))
+       for i, err := range errs {
+               berrs[i] = plugin.NewBasicError(err)
+       }
+       *reply = ResourceProviderValidateResourceResponse{
+               Warnings: warns,
+               Errors:   berrs,
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) Configure(
+       config *terraform.ResourceConfig,
+       reply *ResourceProviderConfigureResponse) error {
+       err := s.Provider.Configure(config)
+       *reply = ResourceProviderConfigureResponse{
+               Error: plugin.NewBasicError(err),
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) Apply(
+       args *ResourceProviderApplyArgs,
+       result *ResourceProviderApplyResponse) error {
+       state, err := s.Provider.Apply(args.Info, args.State, args.Diff)
+       *result = ResourceProviderApplyResponse{
+               State: state,
+               Error: plugin.NewBasicError(err),
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) Diff(
+       args *ResourceProviderDiffArgs,
+       result *ResourceProviderDiffResponse) error {
+       diff, err := s.Provider.Diff(args.Info, args.State, args.Config)
+       *result = ResourceProviderDiffResponse{
+               Diff:  diff,
+               Error: plugin.NewBasicError(err),
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) Refresh(
+       args *ResourceProviderRefreshArgs,
+       result *ResourceProviderRefreshResponse) error {
+       newState, err := s.Provider.Refresh(args.Info, args.State)
+       *result = ResourceProviderRefreshResponse{
+               State: newState,
+               Error: plugin.NewBasicError(err),
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) ImportState(
+       args *ResourceProviderImportStateArgs,
+       result *ResourceProviderImportStateResponse) error {
+       states, err := s.Provider.ImportState(args.Info, args.Id)
+       *result = ResourceProviderImportStateResponse{
+               State: states,
+               Error: plugin.NewBasicError(err),
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) Resources(
+       nothing interface{},
+       result *[]terraform.ResourceType) error {
+       *result = s.Provider.Resources()
+       return nil
+}
+
+func (s *ResourceProviderServer) ValidateDataSource(
+       args *ResourceProviderValidateResourceArgs,
+       reply *ResourceProviderValidateResourceResponse) error {
+       warns, errs := s.Provider.ValidateDataSource(args.Type, args.Config)
+       berrs := make([]*plugin.BasicError, len(errs))
+       for i, err := range errs {
+               berrs[i] = plugin.NewBasicError(err)
+       }
+       *reply = ResourceProviderValidateResourceResponse{
+               Warnings: warns,
+               Errors:   berrs,
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) ReadDataDiff(
+       args *ResourceProviderReadDataDiffArgs,
+       result *ResourceProviderReadDataDiffResponse) error {
+       diff, err := s.Provider.ReadDataDiff(args.Info, args.Config)
+       *result = ResourceProviderReadDataDiffResponse{
+               Diff:  diff,
+               Error: plugin.NewBasicError(err),
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) ReadDataApply(
+       args *ResourceProviderReadDataApplyArgs,
+       result *ResourceProviderReadDataApplyResponse) error {
+       newState, err := s.Provider.ReadDataApply(args.Info, args.Diff)
+       *result = ResourceProviderReadDataApplyResponse{
+               State: newState,
+               Error: plugin.NewBasicError(err),
+       }
+       return nil
+}
+
+func (s *ResourceProviderServer) DataSources(
+       nothing interface{},
+       result *[]terraform.DataSource) error {
+       *result = s.Provider.DataSources()
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/plugin/resource_provisioner.go
new file mode 100644 (file)
index 0000000..8fce9d8
--- /dev/null
@@ -0,0 +1,173 @@
+package plugin
+
+import (
+       "net/rpc"
+
+       "github.com/hashicorp/go-plugin"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// ResourceProvisionerPlugin is the plugin.Plugin implementation.
+type ResourceProvisionerPlugin struct {
+       F func() terraform.ResourceProvisioner
+}
+
+func (p *ResourceProvisionerPlugin) Server(b *plugin.MuxBroker) (interface{}, error) {
+       return &ResourceProvisionerServer{Broker: b, Provisioner: p.F()}, nil
+}
+
+func (p *ResourceProvisionerPlugin) Client(
+       b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) {
+       return &ResourceProvisioner{Broker: b, Client: c}, nil
+}
+
+// ResourceProvisioner is an implementation of terraform.ResourceProvisioner
+// that communicates over RPC.
+type ResourceProvisioner struct {
+       Broker *plugin.MuxBroker
+       Client *rpc.Client
+}
+
+func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) ([]string, []error) {
+       var resp ResourceProvisionerValidateResponse
+       args := ResourceProvisionerValidateArgs{
+               Config: c,
+       }
+
+       err := p.Client.Call("Plugin.Validate", &args, &resp)
+       if err != nil {
+               return nil, []error{err}
+       }
+
+       var errs []error
+       if len(resp.Errors) > 0 {
+               errs = make([]error, len(resp.Errors))
+               for i, err := range resp.Errors {
+                       errs[i] = err
+               }
+       }
+
+       return resp.Warnings, errs
+}
+
+func (p *ResourceProvisioner) Apply(
+       output terraform.UIOutput,
+       s *terraform.InstanceState,
+       c *terraform.ResourceConfig) error {
+       id := p.Broker.NextId()
+       go p.Broker.AcceptAndServe(id, &UIOutputServer{
+               UIOutput: output,
+       })
+
+       var resp ResourceProvisionerApplyResponse
+       args := &ResourceProvisionerApplyArgs{
+               OutputId: id,
+               State:    s,
+               Config:   c,
+       }
+
+       err := p.Client.Call("Plugin.Apply", args, &resp)
+       if err != nil {
+               return err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return err
+}
+
+func (p *ResourceProvisioner) Stop() error {
+       var resp ResourceProvisionerStopResponse
+       err := p.Client.Call("Plugin.Stop", new(interface{}), &resp)
+       if err != nil {
+               return err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+       }
+
+       return err
+}
+
+func (p *ResourceProvisioner) Close() error {
+       return p.Client.Close()
+}
+
+type ResourceProvisionerValidateArgs struct {
+       Config *terraform.ResourceConfig
+}
+
+type ResourceProvisionerValidateResponse struct {
+       Warnings []string
+       Errors   []*plugin.BasicError
+}
+
+type ResourceProvisionerApplyArgs struct {
+       OutputId uint32
+       State    *terraform.InstanceState
+       Config   *terraform.ResourceConfig
+}
+
+type ResourceProvisionerApplyResponse struct {
+       Error *plugin.BasicError
+}
+
+type ResourceProvisionerStopResponse struct {
+       Error *plugin.BasicError
+}
+
+// ResourceProvisionerServer is a net/rpc compatible structure for serving
+// a ResourceProvisioner. This should not be used directly.
+type ResourceProvisionerServer struct {
+       Broker      *plugin.MuxBroker
+       Provisioner terraform.ResourceProvisioner
+}
+
+func (s *ResourceProvisionerServer) Apply(
+       args *ResourceProvisionerApplyArgs,
+       result *ResourceProvisionerApplyResponse) error {
+       conn, err := s.Broker.Dial(args.OutputId)
+       if err != nil {
+               *result = ResourceProvisionerApplyResponse{
+                       Error: plugin.NewBasicError(err),
+               }
+               return nil
+       }
+       client := rpc.NewClient(conn)
+       defer client.Close()
+
+       output := &UIOutput{Client: client}
+
+       err = s.Provisioner.Apply(output, args.State, args.Config)
+       *result = ResourceProvisionerApplyResponse{
+               Error: plugin.NewBasicError(err),
+       }
+       return nil
+}
+
+func (s *ResourceProvisionerServer) Validate(
+       args *ResourceProvisionerValidateArgs,
+       reply *ResourceProvisionerValidateResponse) error {
+       warns, errs := s.Provisioner.Validate(args.Config)
+       berrs := make([]*plugin.BasicError, len(errs))
+       for i, err := range errs {
+               berrs[i] = plugin.NewBasicError(err)
+       }
+       *reply = ResourceProvisionerValidateResponse{
+               Warnings: warns,
+               Errors:   berrs,
+       }
+       return nil
+}
+
+func (s *ResourceProvisionerServer) Stop(
+       _ interface{},
+       reply *ResourceProvisionerStopResponse) error {
+       err := s.Provisioner.Stop()
+       *reply = ResourceProvisionerStopResponse{
+               Error: plugin.NewBasicError(err),
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/serve.go b/vendor/github.com/hashicorp/terraform/plugin/serve.go
new file mode 100644 (file)
index 0000000..2028a61
--- /dev/null
@@ -0,0 +1,54 @@
+package plugin
+
+import (
+       "github.com/hashicorp/go-plugin"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// The constants below are the names of the plugins that can be dispensed
+// from the plugin server.
+const (
+       ProviderPluginName    = "provider"
+       ProvisionerPluginName = "provisioner"
+)
+
+// Handshake is the HandshakeConfig used to configure clients and servers.
+var Handshake = plugin.HandshakeConfig{
+       // The ProtocolVersion is the version that must match between TF core
+       // and TF plugins. This should be bumped whenever a change happens in
+       // one or the other that makes it so that they can't safely communicate.
+       // This could be adding a new interface value, it could be how
+       // helper/schema computes diffs, etc.
+       ProtocolVersion: 4,
+
+       // The magic cookie values should NEVER be changed.
+       MagicCookieKey:   "TF_PLUGIN_MAGIC_COOKIE",
+       MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2",
+}
+
+type ProviderFunc func() terraform.ResourceProvider
+type ProvisionerFunc func() terraform.ResourceProvisioner
+
+// ServeOpts are the configurations to serve a plugin.
+type ServeOpts struct {
+       ProviderFunc    ProviderFunc
+       ProvisionerFunc ProvisionerFunc
+}
+
+// Serve serves a plugin. This function never returns and should be the final
+// function called in the main function of the plugin.
+func Serve(opts *ServeOpts) {
+       plugin.Serve(&plugin.ServeConfig{
+               HandshakeConfig: Handshake,
+               Plugins:         pluginMap(opts),
+       })
+}
+
+// pluginMap returns the map[string]plugin.Plugin to use for configuring a plugin
+// server or client.
+func pluginMap(opts *ServeOpts) map[string]plugin.Plugin {
+       return map[string]plugin.Plugin{
+               "provider":    &ResourceProviderPlugin{F: opts.ProviderFunc},
+               "provisioner": &ResourceProvisionerPlugin{F: opts.ProvisionerFunc},
+       }
+}
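Putting serve.go together: a provider's main function hands its constructor to Serve, which blocks for the life of the plugin process. A minimal sketch with a stub provider:

package main

import (
        "github.com/hashicorp/terraform/helper/schema"
        "github.com/hashicorp/terraform/plugin"
        "github.com/hashicorp/terraform/terraform"
)

// Provider is a stub; a real provider populates the schema.Provider
// with its configuration schema and resource map.
func Provider() terraform.ResourceProvider {
        return &schema.Provider{}
}

func main() {
        plugin.Serve(&plugin.ServeOpts{
                ProviderFunc: Provider,
        })
}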
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_input.go b/vendor/github.com/hashicorp/terraform/plugin/ui_input.go
new file mode 100644 (file)
index 0000000..493efc0
--- /dev/null
@@ -0,0 +1,51 @@
+package plugin
+
+import (
+       "net/rpc"
+
+       "github.com/hashicorp/go-plugin"
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// UIInput is an implementation of terraform.UIInput that communicates
+// over RPC.
+type UIInput struct {
+       Client *rpc.Client
+}
+
+func (i *UIInput) Input(opts *terraform.InputOpts) (string, error) {
+       var resp UIInputInputResponse
+       err := i.Client.Call("Plugin.Input", opts, &resp)
+       if err != nil {
+               return "", err
+       }
+       if resp.Error != nil {
+               err = resp.Error
+               return "", err
+       }
+
+       return resp.Value, nil
+}
+
+type UIInputInputResponse struct {
+       Value string
+       Error *plugin.BasicError
+}
+
+// UIInputServer is a net/rpc compatible structure for serving
+// a UIInput. This should not be used directly.
+type UIInputServer struct {
+       UIInput terraform.UIInput
+}
+
+func (s *UIInputServer) Input(
+       opts *terraform.InputOpts,
+       reply *UIInputInputResponse) error {
+       value, err := s.UIInput.Input(opts)
+       *reply = UIInputInputResponse{
+               Value: value,
+               Error: plugin.NewBasicError(err),
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/plugin/ui_output.go b/vendor/github.com/hashicorp/terraform/plugin/ui_output.go
new file mode 100644 (file)
index 0000000..c222b00
--- /dev/null
@@ -0,0 +1,29 @@
+package plugin
+
+import (
+       "net/rpc"
+
+       "github.com/hashicorp/terraform/terraform"
+)
+
+// UIOutput is an implementation of terraform.UIOutput that communicates
+// over RPC.
+type UIOutput struct {
+       Client *rpc.Client
+}
+
+func (o *UIOutput) Output(v string) {
+       o.Client.Call("Plugin.Output", v, new(interface{}))
+}
+
+// UIOutputServer is the RPC server for serving UIOutput.
+type UIOutputServer struct {
+       UIOutput terraform.UIOutput
+}
+
+func (s *UIOutputServer) Output(
+       v string,
+       reply *interface{}) error {
+       s.UIOutput.Output(v)
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go
new file mode 100644 (file)
index 0000000..306128e
--- /dev/null
@@ -0,0 +1,1022 @@
+package terraform
+
+import (
+       "context"
+       "fmt"
+       "log"
+       "sort"
+       "strings"
+       "sync"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/hcl"
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/helper/experiment"
+)
+
+// InputMode defines what sort of input will be asked for when Input
+// is called on Context.
+type InputMode byte
+
+const (
+       // InputModeVar asks for all variables
+       InputModeVar InputMode = 1 << iota
+
+       // InputModeVarUnset asks for variables which are not set yet.
+       // InputModeVar must be set for this to have an effect.
+       InputModeVarUnset
+
+       // InputModeProvider asks for provider variables
+       InputModeProvider
+
+       // InputModeStd is the standard operating mode and asks for both variables
+       // and providers.
+       InputModeStd = InputModeVar | InputModeProvider
+)
+
+var (
+       // contextFailOnShadowError will cause Context operations to return
+       // errors when shadow operations fail. This is only used for testing.
+       contextFailOnShadowError = false
+
+       // contextTestDeepCopyOnPlan will perform a Diff DeepCopy on every
+       // Plan operation, effectively testing the Diff DeepCopy whenever
+       // a Plan occurs. This is enabled for tests.
+       contextTestDeepCopyOnPlan = false
+)
+
+// ContextOpts are the user-configurable options to create a context with
+// NewContext.
+type ContextOpts struct {
+       Meta               *ContextMeta
+       Destroy            bool
+       Diff               *Diff
+       Hooks              []Hook
+       Module             *module.Tree
+       Parallelism        int
+       State              *State
+       StateFutureAllowed bool
+       Providers          map[string]ResourceProviderFactory
+       Provisioners       map[string]ResourceProvisionerFactory
+       Shadow             bool
+       Targets            []string
+       Variables          map[string]interface{}
+
+       UIInput UIInput
+}
+
+// ContextMeta is metadata about the running context. This is information
+// that this package or structure cannot determine on its own but exposes
+// into Terraform in various ways. This must be provided by the Context
+// initializer.
+type ContextMeta struct {
+       Env string // Env is the state environment
+}
+
+// Context represents all the context that Terraform needs in order to
+// perform operations on infrastructure. This structure is built using
+// NewContext. See the documentation for that.
+//
+// Extra functions on Context can be found in context_*.go files.
+type Context struct {
+       // Maintainer note: Anytime this struct is changed, please verify
+       // that newShadowContext still does the right thing. Tests should
+       // fail regardless but putting this note here as well.
+
+       components contextComponentFactory
+       destroy    bool
+       diff       *Diff
+       diffLock   sync.RWMutex
+       hooks      []Hook
+       meta       *ContextMeta
+       module     *module.Tree
+       sh         *stopHook
+       shadow     bool
+       state      *State
+       stateLock  sync.RWMutex
+       targets    []string
+       uiInput    UIInput
+       variables  map[string]interface{}
+
+       l                   sync.Mutex // Lock acquired during any task
+       parallelSem         Semaphore
+       providerInputConfig map[string]map[string]interface{}
+       runLock             sync.Mutex
+       runCond             *sync.Cond
+       runContext          context.Context
+       runContextCancel    context.CancelFunc
+       shadowErr           error
+}
+
+// NewContext creates a new Context structure.
+//
+// Once a Context is created, the pointer values within ContextOpts
+// should not be mutated in any way, since the pointers are copied, not
+// the values themselves.
+func NewContext(opts *ContextOpts) (*Context, error) {
+       // Validate the version requirement if it is given
+       if opts.Module != nil {
+               if err := checkRequiredVersion(opts.Module); err != nil {
+                       return nil, err
+               }
+       }
+
+       // Copy all the hooks and add our stop hook. We don't append directly
+       // to the Config so that we're not modifying that in-place.
+       sh := new(stopHook)
+       hooks := make([]Hook, len(opts.Hooks)+1)
+       copy(hooks, opts.Hooks)
+       hooks[len(opts.Hooks)] = sh
+
+       state := opts.State
+       if state == nil {
+               state = new(State)
+               state.init()
+       }
+
+       // If our state is from the future, then error. Callers can avoid
+       // this error by explicitly setting `StateFutureAllowed`.
+       if !opts.StateFutureAllowed && state.FromFutureTerraform() {
+               return nil, fmt.Errorf(
+                       "Terraform doesn't allow running any operations against a state\n"+
+                               "that was written by a future Terraform version. The state is\n"+
+                               "reporting it is written by Terraform '%s'.\n\n"+
+                               "Please run at least that version of Terraform to continue.",
+                       state.TFVersion)
+       }
+
+       // Explicitly reset our state version to our current version so that
+       // any operations we do will write out that our latest version
+       // has run.
+       state.TFVersion = Version
+
+       // Determine parallelism, default to 10. We do this both to limit
+       // CPU pressure and to have an extra guard against rate throttling
+       // from providers.
+       par := opts.Parallelism
+       if par == 0 {
+               par = 10
+       }
+
+       // Set up the variables in the following sequence:
+       //    0 - Take default values from the configuration
+       //    1 - Take values from TF_VAR_x environment variables
+       //    2 - Take values specified in -var flags, overriding values
+       //        set by environment variables if necessary. This includes
+       //        values taken from -var-file in addition.
+       variables := make(map[string]interface{})
+
+       if opts.Module != nil {
+               var err error
+               variables, err = Variables(opts.Module, opts.Variables)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       diff := opts.Diff
+       if diff == nil {
+               diff = &Diff{}
+       }
+
+       return &Context{
+               components: &basicComponentFactory{
+                       providers:    opts.Providers,
+                       provisioners: opts.Provisioners,
+               },
+               destroy:   opts.Destroy,
+               diff:      diff,
+               hooks:     hooks,
+               meta:      opts.Meta,
+               module:    opts.Module,
+               shadow:    opts.Shadow,
+               state:     state,
+               targets:   opts.Targets,
+               uiInput:   opts.UIInput,
+               variables: variables,
+
+               parallelSem:         NewSemaphore(par),
+               providerInputConfig: make(map[string]map[string]interface{}),
+               sh:                  sh,
+       }, nil
+}
+
+type ContextGraphOpts struct {
+       // If true, validates the graph structure (checks for cycles).
+       Validate bool
+
+       // Legacy graphs only: won't prune the graph
+       Verbose bool
+}
+
+// Graph returns the graph used for the given operation type.
+//
+// The most extensive or complex graph type is GraphTypePlan.
+func (c *Context) Graph(typ GraphType, opts *ContextGraphOpts) (*Graph, error) {
+       if opts == nil {
+               opts = &ContextGraphOpts{Validate: true}
+       }
+
+       log.Printf("[INFO] terraform: building graph: %s", typ)
+       switch typ {
+       case GraphTypeApply:
+               return (&ApplyGraphBuilder{
+                       Module:       c.module,
+                       Diff:         c.diff,
+                       State:        c.state,
+                       Providers:    c.components.ResourceProviders(),
+                       Provisioners: c.components.ResourceProvisioners(),
+                       Targets:      c.targets,
+                       Destroy:      c.destroy,
+                       Validate:     opts.Validate,
+               }).Build(RootModulePath)
+
+       case GraphTypeInput:
+               // The input graph is just a slightly modified plan graph
+               fallthrough
+       case GraphTypeValidate:
+               // The validate graph is just a slightly modified plan graph
+               fallthrough
+       case GraphTypePlan:
+               // Create the plan graph builder
+               p := &PlanGraphBuilder{
+                       Module:    c.module,
+                       State:     c.state,
+                       Providers: c.components.ResourceProviders(),
+                       Targets:   c.targets,
+                       Validate:  opts.Validate,
+               }
+
+               // Some special cases for other graph types shared with plan currently
+               var b GraphBuilder = p
+               switch typ {
+               case GraphTypeInput:
+                       b = InputGraphBuilder(p)
+               case GraphTypeValidate:
+                       // We need to set the provisioners so those can be validated
+                       p.Provisioners = c.components.ResourceProvisioners()
+
+                       b = ValidateGraphBuilder(p)
+               }
+
+               return b.Build(RootModulePath)
+
+       case GraphTypePlanDestroy:
+               return (&DestroyPlanGraphBuilder{
+                       Module:   c.module,
+                       State:    c.state,
+                       Targets:  c.targets,
+                       Validate: opts.Validate,
+               }).Build(RootModulePath)
+
+       case GraphTypeRefresh:
+               return (&RefreshGraphBuilder{
+                       Module:    c.module,
+                       State:     c.state,
+                       Providers: c.components.ResourceProviders(),
+                       Targets:   c.targets,
+                       Validate:  opts.Validate,
+               }).Build(RootModulePath)
+       }
+
+       return nil, fmt.Errorf("unknown graph type: %s", typ)
+}
+
+// ShadowError returns any errors caught during a shadow operation.
+//
+// A shadow operation is an operation run in parallel to a real operation
+// that performs the same tasks using new logic on copied state. The results
+// are compared to ensure that the new logic works the same as the old logic.
+// The shadow never affects the real operation or return values.
+//
+// The results of the shadow operation are only available through this function
+// call after a real operation is complete.
+//
+// For API consumers of Context, you can safely ignore this function
+// completely if you have no interest in helping report experimental feature
+// errors to Terraform maintainers. Otherwise, please call this function
+// after every operation and report this to the user.
+//
+// IMPORTANT: Shadow errors are _never_ critical: they _never_ affect
+// the real state or result of a real operation. They are purely informational
+// to assist in future Terraform versions being more stable. Please message
+// this effectively to the end user.
+//
+// This must be called only when no other operation is running (refresh,
+// plan, etc.). The result can be used in parallel to any other operation
+// running.
+func (c *Context) ShadowError() error {
+       return c.shadowErr
+}
+
+// State returns a copy of the current state associated with this context.
+//
+// This cannot safely be called in parallel with any other Context function.
+func (c *Context) State() *State {
+       return c.state.DeepCopy()
+}
+
+// Interpolater returns an Interpolater built on a copy of the state
+// that can be used to test interpolation values.
+func (c *Context) Interpolater() *Interpolater {
+       var varLock sync.Mutex
+       var stateLock sync.RWMutex
+       return &Interpolater{
+               Operation:          walkApply,
+               Meta:               c.meta,
+               Module:             c.module,
+               State:              c.state.DeepCopy(),
+               StateLock:          &stateLock,
+               VariableValues:     c.variables,
+               VariableValuesLock: &varLock,
+       }
+}
+
+// Input asks for input to fill variables and provider configurations.
+// This modifies the configuration in-place, so asking for Input twice
+// may result in different UI output showing different current values.
+func (c *Context) Input(mode InputMode) error {
+       defer c.acquireRun("input")()
+
+       if mode&InputModeVar != 0 {
+               // Walk the variables first for the root module. We walk them in
+               // alphabetical order for UX reasons.
+               rootConf := c.module.Config()
+               names := make([]string, len(rootConf.Variables))
+               m := make(map[string]*config.Variable)
+               for i, v := range rootConf.Variables {
+                       names[i] = v.Name
+                       m[v.Name] = v
+               }
+               sort.Strings(names)
+               for _, n := range names {
+                       // If we only care about unset variables, then if the variable
+                       // is set, continue on.
+                       if mode&InputModeVarUnset != 0 {
+                               if _, ok := c.variables[n]; ok {
+                                       continue
+                               }
+                       }
+
+                       var valueType config.VariableType
+
+                       v := m[n]
+                       switch valueType = v.Type(); valueType {
+                       case config.VariableTypeUnknown:
+                               continue
+                       case config.VariableTypeMap:
+                               // OK
+                       case config.VariableTypeList:
+                               // OK
+                       case config.VariableTypeString:
+                               // OK
+                       default:
+                               panic(fmt.Sprintf("Unknown variable type: %#v", v.Type()))
+                       }
+
+                       // If the variable is not already set, and the variable defines a
+                       // default, use that for the value.
+                       if _, ok := c.variables[n]; !ok {
+                               if v.Default != nil {
+                                       c.variables[n] = v.Default.(string)
+                                       continue
+                               }
+                       }
+
+                       // this should only happen during tests
+                       if c.uiInput == nil {
+                               log.Println("[WARN] Context.uiInput is nil")
+                               continue
+                       }
+
+                       // Ask the user for a value for this variable
+                       var value string
+                       retry := 0
+                       for {
+                               var err error
+                               value, err = c.uiInput.Input(&InputOpts{
+                                       Id:          fmt.Sprintf("var.%s", n),
+                                       Query:       fmt.Sprintf("var.%s", n),
+                                       Description: v.Description,
+                               })
+                               if err != nil {
+                                       return fmt.Errorf(
+                                               "Error asking for %s: %s", n, err)
+                               }
+
+                               if value == "" && v.Required() {
+                                       // Redo if it is required, but abort if we keep getting
+                                       // blank entries
+                                       if retry > 2 {
+                                               return fmt.Errorf("missing required value for %q", n)
+                                       }
+                                       retry++
+                                       continue
+                               }
+
+                               break
+                       }
+
+                       // no value provided, so don't set the variable at all
+                       if value == "" {
+                               continue
+                       }
+
+                       decoded, err := parseVariableAsHCL(n, value, valueType)
+                       if err != nil {
+                               return err
+                       }
+
+                       if decoded != nil {
+                               c.variables[n] = decoded
+                       }
+               }
+       }
+
+       if mode&InputModeProvider != 0 {
+               // Build the graph
+               graph, err := c.Graph(GraphTypeInput, nil)
+               if err != nil {
+                       return err
+               }
+
+               // Do the walk
+               if _, err := c.walk(graph, nil, walkInput); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+// Apply applies the changes represented by this context and returns
+// the resulting state.
+//
+// Even in the case where an error is returned, the state may be returned and
+// will potentially be partially updated. In addition to returning the resulting
+// state, this context is updated with the latest state.
+//
+// If the state is required after an error, the caller should call
+// Context.State, rather than rely on the return value.
+//
+// TODO: Apply and Refresh should either always return a state, or rely on the
+//       State() method. Currently the helper/resource testing framework relies
+//       on the absence of a returned state to determine if Destroy can be
+//       called, so that will need to be refactored before this can be changed.
+func (c *Context) Apply() (*State, error) {
+       defer c.acquireRun("apply")()
+
+       // Copy our own state
+       c.state = c.state.DeepCopy()
+
+       // Build the graph.
+       graph, err := c.Graph(GraphTypeApply, nil)
+       if err != nil {
+               return nil, err
+       }
+
+       // Determine the operation
+       operation := walkApply
+       if c.destroy {
+               operation = walkDestroy
+       }
+
+       // Walk the graph
+       walker, err := c.walk(graph, graph, operation)
+       if len(walker.ValidationErrors) > 0 {
+               err = multierror.Append(err, walker.ValidationErrors...)
+       }
+
+       // Clean out any unused things
+       c.state.prune()
+
+       return c.state, err
+}
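
Per the note above, a caller that needs the (possibly partial) state after a failed Apply should prefer the context's State method over the return value. A small sketch, assuming ctx is a *Context built elsewhere:

state, err := ctx.Apply()
if err != nil {
        // Per the doc comment: don't rely on the returned state after an
        // error; the context itself always holds the latest state.
        state = ctx.State()
}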
+
+// Plan generates an execution plan for the given context.
+//
+// The execution plan encapsulates the context and can be stored
+// in order to reinstantiate a context later for Apply.
+//
+// Plan also updates the diff of this context to be the diff generated
+// by the plan, so Apply can be called after.
+func (c *Context) Plan() (*Plan, error) {
+       defer c.acquireRun("plan")()
+
+       p := &Plan{
+               Module:  c.module,
+               Vars:    c.variables,
+               State:   c.state,
+               Targets: c.targets,
+       }
+
+       var operation walkOperation
+       if c.destroy {
+               operation = walkPlanDestroy
+       } else {
+               // Set our state to be something temporary. We do this so that
+               // the plan can update a fake state so that variables work, then
+               // we replace it back with our old state.
+               old := c.state
+               if old == nil {
+                       c.state = &State{}
+                       c.state.init()
+               } else {
+                       c.state = old.DeepCopy()
+               }
+               defer func() {
+                       c.state = old
+               }()
+
+               operation = walkPlan
+       }
+
+       // Setup our diff
+       c.diffLock.Lock()
+       c.diff = new(Diff)
+       c.diff.init()
+       c.diffLock.Unlock()
+
+       // Build the graph.
+       graphType := GraphTypePlan
+       if c.destroy {
+               graphType = GraphTypePlanDestroy
+       }
+       graph, err := c.Graph(graphType, nil)
+       if err != nil {
+               return nil, err
+       }
+
+       // Do the walk
+       walker, err := c.walk(graph, graph, operation)
+       if err != nil {
+               return nil, err
+       }
+       p.Diff = c.diff
+
+       // If this is true, it means we're running unit tests. In this case,
+       // we perform a deep copy just to ensure that all context tests also
+       // test that a diff is copy-able. This will panic if it fails. This
+       // is enabled during unit tests.
+       //
+       // This should never be true during production usage, but even if it is,
+       // it can't do any real harm.
+       if contextTestDeepCopyOnPlan {
+               p.Diff.DeepCopy()
+       }
+
+       /*
+               // We don't do the reverification during the new destroy plan because
+               // it will use a different apply process.
+               if X_legacyGraph {
+                       // Now that we have a diff, we can build the exact graph that Apply will use
+                       // and catch any possible cycles during the Plan phase.
+                       if _, err := c.Graph(GraphTypeLegacy, nil); err != nil {
+                               return nil, err
+                       }
+               }
+       */
+
+       var errs error
+       if len(walker.ValidationErrors) > 0 {
+               errs = multierror.Append(errs, walker.ValidationErrors...)
+       }
+       return p, errs
+}
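
A hedged end-to-end sketch of the plan-then-apply flow this method enables. It assumes a Context built via this package's NewContext and ContextOpts (a loaded *module.Tree and a provider factory map); the constructor itself sits outside this excerpt:

ctx, err := terraform.NewContext(&terraform.ContextOpts{
        Module:    mod,       // *module.Tree, loaded elsewhere (assumed)
        Providers: providers, // map[string]terraform.ResourceProviderFactory (assumed)
})
if err != nil {
        return err
}

plan, err := ctx.Plan()
if err != nil {
        return err
}

if !plan.Diff.Empty() {
        // Plan set the context's diff, so Apply can run immediately after.
        if _, err := ctx.Apply(); err != nil {
                return err
        }
}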
+
+// Refresh goes through all the resources in the state and refreshes them
+// to their latest state. This will update the state that this context
+// works with, along with returning it.
+//
+// Even when an error is returned, the state may still be returned and
+// may be partially updated.
+func (c *Context) Refresh() (*State, error) {
+       defer c.acquireRun("refresh")()
+
+       // Copy our own state
+       c.state = c.state.DeepCopy()
+
+       // Build the graph.
+       graph, err := c.Graph(GraphTypeRefresh, nil)
+       if err != nil {
+               return nil, err
+       }
+
+       // Do the walk
+       if _, err := c.walk(graph, graph, walkRefresh); err != nil {
+               return nil, err
+       }
+
+       // Clean out any unused things
+       c.state.prune()
+
+       return c.state, nil
+}
+
+// Stop stops the running task.
+//
+// Stop will block until the task completes.
+func (c *Context) Stop() {
+       log.Printf("[WARN] terraform: Stop called, initiating interrupt sequence")
+
+       c.l.Lock()
+       defer c.l.Unlock()
+
+       // If we're running, then stop
+       if c.runContextCancel != nil {
+               log.Printf("[WARN] terraform: run context exists, stopping")
+
+               // Tell the hook we want to stop
+               c.sh.Stop()
+
+               // Stop the context
+               c.runContextCancel()
+               c.runContextCancel = nil
+       }
+
+       // Grab the condition var before we exit
+       if cond := c.runCond; cond != nil {
+               cond.Wait()
+       }
+
+       log.Printf("[WARN] terraform: stop complete")
+}
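
Because Stop is safe to call from another goroutine and blocks until the in-flight walk winds down, a typical wiring is a signal handler. A minimal sketch, assuming ctx is a *Context about to run Apply:

sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, os.Interrupt)

go func() {
        <-sigCh
        ctx.Stop() // blocks until the running operation completes
}()

state, err := ctx.Apply() // returns once the interrupted walk finishes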
+
+// Validate validates the configuration and returns any warnings or errors.
+func (c *Context) Validate() ([]string, []error) {
+       defer c.acquireRun("validate")()
+
+       var errs error
+
+       // Validate the configuration itself
+       if err := c.module.Validate(); err != nil {
+               errs = multierror.Append(errs, err)
+       }
+
+       // This only needs to be done for the root module, since inter-module
+       // variables are validated in the module tree.
+       if config := c.module.Config(); config != nil {
+               // Validate the user variables
+               if err := smcUserVariables(config, c.variables); len(err) > 0 {
+                       errs = multierror.Append(errs, err...)
+               }
+       }
+
+       // If we have errors at this point, the graphing has no chance,
+       // so just bail early.
+       if errs != nil {
+               return nil, []error{errs}
+       }
+
+       // Build the graph so we can walk it and run Validate on nodes.
+       // We also validate the graph generated here, but this graph doesn't
+       // necessarily match the graph that Plan will generate, so we'll validate the
+       // graph again later after Planning.
+       graph, err := c.Graph(GraphTypeValidate, nil)
+       if err != nil {
+               return nil, []error{err}
+       }
+
+       // Walk
+       walker, err := c.walk(graph, graph, walkValidate)
+       if err != nil {
+               return nil, multierror.Append(errs, err).Errors
+       }
+
+       // Return the result
+       rerrs := multierror.Append(errs, walker.ValidationErrors...)
+
+       sort.Strings(walker.ValidationWarnings)
+       sort.Slice(rerrs.Errors, func(i, j int) bool {
+               return rerrs.Errors[i].Error() < rerrs.Errors[j].Error()
+       })
+
+       return walker.ValidationWarnings, rerrs.Errors
+}
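
Since warnings and errors come back separately, a caller can surface warnings without aborting. A short usage sketch (a fragment of a hypothetical function returning error):

warns, errs := ctx.Validate()
for _, w := range warns {
        log.Printf("[WARN] validate: %s", w)
}
if len(errs) > 0 {
        for _, e := range errs {
                log.Printf("[ERROR] validate: %s", e)
        }
        return fmt.Errorf("validate: %d error(s)", len(errs))
}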
+
+// Module returns the module tree associated with this context.
+func (c *Context) Module() *module.Tree {
+       return c.module
+}
+
+// Variables will return the mapping of variables that were defined
+// for this Context. If Input was called, this mapping may be different
+// than what was given.
+func (c *Context) Variables() map[string]interface{} {
+       return c.variables
+}
+
+// SetVariable sets a variable after a context has already been built.
+func (c *Context) SetVariable(k string, v interface{}) {
+       c.variables[k] = v
+}
+
+func (c *Context) acquireRun(phase string) func() {
+       // With the run lock held, grab the context lock to make changes
+       // to the run context.
+       c.l.Lock()
+       defer c.l.Unlock()
+
+       // Wait until we're no longer running
+       for c.runCond != nil {
+               c.runCond.Wait()
+       }
+
+       // Build our lock
+       c.runCond = sync.NewCond(&c.l)
+
+       // Setup debugging
+       dbug.SetPhase(phase)
+
+       // Create a new run context
+       c.runContext, c.runContextCancel = context.WithCancel(context.Background())
+
+       // Reset the stop hook so we're not stopped
+       c.sh.Reset()
+
+       // Reset the shadow errors
+       c.shadowErr = nil
+
+       return c.releaseRun
+}
+
+func (c *Context) releaseRun() {
+       // Grab the context lock so that we can make modifications to fields
+       c.l.Lock()
+       defer c.l.Unlock()
+
+       // setting the phase to "INVALID" lets us easily detect if we have
+       // operations happening outside of a run, or we missed setting the proper
+       // phase
+       dbug.SetPhase("INVALID")
+
+       // End our run. We check if runContext is non-nil because it can be
+       // set to nil if it was cancelled via Stop()
+       if c.runContextCancel != nil {
+               c.runContextCancel()
+       }
+
+       // Wake everyone waiting on our condition
+       cond := c.runCond
+       c.runCond = nil
+       cond.Broadcast()
+
+       // Unset the context
+       c.runContext = nil
+}
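
The call sites above all rely on the `defer c.acquireRun(...)()` idiom: acquireRun executes immediately and returns releaseRun, and only that returned function is deferred. A self-contained illustration of the pattern:

package main

import "fmt"

// lock acquires immediately and hands back its own release function.
func lock() func() {
        fmt.Println("acquired")
        return func() { fmt.Println("released") }
}

func main() {
        defer lock()() // "acquired" prints here; "released" prints on return
        fmt.Println("working")
}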
+
+func (c *Context) walk(
+       graph, shadow *Graph, operation walkOperation) (*ContextGraphWalker, error) {
+       // Keep track of the "real" context which is the context that does
+       // the real work: talking to real providers, modifying real state, etc.
+       realCtx := c
+
+       // If we don't want shadowing, remove it
+       if !experiment.Enabled(experiment.X_shadow) {
+               shadow = nil
+       }
+
+       // Shadowing may also be disabled on the context itself; log that
+       // and drop the shadow graph as well.
+       if !c.shadow {
+               log.Printf("[WARN] terraform: shadow graph disabled")
+               shadow = nil
+       }
+
+       // If we have a shadow graph, walk that as well
+       var shadowCtx *Context
+       var shadowCloser Shadow
+       if shadow != nil {
+               // Build the shadow context. In the process, override the real context
+               // with the one that is wrapped so that the shadow context can verify
+               // the results of the real.
+               realCtx, shadowCtx, shadowCloser = newShadowContext(c)
+       }
+
+       log.Printf("[DEBUG] Starting graph walk: %s", operation.String())
+
+       walker := &ContextGraphWalker{
+               Context:     realCtx,
+               Operation:   operation,
+               StopContext: c.runContext,
+       }
+
+       // Watch for a stop so we can call the provider Stop() API.
+       watchStop, watchWait := c.watchStop(walker)
+
+       // Walk the real graph, this will block until it completes
+       realErr := graph.Walk(walker)
+
+       // Close the channel so the watcher stops, and wait for it to return.
+       close(watchStop)
+       <-watchWait
+
+       // If we have a shadow graph and we interrupted the real graph, then
+       // we just close the shadow and never verify it. It is non-trivial to
+       // recreate the exact execution state up until an interruption so this
+       // isn't supported with shadows at the moment.
+       if shadowCloser != nil && c.sh.Stopped() {
+               // Ignore the error result, there is nothing we could care about
+               shadowCloser.CloseShadow()
+
+               // Set it to nil so we don't do anything
+               shadowCloser = nil
+       }
+
+       // If we have a shadow graph, wait for that to complete.
+       if shadowCloser != nil {
+               // Build the graph walker for the shadow. We also wrap this in
+               // a panicwrap so that panics are captured. For the shadow graph,
+               // we just want panics to be normal errors rather than to crash
+               // Terraform.
+               shadowWalker := GraphWalkerPanicwrap(&ContextGraphWalker{
+                       Context:   shadowCtx,
+                       Operation: operation,
+               })
+
+               // Kick off the shadow walk. This will block on any operations
+               // on the real walk so it is fine to start first.
+               log.Printf("[INFO] Starting shadow graph walk: %s", operation.String())
+               shadowCh := make(chan error)
+               go func() {
+                       shadowCh <- shadow.Walk(shadowWalker)
+               }()
+
+               // Notify the shadow that we're done
+               if err := shadowCloser.CloseShadow(); err != nil {
+                       c.shadowErr = multierror.Append(c.shadowErr, err)
+               }
+
+               // Wait for the walk to end
+               log.Printf("[DEBUG] Waiting for shadow graph to complete...")
+               shadowWalkErr := <-shadowCh
+
+               // Get any shadow errors
+               if err := shadowCloser.ShadowError(); err != nil {
+                       c.shadowErr = multierror.Append(c.shadowErr, err)
+               }
+
+               // Verify the contexts (compare)
+               if err := shadowContextVerify(realCtx, shadowCtx); err != nil {
+                       c.shadowErr = multierror.Append(c.shadowErr, err)
+               }
+
+               // At this point, if we're supposed to fail on error, then
+               // we PANIC. Some tests just verify that there is an error,
+               // so simply appending it to realErr and returning could hide
+               // shadow problems.
+               //
+               // This must be done BEFORE appending shadowWalkErr since the
+               // shadowWalkErr may include expected errors.
+               //
+               // We only do this if we don't have a real error. In the case of
+               // a real error, we can't guarantee what nodes were and weren't
+               // traversed in parallel scenarios so we can't guarantee no
+               // shadow errors.
+               if c.shadowErr != nil && contextFailOnShadowError && realErr == nil {
+                       panic(multierror.Prefix(c.shadowErr, "shadow graph:"))
+               }
+
+               // Now, if we have a walk error, we append that through
+               if shadowWalkErr != nil {
+                       c.shadowErr = multierror.Append(c.shadowErr, shadowWalkErr)
+               }
+
+               if c.shadowErr == nil {
+                       log.Printf("[INFO] Shadow graph success!")
+               } else {
+                       log.Printf("[ERROR] Shadow graph error: %s", c.shadowErr)
+
+                       // If we're supposed to fail on shadow errors, then report it
+                       if contextFailOnShadowError {
+                               realErr = multierror.Append(realErr, multierror.Prefix(
+                                       c.shadowErr, "shadow graph:"))
+                       }
+               }
+       }
+
+       return walker, realErr
+}
+
+// watchStop immediately returns a `stop` and a `wait` chan after dispatching
+// the watchStop goroutine. This will watch the runContext for cancellation and
+// stop the providers accordingly.  When the watch is no longer needed, the
+// `stop` chan should be closed before waiting on the `wait` chan.
+// The `wait` chan is important: without synchronizing on the end of the
+// watchStop goroutine, the runContext may also be closed during the select,
+// incorrectly causing providers to be stopped. Even if the graph walk is done
+// at that point, stopping a provider permanently cancels its StopContext,
+// which can cause later actions to fail.
+func (c *Context) watchStop(walker *ContextGraphWalker) (chan struct{}, <-chan struct{}) {
+       stop := make(chan struct{})
+       wait := make(chan struct{})
+
+       // get the runContext cancellation channel now, because releaseRun will
+       // write to the runContext field.
+       done := c.runContext.Done()
+
+       go func() {
+               defer close(wait)
+               // Wait for a stop or completion
+               select {
+               case <-done:
+                       // done means the context was canceled, so we need to try and stop
+                       // providers.
+               case <-stop:
+                       // our own stop channel was closed.
+                       return
+               }
+
+               // If we're here, we're stopped, trigger the call.
+
+               {
+                       // Copy the providers so that a misbehaved blocking Stop doesn't
+                       // completely hang Terraform.
+                       walker.providerLock.Lock()
+                       ps := make([]ResourceProvider, 0, len(walker.providerCache))
+                       for _, p := range walker.providerCache {
+                               ps = append(ps, p)
+                       }
+                       defer walker.providerLock.Unlock()
+
+                       for _, p := range ps {
+                               // We ignore the error for now since there isn't any reasonable
+                               // action to take if there is an error here, since the stop is still
+                               // advisory: Terraform will exit once the graph node completes.
+                               p.Stop()
+                       }
+               }
+
+               {
+                       // Call stop on all the provisioners
+                       walker.provisionerLock.Lock()
+                       ps := make([]ResourceProvisioner, 0, len(walker.provisionerCache))
+                       for _, p := range walker.provisionerCache {
+                               ps = append(ps, p)
+                       }
+                       defer walker.provisionerLock.Unlock()
+
+                       for _, p := range ps {
+                               // We ignore the error for now since there isn't any reasonable
+                               // action to take if there is an error here, since the stop is still
+                               // advisory: Terraform will exit once the graph node completes.
+                               p.Stop()
+                       }
+               }
+       }()
+
+       return stop, wait
+}
+
+// parseVariableAsHCL parses the value of a single variable as would have been specified
+// on the command line via -var or in an environment variable named TF_VAR_x, where x is
+// the name of the variable. In order to get around the restriction of HCL requiring a
+// top level object, we prepend a sentinel key, decode the user-specified value as its
+// value and pull the value back out of the resulting map.
+func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) {
+       // expecting a string so don't decode anything, just strip quotes
+       if targetType == config.VariableTypeString {
+               return strings.Trim(input, `"`), nil
+       }
+
+       // return empty types
+       if strings.TrimSpace(input) == "" {
+               switch targetType {
+               case config.VariableTypeList:
+                       return []interface{}{}, nil
+               case config.VariableTypeMap:
+                       return make(map[string]interface{}), nil
+               }
+       }
+
+       const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"
+       inputWithSentinel := fmt.Sprintf("%s = %s", sentinelValue, input)
+
+       var decoded map[string]interface{}
+       err := hcl.Decode(&decoded, inputWithSentinel)
+       if err != nil {
+               return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
+       }
+
+       if len(decoded) != 1 {
+               return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input)
+       }
+
+       parsedValue, ok := decoded[sentinelValue]
+       if !ok {
+               return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
+       }
+
+       switch targetType {
+       case config.VariableTypeList:
+               return parsedValue, nil
+       case config.VariableTypeMap:
+               if list, ok := parsedValue.([]map[string]interface{}); ok {
+                       return list[0], nil
+               }
+
+               return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input)
+       default:
+               panic(fmt.Errorf("unknown type %s", targetType.Printable()))
+       }
+}
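
Concretely, for a list- or map-typed value the function wraps the raw input in a one-key HCL document and pulls the decoded value back out. A standalone sketch of the same trick with the hcl package:

package main

import (
        "fmt"

        "github.com/hashicorp/hcl"
)

func main() {
        // Equivalent of parsing -var 'subnets=["a","b"]' for a list variable:
        // prepend a sentinel key so HCL sees a top-level object...
        input := `["a", "b"]`
        wrapped := fmt.Sprintf("%s = %s", "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY", input)

        var decoded map[string]interface{}
        if err := hcl.Decode(&decoded, wrapped); err != nil {
                panic(err)
        }

        // ...then pull the user's value back out from under that key.
        fmt.Printf("%#v\n", decoded["SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY"])
}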
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_components.go b/vendor/github.com/hashicorp/terraform/terraform/context_components.go
new file mode 100644 (file)
index 0000000..6f50744
--- /dev/null
@@ -0,0 +1,65 @@
+package terraform
+
+import (
+       "fmt"
+)
+
+// contextComponentFactory is the interface that Context uses
+// to initialize various components such as providers and provisioners.
+// This factory gets more information than the raw maps used to initialize
+// a Context. This information is used for debugging.
+type contextComponentFactory interface {
+       // ResourceProvider creates a new ResourceProvider with the given
+       // type. The "uid" is a unique identifier for this provider being
+       // initialized that can be used for internal tracking.
+       ResourceProvider(typ, uid string) (ResourceProvider, error)
+       ResourceProviders() []string
+
+       // ResourceProvisioner creates a new ResourceProvisioner with the
+       // given type. The "uid" is a unique identifier for this provisioner
+       // being initialized that can be used for internal tracking.
+       ResourceProvisioner(typ, uid string) (ResourceProvisioner, error)
+       ResourceProvisioners() []string
+}
+
+// basicComponentFactory just calls a factory from a map directly.
+type basicComponentFactory struct {
+       providers    map[string]ResourceProviderFactory
+       provisioners map[string]ResourceProvisionerFactory
+}
+
+func (c *basicComponentFactory) ResourceProviders() []string {
+       // Allocate with zero length (and capacity len(c.providers)) so the
+       // appends below don't leave empty strings at the front of the result.
+       result := make([]string, 0, len(c.providers))
+       for k := range c.providers {
+               result = append(result, k)
+       }
+
+       return result
+}
+
+func (c *basicComponentFactory) ResourceProvisioners() []string {
+       // As above: zero length, capacity len(c.provisioners).
+       result := make([]string, 0, len(c.provisioners))
+       for k := range c.provisioners {
+               result = append(result, k)
+       }
+
+       return result
+}
+
+func (c *basicComponentFactory) ResourceProvider(typ, uid string) (ResourceProvider, error) {
+       f, ok := c.providers[typ]
+       if !ok {
+               return nil, fmt.Errorf("unknown provider %q", typ)
+       }
+
+       return f()
+}
+
+func (c *basicComponentFactory) ResourceProvisioner(typ, uid string) (ResourceProvisioner, error) {
+       f, ok := c.provisioners[typ]
+       if !ok {
+               return nil, fmt.Errorf("unknown provisioner %q", typ)
+       }
+
+       return f()
+}
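
A hedged, in-package sketch of wiring a basicComponentFactory from factory maps. It assumes ResourceProviderFactory and ResourceProvisionerFactory are the zero-argument constructor types used elsewhere in this package; the provider name and mock type are illustrative placeholders:

components := &basicComponentFactory{
        providers: map[string]ResourceProviderFactory{
                // "statuscake" and the mock stand in for a real factory.
                "statuscake": func() (ResourceProvider, error) {
                        return new(MockResourceProvider), nil
                },
        },
        provisioners: map[string]ResourceProvisionerFactory{},
}

// Unknown type names surface as "unknown provider ..." errors.
p, err := components.ResourceProvider("statuscake", "provider.statuscake-1")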
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go b/vendor/github.com/hashicorp/terraform/terraform/context_graph_type.go
new file mode 100644 (file)
index 0000000..084f010
--- /dev/null
@@ -0,0 +1,32 @@
+package terraform
+
+//go:generate stringer -type=GraphType context_graph_type.go
+
+// GraphType is an enum of the type of graph to create with a Context.
+// The values of the constants may change so they shouldn't be depended on;
+// always use the constant name.
+type GraphType byte
+
+const (
+       GraphTypeInvalid GraphType = 0
+       GraphTypeLegacy  GraphType = iota
+       GraphTypeRefresh
+       GraphTypePlan
+       GraphTypePlanDestroy
+       GraphTypeApply
+       GraphTypeInput
+       GraphTypeValidate
+)
+
+// GraphTypeMap is a mapping of human-readable string to GraphType. This
+// is useful to use as the mechanism for human input for configurable
+// graph types.
+var GraphTypeMap = map[string]GraphType{
+       "apply":        GraphTypeApply,
+       "input":        GraphTypeInput,
+       "plan":         GraphTypePlan,
+       "plan-destroy": GraphTypePlanDestroy,
+       "refresh":      GraphTypeRefresh,
+       "legacy":       GraphTypeLegacy,
+       "validate":     GraphTypeValidate,
+}
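
A short sketch of the intended use: resolve a human-entered name through GraphTypeMap before building a graph (ctx is an assumed *Context; Graph takes nil options, as in the calls earlier in this file):

gt, ok := GraphTypeMap["plan"]
if !ok {
        return fmt.Errorf("unknown graph type")
}
g, err := ctx.Graph(gt, nil)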
diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_import.go b/vendor/github.com/hashicorp/terraform/terraform/context_import.go
new file mode 100644 (file)
index 0000000..f1d5776
--- /dev/null
@@ -0,0 +1,77 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config/module"
+)
+
+// ImportOpts are used as the configuration for Import.
+type ImportOpts struct {
+       // Targets are the targets to import
+       Targets []*ImportTarget
+
+       // Module is optional, and specifies a config module that is loaded
+       // into the graph and evaluated. The use case for this is to provide
+       // provider configuration.
+       Module *module.Tree
+}
+
+// ImportTarget is a single resource to import.
+type ImportTarget struct {
+       // Addr is the full resource address of the resource to import.
+       // Example: "module.foo.aws_instance.bar"
+       Addr string
+
+       // ID is the ID of the resource to import. This is resource-specific.
+       ID string
+
+       // Provider optionally names the provider to use for this import.
+       Provider string
+}
+
+// Import takes already-created external resources and brings them
+// under Terraform management. Import requires the exact type, name, and ID
+// of the resources to import.
+//
+// This operation is idempotent. If the requested resource is already
+// imported, no changes are made to the state.
+//
+// Further, this operation also gracefully handles partial state. If during
+// an import there is a failure, all previously imported resources remain
+// imported.
+func (c *Context) Import(opts *ImportOpts) (*State, error) {
+       // Hold a lock since we can modify our own state here
+       defer c.acquireRun("import")()
+
+       // Copy our own state
+       c.state = c.state.DeepCopy()
+
+       // If no module is given, default to the module configured with
+       // the Context.
+       module := opts.Module
+       if module == nil {
+               module = c.module
+       }
+
+       // Initialize our graph builder
+       builder := &ImportGraphBuilder{
+               ImportTargets: opts.Targets,
+               Module:        module,
+               Providers:     c.components.ResourceProviders(),
+       }
+
+       // Build the graph!
+       graph, err := builder.Build(RootModulePath)
+       if err != nil {
+               return c.state, err
+       }
+
+       // Walk it
+       if _, err := c.walk(graph, nil, walkImport); err != nil {
+               return c.state, err
+       }
+
+       // Clean the state
+       c.state.prune()
+
+       return c.state, nil
+}
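
A usage sketch built only from the fields above; the address and ID are illustrative:

state, err := ctx.Import(&ImportOpts{
        Targets: []*ImportTarget{
                {
                        Addr: "statuscake_test.example", // full resource address
                        ID:   "123456",                  // provider-specific ID
                },
        },
})
if err != nil {
        // Partial imports are preserved: state still reflects anything
        // imported before the failure.
        return err
}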
diff --git a/vendor/github.com/hashicorp/terraform/terraform/debug.go b/vendor/github.com/hashicorp/terraform/terraform/debug.go
new file mode 100644 (file)
index 0000000..265339f
--- /dev/null
@@ -0,0 +1,523 @@
+package terraform
+
+import (
+       "archive/tar"
+       "bytes"
+       "compress/gzip"
+       "encoding/json"
+       "fmt"
+       "io"
+       "os"
+       "path/filepath"
+       "sync"
+       "time"
+)
+
+// dbug is the global handler for writing the debug archive. All methods
+// are safe to call concurrently. Setting dbug to nil will disable writing
+// the debug archive. All methods are safe to call on the nil value.
+var dbug *debugInfo
+
+// SetDebugInfo initializes the debug handler with a backing file in the
+// provided directory. This must be called before any other terraform package
+// operations or not at all. Once this is called, CloseDebugInfo should be
+// called before program exit.
+func SetDebugInfo(path string) error {
+       if os.Getenv("TF_DEBUG") == "" {
+               return nil
+       }
+
+       di, err := newDebugInfoFile(path)
+       if err != nil {
+               return err
+       }
+
+       dbug = di
+       return nil
+}
+
+// CloseDebugInfo is the exported interface to Close the debug info handler.
+// The debug handler needs to be closed before program exit, so we export this
+// function to be deferred in the appropriate entrypoint for our executable.
+func CloseDebugInfo() error {
+       return dbug.Close()
+}
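
The intended entrypoint wiring, per the comments above: initialize before any other terraform package operation and close on exit. Note that SetDebugInfo is additionally a no-op unless TF_DEBUG is set. A minimal sketch:

package main

import (
        "log"

        "github.com/hashicorp/terraform/terraform"
)

func main() {
        // Does nothing unless TF_DEBUG is set in the environment.
        if err := terraform.SetDebugInfo("./terraform-debug"); err != nil {
                log.Fatal(err)
        }
        defer terraform.CloseDebugInfo()

        // ... run plan/apply/refresh here ...
}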
+
+// newDebugInfoFile initializes the global debug handler with a backing file in
+// the provided directory.
+func newDebugInfoFile(dir string) (*debugInfo, error) {
+       err := os.MkdirAll(dir, 0755)
+       if err != nil {
+               return nil, err
+       }
+
+       // FIXME: not guaranteed unique, but good enough for now
+       name := fmt.Sprintf("debug-%s", time.Now().Format("2006-01-02-15-04-05.999999999"))
+       archivePath := filepath.Join(dir, name+".tar.gz")
+
+       f, err := os.OpenFile(archivePath, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666)
+       if err != nil {
+               return nil, err
+       }
+       return newDebugInfo(name, f)
+}
+
+// newDebugInfo initializes the global debug handler.
+func newDebugInfo(name string, w io.Writer) (*debugInfo, error) {
+       gz := gzip.NewWriter(w)
+
+       d := &debugInfo{
+               name: name,
+               w:    w,
+               gz:   gz,
+               tar:  tar.NewWriter(gz),
+       }
+
+       // create the subdirs we need
+       topHdr := &tar.Header{
+               Name:     name,
+               Typeflag: tar.TypeDir,
+               Mode:     0755,
+       }
+       graphsHdr := &tar.Header{
+               Name:     name + "/graphs",
+               Typeflag: tar.TypeDir,
+               Mode:     0755,
+       }
+       err := d.tar.WriteHeader(topHdr)
+       // if the first errors, the second will too
+       err = d.tar.WriteHeader(graphsHdr)
+       if err != nil {
+               return nil, err
+       }
+
+       return d, nil
+}
+
+// debugInfo provides various methods for writing debug information to a
+// central archive. The debugInfo struct should be initialized once before any
+// output is written, and Close should be called before program exit. All
+// exported methods on debugInfo will be safe for concurrent use. The exported
+// methods are also all safe to call on a nil pointer, so that there is no need
+// for conditional blocks before writing debug information.
+//
+// Each write operation done by the debugInfo will flush the gzip.Writer and
+// tar.Writer, and call Sync() or Flush() on the output writer as needed. This
+// ensures that as much data as possible is written to storage in the event of
+// a crash. The append format of the tar file and the stream format of the
+// gzip writer allow easy recovery of the data in the event that the debugInfo
+// is not closed before program exit.
+type debugInfo struct {
+       sync.Mutex
+
+       // archive root directory name
+       name string
+
+       // current operation phase
+       phase string
+
+       // step is a monotonic counter for recording the order of operations
+       step int
+
+       // flag to protect Close()
+       closed bool
+
+       // the debug log output is in a tar.gz format, written to the io.Writer w
+       w   io.Writer
+       gz  *gzip.Writer
+       tar *tar.Writer
+}
+
+// Set the name of the current operational phase in the debug handler. Each file
+// in the archive will contain the name of the phase in which it was created,
+// i.e. "input", "apply", "plan", "refresh", "validate"
+func (d *debugInfo) SetPhase(phase string) {
+       if d == nil {
+               return
+       }
+       d.Lock()
+       defer d.Unlock()
+
+       d.phase = phase
+}
+
+// Close the debugInfo, finalizing the data in storage. This closes the
+// tar.Writer, the gzip.Writer, and if the output writer is an io.Closer, it is
+// also closed.
+func (d *debugInfo) Close() error {
+       if d == nil {
+               return nil
+       }
+
+       d.Lock()
+       defer d.Unlock()
+
+       if d.closed {
+               return nil
+       }
+       d.closed = true
+
+       d.tar.Close()
+       d.gz.Close()
+
+       if c, ok := d.w.(io.Closer); ok {
+               return c.Close()
+       }
+       return nil
+}
+
+// debugBuffer is an io.WriteCloser that will write itself to the debug
+// archive when closed.
+type debugBuffer struct {
+       debugInfo *debugInfo
+       name      string
+       buf       bytes.Buffer
+}
+
+func (b *debugBuffer) Write(d []byte) (int, error) {
+       return b.buf.Write(d)
+}
+
+func (b *debugBuffer) Close() error {
+       return b.debugInfo.WriteFile(b.name, b.buf.Bytes())
+}
+
+// the ioutil package only has a no-op ReadCloser (NopCloser), so define a
+// no-op WriteCloser equivalent here
+type nopWriteCloser struct{}
+
+func (nopWriteCloser) Write([]byte) (int, error) { return 0, nil }
+func (nopWriteCloser) Close() error              { return nil }
+
+// NewFileWriter returns an io.WriteCloser whose writes are buffered and then
+// written to the debug archive when closed.
+func (d *debugInfo) NewFileWriter(name string) io.WriteCloser {
+       if d == nil {
+               return nopWriteCloser{}
+       }
+
+       return &debugBuffer{
+               debugInfo: d,
+               name:      name,
+       }
+}
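
Because the returned writer buffers everything and commits it as one archive entry on Close, it is safe to stream into. An in-package sketch:

w := dbug.NewFileWriter("graph-apply.dot")
fmt.Fprintln(w, "digraph {")
fmt.Fprintln(w, "}")
// Close writes the buffered bytes into the tar.gz as a single file; on a
// nil debug handler the whole sequence is a harmless no-op.
if err := w.Close(); err != nil {
        log.Printf("[ERROR] writing debug file: %s", err)
}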
+
+type syncer interface {
+       Sync() error
+}
+
+type flusher interface {
+       Flush() error
+}
+
+// Flush the tar.Writer and the gzip.Writer. Flush() or Sync() will be called
+// on the output writer if they are available.
+func (d *debugInfo) flush() {
+       d.tar.Flush()
+       d.gz.Flush()
+
+       if f, ok := d.w.(flusher); ok {
+               f.Flush()
+       }
+
+       if s, ok := d.w.(syncer); ok {
+               s.Sync()
+       }
+}
+
+// WriteFile writes data as a single file to the debug archive.
+func (d *debugInfo) WriteFile(name string, data []byte) error {
+       if d == nil {
+               return nil
+       }
+
+       d.Lock()
+       defer d.Unlock()
+       return d.writeFile(name, data)
+}
+
+func (d *debugInfo) writeFile(name string, data []byte) error {
+       defer d.flush()
+       path := fmt.Sprintf("%s/%d-%s-%s", d.name, d.step, d.phase, name)
+       d.step++
+
+       hdr := &tar.Header{
+               Name: path,
+               Mode: 0644,
+               Size: int64(len(data)),
+       }
+       err := d.tar.WriteHeader(hdr)
+       if err != nil {
+               return err
+       }
+
+       _, err = d.tar.Write(data)
+       return err
+}
+
+// DebugHook implements all methods of the terraform.Hook interface, and writes
+// the arguments to a file in the archive. When a suitable format for the
+// argument isn't available, the argument is encoded using json.Marshal. If the
+// debug handler is nil, all DebugHook methods are no-ops, so no time is spent
+// marshaling the data structures.
+type DebugHook struct{}
+
+func (*DebugHook) PreApply(ii *InstanceInfo, is *InstanceState, id *InstanceDiff) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+
+       if ii != nil {
+               buf.WriteString(ii.HumanId() + "\n")
+       }
+
+       if is != nil {
+               buf.WriteString(is.String() + "\n")
+       }
+
+       idCopy, err := id.Copy()
+       if err != nil {
+               return HookActionContinue, err
+       }
+       js, err := json.MarshalIndent(idCopy, "", "  ")
+       if err != nil {
+               return HookActionContinue, err
+       }
+       buf.Write(js)
+
+       dbug.WriteFile("hook-PreApply", buf.Bytes())
+
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PostApply(ii *InstanceInfo, is *InstanceState, err error) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+
+       if ii != nil {
+               buf.WriteString(ii.HumanId() + "\n")
+       }
+
+       if is != nil {
+               buf.WriteString(is.String() + "\n")
+       }
+
+       if err != nil {
+               buf.WriteString(err.Error())
+       }
+
+       dbug.WriteFile("hook-PostApply", buf.Bytes())
+
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PreDiff(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId() + "\n")
+       }
+
+       if is != nil {
+               buf.WriteString(is.String())
+               buf.WriteString("\n")
+       }
+       dbug.WriteFile("hook-PreDiff", buf.Bytes())
+
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PostDiff(ii *InstanceInfo, id *InstanceDiff) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId() + "\n")
+       }
+
+       idCopy, err := id.Copy()
+       if err != nil {
+               return HookActionContinue, err
+       }
+       js, err := json.MarshalIndent(idCopy, "", "  ")
+       if err != nil {
+               return HookActionContinue, err
+       }
+       buf.Write(js)
+
+       dbug.WriteFile("hook-PostDiff", buf.Bytes())
+
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PreProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId() + "\n")
+       }
+
+       if is != nil {
+               buf.WriteString(is.String())
+               buf.WriteString("\n")
+       }
+       dbug.WriteFile("hook-PreProvisionResource", buf.Bytes())
+
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PostProvisionResource(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId())
+               buf.WriteString("\n")
+       }
+
+       if is != nil {
+               buf.WriteString(is.String())
+               buf.WriteString("\n")
+       }
+       dbug.WriteFile("hook-PostProvisionResource", buf.Bytes())
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PreProvision(ii *InstanceInfo, s string) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId())
+               buf.WriteString("\n")
+       }
+       buf.WriteString(s + "\n")
+
+       dbug.WriteFile("hook-PreProvision", buf.Bytes())
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PostProvision(ii *InstanceInfo, s string, err error) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId() + "\n")
+       }
+       buf.WriteString(s + "\n")
+
+       dbug.WriteFile("hook-PostProvision", buf.Bytes())
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) ProvisionOutput(ii *InstanceInfo, s1 string, s2 string) {
+       if dbug == nil {
+               return
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId())
+               buf.WriteString("\n")
+       }
+       buf.WriteString(s1 + "\n")
+       buf.WriteString(s2 + "\n")
+
+       dbug.WriteFile("hook-ProvisionOutput", buf.Bytes())
+}
+
+func (*DebugHook) PreRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId() + "\n")
+       }
+
+       if is != nil {
+               buf.WriteString(is.String())
+               buf.WriteString("\n")
+       }
+       dbug.WriteFile("hook-PreRefresh", buf.Bytes())
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PostRefresh(ii *InstanceInfo, is *InstanceState) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId())
+               buf.WriteString("\n")
+       }
+
+       if is != nil {
+               buf.WriteString(is.String())
+               buf.WriteString("\n")
+       }
+       dbug.WriteFile("hook-PostRefresh", buf.Bytes())
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PreImportState(ii *InstanceInfo, s string) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+       if ii != nil {
+               buf.WriteString(ii.HumanId())
+               buf.WriteString("\n")
+       }
+       buf.WriteString(s + "\n")
+
+       dbug.WriteFile("hook-PreImportState", buf.Bytes())
+       return HookActionContinue, nil
+}
+
+func (*DebugHook) PostImportState(ii *InstanceInfo, iss []*InstanceState) (HookAction, error) {
+       if dbug == nil {
+               return HookActionContinue, nil
+       }
+
+       var buf bytes.Buffer
+
+       if ii != nil {
+               buf.WriteString(ii.HumanId() + "\n")
+       }
+
+       for _, is := range iss {
+               if is != nil {
+                       buf.WriteString(is.String() + "\n")
+               }
+       }
+       dbug.WriteFile("hook-PostImportState", buf.Bytes())
+       return HookActionContinue, nil
+}
+
+// skip logging this for now, since it could be huge
+func (*DebugHook) PostStateUpdate(*State) (HookAction, error) {
+       return HookActionContinue, nil
+}
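
DebugHook is stateless, so a single value can simply be appended to the hook list a Context is built with. A hedged sketch (this assumes the ContextOpts.Hooks field used when constructing contexts, which sits outside this excerpt):

opts := &terraform.ContextOpts{
        Module: mod, // *module.Tree, loaded elsewhere (assumed)
        Hooks:  []terraform.Hook{&terraform.DebugHook{}},
}
ctx, err := terraform.NewContext(opts)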
diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go
new file mode 100644 (file)
index 0000000..a9fae6c
--- /dev/null
@@ -0,0 +1,866 @@
+package terraform
+
+import (
+       "bufio"
+       "bytes"
+       "fmt"
+       "reflect"
+       "regexp"
+       "sort"
+       "strings"
+       "sync"
+
+       "github.com/mitchellh/copystructure"
+)
+
+// DiffChangeType is an enum with the kind of changes a diff has planned.
+type DiffChangeType byte
+
+const (
+       DiffInvalid DiffChangeType = iota
+       DiffNone
+       DiffCreate
+       DiffUpdate
+       DiffDestroy
+       DiffDestroyCreate
+)
+
+// multiVal matches the index key to a flatmapped set, list or map
+var multiVal = regexp.MustCompile(`\.(#|%)$`)
+
+// Diff tracks the changes that are necessary to apply a configuration
+// to an existing infrastructure.
+type Diff struct {
+       // Modules contains all the modules that have a diff
+       Modules []*ModuleDiff
+}
+
+// Prune cleans out unused structures in the diff without affecting
+// the behavior of the diff at all.
+//
+// This is not safe to call concurrently. This is safe to call on a
+// nil Diff.
+func (d *Diff) Prune() {
+       if d == nil {
+               return
+       }
+
+       // Prune all empty modules
+       newModules := make([]*ModuleDiff, 0, len(d.Modules))
+       for _, m := range d.Modules {
+               // If the module isn't empty, we keep it
+               if !m.Empty() {
+                       newModules = append(newModules, m)
+               }
+       }
+       if len(newModules) == 0 {
+               newModules = nil
+       }
+       d.Modules = newModules
+}
+
+// AddModule adds the module with the given path to the diff.
+//
+// This should be the preferred method to add module diffs since it
+// allows us to optimize lookups later as well as control sorting.
+func (d *Diff) AddModule(path []string) *ModuleDiff {
+       m := &ModuleDiff{Path: path}
+       m.init()
+       d.Modules = append(d.Modules, m)
+       return m
+}
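
An in-package sketch of assembling a Diff by hand with AddModule. It assumes module paths are rooted at "root" (the rootModulePath referenced throughout this file); the resource name and attribute are illustrative:

d := new(Diff)
d.init() // seeds the root module (init is defined later in this file)

child := d.AddModule([]string{"root", "network"})
child.Resources["aws_vpc.main"] = &InstanceDiff{
        Attributes: map[string]*ResourceAttrDiff{
                "cidr_block": {Old: "", New: "10.0.0.0/16", RequiresNew: true},
        },
}

fmt.Println(d.Empty()) // false: the child module carries a real change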
+
+// ModuleByPath is used to lookup the module diff for the given path.
+// This should be the preferred lookup mechanism as it allows for future
+// lookup optimizations.
+func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
+       if d == nil {
+               return nil
+       }
+       for _, mod := range d.Modules {
+               if mod.Path == nil {
+                       panic("missing module path")
+               }
+               if reflect.DeepEqual(mod.Path, path) {
+                       return mod
+               }
+       }
+       return nil
+}
+
+// RootModule returns the ModuleState for the root module
+func (d *Diff) RootModule() *ModuleDiff {
+       root := d.ModuleByPath(rootModulePath)
+       if root == nil {
+               panic("missing root module")
+       }
+       return root
+}
+
+// Empty returns true if the diff has no changes.
+func (d *Diff) Empty() bool {
+       if d == nil {
+               return true
+       }
+
+       for _, m := range d.Modules {
+               if !m.Empty() {
+                       return false
+               }
+       }
+
+       return true
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison that is supported which
+// checks for operation equality taking into account computed values. Equal
+// instead checks for exact equality.
+func (d *Diff) Equal(d2 *Diff) bool {
+       // If one is nil, they must both be nil
+       if d == nil || d2 == nil {
+               return d == d2
+       }
+
+       // Sort the modules
+       sort.Sort(moduleDiffSort(d.Modules))
+       sort.Sort(moduleDiffSort(d2.Modules))
+
+       // Copy since we have to modify the module destroy flag to false so
+       // we don't compare that. TODO: delete this when we get rid of the
+       // destroy flag on modules.
+       dCopy := d.DeepCopy()
+       d2Copy := d2.DeepCopy()
+       for _, m := range dCopy.Modules {
+               m.Destroy = false
+       }
+       for _, m := range d2Copy.Modules {
+               m.Destroy = false
+       }
+
+       // Use DeepEqual
+       return reflect.DeepEqual(dCopy, d2Copy)
+}
+
+// DeepCopy performs a deep copy of all parts of the Diff, making the
+// resulting Diff safe to use without modifying this one.
+func (d *Diff) DeepCopy() *Diff {
+       copy, err := copystructure.Config{Lock: true}.Copy(d)
+       if err != nil {
+               panic(err)
+       }
+
+       return copy.(*Diff)
+}
+
+func (d *Diff) String() string {
+       var buf bytes.Buffer
+
+       keys := make([]string, 0, len(d.Modules))
+       lookup := make(map[string]*ModuleDiff)
+       for _, m := range d.Modules {
+               key := fmt.Sprintf("module.%s", strings.Join(m.Path[1:], "."))
+               keys = append(keys, key)
+               lookup[key] = m
+       }
+       sort.Strings(keys)
+
+       for _, key := range keys {
+               m := lookup[key]
+               mStr := m.String()
+
+               // If we're the root module, we just write the output directly.
+               if reflect.DeepEqual(m.Path, rootModulePath) {
+                       buf.WriteString(mStr + "\n")
+                       continue
+               }
+
+               buf.WriteString(fmt.Sprintf("%s:\n", key))
+
+               s := bufio.NewScanner(strings.NewReader(mStr))
+               for s.Scan() {
+                       buf.WriteString(fmt.Sprintf("  %s\n", s.Text()))
+               }
+       }
+
+       return strings.TrimSpace(buf.String())
+}
+
+func (d *Diff) init() {
+       if d.Modules == nil {
+               rootDiff := &ModuleDiff{Path: rootModulePath}
+               d.Modules = []*ModuleDiff{rootDiff}
+       }
+       for _, m := range d.Modules {
+               m.init()
+       }
+}
+
+// ModuleDiff tracks the differences between resources to apply within
+// a single module.
+type ModuleDiff struct {
+       Path      []string
+       Resources map[string]*InstanceDiff
+       Destroy   bool // Set only by the destroy plan
+}
+
+func (d *ModuleDiff) init() {
+       if d.Resources == nil {
+               d.Resources = make(map[string]*InstanceDiff)
+       }
+       for _, r := range d.Resources {
+               r.init()
+       }
+}
+
+// ChangeType returns the type of changes that the diff for this
+// module includes.
+//
+// At a module level, this will only be DiffNone, DiffUpdate, DiffDestroy, or
+// DiffCreate. If an instance within the module has a DiffDestroyCreate
+// then this will register as a DiffUpdate for the module.
+func (d *ModuleDiff) ChangeType() DiffChangeType {
+       result := DiffNone
+       for _, r := range d.Resources {
+               change := r.ChangeType()
+               switch change {
+               case DiffCreate, DiffDestroy:
+                       if result == DiffNone {
+                               result = change
+                       }
+               case DiffDestroyCreate, DiffUpdate:
+                       result = DiffUpdate
+               }
+       }
+
+       return result
+}
+
+// Empty returns true if the diff has no changes within this module.
+func (d *ModuleDiff) Empty() bool {
+       if d.Destroy {
+               return false
+       }
+
+       if len(d.Resources) == 0 {
+               return true
+       }
+
+       for _, rd := range d.Resources {
+               if !rd.Empty() {
+                       return false
+               }
+       }
+
+       return true
+}
+
+// Instances returns the instance diffs for the id given. This can return
+// multiple instance diffs if there are counts within the resource.
+func (d *ModuleDiff) Instances(id string) []*InstanceDiff {
+       var result []*InstanceDiff
+       for k, diff := range d.Resources {
+               if k == id || strings.HasPrefix(k, id+".") {
+                       if !diff.Empty() {
+                               result = append(result, diff)
+                       }
+               }
+       }
+
+       return result
+}
+
+// IsRoot says whether or not this module diff is for the root module.
+func (d *ModuleDiff) IsRoot() bool {
+       return reflect.DeepEqual(d.Path, rootModulePath)
+}
+
+// String outputs the diff in a long but command-line friendly output
+// format that users can read to quickly inspect a diff.
+func (d *ModuleDiff) String() string {
+       var buf bytes.Buffer
+
+       names := make([]string, 0, len(d.Resources))
+       for name := range d.Resources {
+               names = append(names, name)
+       }
+       sort.Strings(names)
+
+       for _, name := range names {
+               rdiff := d.Resources[name]
+
+               crud := "UPDATE"
+               switch {
+               case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()):
+                       crud = "DESTROY/CREATE"
+               case rdiff.GetDestroy() || rdiff.GetDestroyDeposed():
+                       crud = "DESTROY"
+               case rdiff.RequiresNew():
+                       crud = "CREATE"
+               }
+
+               extra := ""
+               if !rdiff.GetDestroy() && rdiff.GetDestroyDeposed() {
+                       extra = " (deposed only)"
+               }
+
+               buf.WriteString(fmt.Sprintf(
+                       "%s: %s%s\n",
+                       crud,
+                       name,
+                       extra))
+
+               keyLen := 0
+               rdiffAttrs := rdiff.CopyAttributes()
+               keys := make([]string, 0, len(rdiffAttrs))
+               for key := range rdiffAttrs {
+                       if key == "id" {
+                               continue
+                       }
+
+                       keys = append(keys, key)
+                       if len(key) > keyLen {
+                               keyLen = len(key)
+                       }
+               }
+               sort.Strings(keys)
+
+               for _, attrK := range keys {
+                       attrDiff, _ := rdiff.GetAttribute(attrK)
+
+                       v := attrDiff.New
+                       u := attrDiff.Old
+                       if attrDiff.NewComputed {
+                               v = "<computed>"
+                       }
+
+                       if attrDiff.Sensitive {
+                               u = "<sensitive>"
+                               v = "<sensitive>"
+                       }
+
+                       updateMsg := ""
+                       if attrDiff.RequiresNew {
+                               updateMsg = " (forces new resource)"
+                       } else if attrDiff.Sensitive {
+                               updateMsg = " (attribute changed)"
+                       }
+
+                       buf.WriteString(fmt.Sprintf(
+                               "  %s:%s %#v => %#v%s\n",
+                               attrK,
+                               strings.Repeat(" ", keyLen-len(attrK)),
+                               u,
+                               v,
+                               updateMsg))
+               }
+       }
+
+       return buf.String()
+}
+
+// InstanceDiff is the diff of a resource from some state to another.
+type InstanceDiff struct {
+       mu             sync.Mutex
+       Attributes     map[string]*ResourceAttrDiff
+       Destroy        bool
+       DestroyDeposed bool
+       DestroyTainted bool
+
+       // Meta is a simple K/V map that is stored in a diff and persisted to
+       // plans but otherwise is completely ignored by Terraform core. It is
+       // meant to be used for additional data a resource may want to pass through.
+       // The value here must only contain Go primitives and collections.
+       Meta map[string]interface{}
+}
+
+func (d *InstanceDiff) Lock()   { d.mu.Lock() }
+func (d *InstanceDiff) Unlock() { d.mu.Unlock() }
+
+// ResourceAttrDiff is the diff of a single attribute of a resource.
+type ResourceAttrDiff struct {
+       Old         string      // Old Value
+       New         string      // New Value
+       NewComputed bool        // True if new value is computed (unknown currently)
+       NewRemoved  bool        // True if this attribute is being removed
+       NewExtra    interface{} // Extra information for the provider
+       RequiresNew bool        // True if change requires new resource
+       Sensitive   bool        // True if the data should not be displayed in UI output
+       Type        DiffAttrType
+}
+
+// Empty returns true if the diff for this attr is neutral
+func (d *ResourceAttrDiff) Empty() bool {
+       return d.Old == d.New && !d.NewComputed && !d.NewRemoved
+}
+
+func (d *ResourceAttrDiff) GoString() string {
+       return fmt.Sprintf("*%#v", *d)
+}
+
+// DiffAttrType is an enum type that says whether a resource attribute
+// diff is an input attribute (comes from the configuration) or an
+// output attribute (comes as a result of applying the configuration). An
+// example input would be "ami" for AWS and an example output would be
+// "private_ip".
+type DiffAttrType byte
+
+const (
+       DiffAttrUnknown DiffAttrType = iota
+       DiffAttrInput
+       DiffAttrOutput
+)
+
+func (d *InstanceDiff) init() {
+       if d.Attributes == nil {
+               d.Attributes = make(map[string]*ResourceAttrDiff)
+       }
+}
+
+func NewInstanceDiff() *InstanceDiff {
+       return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)}
+}
+
+func (d *InstanceDiff) Copy() (*InstanceDiff, error) {
+       if d == nil {
+               return nil, nil
+       }
+
+       dCopy, err := copystructure.Config{Lock: true}.Copy(d)
+       if err != nil {
+               return nil, err
+       }
+
+       return dCopy.(*InstanceDiff), nil
+}
+
+// ChangeType returns the DiffChangeType represented by the diff
+// for this single instance.
+func (d *InstanceDiff) ChangeType() DiffChangeType {
+       if d.Empty() {
+               return DiffNone
+       }
+
+       if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) {
+               return DiffDestroyCreate
+       }
+
+       if d.GetDestroy() || d.GetDestroyDeposed() {
+               return DiffDestroy
+       }
+
+       if d.RequiresNew() {
+               return DiffCreate
+       }
+
+       return DiffUpdate
+}
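
A small sketch of how the classification falls out, using only the constructor and setters from this file:

id := NewInstanceDiff()
id.SetAttribute("ami", &ResourceAttrDiff{
        Old:         "ami-1111",
        New:         "ami-2222",
        RequiresNew: true,
})

fmt.Println(id.ChangeType() == DiffCreate) // true: forced replacement only

id.SetDestroy(true)
fmt.Println(id.ChangeType() == DiffDestroyCreate) // true: destroy then create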
+
+// Empty returns true if this diff encapsulates no changes.
+func (d *InstanceDiff) Empty() bool {
+       if d == nil {
+               return true
+       }
+
+       d.mu.Lock()
+       defer d.mu.Unlock()
+       return !d.Destroy &&
+               !d.DestroyTainted &&
+               !d.DestroyDeposed &&
+               len(d.Attributes) == 0
+}
+
+// Equal compares two diffs for exact equality.
+//
+// This is different from the Same comparison, which checks for
+// operation equality while taking computed values into account. Equal
+// instead checks for exact equality.
+func (d *InstanceDiff) Equal(d2 *InstanceDiff) bool {
+       // If one is nil, they must both be nil
+       if d == nil || d2 == nil {
+               return d == d2
+       }
+
+       // Use DeepEqual
+       return reflect.DeepEqual(d, d2)
+}
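+
+// Editor's sketch (hypothetical, not part of the upstream source): the
+// contrast between Equal and Same. Equal is strict structural equality,
+// while Same tolerates computed values resolving during apply.
+func exampleEqualVsSame(a, b *InstanceDiff) (exact, operational bool) {
+       exact = a.Equal(b)         // strict DeepEqual comparison
+       operational, _ = a.Same(b) // same operation; the string is the mismatch reason
+       return exact, operational
+}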
+
+// DeepCopy performs a deep copy of all parts of the InstanceDiff
+func (d *InstanceDiff) DeepCopy() *InstanceDiff {
+       copy, err := copystructure.Config{Lock: true}.Copy(d)
+       if err != nil {
+               panic(err)
+       }
+
+       return copy.(*InstanceDiff)
+}
+
+func (d *InstanceDiff) GoString() string {
+       return fmt.Sprintf("*%#v", InstanceDiff{
+               Attributes:     d.Attributes,
+               Destroy:        d.Destroy,
+               DestroyTainted: d.DestroyTainted,
+               DestroyDeposed: d.DestroyDeposed,
+       })
+}
+
+// RequiresNew returns true if the diff requires the creation of a new
+// resource (implying the destruction of the old).
+func (d *InstanceDiff) RequiresNew() bool {
+       if d == nil {
+               return false
+       }
+
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       return d.requiresNew()
+}
+
+func (d *InstanceDiff) requiresNew() bool {
+       if d == nil {
+               return false
+       }
+
+       if d.DestroyTainted {
+               return true
+       }
+
+       for _, rd := range d.Attributes {
+               if rd != nil && rd.RequiresNew {
+                       return true
+               }
+       }
+
+       return false
+}
+
+func (d *InstanceDiff) GetDestroyDeposed() bool {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       return d.DestroyDeposed
+}
+
+func (d *InstanceDiff) SetDestroyDeposed(b bool) {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       d.DestroyDeposed = b
+}
+
+// These methods are properly locked, for use outside other InstanceDiff
+// methods but anywhere else within the terraform package.
+// TODO refactor the locking scheme
+func (d *InstanceDiff) SetTainted(b bool) {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       d.DestroyTainted = b
+}
+
+func (d *InstanceDiff) GetDestroyTainted() bool {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       return d.DestroyTainted
+}
+
+func (d *InstanceDiff) SetDestroy(b bool) {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       d.Destroy = b
+}
+
+func (d *InstanceDiff) GetDestroy() bool {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       return d.Destroy
+}
+
+func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       d.Attributes[key] = attr
+}
+
+func (d *InstanceDiff) DelAttribute(key string) {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       delete(d.Attributes, key)
+}
+
+func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       attr, ok := d.Attributes[key]
+       return attr, ok
+}
+func (d *InstanceDiff) GetAttributesLen() int {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       return len(d.Attributes)
+}
+
+// CopyAttributes safely copies the Attributes map.
+func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff {
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       attrs := make(map[string]*ResourceAttrDiff)
+       for k, v := range d.Attributes {
+               attrs[k] = v
+       }
+
+       return attrs
+}
+
+// Same checks whether or not two InstanceDiffs are the "same". "Same"
+// does not mean exactly equal; it just checks that the same attributes
+// are changing, that a destroy isn't suddenly happening, and so on.
+func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {
+       // we can safely compare the pointers without a lock
+       switch {
+       case d == nil && d2 == nil:
+               return true, ""
+       case d == nil || d2 == nil:
+               return false, "one nil"
+       case d == d2:
+               return true, ""
+       }
+
+       d.mu.Lock()
+       defer d.mu.Unlock()
+
+       // If we're going from requiring new to NOT requiring new, then we have
+       // to see if all the attributes that required new were computed. If so,
+       // it is allowed, since computed may also mean "same value and therefore
+       // not new".
+       oldNew := d.requiresNew()
+       newNew := d2.RequiresNew()
+       if oldNew && !newNew {
+               oldNew = false
+
+               // This section builds a list of ignorable attributes for requiresNew
+               // by removing any elements of collections that are going to zero
+               // elements. Collections going to zero may not exist at all in the
+               // new diff (and hence RequiresNew == false).
+               ignoreAttrs := make(map[string]struct{})
+               for k, diffOld := range d.Attributes {
+                       if !strings.HasSuffix(k, ".%") && !strings.HasSuffix(k, ".#") {
+                               continue
+                       }
+
+                       // This case is in here as a protection measure. The bug that this
+                       // code originally fixed (GH-11349) didn't have to deal with computed
+                       // so I'm not 100% sure what the correct behavior is. Best to leave
+                       // the old behavior.
+                       if diffOld.NewComputed {
+                               continue
+                       }
+
+                       // We're looking for the case where a map goes to exactly 0.
+                       if diffOld.New != "0" {
+                               continue
+                       }
+
+                       // Found it! Ignore all of these. The prefix here strips
+                       // off the trailing "%" or "#" so it is just "k.".
+                       prefix := k[:len(k)-1]
+                       for k2 := range d.Attributes {
+                               if strings.HasPrefix(k2, prefix) {
+                                       ignoreAttrs[k2] = struct{}{}
+                               }
+                       }
+               }
+
+               for k, rd := range d.Attributes {
+                       if _, ok := ignoreAttrs[k]; ok {
+                               continue
+                       }
+
+                       // If the field requires new and is NOT computed, then what
+                       // we have is definitely a diff mismatch. Record that the old
+                       // diff does REQUIRE a ForceNew.
+                       if rd != nil && rd.RequiresNew && !rd.NewComputed {
+                               oldNew = true
+                               break
+                       }
+               }
+       }
+
+       if oldNew != newNew {
+               return false, fmt.Sprintf(
+                       "diff RequiresNew; old: %t, new: %t", oldNew, newNew)
+       }
+
+       // Verify that destroy matches. The second boolean here allows us to
+       // have mismatching Destroy if we're moving from RequiresNew true
+       // to false above. Therefore, the second boolean will only pass if
+       // we're moving from Destroy: true to false as well.
+       if d.Destroy != d2.GetDestroy() && d.requiresNew() == oldNew {
+               return false, fmt.Sprintf(
+                       "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy())
+       }
+
+       // Go through the old diff and make sure the new diff has all the
+       // same attributes. To start, build up the check map to be all the keys.
+       checkOld := make(map[string]struct{})
+       checkNew := make(map[string]struct{})
+       for k := range d.Attributes {
+               checkOld[k] = struct{}{}
+       }
+       for k := range d2.CopyAttributes() {
+               checkNew[k] = struct{}{}
+       }
+
+       // Make an ordered list so we are sure the approximated hashes are
+       // left to be processed at the end of the loop.
+       keys := make([]string, 0, len(d.Attributes))
+       for k := range d.Attributes {
+               keys = append(keys, k)
+       }
+       sort.Strings(keys)
+
+       for _, k := range keys {
+               diffOld := d.Attributes[k]
+
+               if _, ok := checkOld[k]; !ok {
+                       // We're not checking this key for whatever reason (see where
+                       // check is modified).
+                       continue
+               }
+
+               // Remove this key since we'll never hit it again
+               delete(checkOld, k)
+               delete(checkNew, k)
+
+               _, ok := d2.GetAttribute(k)
+               if !ok {
+                       // If there's no new attribute, and the old diff expected the attribute
+                       // to be removed, that's just fine.
+                       if diffOld.NewRemoved {
+                               continue
+                       }
+
+                       // If the last diff was a computed value then the absence of
+                       // that value is allowed since it may mean the value ended up
+                       // being the same.
+                       if diffOld.NewComputed {
+                               ok = true
+                       }
+
+                       // No exact match, but maybe this is a set containing computed
+                       // values. So check if there is an approximate hash in the key
+                       // and if so, try to match the key.
+                       if strings.Contains(k, "~") {
+                               parts := strings.Split(k, ".")
+                               parts2 := append([]string(nil), parts...)
+
+                               re := regexp.MustCompile(`^~\d+$`)
+                               for i, part := range parts {
+                                       if re.MatchString(part) {
+                                               // we're going to consider this the base of a
+                                               // computed hash, and remove all longer matching fields
+                                               ok = true
+
+                                               parts2[i] = `\d+`
+                                               parts2 = parts2[:i+1]
+                                               break
+                                       }
+                               }
+
+                               re, err := regexp.Compile("^" + strings.Join(parts2, `\.`))
+                               if err != nil {
+                                       return false, fmt.Sprintf("regexp failed to compile; err: %#v", err)
+                               }
+
+                               for k2 := range checkNew {
+                                       if re.MatchString(k2) {
+                                               delete(checkNew, k2)
+                                       }
+                               }
+                       }
+
+                       // This is a little tricky, but when a diff contains a computed
+                       // list, set, or map that can only be interpolated after the apply
+                       // command has created the dependent resources, it could turn out
+                       // that the result is actually the same as the existing state which
+                       // would remove the key from the diff.
+                       if diffOld.NewComputed && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+                               ok = true
+                       }
+
+                       // Similarly, in a RequiresNew scenario, a list that shows up in the plan
+                       // diff can disappear from the apply diff, which is calculated from an
+                       // empty state.
+                       if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) {
+                               ok = true
+                       }
+
+                       if !ok {
+                               return false, fmt.Sprintf("attribute mismatch: %s", k)
+                       }
+               }
+
+               // Search for the suffix of the base of a computed map, list, or set.
+               match := multiVal.FindStringSubmatch(k)
+
+               if diffOld.NewComputed && len(match) == 2 {
+                       matchLen := len(match[1])
+
+                       // This is a computed list, set, or map, so remove any keys with
+                       // this prefix from the check list.
+                       kprefix := k[:len(k)-matchLen]
+                       for k2 := range checkOld {
+                               if strings.HasPrefix(k2, kprefix) {
+                                       delete(checkOld, k2)
+                               }
+                       }
+                       for k2 := range checkNew {
+                               if strings.HasPrefix(k2, kprefix) {
+                                       delete(checkNew, k2)
+                               }
+                       }
+               }
+
+               // TODO: check for the same value if not computed
+       }
+
+       // Check for leftover attributes
+       if len(checkNew) > 0 {
+               extras := make([]string, 0, len(checkNew))
+               for attr := range checkNew {
+                       extras = append(extras, attr)
+               }
+               return false,
+                       fmt.Sprintf("extra attributes: %s", strings.Join(extras, ", "))
+       }
+
+       return true, ""
+}
+
+// moduleDiffSort implements sort.Interface to sort module diffs by path.
+type moduleDiffSort []*ModuleDiff
+
+func (s moduleDiffSort) Len() int      { return len(s) }
+func (s moduleDiffSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s moduleDiffSort) Less(i, j int) bool {
+       a := s[i]
+       b := s[j]
+
+       // If the lengths are different, then the shorter one always wins
+       if len(a.Path) != len(b.Path) {
+               return len(a.Path) < len(b.Path)
+       }
+
+       // Otherwise, compare lexically
+       return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
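+
+// Editor's sketch (hypothetical, not part of the upstream source): typical
+// use of moduleDiffSort with the standard sort package. Shorter paths
+// (parent modules) sort before their children.
+func sortModuleDiffs(diffs []*ModuleDiff) {
+       sort.Sort(moduleDiffSort(diffs))
+}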
diff --git a/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/edge_destroy.go
new file mode 100644 (file)
index 0000000..bc9d638
--- /dev/null
@@ -0,0 +1,17 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// DestroyEdge is an edge that represents a standard "destroy" relationship:
+// Target depends on Source because Source is destroying.
+type DestroyEdge struct {
+       S, T dag.Vertex
+}
+
+func (e *DestroyEdge) Hashcode() interface{} { return fmt.Sprintf("%p-%p", e.S, e.T) }
+func (e *DestroyEdge) Source() dag.Vertex    { return e.S }
+func (e *DestroyEdge) Target() dag.Vertex    { return e.T }
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval.go b/vendor/github.com/hashicorp/terraform/terraform/eval.go
new file mode 100644 (file)
index 0000000..3cb088a
--- /dev/null
@@ -0,0 +1,63 @@
+package terraform
+
+import (
+       "log"
+       "strings"
+)
+
+// EvalNode is the interface that must be implemented by graph nodes to
+// evaluate/execute.
+type EvalNode interface {
+       // Eval evaluates this node with the given context.
+       Eval(EvalContext) (interface{}, error)
+}
+
+// GraphNodeEvalable is the interface that graph nodes must implement
+// to enable evaluation.
+type GraphNodeEvalable interface {
+       EvalTree() EvalNode
+}
+
+// EvalEarlyExitError is a special error return value that can be returned
+// by eval nodes to signal an early exit.
+type EvalEarlyExitError struct{}
+
+func (EvalEarlyExitError) Error() string { return "early exit" }
+
+// Eval evaluates the given EvalNode with the given context, properly
+// evaluating all args in the correct order.
+func Eval(n EvalNode, ctx EvalContext) (interface{}, error) {
+       // Call the lower-level eval, which doesn't understand early exit;
+       // if we do exit early, it isn't an error.
+       result, err := EvalRaw(n, ctx)
+       if err != nil {
+               if _, ok := err.(EvalEarlyExitError); ok {
+                       return nil, nil
+               }
+       }
+
+       return result, err
+}
+
+// EvalRaw is like Eval except that it returns all errors, even if they
+// signal something normal such as EvalEarlyExitError.
+func EvalRaw(n EvalNode, ctx EvalContext) (interface{}, error) {
+       path := "unknown"
+       if ctx != nil {
+               path = strings.Join(ctx.Path(), ".")
+       }
+
+       log.Printf("[DEBUG] %s: eval: %T", path, n)
+       output, err := n.Eval(ctx)
+       if err != nil {
+               if _, ok := err.(EvalEarlyExitError); ok {
+                       log.Printf("[DEBUG] %s: eval: %T, err: %s", path, n, err)
+               } else {
+                       log.Printf("[ERROR] %s: eval: %T, err: %s", path, n, err)
+               }
+       }
+
+       return output, err
+}
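+
+// Editor's illustration (hypothetical, not part of the upstream source): the
+// smallest possible EvalNode, showing the contract Eval and EvalRaw drive.
+// Returning EvalEarlyExitError here would be swallowed by Eval but surfaced
+// by EvalRaw.
+type evalNoop struct{}
+
+func (evalNoop) Eval(EvalContext) (interface{}, error) { return nil, nil }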
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go b/vendor/github.com/hashicorp/terraform/terraform/eval_apply.go
new file mode 100644 (file)
index 0000000..2f6a497
--- /dev/null
@@ -0,0 +1,359 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "strconv"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/config"
+)
+
+// EvalApply is an EvalNode implementation that applies the given diff
+// to the resource through the resource provider.
+type EvalApply struct {
+       Info      *InstanceInfo
+       State     **InstanceState
+       Diff      **InstanceDiff
+       Provider  *ResourceProvider
+       Output    **InstanceState
+       CreateNew *bool
+       Error     *error
+}
+
+// TODO: test
+func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) {
+       diff := *n.Diff
+       provider := *n.Provider
+       state := *n.State
+
+       // If we have no diff, we have nothing to do!
+       if diff.Empty() {
+               log.Printf(
+                       "[DEBUG] apply: %s: diff is empty, doing nothing.", n.Info.Id)
+               return nil, nil
+       }
+
+       // Remove any output values from the diff
+       for k, ad := range diff.CopyAttributes() {
+               if ad.Type == DiffAttrOutput {
+                       diff.DelAttribute(k)
+               }
+       }
+
+       // If the state is nil, make it non-nil
+       if state == nil {
+               state = new(InstanceState)
+       }
+       state.init()
+
+       // Flag if we're creating a new instance
+       if n.CreateNew != nil {
+               *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew()
+       }
+
+       // With the completed diff, apply!
+       log.Printf("[DEBUG] apply: %s: executing Apply", n.Info.Id)
+       state, err := provider.Apply(n.Info, state, diff)
+       if state == nil {
+               state = new(InstanceState)
+       }
+       state.init()
+
+       // Force the "id" attribute to be our ID
+       if state.ID != "" {
+               state.Attributes["id"] = state.ID
+       }
+
+       // If the value is the unknown variable value, then it is an error.
+       // In this case we record the error and remove it from the state
+       for ak, av := range state.Attributes {
+               if av == config.UnknownVariableValue {
+                       err = multierror.Append(err, fmt.Errorf(
+                               "Attribute with unknown value: %s", ak))
+                       delete(state.Attributes, ak)
+               }
+       }
+
+       // Write the final state
+       if n.Output != nil {
+               *n.Output = state
+       }
+
+       // If there was an error, append it to our output error if we have
+       // one; otherwise just return it.
+       if err != nil {
+               if n.Error != nil {
+                       helpfulErr := fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+                       *n.Error = multierror.Append(*n.Error, helpfulErr)
+               } else {
+                       return nil, err
+               }
+       }
+
+       return nil, nil
+}
+
+// EvalApplyPre is an EvalNode implementation that does the pre-Apply work
+type EvalApplyPre struct {
+       Info  *InstanceInfo
+       State **InstanceState
+       Diff  **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalApplyPre) Eval(ctx EvalContext) (interface{}, error) {
+       state := *n.State
+       diff := *n.Diff
+
+       // If the state is nil, make it non-nil
+       if state == nil {
+               state = new(InstanceState)
+       }
+       state.init()
+
+       {
+               // Call pre-apply hook
+               err := ctx.Hook(func(h Hook) (HookAction, error) {
+                       return h.PreApply(n.Info, state, diff)
+               })
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return nil, nil
+}
+
+// EvalApplyPost is an EvalNode implementation that does the post-Apply work
+type EvalApplyPost struct {
+       Info  *InstanceInfo
+       State **InstanceState
+       Error *error
+}
+
+// TODO: test
+func (n *EvalApplyPost) Eval(ctx EvalContext) (interface{}, error) {
+       state := *n.State
+
+       {
+               // Call post-apply hook
+               err := ctx.Hook(func(h Hook) (HookAction, error) {
+                       return h.PostApply(n.Info, state, *n.Error)
+               })
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return nil, *n.Error
+}
+
+// EvalApplyProvisioners is an EvalNode implementation that executes
+// the provisioners for a resource.
+//
+// TODO(mitchellh): This should probably be split up into a more fine-grained
+// ApplyProvisioner (single) that is looped over.
+type EvalApplyProvisioners struct {
+       Info           *InstanceInfo
+       State          **InstanceState
+       Resource       *config.Resource
+       InterpResource *Resource
+       CreateNew      *bool
+       Error          *error
+
+       // When is the type of provisioner to run at this point
+       When config.ProvisionerWhen
+}
+
+// TODO: test
+func (n *EvalApplyProvisioners) Eval(ctx EvalContext) (interface{}, error) {
+       state := *n.State
+
+       if n.CreateNew != nil && !*n.CreateNew {
+               // If we're not creating a new resource, then don't run provisioners
+               return nil, nil
+       }
+
+       provs := n.filterProvisioners()
+       if len(provs) == 0 {
+               // We have no provisioners, so don't do anything
+               return nil, nil
+       }
+
+       // taint tells us whether to enable tainting.
+       taint := n.When == config.ProvisionerWhenCreate
+
+       if n.Error != nil && *n.Error != nil {
+               if taint {
+                       state.Tainted = true
+               }
+
+               // An earlier error already occurred, so just return early
+               return nil, nil
+       }
+
+       {
+               // Call pre hook
+               err := ctx.Hook(func(h Hook) (HookAction, error) {
+                       return h.PreProvisionResource(n.Info, state)
+               })
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // If there was an error, append it to our output error if we have
+       // one; otherwise just return it.
+       err := n.apply(ctx, provs)
+       if err != nil {
+               if taint {
+                       state.Tainted = true
+               }
+
+               if n.Error != nil {
+                       *n.Error = multierror.Append(*n.Error, err)
+               } else {
+                       return nil, err
+               }
+       }
+
+       {
+               // Call post hook
+               err := ctx.Hook(func(h Hook) (HookAction, error) {
+                       return h.PostProvisionResource(n.Info, state)
+               })
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return nil, nil
+}
+
+// filterProvisioners filters the provisioners on the resource to only
+// the provisioners specified by the "when" option.
+func (n *EvalApplyProvisioners) filterProvisioners() []*config.Provisioner {
+       // Fast path the zero case
+       if n.Resource == nil {
+               return nil
+       }
+
+       if len(n.Resource.Provisioners) == 0 {
+               return nil
+       }
+
+       result := make([]*config.Provisioner, 0, len(n.Resource.Provisioners))
+       for _, p := range n.Resource.Provisioners {
+               if p.When == n.When {
+                       result = append(result, p)
+               }
+       }
+
+       return result
+}
+
+func (n *EvalApplyProvisioners) apply(ctx EvalContext, provs []*config.Provisioner) error {
+       state := *n.State
+
+       // Store the original connection info, restore later
+       origConnInfo := state.Ephemeral.ConnInfo
+       defer func() {
+               state.Ephemeral.ConnInfo = origConnInfo
+       }()
+
+       for _, prov := range provs {
+               // Get the provisioner
+               provisioner := ctx.Provisioner(prov.Type)
+
+               // Interpolate the provisioner config
+               provConfig, err := ctx.Interpolate(prov.RawConfig.Copy(), n.InterpResource)
+               if err != nil {
+                       return err
+               }
+
+               // Interpolate the conn info, since it may contain variables
+               connInfo, err := ctx.Interpolate(prov.ConnInfo.Copy(), n.InterpResource)
+               if err != nil {
+                       return err
+               }
+
+               // Merge the connection information
+               overlay := make(map[string]string)
+               if origConnInfo != nil {
+                       for k, v := range origConnInfo {
+                               overlay[k] = v
+                       }
+               }
+               for k, v := range connInfo.Config {
+                       switch vt := v.(type) {
+                       case string:
+                               overlay[k] = vt
+                       case int64:
+                               overlay[k] = strconv.FormatInt(vt, 10)
+                       case int32:
+                               overlay[k] = strconv.FormatInt(int64(vt), 10)
+                       case int:
+                               overlay[k] = strconv.FormatInt(int64(vt), 10)
+                       case float32:
+                               overlay[k] = strconv.FormatFloat(float64(vt), 'f', 3, 32)
+                       case float64:
+                               overlay[k] = strconv.FormatFloat(vt, 'f', 3, 64)
+                       case bool:
+                               overlay[k] = strconv.FormatBool(vt)
+                       default:
+                               overlay[k] = fmt.Sprintf("%v", vt)
+                       }
+               }
+               state.Ephemeral.ConnInfo = overlay
+
+               {
+                       // Call pre hook
+                       err := ctx.Hook(func(h Hook) (HookAction, error) {
+                               return h.PreProvision(n.Info, prov.Type)
+                       })
+                       if err != nil {
+                               return err
+                       }
+               }
+
+               // The output function streams provisioner output through the hooks
+               outputFn := func(msg string) {
+                       ctx.Hook(func(h Hook) (HookAction, error) {
+                               h.ProvisionOutput(n.Info, prov.Type, msg)
+                               return HookActionContinue, nil
+                       })
+               }
+
+               // Invoke the Provisioner
+               output := CallbackUIOutput{OutputFn: outputFn}
+               applyErr := provisioner.Apply(&output, state, provConfig)
+
+               // Call post hook
+               hookErr := ctx.Hook(func(h Hook) (HookAction, error) {
+                       return h.PostProvision(n.Info, prov.Type, applyErr)
+               })
+
+               // Handle the error before we deal with the hook
+               if applyErr != nil {
+                       // Determine failure behavior
+                       switch prov.OnFailure {
+                       case config.ProvisionerOnFailureContinue:
+                               log.Printf(
+                                       "[INFO] apply: %s [%s]: error during provision, continue requested",
+                                       n.Info.Id, prov.Type)
+
+                       case config.ProvisionerOnFailureFail:
+                               return applyErr
+                       }
+               }
+
+               // Deal with the hook
+               if hookErr != nil {
+                       return hookErr
+               }
+       }
+
+       return nil
+}
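+
+// Editor's sketch (hypothetical, not part of the upstream source): the
+// connection-info merge above coerces every interpolated value to a string,
+// as this standalone mirror of the switch shows.
+func exampleConnInfoCoercion() map[string]string {
+       overlay := map[string]string{}
+       for k, v := range map[string]interface{}{"port": 22, "secure": true} {
+               switch vt := v.(type) {
+               case int:
+                       overlay[k] = strconv.FormatInt(int64(vt), 10)
+               case bool:
+                       overlay[k] = strconv.FormatBool(vt)
+               }
+       }
+       return overlay // {"port": "22", "secure": "true"}
+}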
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/eval_check_prevent_destroy.go
new file mode 100644 (file)
index 0000000..715e79e
--- /dev/null
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// EvalCheckPreventDestroy is an EvalNode implementation that returns an
+// error if a resource has PreventDestroy configured and the diff
+// would destroy the resource.
+type EvalCheckPreventDestroy struct {
+       Resource   *config.Resource
+       ResourceId string
+       Diff       **InstanceDiff
+}
+
+func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) {
+       if n.Diff == nil || *n.Diff == nil || n.Resource == nil {
+               return nil, nil
+       }
+
+       diff := *n.Diff
+       preventDestroy := n.Resource.Lifecycle.PreventDestroy
+
+       if diff.GetDestroy() && preventDestroy {
+               resourceId := n.ResourceId
+               if resourceId == "" {
+                       resourceId = n.Resource.Id()
+               }
+
+               return nil, fmt.Errorf(preventDestroyErrStr, resourceId)
+       }
+
+       return nil, nil
+}
+
+const preventDestroyErrStr = `%s: the plan would destroy this resource, but it currently has lifecycle.prevent_destroy set to true. To avoid this error and continue with the plan, either disable lifecycle.prevent_destroy or adjust the scope of the plan using the -target flag.`
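+
+// Editor's sketch (hypothetical, not part of the upstream source): how a
+// graph builder might place the check in front of a destroy diff.
+func exampleCheckPreventDestroy(r *config.Resource, d **InstanceDiff) EvalNode {
+       return &EvalCheckPreventDestroy{Resource: r, ResourceId: r.Id(), Diff: d}
+}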
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context.go
new file mode 100644 (file)
index 0000000..a1f815b
--- /dev/null
@@ -0,0 +1,84 @@
+package terraform
+
+import (
+       "sync"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// EvalContext is the interface that is given to eval nodes to execute.
+type EvalContext interface {
+       // Stopped returns a channel that is closed when evaluation is stopped
+       // via Terraform.Context.Stop()
+       Stopped() <-chan struct{}
+
+       // Path is the current module path.
+       Path() []string
+
+       // Hook is used to call hook methods. The callback is called for each
+       // hook and should return the hook action to take and the error.
+       Hook(func(Hook) (HookAction, error)) error
+
+       // Input is the UIInput object for interacting with the UI.
+       Input() UIInput
+
+       // InitProvider initializes the provider with the given name and
+       // returns the implementation of the resource provider or an error.
+       //
+       // It is an error to initialize the same provider more than once.
+       InitProvider(string) (ResourceProvider, error)
+
+       // Provider gets the provider instance with the given name (already
+       // initialized) or returns nil if the provider isn't initialized.
+       Provider(string) ResourceProvider
+
+       // CloseProvider closes provider connections that aren't needed anymore.
+       CloseProvider(string) error
+
+       // ConfigureProvider configures the provider with the given
+       // configuration. This is a separate context call because this call
+       // is used to store the provider configuration for inheritance lookups
+       // with ParentProviderConfig().
+       ConfigureProvider(string, *ResourceConfig) error
+       SetProviderConfig(string, *ResourceConfig) error
+       ParentProviderConfig(string) *ResourceConfig
+
+       // ProviderInput and SetProviderInput are used to configure providers
+       // from user input.
+       ProviderInput(string) map[string]interface{}
+       SetProviderInput(string, map[string]interface{})
+
+       // InitProvisioner initializes the provisioner with the given name and
+       // returns the implementation of the resource provisioner or an error.
+       //
+       // It is an error to initialize the same provisioner more than once.
+       InitProvisioner(string) (ResourceProvisioner, error)
+
+       // Provisioner gets the provisioner instance with the given name (already
+       // initialized) or returns nil if the provisioner isn't initialized.
+       Provisioner(string) ResourceProvisioner
+
+       // CloseProvisioner closes provisioner connections that aren't needed
+       // anymore.
+       CloseProvisioner(string) error
+
+       // Interpolate takes the given raw configuration and completes
+       // the interpolations, returning the processed ResourceConfig.
+       //
+       // The resource argument is optional. If given, it is the resource
+       // that is currently being acted upon.
+       Interpolate(*config.RawConfig, *Resource) (*ResourceConfig, error)
+
+       // SetVariables sets the variables for the module named n within
+       // this context. This function call is additive: the second
+       // parameter is merged with any previous call.
+       SetVariables(string, map[string]interface{})
+
+       // Diff returns the global diff as well as the lock that should
+       // be used to modify that diff.
+       Diff() (*Diff, *sync.RWMutex)
+
+       // State returns the global state as well as the lock that should
+       // be used to modify that state.
+       State() (*State, *sync.RWMutex)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_builtin.go
new file mode 100644 (file)
index 0000000..3dcfb22
--- /dev/null
@@ -0,0 +1,347 @@
+package terraform
+
+import (
+       "context"
+       "fmt"
+       "log"
+       "strings"
+       "sync"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// BuiltinEvalContext is an EvalContext implementation that is used by
+// Terraform by default.
+type BuiltinEvalContext struct {
+       // StopContext is the context used to track whether we're complete
+       StopContext context.Context
+
+       // PathValue is the Path that this context is operating within.
+       PathValue []string
+
+       // The Interpolater settings below affect the interpolation of variables.
+       //
+       // The InterpolaterVars are the exact value for ${var.foo} values.
+       // The map is shared between all contexts and is a mapping of
+       // PATH to KEY to VALUE. Because it is shared by all contexts as well
+       // as the Interpolater itself, it is protected by InterpolaterVarLock
+       // which must be locked during any access to the map.
+       Interpolater        *Interpolater
+       InterpolaterVars    map[string]map[string]interface{}
+       InterpolaterVarLock *sync.Mutex
+
+       Components          contextComponentFactory
+       Hooks               []Hook
+       InputValue          UIInput
+       ProviderCache       map[string]ResourceProvider
+       ProviderConfigCache map[string]*ResourceConfig
+       ProviderInputConfig map[string]map[string]interface{}
+       ProviderLock        *sync.Mutex
+       ProvisionerCache    map[string]ResourceProvisioner
+       ProvisionerLock     *sync.Mutex
+       DiffValue           *Diff
+       DiffLock            *sync.RWMutex
+       StateValue          *State
+       StateLock           *sync.RWMutex
+
+       once sync.Once
+}
+
+func (ctx *BuiltinEvalContext) Stopped() <-chan struct{} {
+       // StopContext can be nil during tests; in that case we just block forever.
+       if ctx.StopContext == nil {
+               return nil
+       }
+
+       return ctx.StopContext.Done()
+}
+
+func (ctx *BuiltinEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
+       for _, h := range ctx.Hooks {
+               action, err := fn(h)
+               if err != nil {
+                       return err
+               }
+
+               switch action {
+               case HookActionContinue:
+                       continue
+               case HookActionHalt:
+                       // Return an early exit error to trigger an early exit
+                       log.Printf("[WARN] Early exit triggered by hook: %T", h)
+                       return EvalEarlyExitError{}
+               }
+       }
+
+       return nil
+}
+
+func (ctx *BuiltinEvalContext) Input() UIInput {
+       return ctx.InputValue
+}
+
+func (ctx *BuiltinEvalContext) InitProvider(n string) (ResourceProvider, error) {
+       ctx.once.Do(ctx.init)
+
+       // If we already initialized, it is an error
+       if p := ctx.Provider(n); p != nil {
+               return nil, fmt.Errorf("Provider '%s' already initialized", n)
+       }
+
+       // Warning: make sure to acquire these locks AFTER the call to Provider
+       // above, since it also acquires locks.
+       ctx.ProviderLock.Lock()
+       defer ctx.ProviderLock.Unlock()
+
+       providerPath := make([]string, len(ctx.Path())+1)
+       copy(providerPath, ctx.Path())
+       providerPath[len(providerPath)-1] = n
+       key := PathCacheKey(providerPath)
+
+       typeName := strings.SplitN(n, ".", 2)[0]
+       p, err := ctx.Components.ResourceProvider(typeName, key)
+       if err != nil {
+               return nil, err
+       }
+
+       ctx.ProviderCache[key] = p
+       return p, nil
+}
+
+func (ctx *BuiltinEvalContext) Provider(n string) ResourceProvider {
+       ctx.once.Do(ctx.init)
+
+       ctx.ProviderLock.Lock()
+       defer ctx.ProviderLock.Unlock()
+
+       providerPath := make([]string, len(ctx.Path())+1)
+       copy(providerPath, ctx.Path())
+       providerPath[len(providerPath)-1] = n
+
+       return ctx.ProviderCache[PathCacheKey(providerPath)]
+}
+
+func (ctx *BuiltinEvalContext) CloseProvider(n string) error {
+       ctx.once.Do(ctx.init)
+
+       ctx.ProviderLock.Lock()
+       defer ctx.ProviderLock.Unlock()
+
+       providerPath := make([]string, len(ctx.Path())+1)
+       copy(providerPath, ctx.Path())
+       providerPath[len(providerPath)-1] = n
+
+       var provider interface{} = ctx.ProviderCache[PathCacheKey(providerPath)]
+       if provider != nil {
+               if p, ok := provider.(ResourceProviderCloser); ok {
+                       delete(ctx.ProviderCache, PathCacheKey(providerPath))
+                       return p.Close()
+               }
+       }
+
+       return nil
+}
+
+func (ctx *BuiltinEvalContext) ConfigureProvider(
+       n string, cfg *ResourceConfig) error {
+       p := ctx.Provider(n)
+       if p == nil {
+               return fmt.Errorf("Provider '%s' not initialized", n)
+       }
+
+       if err := ctx.SetProviderConfig(n, cfg); err != nil {
+               return err
+       }
+
+       return p.Configure(cfg)
+}
+
+func (ctx *BuiltinEvalContext) SetProviderConfig(
+       n string, cfg *ResourceConfig) error {
+       providerPath := make([]string, len(ctx.Path())+1)
+       copy(providerPath, ctx.Path())
+       providerPath[len(providerPath)-1] = n
+
+       // Save the configuration
+       ctx.ProviderLock.Lock()
+       ctx.ProviderConfigCache[PathCacheKey(providerPath)] = cfg
+       ctx.ProviderLock.Unlock()
+
+       return nil
+}
+
+func (ctx *BuiltinEvalContext) ProviderInput(n string) map[string]interface{} {
+       ctx.ProviderLock.Lock()
+       defer ctx.ProviderLock.Unlock()
+
+       // Make a copy of the path so we can safely edit it
+       path := ctx.Path()
+       pathCopy := make([]string, len(path)+1)
+       copy(pathCopy, path)
+
+       // Go up the tree.
+       for i := len(path) - 1; i >= 0; i-- {
+               pathCopy[i+1] = n
+               k := PathCacheKey(pathCopy[:i+2])
+               if v, ok := ctx.ProviderInputConfig[k]; ok {
+                       return v
+               }
+       }
+
+       return nil
+}
+
+func (ctx *BuiltinEvalContext) SetProviderInput(n string, c map[string]interface{}) {
+       providerPath := make([]string, len(ctx.Path())+1)
+       copy(providerPath, ctx.Path())
+       providerPath[len(providerPath)-1] = n
+
+       // Save the configuration
+       ctx.ProviderLock.Lock()
+       ctx.ProviderInputConfig[PathCacheKey(providerPath)] = c
+       ctx.ProviderLock.Unlock()
+}
+
+func (ctx *BuiltinEvalContext) ParentProviderConfig(n string) *ResourceConfig {
+       ctx.ProviderLock.Lock()
+       defer ctx.ProviderLock.Unlock()
+
+       // Make a copy of the path so we can safely edit it
+       path := ctx.Path()
+       pathCopy := make([]string, len(path)+1)
+       copy(pathCopy, path)
+
+       // Go up the tree.
+       for i := len(path) - 1; i >= 0; i-- {
+               pathCopy[i+1] = n
+               k := PathCacheKey(pathCopy[:i+2])
+               if v, ok := ctx.ProviderConfigCache[k]; ok {
+                       return v
+               }
+       }
+
+       return nil
+}
+
+func (ctx *BuiltinEvalContext) InitProvisioner(
+       n string) (ResourceProvisioner, error) {
+       ctx.once.Do(ctx.init)
+
+       // If we already initialized, it is an error
+       if p := ctx.Provisioner(n); p != nil {
+               return nil, fmt.Errorf("Provisioner '%s' already initialized", n)
+       }
+
+       // Warning: make sure to acquire these locks AFTER the call to Provisioner
+       // above, since it also acquires locks.
+       ctx.ProvisionerLock.Lock()
+       defer ctx.ProvisionerLock.Unlock()
+
+       provPath := make([]string, len(ctx.Path())+1)
+       copy(provPath, ctx.Path())
+       provPath[len(provPath)-1] = n
+       key := PathCacheKey(provPath)
+
+       p, err := ctx.Components.ResourceProvisioner(n, key)
+       if err != nil {
+               return nil, err
+       }
+
+       ctx.ProvisionerCache[key] = p
+       return p, nil
+}
+
+func (ctx *BuiltinEvalContext) Provisioner(n string) ResourceProvisioner {
+       ctx.once.Do(ctx.init)
+
+       ctx.ProvisionerLock.Lock()
+       defer ctx.ProvisionerLock.Unlock()
+
+       provPath := make([]string, len(ctx.Path())+1)
+       copy(provPath, ctx.Path())
+       provPath[len(provPath)-1] = n
+
+       return ctx.ProvisionerCache[PathCacheKey(provPath)]
+}
+
+func (ctx *BuiltinEvalContext) CloseProvisioner(n string) error {
+       ctx.once.Do(ctx.init)
+
+       ctx.ProvisionerLock.Lock()
+       defer ctx.ProvisionerLock.Unlock()
+
+       provPath := make([]string, len(ctx.Path())+1)
+       copy(provPath, ctx.Path())
+       provPath[len(provPath)-1] = n
+
+       var prov interface{} = ctx.ProvisionerCache[PathCacheKey(provPath)]
+       if prov != nil {
+               if p, ok := prov.(ResourceProvisionerCloser); ok {
+                       delete(ctx.ProvisionerCache, PathCacheKey(provPath))
+                       return p.Close()
+               }
+       }
+
+       return nil
+}
+
+func (ctx *BuiltinEvalContext) Interpolate(
+       cfg *config.RawConfig, r *Resource) (*ResourceConfig, error) {
+       if cfg != nil {
+               scope := &InterpolationScope{
+                       Path:     ctx.Path(),
+                       Resource: r,
+               }
+
+               vs, err := ctx.Interpolater.Values(scope, cfg.Variables)
+               if err != nil {
+                       return nil, err
+               }
+
+               // Do the interpolation
+               if err := cfg.Interpolate(vs); err != nil {
+                       return nil, err
+               }
+       }
+
+       result := NewResourceConfig(cfg)
+       result.interpolateForce()
+       return result, nil
+}
+
+func (ctx *BuiltinEvalContext) Path() []string {
+       return ctx.PathValue
+}
+
+func (ctx *BuiltinEvalContext) SetVariables(n string, vs map[string]interface{}) {
+       ctx.InterpolaterVarLock.Lock()
+       defer ctx.InterpolaterVarLock.Unlock()
+
+       path := make([]string, len(ctx.Path())+1)
+       copy(path, ctx.Path())
+       path[len(path)-1] = n
+       key := PathCacheKey(path)
+
+       vars := ctx.InterpolaterVars[key]
+       if vars == nil {
+               vars = make(map[string]interface{})
+               ctx.InterpolaterVars[key] = vars
+       }
+
+       for k, v := range vs {
+               vars[k] = v
+       }
+}
+
+func (ctx *BuiltinEvalContext) Diff() (*Diff, *sync.RWMutex) {
+       return ctx.DiffValue, ctx.DiffLock
+}
+
+func (ctx *BuiltinEvalContext) State() (*State, *sync.RWMutex) {
+       return ctx.StateValue, ctx.StateLock
+}
+
+func (ctx *BuiltinEvalContext) init() {
+}
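+
+// Editor's sketch (hypothetical, not part of the upstream source; assumes a
+// context constructed with its Interpolater maps and locks populated):
+// SetVariables is additive, so two calls for the same module merge.
+func exampleSetVariablesAdditive(ctx *BuiltinEvalContext) {
+       ctx.SetVariables("child", map[string]interface{}{"a": "1"})
+       ctx.SetVariables("child", map[string]interface{}{"b": "2"})
+       // The interpolater vars for "child" now contain both "a" and "b".
+}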
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go b/vendor/github.com/hashicorp/terraform/terraform/eval_context_mock.go
new file mode 100644 (file)
index 0000000..4f90d5b
--- /dev/null
@@ -0,0 +1,208 @@
+package terraform
+
+import (
+       "sync"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// MockEvalContext is a mock version of EvalContext that can be used
+// for tests.
+type MockEvalContext struct {
+       StoppedCalled bool
+       StoppedValue  <-chan struct{}
+
+       HookCalled bool
+       HookHook   Hook
+       HookError  error
+
+       InputCalled bool
+       InputInput  UIInput
+
+       InitProviderCalled   bool
+       InitProviderName     string
+       InitProviderProvider ResourceProvider
+       InitProviderError    error
+
+       ProviderCalled   bool
+       ProviderName     string
+       ProviderProvider ResourceProvider
+
+       CloseProviderCalled   bool
+       CloseProviderName     string
+       CloseProviderProvider ResourceProvider
+
+       ProviderInputCalled bool
+       ProviderInputName   string
+       ProviderInputConfig map[string]interface{}
+
+       SetProviderInputCalled bool
+       SetProviderInputName   string
+       SetProviderInputConfig map[string]interface{}
+
+       ConfigureProviderCalled bool
+       ConfigureProviderName   string
+       ConfigureProviderConfig *ResourceConfig
+       ConfigureProviderError  error
+
+       SetProviderConfigCalled bool
+       SetProviderConfigName   string
+       SetProviderConfigConfig *ResourceConfig
+
+       ParentProviderConfigCalled bool
+       ParentProviderConfigName   string
+       ParentProviderConfigConfig *ResourceConfig
+
+       InitProvisionerCalled      bool
+       InitProvisionerName        string
+       InitProvisionerProvisioner ResourceProvisioner
+       InitProvisionerError       error
+
+       ProvisionerCalled      bool
+       ProvisionerName        string
+       ProvisionerProvisioner ResourceProvisioner
+
+       CloseProvisionerCalled      bool
+       CloseProvisionerName        string
+       CloseProvisionerProvisioner ResourceProvisioner
+
+       InterpolateCalled       bool
+       InterpolateConfig       *config.RawConfig
+       InterpolateResource     *Resource
+       InterpolateConfigResult *ResourceConfig
+       InterpolateError        error
+
+       PathCalled bool
+       PathPath   []string
+
+       SetVariablesCalled    bool
+       SetVariablesModule    string
+       SetVariablesVariables map[string]interface{}
+
+       DiffCalled bool
+       DiffDiff   *Diff
+       DiffLock   *sync.RWMutex
+
+       StateCalled bool
+       StateState  *State
+       StateLock   *sync.RWMutex
+}
+
+func (c *MockEvalContext) Stopped() <-chan struct{} {
+       c.StoppedCalled = true
+       return c.StoppedValue
+}
+
+func (c *MockEvalContext) Hook(fn func(Hook) (HookAction, error)) error {
+       c.HookCalled = true
+       if c.HookHook != nil {
+               if _, err := fn(c.HookHook); err != nil {
+                       return err
+               }
+       }
+
+       return c.HookError
+}
+
+func (c *MockEvalContext) Input() UIInput {
+       c.InputCalled = true
+       return c.InputInput
+}
+
+func (c *MockEvalContext) InitProvider(n string) (ResourceProvider, error) {
+       c.InitProviderCalled = true
+       c.InitProviderName = n
+       return c.InitProviderProvider, c.InitProviderError
+}
+
+func (c *MockEvalContext) Provider(n string) ResourceProvider {
+       c.ProviderCalled = true
+       c.ProviderName = n
+       return c.ProviderProvider
+}
+
+func (c *MockEvalContext) CloseProvider(n string) error {
+       c.CloseProviderCalled = true
+       c.CloseProviderName = n
+       return nil
+}
+
+func (c *MockEvalContext) ConfigureProvider(n string, cfg *ResourceConfig) error {
+       c.ConfigureProviderCalled = true
+       c.ConfigureProviderName = n
+       c.ConfigureProviderConfig = cfg
+       return c.ConfigureProviderError
+}
+
+func (c *MockEvalContext) SetProviderConfig(
+       n string, cfg *ResourceConfig) error {
+       c.SetProviderConfigCalled = true
+       c.SetProviderConfigName = n
+       c.SetProviderConfigConfig = cfg
+       return nil
+}
+
+func (c *MockEvalContext) ParentProviderConfig(n string) *ResourceConfig {
+       c.ParentProviderConfigCalled = true
+       c.ParentProviderConfigName = n
+       return c.ParentProviderConfigConfig
+}
+
+func (c *MockEvalContext) ProviderInput(n string) map[string]interface{} {
+       c.ProviderInputCalled = true
+       c.ProviderInputName = n
+       return c.ProviderInputConfig
+}
+
+func (c *MockEvalContext) SetProviderInput(n string, cfg map[string]interface{}) {
+       c.SetProviderInputCalled = true
+       c.SetProviderInputName = n
+       c.SetProviderInputConfig = cfg
+}
+
+func (c *MockEvalContext) InitProvisioner(n string) (ResourceProvisioner, error) {
+       c.InitProvisionerCalled = true
+       c.InitProvisionerName = n
+       return c.InitProvisionerProvisioner, c.InitProvisionerError
+}
+
+func (c *MockEvalContext) Provisioner(n string) ResourceProvisioner {
+       c.ProvisionerCalled = true
+       c.ProvisionerName = n
+       return c.ProvisionerProvisioner
+}
+
+func (c *MockEvalContext) CloseProvisioner(n string) error {
+       c.CloseProvisionerCalled = true
+       c.CloseProvisionerName = n
+       return nil
+}
+
+func (c *MockEvalContext) Interpolate(
+       config *config.RawConfig, resource *Resource) (*ResourceConfig, error) {
+       c.InterpolateCalled = true
+       c.InterpolateConfig = config
+       c.InterpolateResource = resource
+       return c.InterpolateConfigResult, c.InterpolateError
+}
+
+func (c *MockEvalContext) Path() []string {
+       c.PathCalled = true
+       return c.PathPath
+}
+
+func (c *MockEvalContext) SetVariables(n string, vs map[string]interface{}) {
+       c.SetVariablesCalled = true
+       c.SetVariablesModule = n
+       c.SetVariablesVariables = vs
+}
+
+func (c *MockEvalContext) Diff() (*Diff, *sync.RWMutex) {
+       c.DiffCalled = true
+       return c.DiffDiff, c.DiffLock
+}
+
+func (c *MockEvalContext) State() (*State, *sync.RWMutex) {
+       c.StateCalled = true
+       return c.StateState, c.StateLock
+}
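+
+// Editor's sketch (hypothetical, not part of the upstream source): typical
+// test usage, evaluating a node against the mock and then asserting on the
+// recorded fields (e.g. ctx.PathCalled, ctx.ProviderName).
+func exampleMockUsage(n EvalNode) error {
+       ctx := new(MockEvalContext)
+       ctx.PathPath = []string{"root"}
+       _, err := n.Eval(ctx)
+       return err
+}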
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count.go
new file mode 100644 (file)
index 0000000..2ae56a7
--- /dev/null
@@ -0,0 +1,58 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config"
+)
+
+// EvalCountFixZeroOneBoundary is an EvalNode that fixes up the state
+// when there is a resource count with zero/one boundary, i.e. fixing
+// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
+type EvalCountFixZeroOneBoundary struct {
+       Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalCountFixZeroOneBoundary) Eval(ctx EvalContext) (interface{}, error) {
+       // Get the count; it determines whether we're supposed to be adding
+       // the ".0" suffix or trimming it.
+       count, err := n.Resource.Count()
+       if err != nil {
+               return nil, err
+       }
+
+       // Figure out what to look for and what to replace it with
+       hunt := n.Resource.Id()
+       replace := hunt + ".0"
+       if count < 2 {
+               hunt, replace = replace, hunt
+       }
+
+       state, lock := ctx.State()
+
+       // Get a lock so we can access this instance and potentially make
+       // changes to it.
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Look for the module state. If we don't have one, then it doesn't matter.
+       mod := state.ModuleByPath(ctx.Path())
+       if mod == nil {
+               return nil, nil
+       }
+
+       // Look for the resource state. If we don't have one, then it is okay.
+       rs, ok := mod.Resources[hunt]
+       if !ok {
+               return nil, nil
+       }
+
+       // If the replacement key exists, we just keep both
+       if _, ok := mod.Resources[replace]; ok {
+               return nil, nil
+       }
+
+       mod.Resources[replace] = rs
+       delete(mod.Resources, hunt)
+
+       return nil, nil
+}
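+
+// Editor's sketch (hypothetical, not part of the upstream source): the
+// hunt/replace swap above, extracted. A count dropping below 2 moves
+// "aws_instance.foo.0" back to "aws_instance.foo"; otherwise the bare key
+// gains the ".0" suffix.
+func exampleBoundaryRename(count int, id string) (search, replace string) {
+       search, replace = id, id+".0"
+       if count < 2 {
+               search, replace = replace, search
+       }
+       return search, replace
+}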
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_boundary.go
new file mode 100644 (file)
index 0000000..91e2b90
--- /dev/null
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+       "log"
+)
+
+// EvalCountFixZeroOneBoundaryGlobal is an EvalNode that fixes up the state
+// when there is a resource count with zero/one boundary, i.e. fixing
+// a resource named "aws_instance.foo" to "aws_instance.foo.0" and vice-versa.
+//
+// This works on the global state.
+type EvalCountFixZeroOneBoundaryGlobal struct{}
+
+// TODO: test
+func (n *EvalCountFixZeroOneBoundaryGlobal) Eval(ctx EvalContext) (interface{}, error) {
+       // Get the state and lock it since we'll potentially modify it
+       state, lock := ctx.State()
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Prune the state since we require a clean state to work
+       state.prune()
+
+       // Go through each module since the boundaries are restricted to a
+       // module scope.
+       for _, m := range state.Modules {
+               if err := n.fixModule(m); err != nil {
+                       return nil, err
+               }
+       }
+
+       return nil, nil
+}
+
+func (n *EvalCountFixZeroOneBoundaryGlobal) fixModule(m *ModuleState) error {
+       // Counts keeps track of keys and their counts
+       counts := make(map[string]int)
+       for k := range m.Resources {
+               // Parse the key
+               key, err := ParseResourceStateKey(k)
+               if err != nil {
+                       return err
+               }
+
+               // Set the index to -1 so that we can keep count
+               key.Index = -1
+
+               // Increment
+               counts[key.String()]++
+       }
+
+       // Go through the counts and do the fixup for each resource
+       for raw, count := range counts {
+               // Search and replace this resource
+               search := raw
+               replace := raw + ".0"
+               if count < 2 {
+                       search, replace = replace, search
+               }
+               log.Printf("[TRACE] EvalCountFixZeroOneBoundaryGlobal: count %d, search %q, replace %q", count, search, replace)
+
+               // Look for the resource state. If we don't have one, then it is okay.
+               rs, ok := m.Resources[search]
+               if !ok {
+                       continue
+               }
+
+               // If the replacement key exists, we just keep both
+               if _, ok := m.Resources[replace]; ok {
+                       continue
+               }
+
+               m.Resources[replace] = rs
+               delete(m.Resources, search)
+       }
+
+       return nil
+}
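
Before doing the per-name swap, fixModule tallies how many instances share each index-stripped key. A self-contained approximation of that tally, with a simplified parser standing in for ParseResourceStateKey (the helper name baseName is invented for this sketch):

    package main

    import (
            "fmt"
            "strconv"
            "strings"
    )

    // baseName strips a trailing numeric index, so "aws_instance.foo.1"
    // and "aws_instance.foo.0" both count toward "aws_instance.foo".
    func baseName(key string) string {
            parts := strings.Split(key, ".")
            if len(parts) > 2 {
                    if _, err := strconv.Atoi(parts[len(parts)-1]); err == nil {
                            return strings.Join(parts[:len(parts)-1], ".")
                    }
            }
            return key
    }

    func main() {
            keys := []string{"aws_instance.foo.0", "aws_instance.foo.1", "aws_instance.bar"}
            counts := make(map[string]int)
            for _, k := range keys {
                    counts[baseName(k)]++
            }
            fmt.Println(counts) // map[aws_instance.bar:1 aws_instance.foo:2]
    }

A count of one triggers the ".0" to bare-name rename; two or more triggers the reverse, matching the swap in the loop above.
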
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go b/vendor/github.com/hashicorp/terraform/terraform/eval_count_computed.go
new file mode 100644 (file)
index 0000000..54a8333
--- /dev/null
@@ -0,0 +1,25 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// EvalCountCheckComputed is an EvalNode that checks whether a resource
+// count is computed and errors if so. This can happen across a module
+// boundary, and we don't yet support it.
+type EvalCountCheckComputed struct {
+       Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalCountCheckComputed) Eval(ctx EvalContext) (interface{}, error) {
+       if n.Resource.RawCount.Value() == unknownValue() {
+               return nil, fmt.Errorf(
+                       "%s: value of 'count' cannot be computed",
+                       n.Resource.Id())
+       }
+
+       return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go
new file mode 100644 (file)
index 0000000..6f09526
--- /dev/null
@@ -0,0 +1,478 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "strings"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// EvalCompareDiff is an EvalNode implementation that compares two diffs
+// and errors if the diffs are not equal.
+type EvalCompareDiff struct {
+       Info     *InstanceInfo
+       One, Two **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {
+       one, two := *n.One, *n.Two
+
+       // If either is nil, treat it as empty
+       if one == nil {
+               one = new(InstanceDiff)
+               one.init()
+       }
+       if two == nil {
+               two = new(InstanceDiff)
+               two.init()
+       }
+       oneId, _ := one.GetAttribute("id")
+       twoId, _ := two.GetAttribute("id")
+       one.DelAttribute("id")
+       two.DelAttribute("id")
+       defer func() {
+               if oneId != nil {
+                       one.SetAttribute("id", oneId)
+               }
+               if twoId != nil {
+                       two.SetAttribute("id", twoId)
+               }
+       }()
+
+       if same, reason := one.Same(two); !same {
+               log.Printf("[ERROR] %s: diffs didn't match", n.Info.Id)
+               log.Printf("[ERROR] %s: reason: %s", n.Info.Id, reason)
+               log.Printf("[ERROR] %s: diff one: %#v", n.Info.Id, one)
+               log.Printf("[ERROR] %s: diff two: %#v", n.Info.Id, two)
+               return nil, fmt.Errorf(
+                       "%s: diffs didn't match during apply. This is a bug with "+
+                               "Terraform and should be reported as a GitHub Issue.\n"+
+                               "\n"+
+                               "Please include the following information in your report:\n"+
+                               "\n"+
+                               "    Terraform Version: %s\n"+
+                               "    Resource ID: %s\n"+
+                               "    Mismatch reason: %s\n"+
+                               "    Diff One (usually from plan): %#v\n"+
+                               "    Diff Two (usually from apply): %#v\n"+
+                               "\n"+
+                               "Also include as much context as you can about your config, state, "+
+                               "and the steps you performed to trigger this error.\n",
+                       n.Info.Id, Version, n.Info.Id, reason, one, two)
+       }
+
+       return nil, nil
+}
+
+// EvalDiff is an EvalNode implementation that computes a diff for
+// a resource.
+type EvalDiff struct {
+       Name        string
+       Info        *InstanceInfo
+       Config      **ResourceConfig
+       Provider    *ResourceProvider
+       Diff        **InstanceDiff
+       State       **InstanceState
+       OutputDiff  **InstanceDiff
+       OutputState **InstanceState
+
+       // Resource is needed to fetch the ignore_changes list so we can
+       // filter user-requested ignored attributes from the diff.
+       Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {
+       state := *n.State
+       config := *n.Config
+       provider := *n.Provider
+
+       // Call pre-diff hook
+       err := ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PreDiff(n.Info, state)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       // The state for the diff must never be nil
+       diffState := state
+       if diffState == nil {
+               diffState = new(InstanceState)
+       }
+       diffState.init()
+
+       // Diff!
+       diff, err := provider.Diff(n.Info, diffState, config)
+       if err != nil {
+               return nil, err
+       }
+       if diff == nil {
+               diff = new(InstanceDiff)
+       }
+
+       // Set DestroyDeposed if we have deposed instances
+       _, err = readInstanceFromState(ctx, n.Name, nil, func(rs *ResourceState) (*InstanceState, error) {
+               if len(rs.Deposed) > 0 {
+                       diff.DestroyDeposed = true
+               }
+
+               return nil, nil
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       // Preserve the DestroyTainted flag
+       if n.Diff != nil {
+               diff.SetTainted((*n.Diff).GetDestroyTainted())
+       }
+
+       // Require a destroy if there is an ID and it requires new.
+       if diff.RequiresNew() && state != nil && state.ID != "" {
+               diff.SetDestroy(true)
+       }
+
+       // If we're creating a new resource, compute its ID
+       if diff.RequiresNew() || state == nil || state.ID == "" {
+               var oldID string
+               if state != nil {
+                       oldID = state.Attributes["id"]
+               }
+
+               // Add diff to compute new ID
+               diff.init()
+               diff.SetAttribute("id", &ResourceAttrDiff{
+                       Old:         oldID,
+                       NewComputed: true,
+                       RequiresNew: true,
+                       Type:        DiffAttrOutput,
+               })
+       }
+
+       // filter out ignored resources
+       if err := n.processIgnoreChanges(diff); err != nil {
+               return nil, err
+       }
+
+       // Call post-refresh hook
+       err = ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PostDiff(n.Info, diff)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       // Update our output
+       *n.OutputDiff = diff
+
+       // Update the state if we care
+       if n.OutputState != nil {
+               *n.OutputState = state
+
+               // Merge our state so that the state is updated with our plan
+               if !diff.Empty() && n.OutputState != nil {
+                       *n.OutputState = state.MergeDiff(diff)
+               }
+       }
+
+       return nil, nil
+}
+
+func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
+       if diff == nil || n.Resource == nil || n.Resource.Id() == "" {
+               return nil
+       }
+       ignoreChanges := n.Resource.Lifecycle.IgnoreChanges
+
+       if len(ignoreChanges) == 0 {
+               return nil
+       }
+
+       // If we're just creating the resource, we shouldn't alter the
+       // Diff at all
+       if diff.ChangeType() == DiffCreate {
+               return nil
+       }
+
+       // If the resource has been tainted then we don't process ignore changes
+       // since we MUST recreate the entire resource.
+       if diff.GetDestroyTainted() {
+               return nil
+       }
+
+       attrs := diff.CopyAttributes()
+
+       // get the complete set of keys we want to ignore
+       ignorableAttrKeys := make(map[string]bool)
+       for _, ignoredKey := range ignoreChanges {
+               for k := range attrs {
+                       if ignoredKey == "*" || strings.HasPrefix(k, ignoredKey) {
+                               ignorableAttrKeys[k] = true
+                       }
+               }
+       }
+
+       // If the resource was being destroyed, check to see if we can ignore the
+       // reason for it being destroyed.
+       if diff.GetDestroy() {
+               for k, v := range attrs {
+                       if k == "id" {
+                               // id will always be changed if we intended to replace this instance
+                               continue
+                       }
+                       if v.Empty() || v.NewComputed {
+                               continue
+                       }
+
+                       // If any RequiresNew attribute isn't ignored, we need to keep the diff
+                       // as-is to be able to replace the resource.
+                       if v.RequiresNew && !ignorableAttrKeys[k] {
+                               return nil
+                       }
+               }
+
+               // Now that we know that we aren't replacing the instance, we can filter
+               // out all the empty and computed attributes. There may be a bunch of
+               // extraneous attribute diffs for the other non-requires-new attributes
+               // going from "" -> "configval" or "" -> "<computed>".
+               // We must make sure any flatmapped containers are filtered (or not)
+               // as a whole.
+               containers := groupContainers(diff)
+               keep := map[string]bool{}
+               for _, v := range containers {
+                       if v.keepDiff() {
+                               // At least one key has changes, so list all the sibling keys
+                               // to keep in the diff.
+                               for k := range v {
+                                       keep[k] = true
+                               }
+                       }
+               }
+
+               for k, v := range attrs {
+                       if (v.Empty() || v.NewComputed) && !keep[k] {
+                               ignorableAttrKeys[k] = true
+                       }
+               }
+       }
+
+       // Here we undo the two reactions to RequireNew in EvalDiff - the "id"
+       // attribute diff and the Destroy boolean field
+       log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " +
+               "because after ignore_changes, this diff no longer requires replacement")
+       diff.DelAttribute("id")
+       diff.SetDestroy(false)
+
+       // If we didn't hit any of our early exit conditions, we can filter the diff.
+       for k := range ignorableAttrKeys {
+               log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s",
+                       n.Resource.Id(), k)
+               diff.DelAttribute(k)
+       }
+
+       return nil
+}
+
+// a group of key-*ResourceAttrDiff pairs from the same flatmapped container
+type flatAttrDiff map[string]*ResourceAttrDiff
+
+// we need to keep all keys if any of them have a diff
+func (f flatAttrDiff) keepDiff() bool {
+       for _, v := range f {
+               if !v.Empty() && !v.NewComputed {
+                       return true
+               }
+       }
+       return false
+}
+
+// sets, lists and maps need to be compared for diff inclusion as a whole, so
+// group the flatmapped keys together for easier comparison.
+func groupContainers(d *InstanceDiff) map[string]flatAttrDiff {
+       isIndex := multiVal.MatchString
+       containers := map[string]flatAttrDiff{}
+       attrs := d.CopyAttributes()
+       // we need to loop once to find the index key
+       for k := range attrs {
+               if isIndex(k) {
+                       // add the key, always including the final dot to fully qualify it
+                       containers[k[:len(k)-1]] = flatAttrDiff{}
+               }
+       }
+
+       // loop again to find all the sub keys
+       for prefix, values := range containers {
+               for k, attrDiff := range attrs {
+                       // we include the index value as well, since it could be part of the diff
+                       if strings.HasPrefix(k, prefix) {
+                               values[k] = attrDiff
+                       }
+               }
+       }
+
+       return containers
+}
+
+// EvalDiffDestroy is an EvalNode implementation that returns a plain
+// destroy diff.
+type EvalDiffDestroy struct {
+       Info   *InstanceInfo
+       State  **InstanceState
+       Output **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {
+       state := *n.State
+
+       // If there is no state or we don't have an ID, we're already destroyed
+       if state == nil || state.ID == "" {
+               return nil, nil
+       }
+
+       // Call pre-diff hook
+       err := ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PreDiff(n.Info, state)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       // The diff
+       diff := &InstanceDiff{Destroy: true}
+
+       // Call post-diff hook
+       err = ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PostDiff(n.Info, diff)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       // Update our output
+       *n.Output = diff
+
+       return nil, nil
+}
+
+// EvalDiffDestroyModule is an EvalNode implementation that marks a module
+// as destroyed in the full diff.
+type EvalDiffDestroyModule struct {
+       Path []string
+}
+
+// TODO: test
+func (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {
+       diff, lock := ctx.Diff()
+
+       // Acquire the lock so that we can do this safely concurrently
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Write the diff
+       modDiff := diff.ModuleByPath(n.Path)
+       if modDiff == nil {
+               modDiff = diff.AddModule(n.Path)
+       }
+       modDiff.Destroy = true
+
+       return nil, nil
+}
+
+// EvalFilterDiff is an EvalNode implementation that filters the diff
+// according to some filter.
+type EvalFilterDiff struct {
+       // Input and output
+       Diff   **InstanceDiff
+       Output **InstanceDiff
+
+       // Destroy, if true, will only include a destroy diff if it is set.
+       Destroy bool
+}
+
+func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {
+       if *n.Diff == nil {
+               return nil, nil
+       }
+
+       input := *n.Diff
+       result := new(InstanceDiff)
+
+       if n.Destroy {
+               if input.GetDestroy() || input.RequiresNew() {
+                       result.SetDestroy(true)
+               }
+       }
+
+       if n.Output != nil {
+               *n.Output = result
+       }
+
+       return nil, nil
+}
+
+// EvalReadDiff is an EvalNode implementation that reads a resource's diff
+// out of the full diff.
+type EvalReadDiff struct {
+       Name string
+       Diff **InstanceDiff
+}
+
+func (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {
+       diff, lock := ctx.Diff()
+
+       // Acquire the lock so that we can do this safely concurrently
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Write the diff
+       modDiff := diff.ModuleByPath(ctx.Path())
+       if modDiff == nil {
+               return nil, nil
+       }
+
+       *n.Diff = modDiff.Resources[n.Name]
+
+       return nil, nil
+}
+
+// EvalWriteDiff is an EvalNode implementation that writes the diff to
+// the full diff.
+type EvalWriteDiff struct {
+       Name string
+       Diff **InstanceDiff
+}
+
+// TODO: test
+func (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {
+       diff, lock := ctx.Diff()
+
+       // The diff to write. If it's empty, we write nil instead.
+       var diffVal *InstanceDiff
+       if n.Diff != nil {
+               diffVal = *n.Diff
+       }
+       if diffVal.Empty() {
+               diffVal = nil
+       }
+
+       // Acquire the lock so that we can do this safely concurrently
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Write the diff
+       modDiff := diff.ModuleByPath(ctx.Path())
+       if modDiff == nil {
+               modDiff = diff.AddModule(ctx.Path())
+       }
+       if diffVal != nil {
+               modDiff.Resources[n.Name] = diffVal
+       } else {
+               delete(modDiff.Resources, n.Name)
+       }
+
+       return nil, nil
+}
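
groupContainers above leans on flatmap conventions: a set, list, or map is stored as "name.#" or "name.%" plus "name.<key>" element entries, so filtering must treat the whole prefix group as a unit. A standalone sketch of the same grouping, with a plain suffix check standing in for the multiVal regexp:

    package main

    import (
            "fmt"
            "strings"
    )

    func main() {
            // attrs mimics a flatmapped diff; container counts end in ".#" or ".%".
            attrs := map[string]string{
                    "tags.%":        "2",
                    "tags.Name":     "web",
                    "tags.Env":      "prod",
                    "instance_type": "t2.micro",
            }

            containers := map[string]map[string]string{}
            for k := range attrs {
                    if strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%") {
                            // Keep the trailing dot so "tags." can't match "tagsfoo".
                            containers[k[:len(k)-1]] = map[string]string{}
                    }
            }

            // Second pass: collect every key sharing a container's prefix,
            // including the count key itself.
            for prefix, values := range containers {
                    for k, v := range attrs {
                            if strings.HasPrefix(k, prefix) {
                                    values[k] = v
                            }
                    }
            }
            fmt.Println(containers["tags."]) // all three tags.* entries together
    }
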
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_error.go b/vendor/github.com/hashicorp/terraform/terraform/eval_error.go
new file mode 100644 (file)
index 0000000..470f798
--- /dev/null
@@ -0,0 +1,20 @@
+package terraform
+
+// EvalReturnError is an EvalNode implementation that returns an
+// error if it is present.
+//
+// This is useful for scenarios where an error has been captured by
+// another EvalNode (like EvalApply) for special EvalTree-based error
+// handling, and that handling has completed, so the error should be
+// returned normally.
+type EvalReturnError struct {
+       Error *error
+}
+
+func (n *EvalReturnError) Eval(ctx EvalContext) (interface{}, error) {
+       if n.Error == nil {
+               return nil, nil
+       }
+
+       return nil, *n.Error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter.go
new file mode 100644 (file)
index 0000000..711c625
--- /dev/null
@@ -0,0 +1,25 @@
+package terraform
+
+// EvalNodeFilterFunc is the callback used to replace a node with
+// another node. To not do the replacement, just return the input node.
+type EvalNodeFilterFunc func(EvalNode) EvalNode
+
+// EvalNodeFilterable is an interface that can be implemented by
+// EvalNodes to allow filtering of sub-elements. Note that this isn't
+// a common thing to implement and you probably don't need it.
+type EvalNodeFilterable interface {
+       EvalNode
+       Filter(EvalNodeFilterFunc)
+}
+
+// EvalFilter runs the filter on the given node and returns the
+// final filtered value. This should be called rather than checking
+// the EvalNode directly since this will properly handle EvalNodeFilterables.
+func EvalFilter(node EvalNode, fn EvalNodeFilterFunc) EvalNode {
+       if f, ok := node.(EvalNodeFilterable); ok {
+               f.Filter(fn)
+               return node
+       }
+
+       return fn(node)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go b/vendor/github.com/hashicorp/terraform/terraform/eval_filter_operation.go
new file mode 100644 (file)
index 0000000..1a55f02
--- /dev/null
@@ -0,0 +1,49 @@
+package terraform
+
+// EvalNodeOpFilterable is an interface that EvalNodes can implement
+// to be filterable by the operation that is being run on Terraform.
+type EvalNodeOpFilterable interface {
+       IncludeInOp(walkOperation) bool
+}
+
+// EvalNodeFilterOp returns a filter function that filters nodes that
+// include themselves in specific operations.
+func EvalNodeFilterOp(op walkOperation) EvalNodeFilterFunc {
+       return func(n EvalNode) EvalNode {
+               include := true
+               if of, ok := n.(EvalNodeOpFilterable); ok {
+                       include = of.IncludeInOp(op)
+               }
+               if include {
+                       return n
+               }
+
+               return EvalNoop{}
+       }
+}
+
+// EvalOpFilter is an EvalNode implementation that is a proxy to
+// another node but filters based on the operation.
+type EvalOpFilter struct {
+       // Ops is the list of operations to include this node in.
+       Ops []walkOperation
+
+       // Node is the node to execute
+       Node EvalNode
+}
+
+// TODO: test
+func (n *EvalOpFilter) Eval(ctx EvalContext) (interface{}, error) {
+       return EvalRaw(n.Node, ctx)
+}
+
+// EvalNodeOpFilterable impl.
+func (n *EvalOpFilter) IncludeInOp(op walkOperation) bool {
+       for _, v := range n.Ops {
+               if v == op {
+                       return true
+               }
+       }
+
+       return false
+}
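
Operation filtering is just a membership test over the walk type: nodes that opt out are swapped for EvalNoop. A self-contained sketch, with a local walkOperation enum standing in for Terraform's unexported one (the constant names are assumptions):

    package main

    import "fmt"

    type walkOperation int

    const (
            walkPlan walkOperation = iota
            walkApply
    )

    // opNode mirrors the EvalNodeOpFilterable contract: it reports
    // whether it should run during a given operation.
    type opNode struct {
            ops []walkOperation
    }

    func (n opNode) includeInOp(op walkOperation) bool {
            for _, v := range n.ops {
                    if v == op {
                            return true
                    }
            }
            return false
    }

    func main() {
            n := opNode{ops: []walkOperation{walkApply}}
            fmt.Println(n.includeInOp(walkPlan))  // false: replaced with a no-op
            fmt.Println(n.includeInOp(walkApply)) // true: evaluated as usual
    }
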
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_if.go b/vendor/github.com/hashicorp/terraform/terraform/eval_if.go
new file mode 100644 (file)
index 0000000..d6b46a1
--- /dev/null
@@ -0,0 +1,26 @@
+package terraform
+
+// EvalIf is an EvalNode that runs Then or Else based on a condition.
+type EvalIf struct {
+       If   func(EvalContext) (bool, error)
+       Then EvalNode
+       Else EvalNode
+}
+
+// TODO: test
+func (n *EvalIf) Eval(ctx EvalContext) (interface{}, error) {
+       yes, err := n.If(ctx)
+       if err != nil {
+               return nil, err
+       }
+
+       if yes {
+               return EvalRaw(n.Then, ctx)
+       }
+
+       if n.Else != nil {
+               return EvalRaw(n.Else, ctx)
+       }
+
+       return nil, nil
+}
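
A reduced sketch of the conditional node, using plain funcs in place of EvalNode to show the contract (Else is optional, exactly as above):

    package main

    import "fmt"

    type evalIf struct {
            cond func() (bool, error)
            then func()
            els  func() // optional else branch
    }

    func (n evalIf) eval() error {
            yes, err := n.cond()
            if err != nil {
                    return err
            }
            if yes {
                    n.then()
            } else if n.els != nil {
                    n.els()
            }
            return nil
    }

    func main() {
            n := evalIf{
                    cond: func() (bool, error) { return true, nil },
                    then: func() { fmt.Println("then branch") },
            }
            _ = n.eval() // prints "then branch"; a missing else is fine
    }
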
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_import_state.go
new file mode 100644 (file)
index 0000000..62cc581
--- /dev/null
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+       "fmt"
+)
+
+// EvalImportState is an EvalNode implementation that performs an
+// ImportState operation on a provider. This will return the imported
+// states but won't modify any actual state.
+type EvalImportState struct {
+       Provider *ResourceProvider
+       Info     *InstanceInfo
+       Id       string
+       Output   *[]*InstanceState
+}
+
+// TODO: test
+func (n *EvalImportState) Eval(ctx EvalContext) (interface{}, error) {
+       provider := *n.Provider
+
+       {
+               // Call pre-import hook
+               err := ctx.Hook(func(h Hook) (HookAction, error) {
+                       return h.PreImportState(n.Info, n.Id)
+               })
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // Import!
+       state, err := provider.ImportState(n.Info, n.Id)
+       if err != nil {
+               return nil, fmt.Errorf(
+                       "import %s (id: %s): %s", n.Info.HumanId(), n.Id, err)
+       }
+
+       if n.Output != nil {
+               *n.Output = state
+       }
+
+       {
+               // Call post-import hook
+               err := ctx.Hook(func(h Hook) (HookAction, error) {
+                       return h.PostImportState(n.Info, state)
+               })
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return nil, nil
+}
+
+// EvalImportStateVerify verifies the state after ImportState and
+// after the refresh to make sure it is non-nil and valid.
+type EvalImportStateVerify struct {
+       Info  *InstanceInfo
+       Id    string
+       State **InstanceState
+}
+
+// TODO: test
+func (n *EvalImportStateVerify) Eval(ctx EvalContext) (interface{}, error) {
+       state := *n.State
+       if state.Empty() {
+               return nil, fmt.Errorf(
+                       "import %s (id: %s): Terraform detected a resource with this ID doesn't\n"+
+                               "exist. Please verify the ID is correct. You cannot import non-existent\n"+
+                               "resources using Terraform import.",
+                       n.Info.HumanId(),
+                       n.Id)
+       }
+
+       return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_interpolate.go
new file mode 100644 (file)
index 0000000..6825ff5
--- /dev/null
@@ -0,0 +1,24 @@
+package terraform
+
+import "github.com/hashicorp/terraform/config"
+
+// EvalInterpolate is an EvalNode implementation that takes a raw
+// configuration and interpolates it.
+type EvalInterpolate struct {
+       Config   *config.RawConfig
+       Resource *Resource
+       Output   **ResourceConfig
+}
+
+func (n *EvalInterpolate) Eval(ctx EvalContext) (interface{}, error) {
+       rc, err := ctx.Interpolate(n.Config, n.Resource)
+       if err != nil {
+               return nil, err
+       }
+
+       if n.Output != nil {
+               *n.Output = rc
+       }
+
+       return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go b/vendor/github.com/hashicorp/terraform/terraform/eval_noop.go
new file mode 100644 (file)
index 0000000..f4bc822
--- /dev/null
@@ -0,0 +1,8 @@
+package terraform
+
+// EvalNoop is an EvalNode that does nothing.
+type EvalNoop struct{}
+
+func (EvalNoop) Eval(EvalContext) (interface{}, error) {
+       return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_output.go b/vendor/github.com/hashicorp/terraform/terraform/eval_output.go
new file mode 100644 (file)
index 0000000..cf61781
--- /dev/null
@@ -0,0 +1,119 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// EvalDeleteOutput is an EvalNode implementation that deletes an output
+// from the state.
+type EvalDeleteOutput struct {
+       Name string
+}
+
+// TODO: test
+func (n *EvalDeleteOutput) Eval(ctx EvalContext) (interface{}, error) {
+       state, lock := ctx.State()
+       if state == nil {
+               return nil, nil
+       }
+
+       // Get a write lock so we can access this instance
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Look for the module state. If we don't have one, there's nothing to delete.
+       mod := state.ModuleByPath(ctx.Path())
+       if mod == nil {
+               return nil, nil
+       }
+
+       delete(mod.Outputs, n.Name)
+
+       return nil, nil
+}
+
+// EvalWriteOutput is an EvalNode implementation that writes the output
+// for the given name to the current state.
+type EvalWriteOutput struct {
+       Name      string
+       Sensitive bool
+       Value     *config.RawConfig
+}
+
+// TODO: test
+func (n *EvalWriteOutput) Eval(ctx EvalContext) (interface{}, error) {
+       cfg, err := ctx.Interpolate(n.Value, nil)
+       if err != nil {
+               // Log error but continue anyway
+               log.Printf("[WARN] Output interpolation %q failed: %s", n.Name, err)
+       }
+
+       state, lock := ctx.State()
+       if state == nil {
+               return nil, fmt.Errorf("cannot write state to nil state")
+       }
+
+       // Get a write lock so we can access this instance
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Look for the module state. If we don't have one, create it.
+       mod := state.ModuleByPath(ctx.Path())
+       if mod == nil {
+               mod = state.AddModule(ctx.Path())
+       }
+
+       // Get the value from the config
+       var valueRaw interface{} = config.UnknownVariableValue
+       if cfg != nil {
+               var ok bool
+               valueRaw, ok = cfg.Get("value")
+               if !ok {
+                       valueRaw = ""
+               }
+               if cfg.IsComputed("value") {
+                       valueRaw = config.UnknownVariableValue
+               }
+       }
+
+       switch valueTyped := valueRaw.(type) {
+       case string:
+               mod.Outputs[n.Name] = &OutputState{
+                       Type:      "string",
+                       Sensitive: n.Sensitive,
+                       Value:     valueTyped,
+               }
+       case []interface{}:
+               mod.Outputs[n.Name] = &OutputState{
+                       Type:      "list",
+                       Sensitive: n.Sensitive,
+                       Value:     valueTyped,
+               }
+       case map[string]interface{}:
+               mod.Outputs[n.Name] = &OutputState{
+                       Type:      "map",
+                       Sensitive: n.Sensitive,
+                       Value:     valueTyped,
+               }
+       case []map[string]interface{}:
+               // an HCL map is multi-valued, so if this was read out of a config the
+               // map may still be in a slice.
+               if len(valueTyped) == 1 {
+                       mod.Outputs[n.Name] = &OutputState{
+                               Type:      "map",
+                               Sensitive: n.Sensitive,
+                               Value:     valueTyped[0],
+                       }
+                       break
+               }
+               return nil, fmt.Errorf("output %s type (%T) with %d values not valid for type map",
+                       n.Name, valueTyped, len(valueTyped))
+       default:
+               return nil, fmt.Errorf("output %s is not a valid type (%T)\n", n.Name, valueTyped)
+       }
+
+       return nil, nil
+}
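
The type switch is the subtle part: outputs are persisted as string, list, or map, and a map parsed from HCL can arrive wrapped in a one-element slice that must be unwrapped. A standalone sketch of that normalization (classify is an invented helper name):

    package main

    import "fmt"

    // classify mirrors the output type switch: single-element
    // []map[string]interface{} values collapse to a plain map.
    func classify(v interface{}) (string, interface{}, error) {
            switch t := v.(type) {
            case string:
                    return "string", t, nil
            case []interface{}:
                    return "list", t, nil
            case map[string]interface{}:
                    return "map", t, nil
            case []map[string]interface{}:
                    if len(t) == 1 {
                            return "map", t[0], nil
                    }
                    return "", nil, fmt.Errorf("%d values not valid for type map", len(t))
            default:
                    return "", nil, fmt.Errorf("not a valid type (%T)", t)
            }
    }

    func main() {
            typ, val, _ := classify([]map[string]interface{}{{"a": "b"}})
            fmt.Println(typ, val) // map map[a:b]
    }
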
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider.go
new file mode 100644 (file)
index 0000000..092fd18
--- /dev/null
@@ -0,0 +1,164 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// EvalSetProviderConfig sets the parent configuration for a provider
+// without configuring that provider, validating it, etc.
+type EvalSetProviderConfig struct {
+       Provider string
+       Config   **ResourceConfig
+}
+
+func (n *EvalSetProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
+       return nil, ctx.SetProviderConfig(n.Provider, *n.Config)
+}
+
+// EvalBuildProviderConfig outputs a *ResourceConfig that is properly
+// merged with parent and input configurations on top of the file config.
+type EvalBuildProviderConfig struct {
+       Provider string
+       Config   **ResourceConfig
+       Output   **ResourceConfig
+}
+
+func (n *EvalBuildProviderConfig) Eval(ctx EvalContext) (interface{}, error) {
+       cfg := *n.Config
+
+       // If we have a configuration set, then merge that in
+       if input := ctx.ProviderInput(n.Provider); input != nil {
+               // "input" is a map of the subset of config values that were known
+               // during the input walk, set by EvalInputProvider. Note that
+               // in particular it does *not* include attributes that had
+               // computed values at input time; those appear *only* in
+               // "cfg" here.
+               rc, err := config.NewRawConfig(input)
+               if err != nil {
+                       return nil, err
+               }
+
+               merged := cfg.raw.Merge(rc)
+               cfg = NewResourceConfig(merged)
+       }
+
+       // Get the parent configuration if there is one
+       if parent := ctx.ParentProviderConfig(n.Provider); parent != nil {
+               merged := cfg.raw.Merge(parent.raw)
+               cfg = NewResourceConfig(merged)
+       }
+
+       *n.Output = cfg
+       return nil, nil
+}
+
+// EvalConfigProvider is an EvalNode implementation that configures
+// a provider that is already initialized and retrieved.
+type EvalConfigProvider struct {
+       Provider string
+       Config   **ResourceConfig
+}
+
+func (n *EvalConfigProvider) Eval(ctx EvalContext) (interface{}, error) {
+       return nil, ctx.ConfigureProvider(n.Provider, *n.Config)
+}
+
+// EvalInitProvider is an EvalNode implementation that initializes a provider
+// and returns nothing. The provider can be retrieved again with the
+// EvalGetProvider node.
+type EvalInitProvider struct {
+       Name string
+}
+
+func (n *EvalInitProvider) Eval(ctx EvalContext) (interface{}, error) {
+       return ctx.InitProvider(n.Name)
+}
+
+// EvalCloseProvider is an EvalNode implementation that closes provider
+// connections that aren't needed anymore.
+type EvalCloseProvider struct {
+       Name string
+}
+
+func (n *EvalCloseProvider) Eval(ctx EvalContext) (interface{}, error) {
+       ctx.CloseProvider(n.Name)
+       return nil, nil
+}
+
+// EvalGetProvider is an EvalNode implementation that retrieves an already
+// initialized provider instance for the given name.
+type EvalGetProvider struct {
+       Name   string
+       Output *ResourceProvider
+}
+
+func (n *EvalGetProvider) Eval(ctx EvalContext) (interface{}, error) {
+       result := ctx.Provider(n.Name)
+       if result == nil {
+               return nil, fmt.Errorf("provider %s not initialized", n.Name)
+       }
+
+       if n.Output != nil {
+               *n.Output = result
+       }
+
+       return nil, nil
+}
+
+// EvalInputProvider is an EvalNode implementation that asks for input
+// for the given provider configurations.
+type EvalInputProvider struct {
+       Name     string
+       Provider *ResourceProvider
+       Config   **ResourceConfig
+}
+
+func (n *EvalInputProvider) Eval(ctx EvalContext) (interface{}, error) {
+       // If we already configured this provider, then don't do this again
+       if v := ctx.ProviderInput(n.Name); v != nil {
+               return nil, nil
+       }
+
+       rc := *n.Config
+
+       // Wrap the input into a namespace
+       input := &PrefixUIInput{
+               IdPrefix:    fmt.Sprintf("provider.%s", n.Name),
+               QueryPrefix: fmt.Sprintf("provider.%s.", n.Name),
+               UIInput:     ctx.Input(),
+       }
+
+       // Go through each provider and capture the input necessary
+       // to satisfy it.
+       config, err := (*n.Provider).Input(input, rc)
+       if err != nil {
+               return nil, fmt.Errorf(
+                       "Error configuring %s: %s", n.Name, err)
+       }
+
+       // Set the input that we received so that child modules don't attempt
+       // to ask for input again.
+       if config != nil && len(config.Config) > 0 {
+               // This repository of provider input results on the context doesn't
+               // retain config.ComputedKeys, so we need to filter those out here
+               // in order that later users of this data won't try to use the unknown
+               // value placeholder as if it were a literal value. This map is just
+               // of known values we've been able to complete so far; dynamic stuff
+               // will be merged in by EvalBuildProviderConfig on subsequent
+               // (post-input) walks.
+               confMap := config.Config
+               if config.ComputedKeys != nil {
+                       for _, key := range config.ComputedKeys {
+                               delete(confMap, key)
+                       }
+               }
+
+               ctx.SetProviderInput(n.Name, confMap)
+       } else {
+               ctx.SetProviderInput(n.Name, map[string]interface{}{})
+       }
+
+       return nil, nil
+}
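
The merge order in EvalBuildProviderConfig establishes precedence: saved input answers are merged over the file config, and any parent provider config is merged over the result, so the parent wins on conflicting keys. A sketch of that precedence with plain maps standing in for RawConfig (later merges overwrite earlier values, as with RawConfig.Merge):

    package main

    import "fmt"

    // merge overlays b on top of a; keys in b win.
    func merge(a, b map[string]interface{}) map[string]interface{} {
            out := map[string]interface{}{}
            for k, v := range a {
                    out[k] = v
            }
            for k, v := range b {
                    out[k] = v
            }
            return out
    }

    func main() {
            fileCfg := map[string]interface{}{"region": "us-east-1", "profile": "dev"}
            input := map[string]interface{}{"region": "us-west-2"} // from the input walk
            parent := map[string]interface{}{"profile": "prod"}    // inherited config

            cfg := merge(merge(fileCfg, input), parent)
            fmt.Println(cfg) // map[profile:prod region:us-west-2]
    }
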
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provisioner.go
new file mode 100644 (file)
index 0000000..89579c0
--- /dev/null
@@ -0,0 +1,47 @@
+package terraform
+
+import (
+       "fmt"
+)
+
+// EvalInitProvisioner is an EvalNode implementation that initializes a provisioner
+// and returns nothing. The provisioner can be retrieved again with the
+// EvalGetProvisioner node.
+type EvalInitProvisioner struct {
+       Name string
+}
+
+func (n *EvalInitProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+       return ctx.InitProvisioner(n.Name)
+}
+
+// EvalCloseProvisioner is an EvalNode implementation that closes provisioner
+// connections that aren't needed anymore.
+type EvalCloseProvisioner struct {
+       Name string
+}
+
+func (n *EvalCloseProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+       ctx.CloseProvisioner(n.Name)
+       return nil, nil
+}
+
+// EvalGetProvisioner is an EvalNode implementation that retrieves an already
+// initialized provisioner instance for the given name.
+type EvalGetProvisioner struct {
+       Name   string
+       Output *ResourceProvisioner
+}
+
+func (n *EvalGetProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+       result := ctx.Provisioner(n.Name)
+       if result == nil {
+               return nil, fmt.Errorf("provisioner %s not initialized", n.Name)
+       }
+
+       if n.Output != nil {
+               *n.Output = result
+       }
+
+       return result, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go b/vendor/github.com/hashicorp/terraform/terraform/eval_read_data.go
new file mode 100644 (file)
index 0000000..fb85a28
--- /dev/null
@@ -0,0 +1,139 @@
+package terraform
+
+import (
+       "fmt"
+)
+
+// EvalReadDataDiff is an EvalNode implementation that executes a data
+// resource's ReadDataDiff method to discover what attributes it exports.
+type EvalReadDataDiff struct {
+       Provider    *ResourceProvider
+       Output      **InstanceDiff
+       OutputState **InstanceState
+       Config      **ResourceConfig
+       Info        *InstanceInfo
+
+       // Set Previous when re-evaluating diff during apply, to ensure that
+       // the "Destroy" flag is preserved.
+       Previous **InstanceDiff
+}
+
+func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) {
+       // TODO: test
+
+       err := ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PreDiff(n.Info, nil)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       var diff *InstanceDiff
+
+       if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() {
+               // If we're re-diffing for a diff that was already planning to
+               // destroy, then we'll just continue with that plan.
+               diff = &InstanceDiff{Destroy: true}
+       } else {
+               provider := *n.Provider
+               config := *n.Config
+
+               var err error
+               diff, err = provider.ReadDataDiff(n.Info, config)
+               if err != nil {
+                       return nil, err
+               }
+               if diff == nil {
+                       diff = new(InstanceDiff)
+               }
+
+               // if id isn't explicitly set then it's always computed, because we're
+               // always "creating a new resource".
+               diff.init()
+               if _, ok := diff.Attributes["id"]; !ok {
+                       diff.SetAttribute("id", &ResourceAttrDiff{
+                               Old:         "",
+                               NewComputed: true,
+                               RequiresNew: true,
+                               Type:        DiffAttrOutput,
+                       })
+               }
+       }
+
+       err = ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PostDiff(n.Info, diff)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       *n.Output = diff
+
+       if n.OutputState != nil {
+               state := &InstanceState{}
+               *n.OutputState = state
+
+               // Apply the diff to the returned state, so the state includes
+               // any attribute values that are not computed.
+               if !diff.Empty() && n.OutputState != nil {
+                       *n.OutputState = state.MergeDiff(diff)
+               }
+       }
+
+       return nil, nil
+}
+
+// EvalReadDataApply is an EvalNode implementation that executes a data
+// resource's ReadDataApply method to read data from the data source.
+type EvalReadDataApply struct {
+       Provider *ResourceProvider
+       Output   **InstanceState
+       Diff     **InstanceDiff
+       Info     *InstanceInfo
+}
+
+func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) {
+       // TODO: test
+       provider := *n.Provider
+       diff := *n.Diff
+
+       // If the diff is for *destroying* this resource then we'll
+       // just drop its state and move on, since data resources don't
+       // support an actual "destroy" action.
+       if diff != nil && diff.GetDestroy() {
+               if n.Output != nil {
+                       *n.Output = nil
+               }
+               return nil, nil
+       }
+
+       // For the purpose of external hooks we present a data apply as a
+       // "Refresh" rather than an "Apply" because creating a data source
+       // is presented to users/callers as a "read" operation.
+       err := ctx.Hook(func(h Hook) (HookAction, error) {
+               // We don't have a state yet, so we'll just give the hook an
+               // empty one to work with.
+               return h.PreRefresh(n.Info, &InstanceState{})
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       state, err := provider.ReadDataApply(n.Info, diff)
+       if err != nil {
+               return nil, fmt.Errorf("%s: %s", n.Info.Id, err)
+       }
+
+       err = ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PostRefresh(n.Info, state)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       if n.Output != nil {
+               *n.Output = state
+       }
+
+       return nil, nil
+}
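
Since a data read is always modelled as creating a fresh object, EvalReadDataDiff forces "id" to computed whenever the provider didn't set it. A tiny sketch of that defaulting, with a local struct standing in for ResourceAttrDiff:

    package main

    import "fmt"

    type attrDiff struct {
            Old         string
            NewComputed bool
    }

    func main() {
            // diff stands in for an InstanceDiff's attribute map.
            diff := map[string]attrDiff{"zone": {Old: "us-east-1a"}}

            // Reading a data source always "creates" it, so the id is
            // computed unless the provider already supplied one.
            if _, ok := diff["id"]; !ok {
                    diff["id"] = attrDiff{NewComputed: true}
            }
            fmt.Println(diff["id"].NewComputed) // true
    }
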
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/eval_refresh.go
new file mode 100644 (file)
index 0000000..fa2b812
--- /dev/null
@@ -0,0 +1,55 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+)
+
+// EvalRefresh is an EvalNode implementation that does a refresh for
+// a resource.
+type EvalRefresh struct {
+       Provider *ResourceProvider
+       State    **InstanceState
+       Info     *InstanceInfo
+       Output   **InstanceState
+}
+
+// TODO: test
+func (n *EvalRefresh) Eval(ctx EvalContext) (interface{}, error) {
+       provider := *n.Provider
+       state := *n.State
+
+       // If we have no state, we don't do any refreshing
+       if state == nil {
+               log.Printf("[DEBUG] refresh: %s: no state, not refreshing", n.Info.Id)
+               return nil, nil
+       }
+
+       // Call pre-refresh hook
+       err := ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PreRefresh(n.Info, state)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       // Refresh!
+       state, err = provider.Refresh(n.Info, state)
+       if err != nil {
+               return nil, fmt.Errorf("%s: %s", n.Info.Id, err.Error())
+       }
+
+       // Call post-refresh hook
+       err = ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PostRefresh(n.Info, state)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       if n.Output != nil {
+               *n.Output = state
+       }
+
+       return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go b/vendor/github.com/hashicorp/terraform/terraform/eval_resource.go
new file mode 100644 (file)
index 0000000..5eca678
--- /dev/null
@@ -0,0 +1,13 @@
+package terraform
+
+// EvalInstanceInfo is an EvalNode implementation that fills in the
+// InstanceInfo as much as it can.
+type EvalInstanceInfo struct {
+       Info *InstanceInfo
+}
+
+// TODO: test
+func (n *EvalInstanceInfo) Eval(ctx EvalContext) (interface{}, error) {
+       n.Info.ModulePath = ctx.Path()
+       return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go b/vendor/github.com/hashicorp/terraform/terraform/eval_sequence.go
new file mode 100644 (file)
index 0000000..82d8178
--- /dev/null
@@ -0,0 +1,27 @@
+package terraform
+
+// EvalSequence is an EvalNode that evaluates in sequence.
+type EvalSequence struct {
+       Nodes []EvalNode
+}
+
+func (n *EvalSequence) Eval(ctx EvalContext) (interface{}, error) {
+       for _, n := range n.Nodes {
+               if n == nil {
+                       continue
+               }
+
+               if _, err := EvalRaw(n, ctx); err != nil {
+                       return nil, err
+               }
+       }
+
+       return nil, nil
+}
+
+// EvalNodeFilterable impl.
+func (n *EvalSequence) Filter(fn EvalNodeFilterFunc) {
+       for i, node := range n.Nodes {
+               n.Nodes[i] = fn(node)
+       }
+}
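
Sequence plus filtering is how whole subtrees are enabled or disabled per walk: EvalSequence runs children in order, and its Filter rewrites each child in place (an operation filter can swap one for EvalNoop). A reduced, self-contained sketch of both behaviors:

    package main

    import "fmt"

    type evalNode interface{ eval() error }

    type printNode string

    func (p printNode) eval() error { fmt.Println(string(p)); return nil }

    type noop struct{}

    func (noop) eval() error { return nil }

    type sequence struct{ nodes []evalNode }

    func (s *sequence) eval() error {
            for _, n := range s.nodes {
                    if n == nil {
                            continue // nil children are tolerated, as in EvalSequence
                    }
                    if err := n.eval(); err != nil {
                            return err // stop at the first failure
                    }
            }
            return nil
    }

    // filter rewrites children in place, mirroring EvalSequence.Filter.
    func (s *sequence) filter(fn func(evalNode) evalNode) {
            for i, n := range s.nodes {
                    s.nodes[i] = fn(n)
            }
    }

    func main() {
            s := &sequence{nodes: []evalNode{printNode("a"), printNode("b")}}
            s.filter(func(n evalNode) evalNode {
                    if n == printNode("b") {
                            return noop{} // filtered out, like EvalNodeFilterOp
                    }
                    return n
            })
            _ = s.eval() // prints only "a"
    }
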
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_state.go b/vendor/github.com/hashicorp/terraform/terraform/eval_state.go
new file mode 100644 (file)
index 0000000..126a0e6
--- /dev/null
@@ -0,0 +1,324 @@
+package terraform
+
+import "fmt"
+
+// EvalReadState is an EvalNode implementation that reads the
+// primary InstanceState for a specific resource out of the state.
+type EvalReadState struct {
+       Name   string
+       Output **InstanceState
+}
+
+func (n *EvalReadState) Eval(ctx EvalContext) (interface{}, error) {
+       return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
+               return rs.Primary, nil
+       })
+}
+
+// EvalReadStateDeposed is an EvalNode implementation that reads the
+// deposed InstanceState for a specific resource out of the state
+type EvalReadStateDeposed struct {
+       Name   string
+       Output **InstanceState
+       // Index indicates which instance in the Deposed list to target, or -1 for
+       // the last item.
+       Index int
+}
+
+func (n *EvalReadStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
+       return readInstanceFromState(ctx, n.Name, n.Output, func(rs *ResourceState) (*InstanceState, error) {
+               // Get the index. If it is negative, then we get the last one
+               idx := n.Index
+               if idx < 0 {
+                       idx = len(rs.Deposed) - 1
+               }
+               if idx >= 0 && idx < len(rs.Deposed) {
+                       return rs.Deposed[idx], nil
+               } else {
+                       return nil, fmt.Errorf("bad deposed index: %d, for resource: %#v", idx, rs)
+               }
+       })
+}
+
+// Does the bulk of the work for the various flavors of ReadState eval nodes.
+// Each node just provides a reader function to get from the ResourceState to the
+// InstanceState, and this takes care of all the plumbing.
+func readInstanceFromState(
+       ctx EvalContext,
+       resourceName string,
+       output **InstanceState,
+       readerFn func(*ResourceState) (*InstanceState, error),
+) (*InstanceState, error) {
+       state, lock := ctx.State()
+
+       // Get a read lock so we can access this instance
+       lock.RLock()
+       defer lock.RUnlock()
+
+       // Look for the module state. If we don't have one, then it doesn't matter.
+       mod := state.ModuleByPath(ctx.Path())
+       if mod == nil {
+               return nil, nil
+       }
+
+       // Look for the resource state. If we don't have one, then it is okay.
+       rs := mod.Resources[resourceName]
+       if rs == nil {
+               return nil, nil
+       }
+
+       // Use the delegate function to get the instance state from the resource state
+       is, err := readerFn(rs)
+       if err != nil {
+               return nil, err
+       }
+
+       // Write the result to the output pointer
+       if output != nil {
+               *output = is
+       }
+
+       return is, nil
+}
+
+// EvalRequireState is an EvalNode implementation that early exits
+// if the state doesn't have an ID.
+type EvalRequireState struct {
+       State **InstanceState
+}
+
+func (n *EvalRequireState) Eval(ctx EvalContext) (interface{}, error) {
+       if n.State == nil {
+               return nil, EvalEarlyExitError{}
+       }
+
+       state := *n.State
+       if state == nil || state.ID == "" {
+               return nil, EvalEarlyExitError{}
+       }
+
+       return nil, nil
+}
+
+// EvalUpdateStateHook is an EvalNode implementation that calls the
+// PostStateUpdate hook with the current state.
+type EvalUpdateStateHook struct{}
+
+func (n *EvalUpdateStateHook) Eval(ctx EvalContext) (interface{}, error) {
+       state, lock := ctx.State()
+
+       // Get a full lock. Even calling something like WriteState can modify
+       // (prune) the state, so we need the full lock.
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Call the hook
+       err := ctx.Hook(func(h Hook) (HookAction, error) {
+               return h.PostStateUpdate(state)
+       })
+       if err != nil {
+               return nil, err
+       }
+
+       return nil, nil
+}
+
+// EvalWriteState is an EvalNode implementation that writes the
+// primary InstanceState for a specific resource into the state.
+type EvalWriteState struct {
+       Name         string
+       ResourceType string
+       Provider     string
+       Dependencies []string
+       State        **InstanceState
+}
+
+func (n *EvalWriteState) Eval(ctx EvalContext) (interface{}, error) {
+       return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
+               func(rs *ResourceState) error {
+                       rs.Primary = *n.State
+                       return nil
+               },
+       )
+}
+
+// EvalWriteStateDeposed is an EvalNode implementation that writes
+// an InstanceState out to the Deposed list of a resource in the state.
+type EvalWriteStateDeposed struct {
+       Name         string
+       ResourceType string
+       Provider     string
+       Dependencies []string
+       State        **InstanceState
+       // Index indicates which instance in the Deposed list to target, or -1 to append.
+       Index int
+}
+
+func (n *EvalWriteStateDeposed) Eval(ctx EvalContext) (interface{}, error) {
+       return writeInstanceToState(ctx, n.Name, n.ResourceType, n.Provider, n.Dependencies,
+               func(rs *ResourceState) error {
+                       if n.Index == -1 {
+                               rs.Deposed = append(rs.Deposed, *n.State)
+                       } else {
+                               rs.Deposed[n.Index] = *n.State
+                       }
+                       return nil
+               },
+       )
+}
+
+// Pulls together the common tasks of the EvalWriteState nodes.  All the args
+// are passed directly down from the EvalNode along with a `writer` function
+// which is yielded the *ResourceState and is responsible for writing an
+// InstanceState to the proper field in the ResourceState.
+func writeInstanceToState(
+       ctx EvalContext,
+       resourceName string,
+       resourceType string,
+       provider string,
+       dependencies []string,
+       writerFn func(*ResourceState) error,
+) (*InstanceState, error) {
+       state, lock := ctx.State()
+       if state == nil {
+               return nil, fmt.Errorf("cannot write state to nil state")
+       }
+
+       // Get a write lock so we can access this instance
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Look for the module state. If we don't have one, create it.
+       mod := state.ModuleByPath(ctx.Path())
+       if mod == nil {
+               mod = state.AddModule(ctx.Path())
+       }
+
+       // Look for the resource state.
+       rs := mod.Resources[resourceName]
+       if rs == nil {
+               rs = &ResourceState{}
+               rs.init()
+               mod.Resources[resourceName] = rs
+       }
+       rs.Type = resourceType
+       rs.Dependencies = dependencies
+       rs.Provider = provider
+
+       if err := writerFn(rs); err != nil {
+               return nil, err
+       }
+
+       return nil, nil
+}
+
+// EvalClearPrimaryState is an EvalNode implementation that clears the primary
+// instance from a resource state.
+type EvalClearPrimaryState struct {
+       Name string
+}
+
+func (n *EvalClearPrimaryState) Eval(ctx EvalContext) (interface{}, error) {
+       state, lock := ctx.State()
+
+       // Get a write lock since we will modify this instance
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Look for the module state. If we don't have one, then it doesn't matter.
+       mod := state.ModuleByPath(ctx.Path())
+       if mod == nil {
+               return nil, nil
+       }
+
+       // Look for the resource state. If we don't have one, then it is okay.
+       rs := mod.Resources[n.Name]
+       if rs == nil {
+               return nil, nil
+       }
+
+       // Clear primary from the resource state
+       rs.Primary = nil
+
+       return nil, nil
+}
+
+// EvalDeposeState is an EvalNode implementation that takes the primary
+// out of a state and makes it Deposed. This is done at the beginning of
+// create-before-destroy calls so that the create can create while preserving
+// the old state of the to-be-destroyed resource.
+type EvalDeposeState struct {
+       Name string
+}
+
+// TODO: test
+func (n *EvalDeposeState) Eval(ctx EvalContext) (interface{}, error) {
+       state, lock := ctx.State()
+
+       // Get a write lock since we will modify this instance
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Look for the module state. If we don't have one, then it doesn't matter.
+       mod := state.ModuleByPath(ctx.Path())
+       if mod == nil {
+               return nil, nil
+       }
+
+       // Look for the resource state. If we don't have one, then it is okay.
+       rs := mod.Resources[n.Name]
+       if rs == nil {
+               return nil, nil
+       }
+
+       // If we don't have a primary, we have nothing to depose
+       if rs.Primary == nil {
+               return nil, nil
+       }
+
+       // Depose
+       rs.Deposed = append(rs.Deposed, rs.Primary)
+       rs.Primary = nil
+
+       return nil, nil
+}
+
+// EvalUndeposeState is an EvalNode implementation that reads the
+// InstanceState for a specific resource out of the state.
+type EvalUndeposeState struct {
+       Name  string
+       State **InstanceState
+}
+
+// TODO: test
+func (n *EvalUndeposeState) Eval(ctx EvalContext) (interface{}, error) {
+       state, lock := ctx.State()
+
+       // Get a write lock since we will modify this instance
+       lock.Lock()
+       defer lock.Unlock()
+
+       // Look for the module state. If we don't have one, then it doesn't matter.
+       mod := state.ModuleByPath(ctx.Path())
+       if mod == nil {
+               return nil, nil
+       }
+
+       // Look for the resource state. If we don't have one, then it is okay.
+       rs := mod.Resources[n.Name]
+       if rs == nil {
+               return nil, nil
+       }
+
+       // If we don't have any deposed resource, then we don't have anything to do
+       if len(rs.Deposed) == 0 {
+               return nil, nil
+       }
+
+       // Undepose
+       idx := len(rs.Deposed) - 1
+       rs.Primary = rs.Deposed[idx]
+       rs.Deposed[idx] = *n.State
+
+       return nil, nil
+}
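
Depose and undepose amount to a push/pop between the Primary slot and the Deposed list, which is what lets create-before-destroy build a replacement while the old instance survives in state. A standalone sketch with strings standing in for *InstanceState:

    package main

    import "fmt"

    type resourceState struct {
            Primary string
            Deposed []string
    }

    // depose pushes the current primary onto the deposed list.
    func (rs *resourceState) depose() {
            if rs.Primary == "" {
                    return // nothing to depose
            }
            rs.Deposed = append(rs.Deposed, rs.Primary)
            rs.Primary = ""
    }

    // undepose restores the most recently deposed instance as primary,
    // writing the replacement into its old slot, as EvalUndeposeState does.
    func (rs *resourceState) undepose(replacement string) {
            if len(rs.Deposed) == 0 {
                    return
            }
            idx := len(rs.Deposed) - 1
            rs.Primary = rs.Deposed[idx]
            rs.Deposed[idx] = replacement
    }

    func main() {
            rs := &resourceState{Primary: "old-instance"}
            rs.depose() // create-before-destroy begins
            fmt.Printf("%q %q\n", rs.Primary, rs.Deposed) // "" ["old-instance"]
            rs.undepose("")                               // e.g. the create failed
            fmt.Printf("%q %q\n", rs.Primary, rs.Deposed) // "old-instance" [""]
    }
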
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go
new file mode 100644 (file)
index 0000000..478aa64
--- /dev/null
@@ -0,0 +1,227 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/mitchellh/mapstructure"
+)
+
+// EvalValidateError is the error structure returned if there were
+// validation errors.
+type EvalValidateError struct {
+       Warnings []string
+       Errors   []error
+}
+
+func (e *EvalValidateError) Error() string {
+       return fmt.Sprintf("Warnings: %s. Errors: %s", e.Warnings, e.Errors)
+}
+
+// EvalValidateCount is an EvalNode implementation that validates
+// the count of a resource.
+type EvalValidateCount struct {
+       Resource *config.Resource
+}
+
+// TODO: test
+func (n *EvalValidateCount) Eval(ctx EvalContext) (interface{}, error) {
+       var count int
+       var errs []error
+       var err error
+       if _, err := ctx.Interpolate(n.Resource.RawCount, nil); err != nil {
+               errs = append(errs, fmt.Errorf(
+                       "Failed to interpolate count: %s", err))
+               goto RETURN
+       }
+
+       count, err = n.Resource.Count()
+       if err != nil {
+               // If we can't get the count during validation, then
+               // just replace it with the number 1.
+               c := n.Resource.RawCount.Config()
+               c[n.Resource.RawCount.Key] = "1"
+               count = 1
+       }
+       err = nil
+
+       if count < 0 {
+               errs = append(errs, fmt.Errorf(
+                       "Count is less than zero: %d", count))
+       }
+
+RETURN:
+       if len(errs) != 0 {
+               err = &EvalValidateError{
+                       Errors: errs,
+               }
+       }
+       return nil, err
+}
+
+// EvalValidateProvider is an EvalNode implementation that validates
+// the configuration of a provider.
+type EvalValidateProvider struct {
+       Provider *ResourceProvider
+       Config   **ResourceConfig
+}
+
+func (n *EvalValidateProvider) Eval(ctx EvalContext) (interface{}, error) {
+       provider := *n.Provider
+       config := *n.Config
+
+       warns, errs := provider.Validate(config)
+       if len(warns) == 0 && len(errs) == 0 {
+               return nil, nil
+       }
+
+       return nil, &EvalValidateError{
+               Warnings: warns,
+               Errors:   errs,
+       }
+}
+
+// EvalValidateProvisioner is an EvalNode implementation that validates
+// the configuration of a provisioner and its connection configuration.
+type EvalValidateProvisioner struct {
+       Provisioner *ResourceProvisioner
+       Config      **ResourceConfig
+       ConnConfig  **ResourceConfig
+}
+
+func (n *EvalValidateProvisioner) Eval(ctx EvalContext) (interface{}, error) {
+       provisioner := *n.Provisioner
+       config := *n.Config
+       var warns []string
+       var errs []error
+
+       {
+               // Validate the provisioner's own config first
+               w, e := provisioner.Validate(config)
+               warns = append(warns, w...)
+               errs = append(errs, e...)
+       }
+
+       {
+               // Now validate the connection config, which might either be from
+               // the provisioner block itself or inherited from the resource's
+               // shared connection info.
+               w, e := n.validateConnConfig(*n.ConnConfig)
+               warns = append(warns, w...)
+               errs = append(errs, e...)
+       }
+
+       if len(warns) == 0 && len(errs) == 0 {
+               return nil, nil
+       }
+
+       return nil, &EvalValidateError{
+               Warnings: warns,
+               Errors:   errs,
+       }
+}
+
+func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) (warns []string, errs []error) {
+       // We can't comprehensively validate the connection config since its
+       // final structure is decided by the communicator and we can't instantiate
+       // that until we have a complete instance state. However, we *can* catch
+       // configuration keys that are not valid for *any* communicator, catching
+       // typos early rather than waiting until we actually try to run one of
+       // the resource's provisioners.
+
+       type connConfigSuperset struct {
+               // All attribute types are interface{} here because at this point we
+               // may still have unresolved interpolation expressions, which will
+               // appear as strings regardless of the final goal type.
+
+               Type       interface{} `mapstructure:"type"`
+               User       interface{} `mapstructure:"user"`
+               Password   interface{} `mapstructure:"password"`
+               Host       interface{} `mapstructure:"host"`
+               Port       interface{} `mapstructure:"port"`
+               Timeout    interface{} `mapstructure:"timeout"`
+               ScriptPath interface{} `mapstructure:"script_path"`
+
+               // For type=ssh only (enforced in ssh communicator)
+               PrivateKey        interface{} `mapstructure:"private_key"`
+               Agent             interface{} `mapstructure:"agent"`
+               BastionHost       interface{} `mapstructure:"bastion_host"`
+               BastionPort       interface{} `mapstructure:"bastion_port"`
+               BastionUser       interface{} `mapstructure:"bastion_user"`
+               BastionPassword   interface{} `mapstructure:"bastion_password"`
+               BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
+
+               // For type=winrm only (enforced in winrm communicator)
+               HTTPS    interface{} `mapstructure:"https"`
+               Insecure interface{} `mapstructure:"insecure"`
+               CACert   interface{} `mapstructure:"cacert"`
+       }
+
+       var metadata mapstructure.Metadata
+       decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+               Metadata: &metadata,
+               Result:   &connConfigSuperset{}, // result is disregarded; we only care about unused keys
+       })
+       if err != nil {
+               // should never happen
+               errs = append(errs, err)
+               return
+       }
+
+       if err := decoder.Decode(connConfig.Config); err != nil {
+               errs = append(errs, err)
+               return
+       }
+
+       for _, attrName := range metadata.Unused {
+               errs = append(errs, fmt.Errorf("unknown 'connection' argument %q", attrName))
+       }
+       return
+}
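
The decode-and-inspect trick above is plain mitchellh/mapstructure usage: decode into a superset struct, then read Metadata.Unused to find keys that matched nothing. A self-contained sketch of the same pattern; the conn struct and input keys here are made up for illustration:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// conn is a made-up superset struct for illustration.
type conn struct {
	User interface{} `mapstructure:"user"`
	Host interface{} `mapstructure:"host"`
}

func main() {
	input := map[string]interface{}{
		"user":    "root",
		"host":    "10.0.0.1",
		"pasword": "oops", // misspelled key we want to flag
	}

	var md mapstructure.Metadata
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		Metadata: &md,
		Result:   &conn{}, // result is disregarded; only unused keys matter
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}

	for _, key := range md.Unused {
		fmt.Printf("unknown 'connection' argument %q\n", key)
	}
}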
+
+// EvalValidateResource is an EvalNode implementation that validates
+// the configuration of a resource.
+type EvalValidateResource struct {
+       Provider     *ResourceProvider
+       Config       **ResourceConfig
+       ResourceName string
+       ResourceType string
+       ResourceMode config.ResourceMode
+
+       // IgnoreWarnings means that warnings will not be passed through. This allows
+       // "just-in-time" passes of validation to continue execution through warnings.
+       IgnoreWarnings bool
+}
+
+func (n *EvalValidateResource) Eval(ctx EvalContext) (interface{}, error) {
+       provider := *n.Provider
+       cfg := *n.Config
+       var warns []string
+       var errs []error
+       // Provider entry point varies depending on resource mode, because
+       // managed resources and data resources are two distinct concepts
+       // in the provider abstraction.
+       switch n.ResourceMode {
+       case config.ManagedResourceMode:
+               warns, errs = provider.ValidateResource(n.ResourceType, cfg)
+       case config.DataResourceMode:
+               warns, errs = provider.ValidateDataSource(n.ResourceType, cfg)
+       }
+
+       // If the resource name doesn't match the name regular
+       // expression, show an error.
+       if !config.NameRegexp.Match([]byte(n.ResourceName)) {
+               errs = append(errs, fmt.Errorf(
+                       "%s: resource name can only contain letters, numbers, "+
+                               "dashes, and underscores.", n.ResourceName))
+       }
+
+       if (len(warns) == 0 || n.IgnoreWarnings) && len(errs) == 0 {
+               return nil, nil
+       }
+
+       return nil, &EvalValidateError{
+               Warnings: warns,
+               Errors:   errs,
+       }
+}
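
The exact pattern behind config.NameRegexp isn't shown in this diff; going by the error text above, an equivalent check might look like the following sketch (the regexp is an assumption, not the library's definition):

package main

import (
	"fmt"
	"regexp"
)

// nameRegexp is inferred from the error message above: letters, numbers,
// dashes, and underscores only. The real config.NameRegexp may differ.
var nameRegexp = regexp.MustCompile(`^[0-9A-Za-z_-]+$`)

func main() {
	for _, name := range []string{"web_server-1", "bad.name"} {
		fmt.Println(name, nameRegexp.MatchString(name))
	}
}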
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate_selfref.go
new file mode 100644 (file)
index 0000000..ae4436a
--- /dev/null
@@ -0,0 +1,74 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// EvalValidateResourceSelfRef is an EvalNode implementation that validates that
+// a configuration doesn't contain a reference to the resource itself.
+//
+// This must be done prior to interpolating configuration in order to avoid
+// any infinite loop scenarios.
+type EvalValidateResourceSelfRef struct {
+       Addr   **ResourceAddress
+       Config **config.RawConfig
+}
+
+func (n *EvalValidateResourceSelfRef) Eval(ctx EvalContext) (interface{}, error) {
+       addr := *n.Addr
+       conf := *n.Config
+
+       // Go through the variables and find self references
+       var errs []error
+       for k, raw := range conf.Variables {
+               rv, ok := raw.(*config.ResourceVariable)
+               if !ok {
+                       continue
+               }
+
+               // Build an address from the variable
+               varAddr := &ResourceAddress{
+                       Path:         addr.Path,
+                       Mode:         rv.Mode,
+                       Type:         rv.Type,
+                       Name:         rv.Name,
+                       Index:        rv.Index,
+                       InstanceType: TypePrimary,
+               }
+
+               // If the variable access is a multi-access (*), then we just
+               // match the index so that we'll match our own addr if everything
+               // else matches.
+               if rv.Multi && rv.Index == -1 {
+                       varAddr.Index = addr.Index
+               }
+
+               // This is a weird thing where ResourceAddress has index "-1" when
+               // index isn't set at all. This means index "0" for resource access.
+               // So, if we have this scenario, just set our varAddr to -1 so it
+               // matches.
+               if addr.Index == -1 && varAddr.Index == 0 {
+                       varAddr.Index = -1
+               }
+
+               // If the addresses match, then this is a self reference
+               if varAddr.Equals(addr) && varAddr.Index == addr.Index {
+                       errs = append(errs, fmt.Errorf(
+                               "%s: self reference not allowed: %q",
+                               addr, k))
+               }
+       }
+
+       // If no errors, no errors!
+       if len(errs) == 0 {
+               return nil, nil
+       }
+
+       // Wrap the errors in the proper wrapper so we can handle validation
+       // formatting properly upstream.
+       return nil, &EvalValidateError{
+               Errors: errs,
+       }
+}
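
A simplified stand-in for the address comparison above, showing the index normalization between "no index" (-1) and index 0 that lets a resource match references to itself:

package main

import "fmt"

// addr is a simplified stand-in for ResourceAddress.
type addr struct {
	Type, Name string
	Index      int // -1 means "no index set"
}

// isSelfRef mirrors the normalization above: a reference with index 0
// matches a resource that has no index at all.
func isSelfRef(resource, ref addr) bool {
	if resource.Index == -1 && ref.Index == 0 {
		ref.Index = -1
	}
	return resource == ref
}

func main() {
	me := addr{"aws_instance", "web", -1}
	fmt.Println(isSelfRef(me, addr{"aws_instance", "web", 0})) // true: self reference
	fmt.Println(isSelfRef(me, addr{"aws_instance", "db", 0}))  // false
}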
diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go b/vendor/github.com/hashicorp/terraform/terraform/eval_variable.go
new file mode 100644 (file)
index 0000000..e39a33c
--- /dev/null
@@ -0,0 +1,279 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "reflect"
+       "strconv"
+       "strings"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
+// EvalTypeCheckVariable is an EvalNode which ensures that the variable
+// values which are assigned as inputs to a module (including the root)
+// match the types which are either declared for the variables explicitly
+// or inferred from the default values.
+//
+// In order to achieve this three things are required:
+//     - a map of the proposed variable values
+//     - the configuration tree of the module in which the variable is
+//       declared
+//     - the path to the module (so we know which part of the tree to
+//       compare the values against).
+type EvalTypeCheckVariable struct {
+       Variables  map[string]interface{}
+       ModulePath []string
+       ModuleTree *module.Tree
+}
+
+func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) {
+       currentTree := n.ModuleTree
+       for _, pathComponent := range n.ModulePath[1:] {
+               currentTree = currentTree.Children()[pathComponent]
+       }
+       targetConfig := currentTree.Config()
+
+       prototypes := make(map[string]config.VariableType)
+       for _, variable := range targetConfig.Variables {
+               prototypes[variable.Name] = variable.Type()
+       }
+
+       // Only display a module in an error message if we are not in the root module
+       modulePathDescription := fmt.Sprintf(" in module %s", strings.Join(n.ModulePath[1:], "."))
+       if len(n.ModulePath) == 1 {
+               modulePathDescription = ""
+       }
+
+       for name, declaredType := range prototypes {
+               proposedValue, ok := n.Variables[name]
+               if !ok {
+                       // This means the default value should be used as no overriding value
+                       // has been set. Therefore we should continue as no check is necessary.
+                       continue
+               }
+
+               if proposedValue == config.UnknownVariableValue {
+                       continue
+               }
+
+               switch declaredType {
+               case config.VariableTypeString:
+                       switch proposedValue.(type) {
+                       case string:
+                               continue
+                       default:
+                               return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+                                       name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+                       }
+               case config.VariableTypeMap:
+                       switch proposedValue.(type) {
+                       case map[string]interface{}:
+                               continue
+                       default:
+                               return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+                                       name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+                       }
+               case config.VariableTypeList:
+                       switch proposedValue.(type) {
+                       case []interface{}:
+                               continue
+                       default:
+                               return nil, fmt.Errorf("variable %s%s should be type %s, got %s",
+                                       name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue))
+                       }
+               default:
+                       return nil, fmt.Errorf("variable %s%s should be type %s, got type string",
+                               name, modulePathDescription, declaredType.Printable())
+               }
+       }
+
+       return nil, nil
+}
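
Stripped of the module-tree plumbing, the check above is a type switch per declared type. A minimal sketch of the string arm; checkString is a hypothetical helper, not part of the package:

package main

import "fmt"

// checkString isolates one arm of the type switch above: a variable
// declared as a string must be given a string value.
func checkString(name string, v interface{}) error {
	if _, ok := v.(string); ok {
		return nil
	}
	return fmt.Errorf("variable %s should be type string, got %T", name, v)
}

func main() {
	fmt.Println(checkString("region", "us-east-1"))            // <nil>
	fmt.Println(checkString("tags", map[string]interface{}{})) // error
}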
+
+// EvalSetVariables is an EvalNode implementation that sets the variables
+// explicitly for interpolation later.
+type EvalSetVariables struct {
+       Module    *string
+       Variables map[string]interface{}
+}
+
+// TODO: test
+func (n *EvalSetVariables) Eval(ctx EvalContext) (interface{}, error) {
+       ctx.SetVariables(*n.Module, n.Variables)
+       return nil, nil
+}
+
+// EvalVariableBlock is an EvalNode implementation that evaluates the
+// given configuration, and uses the final values as a way to set the
+// mapping.
+type EvalVariableBlock struct {
+       Config         **ResourceConfig
+       VariableValues map[string]interface{}
+}
+
+func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) {
+       // Clear out the existing mapping
+       for k := range n.VariableValues {
+               delete(n.VariableValues, k)
+       }
+
+       // Get our configuration
+       rc := *n.Config
+       for k, v := range rc.Config {
+               vKind := reflect.ValueOf(v).Type().Kind()
+
+               switch vKind {
+               case reflect.Slice:
+                       var vSlice []interface{}
+                       if err := hilmapstructure.WeakDecode(v, &vSlice); err == nil {
+                               n.VariableValues[k] = vSlice
+                               continue
+                       }
+               case reflect.Map:
+                       var vMap map[string]interface{}
+                       if err := hilmapstructure.WeakDecode(v, &vMap); err == nil {
+                               n.VariableValues[k] = vMap
+                               continue
+                       }
+               default:
+                       var vString string
+                       if err := hilmapstructure.WeakDecode(v, &vString); err == nil {
+                               n.VariableValues[k] = vString
+                               continue
+                       }
+               }
+
+               return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k)
+       }
+
+       for _, path := range rc.ComputedKeys {
+               log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path)
+               err := n.setUnknownVariableValueForPath(path)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return nil, nil
+}
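
Judging by its import path, hilmapstructure appears to be a thin wrapper over mitchellh/mapstructure's weakly typed decoding (an assumption; the wrapper isn't shown here). Plain mapstructure.WeakDecode behaves similarly for the string/list/map probing done above:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

func main() {
	// Weakly typed decoding coerces compatible values, e.g. numbers to strings.
	var s string
	if err := mapstructure.WeakDecode(42, &s); err == nil {
		fmt.Println(s) // "42"
	}

	// Lists decode element-wise into []interface{}.
	var l []interface{}
	if err := mapstructure.WeakDecode([]string{"a", "b"}, &l); err == nil {
		fmt.Println(l) // [a b]
	}

	// Maps decode into map[string]interface{}.
	var m map[string]interface{}
	if err := mapstructure.WeakDecode(map[string]int{"n": 1}, &m); err == nil {
		fmt.Println(m) // map[n:1]
	}
}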
+
+func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error {
+       pathComponents := strings.Split(path, ".")
+
+       if len(pathComponents) < 1 {
+               return fmt.Errorf("No path comoponents in %s", path)
+       }
+
+       if len(pathComponents) == 1 {
+               // Special case the "top level" since we know the type
+               if _, ok := n.VariableValues[pathComponents[0]]; !ok {
+                       n.VariableValues[pathComponents[0]] = config.UnknownVariableValue
+               }
+               return nil
+       }
+
+       // Otherwise find the correct point in the tree and then set to unknown
+       var current interface{} = n.VariableValues[pathComponents[0]]
+       for i := 1; i < len(pathComponents); i++ {
+               switch tCurrent := current.(type) {
+               case []interface{}:
+                       index, err := strconv.Atoi(pathComponents[i])
+                       if err != nil {
+                               return fmt.Errorf("Cannot convert %s to slice index in path %s",
+                                       pathComponents[i], path)
+                       }
+                       current = tCurrent[index]
+               case []map[string]interface{}:
+                       index, err := strconv.Atoi(pathComponents[i])
+                       if err != nil {
+                               return fmt.Errorf("Cannot convert %s to slice index in path %s",
+                                       pathComponents[i], path)
+                       }
+                       current = tCurrent[index]
+               case map[string]interface{}:
+                       if val, hasVal := tCurrent[pathComponents[i]]; hasVal {
+                               current = val
+                               continue
+                       }
+
+                       tCurrent[pathComponents[i]] = config.UnknownVariableValue
+               }
+       }
+
+       return nil
+}
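
The path walker above threads a dotted key like "a.0.b" through nested maps and slices and marks the final missing map key as unknown. A condensed, runnable sketch; the sentinel constant is assumed to match config.UnknownVariableValue:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Assumed to match config.UnknownVariableValue; treat it as an opaque sentinel.
const unknown = "74D93920-ED26-11E3-AC10-0800200C9A66"

// setUnknown walks a dotted path through nested maps/slices and marks the
// first missing map key as unknown, as setUnknownVariableValueForPath does.
func setUnknown(values map[string]interface{}, path string) error {
	parts := strings.Split(path, ".")
	if len(parts) == 1 {
		if _, ok := values[parts[0]]; !ok {
			values[parts[0]] = unknown
		}
		return nil
	}

	var current interface{} = values[parts[0]]
	for i := 1; i < len(parts); i++ {
		switch t := current.(type) {
		case []interface{}:
			idx, err := strconv.Atoi(parts[i])
			if err != nil {
				return fmt.Errorf("cannot convert %s to slice index in path %s", parts[i], path)
			}
			current = t[idx]
		case map[string]interface{}:
			if v, ok := t[parts[i]]; ok {
				current = v
				continue
			}
			t[parts[i]] = unknown
		}
	}
	return nil
}

func main() {
	vals := map[string]interface{}{
		"a": []interface{}{map[string]interface{}{}},
	}
	if err := setUnknown(vals, "a.0.b"); err == nil {
		fmt.Println(vals) // the nested "b" key is now the unknown sentinel
	}
}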
+
+// EvalCoerceMapVariable is an EvalNode implementation that recognizes a
+// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a
+// bare map literal is indistinguishable from a list of maps w/ one element.
+//
+// We take all the same inputs as EvalTypeCheckVariable above, since we need
+// both the target type and the proposed value in order to properly coerce.
+type EvalCoerceMapVariable struct {
+       Variables  map[string]interface{}
+       ModulePath []string
+       ModuleTree *module.Tree
+}
+
+// Eval implements the EvalNode interface. See EvalCoerceMapVariable for
+// details.
+func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) {
+       currentTree := n.ModuleTree
+       for _, pathComponent := range n.ModulePath[1:] {
+               currentTree = currentTree.Children()[pathComponent]
+       }
+       targetConfig := currentTree.Config()
+
+       prototypes := make(map[string]config.VariableType)
+       for _, variable := range targetConfig.Variables {
+               prototypes[variable.Name] = variable.Type()
+       }
+
+       for name, declaredType := range prototypes {
+               if declaredType != config.VariableTypeMap {
+                       continue
+               }
+
+               proposedValue, ok := n.Variables[name]
+               if !ok {
+                       continue
+               }
+
+               if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 {
+                       if m, ok := list[0].(map[string]interface{}); ok {
+                               log.Printf("[DEBUG] EvalCoerceMapVariable: "+
+                                       "Coercing single element list into map: %#v", m)
+                               n.Variables[name] = m
+                       }
+               }
+       }
+
+       return nil, nil
+}
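
The coercion itself is tiny once isolated from the module-tree lookup: unwrap a one-element list whose sole element is a map. A sketch:

package main

import "fmt"

// coerce unwraps the HCL ambiguity described above: a one-element list
// whose sole element is a map is treated as a bare map literal.
func coerce(v interface{}) interface{} {
	if list, ok := v.([]interface{}); ok && len(list) == 1 {
		if m, ok := list[0].(map[string]interface{}); ok {
			return m
		}
	}
	return v
}

func main() {
	ambiguous := []interface{}{map[string]interface{}{"region": "us-east-1"}}
	fmt.Printf("%#v\n", coerce(ambiguous)) // map[string]interface {}{"region":"us-east-1"}
}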
+
+// hclTypeName returns the name of the type that would represent this value in
+// a config file, or falls back to the Go type name if there's no corresponding
+// HCL type. This is used for formatted output, not for comparing types.
+func hclTypeName(i interface{}) string {
+       switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k {
+       case reflect.Bool:
+               return "boolean"
+       case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+               reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+               reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:
+               return "number"
+       case reflect.Array, reflect.Slice:
+               return "list"
+       case reflect.Map:
+               return "map"
+       case reflect.String:
+               return "string"
+       default:
+               // fall back to the Go type if there's no match
+               return k.String()
+       }
+}
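
hclTypeName is self-contained enough to demonstrate directly; this sketch inlines a trimmed version of the same reflect.Kind mapping and shows a few inputs:

package main

import (
	"fmt"
	"reflect"
)

// typeName repeats the hclTypeName mapping above for a few common kinds.
func typeName(i interface{}) string {
	switch k := reflect.Indirect(reflect.ValueOf(i)).Kind(); k {
	case reflect.Bool:
		return "boolean"
	case reflect.String:
		return "string"
	case reflect.Array, reflect.Slice:
		return "list"
	case reflect.Map:
		return "map"
	default:
		return k.String()
	}
}

func main() {
	fmt.Println(typeName("us-east-1"))                    // string
	fmt.Println(typeName([]interface{}{1, 2}))            // list
	fmt.Println(typeName(map[string]interface{}{"a": 1})) // map
}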
diff --git a/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go b/vendor/github.com/hashicorp/terraform/terraform/evaltree_provider.go
new file mode 100644 (file)
index 0000000..00392ef
--- /dev/null
@@ -0,0 +1,119 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config"
+)
+
+// ProviderEvalTree returns the evaluation tree for initializing and
+// configuring providers.
+func ProviderEvalTree(n string, config *config.RawConfig) EvalNode {
+       var provider ResourceProvider
+       var resourceConfig *ResourceConfig
+
+       seq := make([]EvalNode, 0, 5)
+       seq = append(seq, &EvalInitProvider{Name: n})
+
+       // Input stuff
+       seq = append(seq, &EvalOpFilter{
+               Ops: []walkOperation{walkInput, walkImport},
+               Node: &EvalSequence{
+                       Nodes: []EvalNode{
+                               &EvalGetProvider{
+                                       Name:   n,
+                                       Output: &provider,
+                               },
+                               &EvalInterpolate{
+                                       Config: config,
+                                       Output: &resourceConfig,
+                               },
+                               &EvalBuildProviderConfig{
+                                       Provider: n,
+                                       Config:   &resourceConfig,
+                                       Output:   &resourceConfig,
+                               },
+                               &EvalInputProvider{
+                                       Name:     n,
+                                       Provider: &provider,
+                                       Config:   &resourceConfig,
+                               },
+                       },
+               },
+       })
+
+       seq = append(seq, &EvalOpFilter{
+               Ops: []walkOperation{walkValidate},
+               Node: &EvalSequence{
+                       Nodes: []EvalNode{
+                               &EvalGetProvider{
+                                       Name:   n,
+                                       Output: &provider,
+                               },
+                               &EvalInterpolate{
+                                       Config: config,
+                                       Output: &resourceConfig,
+                               },
+                               &EvalBuildProviderConfig{
+                                       Provider: n,
+                                       Config:   &resourceConfig,
+                                       Output:   &resourceConfig,
+                               },
+                               &EvalValidateProvider{
+                                       Provider: &provider,
+                                       Config:   &resourceConfig,
+                               },
+                               &EvalSetProviderConfig{
+                                       Provider: n,
+                                       Config:   &resourceConfig,
+                               },
+                       },
+               },
+       })
+
+       // Apply stuff
+       seq = append(seq, &EvalOpFilter{
+               Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
+               Node: &EvalSequence{
+                       Nodes: []EvalNode{
+                               &EvalGetProvider{
+                                       Name:   n,
+                                       Output: &provider,
+                               },
+                               &EvalInterpolate{
+                                       Config: config,
+                                       Output: &resourceConfig,
+                               },
+                               &EvalBuildProviderConfig{
+                                       Provider: n,
+                                       Config:   &resourceConfig,
+                                       Output:   &resourceConfig,
+                               },
+                               &EvalSetProviderConfig{
+                                       Provider: n,
+                                       Config:   &resourceConfig,
+                               },
+                       },
+               },
+       })
+
+       // We configure on everything but validate, since validate may
+       // not have access to all the variables.
+       seq = append(seq, &EvalOpFilter{
+               Ops: []walkOperation{walkRefresh, walkPlan, walkApply, walkDestroy, walkImport},
+               Node: &EvalSequence{
+                       Nodes: []EvalNode{
+                               &EvalConfigProvider{
+                                       Provider: n,
+                                       Config:   &resourceConfig,
+                               },
+                       },
+               },
+       })
+
+       return &EvalSequence{Nodes: seq}
+}
+
+// CloseProviderEvalTree returns the evaluation tree for closing
+// provider connections that aren't needed anymore.
+func CloseProviderEvalTree(n string) EvalNode {
+       return &EvalCloseProvider{Name: n}
+}
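
The EvalOpFilter/EvalSequence composition above gates sub-trees on the current walk operation. A toy model of that dispatch, with stand-in types rather than the real EvalNode interface:

package main

import "fmt"

// Stand-ins for the EvalSequence / EvalOpFilter composition above: a
// filter runs its wrapped node only when the current walk operation is
// in its allow-list.
type walkOp int

const (
	walkPlan walkOp = iota
	walkApply
)

type evalNode interface{ eval(current walkOp) }

type opFilter struct {
	ops  []walkOp
	node evalNode
}

func (f *opFilter) eval(current walkOp) {
	for _, o := range f.ops {
		if o == current {
			f.node.eval(current)
			return
		}
	}
}

type step struct{ msg string }

func (s *step) eval(walkOp) { fmt.Println(s.msg) }

func main() {
	seq := []evalNode{
		&opFilter{ops: []walkOp{walkPlan, walkApply}, node: &step{"interpolate + build provider config"}},
		&opFilter{ops: []walkOp{walkApply}, node: &step{"configure provider"}},
	}
	for _, n := range seq {
		n.eval(walkPlan) // only the first filter fires
	}
}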
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph.go b/vendor/github.com/hashicorp/terraform/terraform/graph.go
new file mode 100644 (file)
index 0000000..48ce6a3
--- /dev/null
@@ -0,0 +1,172 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "runtime/debug"
+       "strings"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// RootModuleName is the name given to the root module implicitly.
+const RootModuleName = "root"
+
+// RootModulePath is the path for the root module.
+var RootModulePath = []string{RootModuleName}
+
+// Graph represents the graph that Terraform uses to represent resources
+// and their dependencies.
+type Graph struct {
+       // Graph is the actual DAG. This is embedded so you can call the DAG
+       // methods directly.
+       dag.AcyclicGraph
+
+       // Path is the path in the module tree that this Graph represents.
+       // The root is represented by a single element list containing
+       // RootModuleName
+       Path []string
+
+       // debugName is a name for reference in the debug output. This is usually
+       // used to indicate what the topmost builder was, and whether this graph
+       // is a shadow or not.
+       debugName string
+}
+
+func (g *Graph) DirectedGraph() dag.Grapher {
+       return &g.AcyclicGraph
+}
+
+// Walk walks the graph with the given walker for callbacks. The graph
+// will be walked with full parallelism, so the walker should expect
+// to be called concurrently.
+func (g *Graph) Walk(walker GraphWalker) error {
+       return g.walk(walker)
+}
+
+func (g *Graph) walk(walker GraphWalker) error {
+       // The callbacks for enter/exiting a graph
+       ctx := walker.EnterPath(g.Path)
+       defer walker.ExitPath(g.Path)
+
+       // Get the path for logs
+       path := strings.Join(ctx.Path(), ".")
+
+       // Determine if our walker is a panic wrapper
+       panicwrap, ok := walker.(GraphWalkerPanicwrapper)
+       if !ok {
+               panicwrap = nil // just to be sure
+       }
+
+       debugName := "walk-graph.json"
+       if g.debugName != "" {
+               debugName = g.debugName + "-" + debugName
+       }
+
+       debugBuf := dbug.NewFileWriter(debugName)
+       g.SetDebugWriter(debugBuf)
+       defer debugBuf.Close()
+
+       // Walk the graph.
+       var walkFn dag.WalkFunc
+       walkFn = func(v dag.Vertex) (rerr error) {
+               log.Printf("[DEBUG] vertex '%s.%s': walking", path, dag.VertexName(v))
+               g.DebugVisitInfo(v, g.debugName)
+
+               // If we have a panic wrap GraphWalker and a panic occurs, recover
+               // and call that. We ensure the return value is an error, however,
+               // so that future nodes are not called.
+               defer func() {
+                       // If no panicwrap, do nothing
+                       if panicwrap == nil {
+                               return
+                       }
+
+                       // If no panic, do nothing
+                       err := recover()
+                       if err == nil {
+                               return
+                       }
+
+                       // Modify the return value to show the error
+                       rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s",
+                               dag.VertexName(v), err, debug.Stack())
+
+                       // Call the panic wrapper
+                       panicwrap.Panic(v, err)
+               }()
+
+               walker.EnterVertex(v)
+               defer walker.ExitVertex(v, rerr)
+
+               // vertexCtx is the context that we use when evaluating. This
+               // is normally the context of our graph but can be overridden
+               // with a GraphNodeSubPath impl.
+               vertexCtx := ctx
+               if pn, ok := v.(GraphNodeSubPath); ok && len(pn.Path()) > 0 {
+                       vertexCtx = walker.EnterPath(normalizeModulePath(pn.Path()))
+                       defer walker.ExitPath(pn.Path())
+               }
+
+               // If the node is eval-able, then evaluate it.
+               if ev, ok := v.(GraphNodeEvalable); ok {
+                       tree := ev.EvalTree()
+                       if tree == nil {
+                               panic(fmt.Sprintf(
+                                       "%s.%s (%T): nil eval tree", path, dag.VertexName(v), v))
+                       }
+
+                       // Allow the walker to change our tree if needed. Eval,
+                       // then callback with the output.
+                       log.Printf("[DEBUG] vertex '%s.%s': evaluating", path, dag.VertexName(v))
+
+                       g.DebugVertexInfo(v, fmt.Sprintf("evaluating %T(%s)", v, path))
+
+                       tree = walker.EnterEvalTree(v, tree)
+                       output, err := Eval(tree, vertexCtx)
+                       if rerr = walker.ExitEvalTree(v, output, err); rerr != nil {
+                               return
+                       }
+               }
+
+               // If the node is dynamically expanded, then expand it
+               if ev, ok := v.(GraphNodeDynamicExpandable); ok {
+                       log.Printf(
+                               "[DEBUG] vertex '%s.%s': expanding/walking dynamic subgraph",
+                               path,
+                               dag.VertexName(v))
+
+                       g.DebugVertexInfo(v, fmt.Sprintf("expanding %T(%s)", v, path))
+
+                       g, err := ev.DynamicExpand(vertexCtx)
+                       if err != nil {
+                               rerr = err
+                               return
+                       }
+                       if g != nil {
+                               // Walk the subgraph
+                               if rerr = g.walk(walker); rerr != nil {
+                                       return
+                               }
+                       }
+               }
+
+               // If the node has a subgraph, then walk the subgraph
+               if sn, ok := v.(GraphNodeSubgraph); ok {
+                       log.Printf(
+                               "[DEBUG] vertex '%s.%s': walking subgraph",
+                               path,
+                               dag.VertexName(v))
+
+                       g.DebugVertexInfo(v, fmt.Sprintf("subgraph: %T(%s)", v, path))
+
+                       if rerr = sn.Subgraph().(*Graph).walk(walker); rerr != nil {
+                               return
+                       }
+               }
+
+               return nil
+       }
+
+       return g.AcyclicGraph.Walk(walkFn)
+}
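
The deferred-recover block inside walkFn is the part most worth imitating: it converts a panicking vertex into an ordinary error so the walk can stop cleanly instead of crashing the process. Extracted into a standalone sketch:

package main

import (
	"fmt"
	"runtime/debug"
)

// visit mirrors the deferred recover in Graph.walk above: a panic inside
// the vertex callback becomes an ordinary error return value.
func visit(name string, fn func()) (rerr error) {
	defer func() {
		if err := recover(); err != nil {
			rerr = fmt.Errorf("vertex %q captured panic: %s\n\n%s", name, err, debug.Stack())
		}
	}()
	fn()
	return nil
}

func main() {
	err := visit("aws_instance.web", func() { panic("boom") })
	fmt.Println(err != nil) // true: the panic was converted, not propagated
}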
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder.go
new file mode 100644 (file)
index 0000000..6374bb9
--- /dev/null
@@ -0,0 +1,77 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "strings"
+)
+
+// GraphBuilder is an interface that can be implemented and used with
+// Terraform to build the graph that Terraform walks.
+type GraphBuilder interface {
+       // Build builds the graph for the given module path. It is up to
+       // the interface implementation whether this build should expand
+       // the graph or not.
+       Build(path []string) (*Graph, error)
+}
+
+// BasicGraphBuilder is a GraphBuilder that builds a graph out of a
+// series of transforms and (optionally) validates the graph is a valid
+// structure.
+type BasicGraphBuilder struct {
+       Steps    []GraphTransformer
+       Validate bool
+       // Optional name to add to the graph debug log
+       Name string
+}
+
+func (b *BasicGraphBuilder) Build(path []string) (*Graph, error) {
+       g := &Graph{Path: path}
+
+       debugName := "graph.json"
+       if b.Name != "" {
+               debugName = b.Name + "-" + debugName
+       }
+       debugBuf := dbug.NewFileWriter(debugName)
+       g.SetDebugWriter(debugBuf)
+       defer debugBuf.Close()
+
+       for _, step := range b.Steps {
+               if step == nil {
+                       continue
+               }
+
+               stepName := fmt.Sprintf("%T", step)
+               dot := strings.LastIndex(stepName, ".")
+               if dot >= 0 {
+                       stepName = stepName[dot+1:]
+               }
+
+               debugOp := g.DebugOperation(stepName, "")
+               err := step.Transform(g)
+
+               errMsg := ""
+               if err != nil {
+                       errMsg = err.Error()
+               }
+               debugOp.End(errMsg)
+
+               log.Printf(
+                       "[TRACE] Graph after step %T:\n\n%s",
+                       step, g.StringWithNodeTypes())
+
+               if err != nil {
+                       return g, err
+               }
+       }
+
+       // Validate the graph structure
+       if b.Validate {
+               if err := g.Validate(); err != nil {
+                       log.Printf("[ERROR] Graph validation failed. Graph:\n\n%s", g.String())
+                       return nil, err
+               }
+       }
+
+       return g, nil
+}
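
BasicGraphBuilder reduces graph construction to "run an ordered list of transformers, skip nils, stop on error". A stripped-down model of that loop with stand-in types:

package main

import "fmt"

// Stand-in types modeling the BasicGraphBuilder loop above: build a graph
// by running ordered transform steps, skipping nils and stopping on error.
type graph struct{ nodes []string }

type transformer interface{ transform(*graph) error }

type addNode struct{ name string }

func (t *addNode) transform(g *graph) error {
	g.nodes = append(g.nodes, t.name)
	return nil
}

func build(steps []transformer) (*graph, error) {
	g := &graph{}
	for _, s := range steps {
		if s == nil {
			continue
		}
		if err := s.transform(g); err != nil {
			return g, err
		}
	}
	return g, nil
}

func main() {
	g, err := build([]transformer{&addNode{"root"}, nil, &addNode{"provider.aws"}})
	fmt.Println(g.nodes, err) // [root provider.aws] <nil>
}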
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_apply.go
new file mode 100644 (file)
index 0000000..38a90f2
--- /dev/null
@@ -0,0 +1,141 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// ApplyGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for applying a Terraform diff.
+//
+// Because the graph is built from the diff (vs. the config or state),
+// this helps ensure that the apply-time graph doesn't modify any resources
+// that aren't explicitly in the diff. There are other scenarios where the
+// apply can deviate from the diff, so this is just one layer of protection.
+type ApplyGraphBuilder struct {
+       // Module is the root module for the graph to build.
+       Module *module.Tree
+
+       // Diff is the diff to apply.
+       Diff *Diff
+
+       // State is the current state
+       State *State
+
+       // Providers is the list of providers supported.
+       Providers []string
+
+       // Provisioners is the list of provisioners supported.
+       Provisioners []string
+
+       // Targets are resources to target. This is only required to make sure
+       // unnecessary outputs aren't included in the apply graph. The plan
+       // builder successfully handles targeting resources. In the future,
+       // outputs should go into the diff so that this is unnecessary.
+       Targets []string
+
+       // DisableReduce, if true, will not reduce the graph. Great for testing.
+       DisableReduce bool
+
+       // Destroy, if true, represents a pure destroy operation
+       Destroy bool
+
+       // Validate will do structural validation of the graph.
+       Validate bool
+}
+
+// See GraphBuilder
+func (b *ApplyGraphBuilder) Build(path []string) (*Graph, error) {
+       return (&BasicGraphBuilder{
+               Steps:    b.Steps(),
+               Validate: b.Validate,
+               Name:     "ApplyGraphBuilder",
+       }).Build(path)
+}
+
+// See GraphBuilder
+func (b *ApplyGraphBuilder) Steps() []GraphTransformer {
+       // Custom factory for creating providers.
+       concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+               return &NodeApplyableProvider{
+                       NodeAbstractProvider: a,
+               }
+       }
+
+       concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+               return &NodeApplyableResource{
+                       NodeAbstractResource: a,
+               }
+       }
+
+       steps := []GraphTransformer{
+               // Creates all the nodes represented in the diff.
+               &DiffTransformer{
+                       Concrete: concreteResource,
+
+                       Diff:   b.Diff,
+                       Module: b.Module,
+                       State:  b.State,
+               },
+
+               // Create orphan output nodes
+               &OrphanOutputTransformer{Module: b.Module, State: b.State},
+
+               // Attach the configuration to any resources
+               &AttachResourceConfigTransformer{Module: b.Module},
+
+               // Attach the state
+               &AttachStateTransformer{State: b.State},
+
+               // Create all the providers
+               &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+               &ProviderTransformer{},
+               &DisableProviderTransformer{},
+               &ParentProviderTransformer{},
+               &AttachProviderConfigTransformer{Module: b.Module},
+
+               // Destruction ordering
+               &DestroyEdgeTransformer{Module: b.Module, State: b.State},
+               GraphTransformIf(
+                       func() bool { return !b.Destroy },
+                       &CBDEdgeTransformer{Module: b.Module, State: b.State},
+               ),
+
+               // Provisioner-related transformations
+               &MissingProvisionerTransformer{Provisioners: b.Provisioners},
+               &ProvisionerTransformer{},
+
+               // Add root variables
+               &RootVariableTransformer{Module: b.Module},
+
+               // Add the outputs
+               &OutputTransformer{Module: b.Module},
+
+               // Add module variables
+               &ModuleVariableTransformer{Module: b.Module},
+
+               // Connect references so ordering is correct
+               &ReferenceTransformer{},
+
+               // Add the node to fix the state count boundaries
+               &CountBoundaryTransformer{},
+
+               // Target
+               &TargetsTransformer{Targets: b.Targets},
+
+               // Close opened plugin connections
+               &CloseProviderTransformer{},
+               &CloseProvisionerTransformer{},
+
+               // Single root
+               &RootTransformer{},
+       }
+
+       if !b.DisableReduce {
+               // Perform the transitive reduction to make our graph a bit
+               // more sane if possible (it usually is possible).
+               steps = append(steps, &TransitiveReductionTransformer{})
+       }
+
+       return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_destroy_plan.go
new file mode 100644 (file)
index 0000000..014b348
--- /dev/null
@@ -0,0 +1,67 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// DestroyPlanGraphBuilder implements GraphBuilder and is responsible for
+// planning a pure-destroy.
+//
+// Planning a pure destroy operation is simple because we can ignore most
+// ordering configuration and simply reverse the state.
+type DestroyPlanGraphBuilder struct {
+       // Module is the root module for the graph to build.
+       Module *module.Tree
+
+       // State is the current state
+       State *State
+
+       // Targets are resources to target
+       Targets []string
+
+       // Validate will do structural validation of the graph.
+       Validate bool
+}
+
+// See GraphBuilder
+func (b *DestroyPlanGraphBuilder) Build(path []string) (*Graph, error) {
+       return (&BasicGraphBuilder{
+               Steps:    b.Steps(),
+               Validate: b.Validate,
+               Name:     "DestroyPlanGraphBuilder",
+       }).Build(path)
+}
+
+// See GraphBuilder
+func (b *DestroyPlanGraphBuilder) Steps() []GraphTransformer {
+       concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+               return &NodePlanDestroyableResource{
+                       NodeAbstractResource: a,
+               }
+       }
+
+       steps := []GraphTransformer{
+               // Creates all the nodes represented in the state.
+               &StateTransformer{
+                       Concrete: concreteResource,
+                       State:    b.State,
+               },
+
+               // Attach the configuration to any resources
+               &AttachResourceConfigTransformer{Module: b.Module},
+
+               // Destruction ordering. We require this only so that
+               // targeting below will prune the correct things.
+               &DestroyEdgeTransformer{Module: b.Module, State: b.State},
+
+               // Target. Note we don't set "Destroy: true" here since we already
+               // created proper destroy ordering.
+               &TargetsTransformer{Targets: b.Targets},
+
+               // Single root
+               &RootTransformer{},
+       }
+
+       return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_import.go
new file mode 100644 (file)
index 0000000..7070c59
--- /dev/null
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// ImportGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for importing resources into Terraform. This is a much, much
+// simpler graph than a normal configuration graph.
+type ImportGraphBuilder struct {
+       // ImportTargets are the list of resources to import.
+       ImportTargets []*ImportTarget
+
+       // Module is the module to add to the graph. See ImportOpts.Module.
+       Module *module.Tree
+
+       // Providers is the list of providers supported.
+       Providers []string
+}
+
+// Build builds the graph according to the steps returned by Steps.
+func (b *ImportGraphBuilder) Build(path []string) (*Graph, error) {
+       return (&BasicGraphBuilder{
+               Steps:    b.Steps(),
+               Validate: true,
+               Name:     "ImportGraphBuilder",
+       }).Build(path)
+}
+
+// Steps returns the ordered list of GraphTransformers that must be executed
+// to build a complete graph.
+func (b *ImportGraphBuilder) Steps() []GraphTransformer {
+       // Get the module. If we don't have one, we just use an empty tree
+       // so that the transform still works but does nothing.
+       mod := b.Module
+       if mod == nil {
+               mod = module.NewEmptyTree()
+       }
+
+       // Custom factory for creating providers.
+       concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+               return &NodeApplyableProvider{
+                       NodeAbstractProvider: a,
+               }
+       }
+
+       steps := []GraphTransformer{
+               // Create all our resources from the configuration and state
+               &ConfigTransformer{Module: mod},
+
+               // Add the import steps
+               &ImportStateTransformer{Targets: b.ImportTargets},
+
+               // Provider-related transformations
+               &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+               &ProviderTransformer{},
+               &DisableProviderTransformer{},
+               &ParentProviderTransformer{},
+               &AttachProviderConfigTransformer{Module: mod},
+
+               // This validates that the providers only depend on variables
+               &ImportProviderValidateTransformer{},
+
+               // Close opened plugin connections
+               &CloseProviderTransformer{},
+
+               // Single root
+               &RootTransformer{},
+
+               // Optimize
+               &TransitiveReductionTransformer{},
+       }
+
+       return steps
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_input.go
new file mode 100644 (file)
index 0000000..0df48cd
--- /dev/null
@@ -0,0 +1,27 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/dag"
+)
+
+// InputGraphBuilder creates the graph for the input operation.
+//
+// Unlike other graph builders, this is a function since it currently
+// modifies, and is based on, the PlanGraphBuilder. The PlanGraphBuilder
+// passed in will be modified and should not be used for any other operations.
+func InputGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
+       // We're going to customize the concrete functions
+       p.CustomConcrete = true
+
+       // Set the provider to the normal provider. This will ask for input.
+       p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+               return &NodeApplyableProvider{
+                       NodeAbstractProvider: a,
+               }
+       }
+
+       // We purposely don't set any more concrete fields since the remainder
+       // should be no-ops.
+
+       return p
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_plan.go
new file mode 100644 (file)
index 0000000..a6a3a90
--- /dev/null
@@ -0,0 +1,164 @@
+package terraform
+
+import (
+       "sync"
+
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// PlanGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for planning (creating a Terraform Diff).
+//
+// The primary difference between this graph and others:
+//
+//   * Based on the config since it represents the target state
+//
+//   * Ignores lifecycle options since no lifecycle events occur here. This
+//     simplifies the graph significantly since complex transforms such as
+//     create-before-destroy can be completely ignored.
+//
+type PlanGraphBuilder struct {
+       // Module is the root module for the graph to build.
+       Module *module.Tree
+
+       // State is the current state
+       State *State
+
+       // Providers is the list of providers supported.
+       Providers []string
+
+       // Provisioners is the list of provisioners supported.
+       Provisioners []string
+
+       // Targets are resources to target
+       Targets []string
+
+       // DisableReduce, if true, will not reduce the graph. Great for testing.
+       DisableReduce bool
+
+       // Validate will do structural validation of the graph.
+       Validate bool
+
+       // CustomConcrete can be set to customize the node types created
+       // for various parts of the plan. This is useful in order to customize
+       // the plan behavior.
+       CustomConcrete         bool
+       ConcreteProvider       ConcreteProviderNodeFunc
+       ConcreteResource       ConcreteResourceNodeFunc
+       ConcreteResourceOrphan ConcreteResourceNodeFunc
+
+       once sync.Once
+}
+
+// See GraphBuilder
+func (b *PlanGraphBuilder) Build(path []string) (*Graph, error) {
+       return (&BasicGraphBuilder{
+               Steps:    b.Steps(),
+               Validate: b.Validate,
+               Name:     "PlanGraphBuilder",
+       }).Build(path)
+}
+
+// See GraphBuilder
+func (b *PlanGraphBuilder) Steps() []GraphTransformer {
+       b.once.Do(b.init)
+
+       steps := []GraphTransformer{
+               // Creates all the resources represented in the config
+               &ConfigTransformer{
+                       Concrete: b.ConcreteResource,
+                       Module:   b.Module,
+               },
+
+               // Add the outputs
+               &OutputTransformer{Module: b.Module},
+
+               // Add orphan resources
+               &OrphanResourceTransformer{
+                       Concrete: b.ConcreteResourceOrphan,
+                       State:    b.State,
+                       Module:   b.Module,
+               },
+
+               // Attach the configuration to any resources
+               &AttachResourceConfigTransformer{Module: b.Module},
+
+               // Attach the state
+               &AttachStateTransformer{State: b.State},
+
+               // Add root variables
+               &RootVariableTransformer{Module: b.Module},
+
+               // Create all the providers
+               &MissingProviderTransformer{Providers: b.Providers, Concrete: b.ConcreteProvider},
+               &ProviderTransformer{},
+               &DisableProviderTransformer{},
+               &ParentProviderTransformer{},
+               &AttachProviderConfigTransformer{Module: b.Module},
+
+               // Provisioner-related transformations. Only add these if requested.
+               GraphTransformIf(
+                       func() bool { return b.Provisioners != nil },
+                       GraphTransformMulti(
+                               &MissingProvisionerTransformer{Provisioners: b.Provisioners},
+                               &ProvisionerTransformer{},
+                       ),
+               ),
+
+               // Add module variables
+               &ModuleVariableTransformer{Module: b.Module},
+
+               // Connect so that the references are ready for targeting. We'll
+               // have to connect again later for providers and so on.
+               &ReferenceTransformer{},
+
+               // Add the node to fix the state count boundaries
+               &CountBoundaryTransformer{},
+
+               // Target
+               &TargetsTransformer{Targets: b.Targets},
+
+               // Close opened plugin connections
+               &CloseProviderTransformer{},
+               &CloseProvisionerTransformer{},
+
+               // Single root
+               &RootTransformer{},
+       }
+
+       if !b.DisableReduce {
+               // Perform the transitive reduction to make our graph a bit
+               // more sane if possible (it usually is possible).
+               steps = append(steps, &TransitiveReductionTransformer{})
+       }
+
+       return steps
+}
+
+func (b *PlanGraphBuilder) init() {
+       // Do nothing if the user requests customizing the fields
+       if b.CustomConcrete {
+               return
+       }
+
+       b.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+               return &NodeApplyableProvider{
+                       NodeAbstractProvider: a,
+               }
+       }
+
+       b.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
+               return &NodePlannableResource{
+                       NodeAbstractCountResource: &NodeAbstractCountResource{
+                               NodeAbstractResource: a,
+                       },
+               }
+       }
+
+       b.ConcreteResourceOrphan = func(a *NodeAbstractResource) dag.Vertex {
+               return &NodePlannableResourceOrphan{
+                       NodeAbstractResource: a,
+               }
+       }
+}
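
The ConcreteProvider/ConcreteResource fields above are a factory-closure pattern: transformers create abstract nodes, and the builder wraps each one in a walk-specific concrete type, so the same transformers can serve plan, validate, input, and so on. A minimal stand-in illustration:

package main

import "fmt"

// Simplified stand-ins for the concrete-factory pattern above: the builder
// owns closures that wrap abstract nodes in walk-specific concrete types.
type nodeAbstractResource struct{ addr string }

type concreteResourceFunc func(*nodeAbstractResource) interface{}

type nodePlannableResource struct{ *nodeAbstractResource }

func main() {
	var concrete concreteResourceFunc = func(a *nodeAbstractResource) interface{} {
		return &nodePlannableResource{a}
	}

	v := concrete(&nodeAbstractResource{addr: "aws_instance.web"})
	fmt.Printf("%T\n", v) // *main.nodePlannableResource
}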
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_refresh.go
new file mode 100644 (file)
index 0000000..88ae338
--- /dev/null
@@ -0,0 +1,132 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// RefreshGraphBuilder implements GraphBuilder and is responsible for building
+// a graph for refreshing (updating the Terraform state).
+//
+// The primary difference between this graph and others:
+//
+//   * Based on the state since it represents the only resources that
+//     need to be refreshed.
+//
+//   * Ignores lifecycle options since no lifecycle events occur here. This
+//     simplifies the graph significantly since complex transforms such as
+//     create-before-destroy can be completely ignored.
+//
+type RefreshGraphBuilder struct {
+       // Module is the root module for the graph to build.
+       Module *module.Tree
+
+       // State is the current state
+       State *State
+
+       // Providers is the list of providers supported.
+       Providers []string
+
+       // Targets are resources to target
+       Targets []string
+
+       // DisableReduce, if true, will not reduce the graph. Great for testing.
+       DisableReduce bool
+
+       // Validate will do structural validation of the graph.
+       Validate bool
+}
+
+// See GraphBuilder
+func (b *RefreshGraphBuilder) Build(path []string) (*Graph, error) {
+       return (&BasicGraphBuilder{
+               Steps:    b.Steps(),
+               Validate: b.Validate,
+               Name:     "RefreshGraphBuilder",
+       }).Build(path)
+}
+
+// See GraphBuilder
+func (b *RefreshGraphBuilder) Steps() []GraphTransformer {
+       // Custom factory for creating providers.
+       concreteProvider := func(a *NodeAbstractProvider) dag.Vertex {
+               return &NodeApplyableProvider{
+                       NodeAbstractProvider: a,
+               }
+       }
+
+       concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+               return &NodeRefreshableResource{
+                       NodeAbstractResource: a,
+               }
+       }
+
+       concreteDataResource := func(a *NodeAbstractResource) dag.Vertex {
+               return &NodeRefreshableDataResource{
+                       NodeAbstractCountResource: &NodeAbstractCountResource{
+                               NodeAbstractResource: a,
+                       },
+               }
+       }
+
+       steps := []GraphTransformer{
+               // Creates all the resources represented in the state
+               &StateTransformer{
+                       Concrete: concreteResource,
+                       State:    b.State,
+               },
+
+               // Creates all the data resources that aren't in the state
+               &ConfigTransformer{
+                       Concrete:   concreteDataResource,
+                       Module:     b.Module,
+                       Unique:     true,
+                       ModeFilter: true,
+                       Mode:       config.DataResourceMode,
+               },
+
+               // Attach the state
+               &AttachStateTransformer{State: b.State},
+
+               // Attach the configuration to any resources
+               &AttachResourceConfigTransformer{Module: b.Module},
+
+               // Add root variables
+               &RootVariableTransformer{Module: b.Module},
+
+               // Create all the providers
+               &MissingProviderTransformer{Providers: b.Providers, Concrete: concreteProvider},
+               &ProviderTransformer{},
+               &DisableProviderTransformer{},
+               &ParentProviderTransformer{},
+               &AttachProviderConfigTransformer{Module: b.Module},
+
+               // Add the outputs
+               &OutputTransformer{Module: b.Module},
+
+               // Add module variables
+               &ModuleVariableTransformer{Module: b.Module},
+
+               // Connect so that the references are ready for targeting. We'll
+               // have to connect again later for providers and so on.
+               &ReferenceTransformer{},
+
+               // Target
+               &TargetsTransformer{Targets: b.Targets},
+
+               // Close opened plugin connections
+               &CloseProviderTransformer{},
+
+               // Single root
+               &RootTransformer{},
+       }
+
+       if !b.DisableReduce {
+               // Perform the transitive reduction to make our graph a bit
+               // more sane if possible (it usually is possible).
+               steps = append(steps, &TransitiveReductionTransformer{})
+       }
+
+       return steps
+}
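A minimal usage sketch, not part of the vendored diff: every API shown appears in the file above; buildRefreshGraph is a hypothetical helper, mod and state are assumed to be loaded elsewhere, and "statuscake" is just an illustrative provider name.

func buildRefreshGraph(mod *module.Tree, state *State) (*Graph, error) {
    b := &RefreshGraphBuilder{
        Module:    mod,
        State:     state,
        Providers: []string{"statuscake"},
        Validate:  true,
    }
    // []string{"root"} is the conventional path of the root module.
    return b.Build([]string{"root"})
}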
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go b/vendor/github.com/hashicorp/terraform/terraform/graph_builder_validate.go
new file mode 100644 (file)
index 0000000..645ec7b
--- /dev/null
@@ -0,0 +1,36 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/dag"
+)
+
+// ValidateGraphBuilder creates the graph for the validate operation.
+//
+// ValidateGraphBuilder is based on the PlanGraphBuilder. We do this so that
+// we only have to validate what we'd normally plan anyway. The
+// PlanGraphBuilder given will be modified so it shouldn't be used for anything
+// else after calling this function.
+func ValidateGraphBuilder(p *PlanGraphBuilder) GraphBuilder {
+       // We're going to customize the concrete functions
+       p.CustomConcrete = true
+
+       // Set the provider to the normal provider. This will ask for input.
+       p.ConcreteProvider = func(a *NodeAbstractProvider) dag.Vertex {
+               return &NodeApplyableProvider{
+                       NodeAbstractProvider: a,
+               }
+       }
+
+       p.ConcreteResource = func(a *NodeAbstractResource) dag.Vertex {
+               return &NodeValidatableResource{
+                       NodeAbstractCountResource: &NodeAbstractCountResource{
+                               NodeAbstractResource: a,
+                       },
+               }
+       }
+
+       // We purposely don't set any other concrete types since they don't
+       // require validation.
+
+       return p
+}
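A companion sketch, not part of the vendored diff: because ValidateGraphBuilder mutates the PlanGraphBuilder it receives, the plan builder must not be reused afterwards. buildValidateGraph is hypothetical and the PlanGraphBuilder field values are assumptions.

func buildValidateGraph(mod *module.Tree, state *State) (*Graph, error) {
    p := &PlanGraphBuilder{
        Module:    mod,
        State:     state,
        Providers: []string{"statuscake"},
        Validate:  true,
    }
    // p is consumed here; don't call p.Build separately afterwards.
    return ValidateGraphBuilder(p).Build([]string{"root"})
}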
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go b/vendor/github.com/hashicorp/terraform/terraform/graph_dot.go
new file mode 100644 (file)
index 0000000..73e3821
--- /dev/null
@@ -0,0 +1,9 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
+// GraphDot returns the dot formatting of a visual representation of
+// the given Terraform graph.
+func GraphDot(g *Graph, opts *dag.DotOpts) (string, error) {
+       return string(g.Dot(opts)), nil
+}
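A short usage sketch, not part of the vendored diff: printGraph is hypothetical, the standard "fmt" package is assumed, and zero-value dag.DotOpts are assumed to be acceptable for a basic rendering.

func printGraph(g *Graph) error {
    dot, err := GraphDot(g, &dag.DotOpts{})
    if err != nil {
        return err
    }
    fmt.Println(dot) // DOT text, viewable with Graphviz
    return nil
}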
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go b/vendor/github.com/hashicorp/terraform/terraform/graph_interface_subgraph.go
new file mode 100644 (file)
index 0000000..2897eb5
--- /dev/null
@@ -0,0 +1,7 @@
+package terraform
+
+// GraphNodeSubPath says that a node is part of a graph with a
+// different path, and the context should be adjusted accordingly.
+type GraphNodeSubPath interface {
+       Path() []string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk.go
new file mode 100644 (file)
index 0000000..34ce6f6
--- /dev/null
@@ -0,0 +1,60 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphWalker is an interface that can be implemented that when used
+// with Graph.Walk will invoke the given callbacks under certain events.
+type GraphWalker interface {
+       EnterPath([]string) EvalContext
+       ExitPath([]string)
+       EnterVertex(dag.Vertex)
+       ExitVertex(dag.Vertex, error)
+       EnterEvalTree(dag.Vertex, EvalNode) EvalNode
+       ExitEvalTree(dag.Vertex, interface{}, error) error
+}
+
+// GraphWalkerPanicwrapper can be optionally implemented to catch panics
+// that occur while walking the graph. This is not generally recommended
+// since panics should crash Terraform and result in a bug report. However,
+// this is particularly useful for situations like the shadow graph where
+// you don't ever want to cause a panic.
+type GraphWalkerPanicwrapper interface {
+       GraphWalker
+
+       // Panic is called when a panic occurs. This will halt the panic from
+       // propagating, so if the walker still wants to crash it should panic
+       // again. This is called from within a defer so runtime/debug.Stack can
+       // be used to get the stack trace of the panic.
+       Panic(dag.Vertex, interface{})
+}
+
+// GraphWalkerPanicwrap wraps an existing GraphWalker so that panics are
+// swallowed. The panics aren't lost: they are still returned as errors
+// as part of a graph walk.
+func GraphWalkerPanicwrap(w GraphWalker) GraphWalkerPanicwrapper {
+       return &graphWalkerPanicwrapper{
+               GraphWalker: w,
+       }
+}
+
+type graphWalkerPanicwrapper struct {
+       GraphWalker
+}
+
+func (graphWalkerPanicwrapper) Panic(dag.Vertex, interface{}) {}
+
+// NullGraphWalker is a GraphWalker implementation that does nothing.
+// This can be embedded within other GraphWalker implementations for easily
+// implementing all the required functions.
+type NullGraphWalker struct{}
+
+func (NullGraphWalker) EnterPath([]string) EvalContext                  { return new(MockEvalContext) }
+func (NullGraphWalker) ExitPath([]string)                               {}
+func (NullGraphWalker) EnterVertex(dag.Vertex)                          {}
+func (NullGraphWalker) ExitVertex(dag.Vertex, error)                    {}
+func (NullGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode { return n }
+func (NullGraphWalker) ExitEvalTree(dag.Vertex, interface{}, error) error {
+       return nil
+}
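A sketch of the embedding pattern, not part of the vendored diff: a custom walker overrides only the callbacks it cares about and inherits no-ops from NullGraphWalker; countingWalker is a hypothetical name.

type countingWalker struct {
    NullGraphWalker
    visited int
}

// EnterVertex is the only callback overridden here; everything else falls
// through to NullGraphWalker's no-op implementations.
func (w *countingWalker) EnterVertex(dag.Vertex) { w.visited++ }

// Wrapping the walker swallows panics during the walk instead of crashing:
//   walker := GraphWalkerPanicwrap(&countingWalker{})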
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_context.go
new file mode 100644 (file)
index 0000000..e63b460
--- /dev/null
@@ -0,0 +1,157 @@
+package terraform
+
+import (
+       "context"
+       "fmt"
+       "log"
+       "sync"
+
+       "github.com/hashicorp/errwrap"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// ContextGraphWalker is the GraphWalker implementation used with the
+// Context struct to walk and evaluate the graph.
+type ContextGraphWalker struct {
+       NullGraphWalker
+
+       // Configurable values
+       Context     *Context
+       Operation   walkOperation
+       StopContext context.Context
+
+       // Outputs. Do not set these, and do not read them while the graph
+       // is being walked.
+       ValidationWarnings []string
+       ValidationErrors   []error
+
+       errorLock           sync.Mutex
+       once                sync.Once
+       contexts            map[string]*BuiltinEvalContext
+       contextLock         sync.Mutex
+       interpolaterVars    map[string]map[string]interface{}
+       interpolaterVarLock sync.Mutex
+       providerCache       map[string]ResourceProvider
+       providerConfigCache map[string]*ResourceConfig
+       providerLock        sync.Mutex
+       provisionerCache    map[string]ResourceProvisioner
+       provisionerLock     sync.Mutex
+}
+
+func (w *ContextGraphWalker) EnterPath(path []string) EvalContext {
+       w.once.Do(w.init)
+
+       w.contextLock.Lock()
+       defer w.contextLock.Unlock()
+
+       // If we already have a context for this path cached, use that
+       key := PathCacheKey(path)
+       if ctx, ok := w.contexts[key]; ok {
+               return ctx
+       }
+
+       // Setup the variables for this interpolater
+       variables := make(map[string]interface{})
+       if len(path) <= 1 {
+               for k, v := range w.Context.variables {
+                       variables[k] = v
+               }
+       }
+       w.interpolaterVarLock.Lock()
+       if m, ok := w.interpolaterVars[key]; ok {
+               for k, v := range m {
+                       variables[k] = v
+               }
+       }
+       w.interpolaterVars[key] = variables
+       w.interpolaterVarLock.Unlock()
+
+       ctx := &BuiltinEvalContext{
+               StopContext:         w.StopContext,
+               PathValue:           path,
+               Hooks:               w.Context.hooks,
+               InputValue:          w.Context.uiInput,
+               Components:          w.Context.components,
+               ProviderCache:       w.providerCache,
+               ProviderConfigCache: w.providerConfigCache,
+               ProviderInputConfig: w.Context.providerInputConfig,
+               ProviderLock:        &w.providerLock,
+               ProvisionerCache:    w.provisionerCache,
+               ProvisionerLock:     &w.provisionerLock,
+               DiffValue:           w.Context.diff,
+               DiffLock:            &w.Context.diffLock,
+               StateValue:          w.Context.state,
+               StateLock:           &w.Context.stateLock,
+               Interpolater: &Interpolater{
+                       Operation:          w.Operation,
+                       Meta:               w.Context.meta,
+                       Module:             w.Context.module,
+                       State:              w.Context.state,
+                       StateLock:          &w.Context.stateLock,
+                       VariableValues:     variables,
+                       VariableValuesLock: &w.interpolaterVarLock,
+               },
+               InterpolaterVars:    w.interpolaterVars,
+               InterpolaterVarLock: &w.interpolaterVarLock,
+       }
+
+       w.contexts[key] = ctx
+       return ctx
+}
+
+func (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {
+       log.Printf("[TRACE] [%s] Entering eval tree: %s",
+               w.Operation, dag.VertexName(v))
+
+       // Acquire a lock on the semaphore
+       w.Context.parallelSem.Acquire()
+
+       // We want to filter the evaluation tree to only include nodes
+       // that apply to this walk operation.
+       return EvalFilter(n, EvalNodeFilterOp(w.Operation))
+}
+
+func (w *ContextGraphWalker) ExitEvalTree(
+       v dag.Vertex, output interface{}, err error) error {
+       log.Printf("[TRACE] [%s] Exiting eval tree: %s",
+               w.Operation, dag.VertexName(v))
+
+       // Release the semaphore
+       w.Context.parallelSem.Release()
+
+       if err == nil {
+               return nil
+       }
+
+       // Acquire the lock, since everything below requires it.
+       w.errorLock.Lock()
+       defer w.errorLock.Unlock()
+
+       // Try to get a validation error out of it. If it's not a validation
+       // error, then just record the normal error.
+       verr, ok := err.(*EvalValidateError)
+       if !ok {
+               return err
+       }
+
+       for _, msg := range verr.Warnings {
+               w.ValidationWarnings = append(
+                       w.ValidationWarnings,
+                       fmt.Sprintf("%s: %s", dag.VertexName(v), msg))
+       }
+       for _, e := range verr.Errors {
+               w.ValidationErrors = append(
+                       w.ValidationErrors,
+                       errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", dag.VertexName(v)), e))
+       }
+
+       return nil
+}
+
+func (w *ContextGraphWalker) init() {
+       w.contexts = make(map[string]*BuiltinEvalContext, 5)
+       w.providerCache = make(map[string]ResourceProvider, 5)
+       w.providerConfigCache = make(map[string]*ResourceConfig, 5)
+       w.provisionerCache = make(map[string]ResourceProvisioner, 5)
+       w.interpolaterVars = make(map[string]map[string]interface{}, 5)
+}
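A sketch of the caching behavior in EnterPath, not part of the vendored diff: repeated calls for the same module path return the same cached context, so provider caches and interpolater variables are shared across the walk. samePathSameContext is hypothetical and ctx is an assumed, initialized *Context.

func samePathSameContext(ctx *Context) bool {
    w := &ContextGraphWalker{Context: ctx, Operation: walkRefresh}
    c1 := w.EnterPath([]string{"root", "child"})
    c2 := w.EnterPath([]string{"root", "child"})
    return c1 == c2 // true: the second call hits the context cache
}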
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go b/vendor/github.com/hashicorp/terraform/terraform/graph_walk_operation.go
new file mode 100644 (file)
index 0000000..3fb3748
--- /dev/null
@@ -0,0 +1,18 @@
+package terraform
+
+//go:generate stringer -type=walkOperation graph_walk_operation.go
+
+// walkOperation is an enum which tells the walkContext what to do.
+type walkOperation byte
+
+const (
+       walkInvalid walkOperation = iota
+       walkInput
+       walkApply
+       walkPlan
+       walkPlanDestroy
+       walkRefresh
+       walkValidate
+       walkDestroy
+       walkImport
+)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go
new file mode 100644 (file)
index 0000000..e97b485
--- /dev/null
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=GraphType context_graph_type.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
+
+var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}
+
+func (i GraphType) String() string {
+       if i >= GraphType(len(_GraphType_index)-1) {
+               return fmt.Sprintf("GraphType(%d)", i)
+       }
+       return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
+}
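A worked example of the generated lookup, not part of the vendored diff: _GraphType_index holds cumulative byte offsets into the concatenated _GraphType_name, so value i maps to the substring between offsets i and i+1.

// For GraphType(2), _GraphType_index[2] == 31 and _GraphType_index[3] == 47,
// so String() returns _GraphType_name[31:47] == "GraphTypeRefresh".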
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook.go b/vendor/github.com/hashicorp/terraform/terraform/hook.go
new file mode 100644 (file)
index 0000000..ab11e8e
--- /dev/null
@@ -0,0 +1,137 @@
+package terraform
+
+// HookAction is an enum of actions that can be taken as a result of a hook
+// callback. This allows you to modify the behavior of Terraform at runtime.
+type HookAction byte
+
+const (
+       // HookActionContinue continues with processing as usual.
+       HookActionContinue HookAction = iota
+
+       // HookActionHalt halts immediately: no more hooks are processed
+       // and the action that Terraform was about to take is cancelled.
+       HookActionHalt
+)
+
+// Hook is the interface that must be implemented to hook into various
+// parts of Terraform, allowing you to inspect or change behavior at runtime.
+//
+// There are MANY hook points into Terraform. If you only want to implement
+// some hook points, but not all (which is the likely case), then embed the
+// NilHook into your struct, which implements all of the interface but does
+// nothing. Then, override only the functions you want to implement.
+type Hook interface {
+       // PreApply and PostApply are called before and after a single
+       // resource is applied. The error argument in PostApply is the
+       // error, if any, that was returned from the provider Apply call itself.
+       PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error)
+       PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+       // PreDiff and PostDiff are called before and after a single
+       // resource is diffed.
+       PreDiff(*InstanceInfo, *InstanceState) (HookAction, error)
+       PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error)
+
+       // Provisioning hooks
+       //
+       // All should be self-explanatory. ProvisionOutput is called with
+       // output sent back by the provisioners. This will be called multiple
+       // times as output comes in, but each call should represent a line of
+       // output. The ProvisionOutput method cannot control whether the
+       // hook continues running.
+       PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+       PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error)
+       PreProvision(*InstanceInfo, string) (HookAction, error)
+       PostProvision(*InstanceInfo, string, error) (HookAction, error)
+       ProvisionOutput(*InstanceInfo, string, string)
+
+       // PreRefresh and PostRefresh are called before and after a single
+       // resource state is refreshed, respectively.
+       PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+       PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error)
+
+       // PostStateUpdate is called after the state is updated.
+       PostStateUpdate(*State) (HookAction, error)
+
+       // PreImportState and PostImportState are called before and after
+       // a single resource's state is imported.
+       PreImportState(*InstanceInfo, string) (HookAction, error)
+       PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error)
+}
+
+// NilHook is a Hook implementation that does nothing. It exists only to
+// simplify implementing hooks. You can embed this into your Hook implementation
+// and only implement the functions you are interested in.
+type NilHook struct{}
+
+func (*NilHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) ProvisionOutput(*InstanceInfo, string, string) {}
+
+func (*NilHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+func (*NilHook) PostStateUpdate(*State) (HookAction, error) {
+       return HookActionContinue, nil
+}
+
+// handleHook turns hook actions into panics. This lets you use Go's
+// panic/recover mechanism as flow control for hook actions.
+func handleHook(a HookAction, err error) {
+       if err != nil {
+               // TODO: handle errors
+       }
+
+       switch a {
+       case HookActionContinue:
+               return
+       case HookActionHalt:
+               panic(HookActionHalt)
+       }
+}
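A sketch of the embedding pattern the Hook doc comment describes, not part of the vendored diff: logHook is hypothetical, only observes applies, and assumes the standard "log" package; InstanceInfo.Id is assumed to carry the resource address.

type logHook struct {
    NilHook // supplies no-op defaults for every other callback
}

func (h *logHook) PreApply(i *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) {
    log.Printf("[INFO] about to apply %s", i.Id)
    return HookActionContinue, nil
}

var _ Hook = new(logHook) // compile-time check: *logHook satisfies Hook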
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go b/vendor/github.com/hashicorp/terraform/terraform/hook_mock.go
new file mode 100644 (file)
index 0000000..0e46400
--- /dev/null
@@ -0,0 +1,245 @@
+package terraform
+
+import "sync"
+
+// MockHook is an implementation of Hook that can be used for tests.
+// It records all of its function calls.
+type MockHook struct {
+       sync.Mutex
+
+       PreApplyCalled bool
+       PreApplyInfo   *InstanceInfo
+       PreApplyDiff   *InstanceDiff
+       PreApplyState  *InstanceState
+       PreApplyReturn HookAction
+       PreApplyError  error
+
+       PostApplyCalled      bool
+       PostApplyInfo        *InstanceInfo
+       PostApplyState       *InstanceState
+       PostApplyError       error
+       PostApplyReturn      HookAction
+       PostApplyReturnError error
+       PostApplyFn          func(*InstanceInfo, *InstanceState, error) (HookAction, error)
+
+       PreDiffCalled bool
+       PreDiffInfo   *InstanceInfo
+       PreDiffState  *InstanceState
+       PreDiffReturn HookAction
+       PreDiffError  error
+
+       PostDiffCalled bool
+       PostDiffInfo   *InstanceInfo
+       PostDiffDiff   *InstanceDiff
+       PostDiffReturn HookAction
+       PostDiffError  error
+
+       PreProvisionResourceCalled bool
+       PreProvisionResourceInfo   *InstanceInfo
+       PreProvisionInstanceState  *InstanceState
+       PreProvisionResourceReturn HookAction
+       PreProvisionResourceError  error
+
+       PostProvisionResourceCalled bool
+       PostProvisionResourceInfo   *InstanceInfo
+       PostProvisionInstanceState  *InstanceState
+       PostProvisionResourceReturn HookAction
+       PostProvisionResourceError  error
+
+       PreProvisionCalled        bool
+       PreProvisionInfo          *InstanceInfo
+       PreProvisionProvisionerId string
+       PreProvisionReturn        HookAction
+       PreProvisionError         error
+
+       PostProvisionCalled        bool
+       PostProvisionInfo          *InstanceInfo
+       PostProvisionProvisionerId string
+       PostProvisionErrorArg      error
+       PostProvisionReturn        HookAction
+       PostProvisionError         error
+
+       ProvisionOutputCalled        bool
+       ProvisionOutputInfo          *InstanceInfo
+       ProvisionOutputProvisionerId string
+       ProvisionOutputMessage       string
+
+       PostRefreshCalled bool
+       PostRefreshInfo   *InstanceInfo
+       PostRefreshState  *InstanceState
+       PostRefreshReturn HookAction
+       PostRefreshError  error
+
+       PreRefreshCalled bool
+       PreRefreshInfo   *InstanceInfo
+       PreRefreshState  *InstanceState
+       PreRefreshReturn HookAction
+       PreRefreshError  error
+
+       PreImportStateCalled bool
+       PreImportStateInfo   *InstanceInfo
+       PreImportStateId     string
+       PreImportStateReturn HookAction
+       PreImportStateError  error
+
+       PostImportStateCalled bool
+       PostImportStateInfo   *InstanceInfo
+       PostImportStateState  []*InstanceState
+       PostImportStateReturn HookAction
+       PostImportStateError  error
+
+       PostStateUpdateCalled bool
+       PostStateUpdateState  *State
+       PostStateUpdateReturn HookAction
+       PostStateUpdateError  error
+}
+
+func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PreApplyCalled = true
+       h.PreApplyInfo = n
+       h.PreApplyDiff = d
+       h.PreApplyState = s
+       return h.PreApplyReturn, h.PreApplyError
+}
+
+func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PostApplyCalled = true
+       h.PostApplyInfo = n
+       h.PostApplyState = s
+       h.PostApplyError = e
+
+       if h.PostApplyFn != nil {
+               return h.PostApplyFn(n, s, e)
+       }
+
+       return h.PostApplyReturn, h.PostApplyReturnError
+}
+
+func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PreDiffCalled = true
+       h.PreDiffInfo = n
+       h.PreDiffState = s
+       return h.PreDiffReturn, h.PreDiffError
+}
+
+func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PostDiffCalled = true
+       h.PostDiffInfo = n
+       h.PostDiffDiff = d
+       return h.PostDiffReturn, h.PostDiffError
+}
+
+func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PreProvisionResourceCalled = true
+       h.PreProvisionResourceInfo = n
+       h.PreProvisionInstanceState = s
+       return h.PreProvisionResourceReturn, h.PreProvisionResourceError
+}
+
+func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PostProvisionResourceCalled = true
+       h.PostProvisionResourceInfo = n
+       h.PostProvisionInstanceState = s
+       return h.PostProvisionResourceReturn, h.PostProvisionResourceError
+}
+
+func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PreProvisionCalled = true
+       h.PreProvisionInfo = n
+       h.PreProvisionProvisionerId = provId
+       return h.PreProvisionReturn, h.PreProvisionError
+}
+
+func (h *MockHook) PostProvision(n *InstanceInfo, provId string, err error) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PostProvisionCalled = true
+       h.PostProvisionInfo = n
+       h.PostProvisionProvisionerId = provId
+       h.PostProvisionErrorArg = err
+       return h.PostProvisionReturn, h.PostProvisionError
+}
+
+func (h *MockHook) ProvisionOutput(
+       n *InstanceInfo,
+       provId string,
+       msg string) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.ProvisionOutputCalled = true
+       h.ProvisionOutputInfo = n
+       h.ProvisionOutputProvisionerId = provId
+       h.ProvisionOutputMessage = msg
+}
+
+func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PreRefreshCalled = true
+       h.PreRefreshInfo = n
+       h.PreRefreshState = s
+       return h.PreRefreshReturn, h.PreRefreshError
+}
+
+func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PostRefreshCalled = true
+       h.PostRefreshInfo = n
+       h.PostRefreshState = s
+       return h.PostRefreshReturn, h.PostRefreshError
+}
+
+func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PreImportStateCalled = true
+       h.PreImportStateInfo = info
+       h.PreImportStateId = id
+       return h.PreImportStateReturn, h.PreImportStateError
+}
+
+func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PostImportStateCalled = true
+       h.PostImportStateInfo = info
+       h.PostImportStateState = s
+       return h.PostImportStateReturn, h.PostImportStateError
+}
+
+func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) {
+       h.Lock()
+       defer h.Unlock()
+
+       h.PostStateUpdateCalled = true
+       h.PostStateUpdateState = s
+       return h.PostStateUpdateReturn, h.PostStateUpdateError
+}
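A sketch of MockHook in a test, not part of the vendored diff: configure the canned return, invoke the hook, then assert on the recorded call. Assumes the standard "testing" package; the instance address is invented.

func TestHookHalts(t *testing.T) {
    h := &MockHook{PreApplyReturn: HookActionHalt}
    action, _ := h.PreApply(&InstanceInfo{Id: "statuscake_test.foo"}, nil, nil)
    if !h.PreApplyCalled || action != HookActionHalt {
        t.Fatal("expected a recorded PreApply call returning halt")
    }
}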
diff --git a/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go b/vendor/github.com/hashicorp/terraform/terraform/hook_stop.go
new file mode 100644 (file)
index 0000000..104d009
--- /dev/null
@@ -0,0 +1,87 @@
+package terraform
+
+import (
+       "sync/atomic"
+)
+
+// stopHook is a private Hook implementation that Terraform uses to
+// signal when to stop or cancel actions.
+type stopHook struct {
+       stop uint32
+}
+
+func (h *stopHook) PreApply(*InstanceInfo, *InstanceState, *InstanceDiff) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PostApply(*InstanceInfo, *InstanceState, error) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PreDiff(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PostDiff(*InstanceInfo, *InstanceDiff) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PreProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PostProvisionResource(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PreProvision(*InstanceInfo, string) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PostProvision(*InstanceInfo, string, error) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) ProvisionOutput(*InstanceInfo, string, string) {
+}
+
+func (h *stopHook) PreRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PostRefresh(*InstanceInfo, *InstanceState) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PreImportState(*InstanceInfo, string) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PostImportState(*InstanceInfo, []*InstanceState) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) PostStateUpdate(*State) (HookAction, error) {
+       return h.hook()
+}
+
+func (h *stopHook) hook() (HookAction, error) {
+       if h.Stopped() {
+               return HookActionHalt, nil
+       }
+
+       return HookActionContinue, nil
+}
+
+// Reset clears the stop flag so that subsequent hook callbacks continue as normal.
+func (h *stopHook) Reset() {
+       atomic.StoreUint32(&h.stop, 0)
+}
+
+func (h *stopHook) Stop() {
+       atomic.StoreUint32(&h.stop, 1)
+}
+
+func (h *stopHook) Stopped() bool {
+       return atomic.LoadUint32(&h.stop) == 1
+}
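A sketch of the stop flow, not part of the vendored diff: once Stop is called every callback reports HookActionHalt, which handleHook (in hook.go above) converts into panic(HookActionHalt) for the walk machinery to recover from. demoStopHook is hypothetical.

func demoStopHook() bool {
    h := &stopHook{}
    h.Stop()
    action, _ := h.PreApply(nil, nil, nil)
    return action == HookActionHalt // true once stopped
}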
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype.go
new file mode 100644 (file)
index 0000000..0895971
--- /dev/null
@@ -0,0 +1,13 @@
+package terraform
+
+//go:generate stringer -type=InstanceType instancetype.go
+
+// InstanceType is an enum of the various types of instances stored in the State
+type InstanceType int
+
+const (
+       TypeInvalid InstanceType = iota
+       TypePrimary
+       TypeTainted
+       TypeDeposed
+)
diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go
new file mode 100644 (file)
index 0000000..f69267c
--- /dev/null
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=InstanceType instancetype.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
+
+var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
+
+func (i InstanceType) String() string {
+       if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) {
+               return fmt.Sprintf("InstanceType(%d)", i)
+       }
+       return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]]
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go
new file mode 100644 (file)
index 0000000..19dcf21
--- /dev/null
@@ -0,0 +1,782 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "os"
+       "strconv"
+       "strings"
+       "sync"
+
+       "github.com/hashicorp/hil"
+       "github.com/hashicorp/hil/ast"
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/flatmap"
+)
+
+const (
+       // VarEnvPrefix is the prefix of environment variables that are
+       // read to set Terraform variables.
+       VarEnvPrefix = "TF_VAR_"
+)
+
+// Interpolater is the structure responsible for determining the values
+// for interpolations such as `aws_instance.foo.bar`.
+type Interpolater struct {
+       Operation          walkOperation
+       Meta               *ContextMeta
+       Module             *module.Tree
+       State              *State
+       StateLock          *sync.RWMutex
+       VariableValues     map[string]interface{}
+       VariableValuesLock *sync.Mutex
+}
+
+// InterpolationScope is the current scope of execution. This is required
+// since some variables which are interpolated are dependent on what we're
+// operating on and where we are.
+type InterpolationScope struct {
+       Path     []string
+       Resource *Resource
+}
+
+// Values returns the values for all the variables in the given map.
+func (i *Interpolater) Values(
+       scope *InterpolationScope,
+       vars map[string]config.InterpolatedVariable) (map[string]ast.Variable, error) {
+       if scope == nil {
+               scope = &InterpolationScope{}
+       }
+
+       result := make(map[string]ast.Variable, len(vars))
+
+       // Copy the default variables
+       if i.Module != nil && scope != nil {
+               mod := i.Module
+               if len(scope.Path) > 1 {
+                       mod = i.Module.Child(scope.Path[1:])
+               }
+               for _, v := range mod.Config().Variables {
+                       // Set default variables
+                       if v.Default == nil {
+                               continue
+                       }
+
+                       n := fmt.Sprintf("var.%s", v.Name)
+                       variable, err := hil.InterfaceToVariable(v.Default)
+                       if err != nil {
+                               return nil, fmt.Errorf("invalid default map value for %s: %v", v.Name, v.Default)
+                       }
+
+                       result[n] = variable
+               }
+       }
+
+       for n, rawV := range vars {
+               var err error
+               switch v := rawV.(type) {
+               case *config.CountVariable:
+                       err = i.valueCountVar(scope, n, v, result)
+               case *config.ModuleVariable:
+                       err = i.valueModuleVar(scope, n, v, result)
+               case *config.PathVariable:
+                       err = i.valuePathVar(scope, n, v, result)
+               case *config.ResourceVariable:
+                       err = i.valueResourceVar(scope, n, v, result)
+               case *config.SelfVariable:
+                       err = i.valueSelfVar(scope, n, v, result)
+               case *config.SimpleVariable:
+                       err = i.valueSimpleVar(scope, n, v, result)
+               case *config.TerraformVariable:
+                       err = i.valueTerraformVar(scope, n, v, result)
+               case *config.UserVariable:
+                       err = i.valueUserVar(scope, n, v, result)
+               default:
+                       err = fmt.Errorf("%s: unknown variable type: %T", n, rawV)
+               }
+
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return result, nil
+}
+
+func (i *Interpolater) valueCountVar(
+       scope *InterpolationScope,
+       n string,
+       v *config.CountVariable,
+       result map[string]ast.Variable) error {
+       switch v.Type {
+       case config.CountValueIndex:
+               if scope.Resource == nil {
+                       return fmt.Errorf("%s: count.index is only valid within resources", n)
+               }
+               result[n] = ast.Variable{
+                       Value: scope.Resource.CountIndex,
+                       Type:  ast.TypeInt,
+               }
+               return nil
+       default:
+               return fmt.Errorf("%s: unknown count type: %#v", n, v.Type)
+       }
+}
+
+func unknownVariable() ast.Variable {
+       return ast.Variable{
+               Type:  ast.TypeUnknown,
+               Value: config.UnknownVariableValue,
+       }
+}
+
+func unknownValue() string {
+       return hil.UnknownValue
+}
+
+func (i *Interpolater) valueModuleVar(
+       scope *InterpolationScope,
+       n string,
+       v *config.ModuleVariable,
+       result map[string]ast.Variable) error {
+
+       // Build the path to the child module we want
+       path := make([]string, len(scope.Path), len(scope.Path)+1)
+       copy(path, scope.Path)
+       path = append(path, v.Name)
+
+       // Grab the lock so that if other interpolations are running or
+       // state is being modified, we'll be safe.
+       i.StateLock.RLock()
+       defer i.StateLock.RUnlock()
+
+       // Get the module where we're looking for the value
+       mod := i.State.ModuleByPath(path)
+       if mod == nil {
+               // If the module doesn't exist, then we can return an empty string.
+               // This usually happens only in Refresh() when we haven't populated
+               // a state. During validation, we semantically verify that all
+               // modules reference other modules, and graph ordering should
+               // ensure that the module is in the state, so if we reach this
+               // point otherwise it really is a panic.
+               result[n] = unknownVariable()
+
+               // During apply this is always an error
+               if i.Operation == walkApply {
+                       return fmt.Errorf(
+                               "Couldn't find module %q for var: %s",
+                               v.Name, v.FullKey())
+               }
+       } else {
+               // Get the value from the outputs
+               if outputState, ok := mod.Outputs[v.Field]; ok {
+                       output, err := hil.InterfaceToVariable(outputState.Value)
+                       if err != nil {
+                               return err
+                       }
+                       result[n] = output
+               } else {
+                       // Same reasons as the comment above.
+                       result[n] = unknownVariable()
+
+                       // During apply this is always an error
+                       if i.Operation == walkApply {
+                               return fmt.Errorf(
+                                       "Couldn't find output %q for module var: %s",
+                                       v.Field, v.FullKey())
+                       }
+               }
+       }
+
+       return nil
+}
+
+func (i *Interpolater) valuePathVar(
+       scope *InterpolationScope,
+       n string,
+       v *config.PathVariable,
+       result map[string]ast.Variable) error {
+       switch v.Type {
+       case config.PathValueCwd:
+               wd, err := os.Getwd()
+               if err != nil {
+                       return fmt.Errorf(
+                               "Couldn't get cwd for var %s: %s",
+                               v.FullKey(), err)
+               }
+
+               result[n] = ast.Variable{
+                       Value: wd,
+                       Type:  ast.TypeString,
+               }
+       case config.PathValueModule:
+               if t := i.Module.Child(scope.Path[1:]); t != nil {
+                       result[n] = ast.Variable{
+                               Value: t.Config().Dir,
+                               Type:  ast.TypeString,
+                       }
+               }
+       case config.PathValueRoot:
+               result[n] = ast.Variable{
+                       Value: i.Module.Config().Dir,
+                       Type:  ast.TypeString,
+               }
+       default:
+               return fmt.Errorf("%s: unknown path type: %#v", n, v.Type)
+       }
+
+       return nil
+}
+
+func (i *Interpolater) valueResourceVar(
+       scope *InterpolationScope,
+       n string,
+       v *config.ResourceVariable,
+       result map[string]ast.Variable) error {
+       // During validation we can't compute dynamic fields, so resource
+       // variables are simply marked as computed (unknown).
+       if i.Operation == walkValidate {
+               result[n] = unknownVariable()
+               return nil
+       }
+
+       var variable *ast.Variable
+       var err error
+
+       if v.Multi && v.Index == -1 {
+               variable, err = i.computeResourceMultiVariable(scope, v)
+       } else {
+               variable, err = i.computeResourceVariable(scope, v)
+       }
+
+       if err != nil {
+               return err
+       }
+
+       if variable == nil {
+               // During the input walk we tolerate missing variables because
+               // we haven't yet had a chance to refresh state, so dynamic data may
+               // not yet be complete.
+               // If it truly is missing, we'll catch it on a later walk.
+               // This applies only to graph nodes that interpolate during the
+               // config walk, e.g. providers.
+               if i.Operation == walkInput || i.Operation == walkRefresh {
+                       result[n] = unknownVariable()
+                       return nil
+               }
+
+               return fmt.Errorf("variable %q is nil, but no error was reported", v.Name)
+       }
+
+       result[n] = *variable
+       return nil
+}
+
+func (i *Interpolater) valueSelfVar(
+       scope *InterpolationScope,
+       n string,
+       v *config.SelfVariable,
+       result map[string]ast.Variable) error {
+       if scope == nil || scope.Resource == nil {
+               return fmt.Errorf(
+                       "%s: invalid scope, self variables are only valid on resources", n)
+       }
+
+       rv, err := config.NewResourceVariable(fmt.Sprintf(
+               "%s.%s.%d.%s",
+               scope.Resource.Type,
+               scope.Resource.Name,
+               scope.Resource.CountIndex,
+               v.Field))
+       if err != nil {
+               return err
+       }
+
+       return i.valueResourceVar(scope, n, rv, result)
+}
+
+func (i *Interpolater) valueSimpleVar(
+       scope *InterpolationScope,
+       n string,
+       v *config.SimpleVariable,
+       result map[string]ast.Variable) error {
+       // This error message includes some information for people who
+       // relied on this for their template_file data sources. We should
+       // remove this at some point but there isn't any rush.
+       return fmt.Errorf(
+               "invalid variable syntax: %q. Did you mean 'var.%s'? If this is part of inline `template` parameter\n"+
+                       "then you must escape the interpolation with two dollar signs. For\n"+
+                       "example: ${a} becomes $${a}.",
+               n, n)
+}
+
+func (i *Interpolater) valueTerraformVar(
+       scope *InterpolationScope,
+       n string,
+       v *config.TerraformVariable,
+       result map[string]ast.Variable) error {
+       if v.Field != "env" {
+               return fmt.Errorf(
+                       "%s: only supported key for 'terraform.X' interpolations is 'env'", n)
+       }
+
+       if i.Meta == nil {
+               return fmt.Errorf(
+                       "%s: internal error: nil Meta. Please report a bug.", n)
+       }
+
+       result[n] = ast.Variable{Type: ast.TypeString, Value: i.Meta.Env}
+       return nil
+}
+
+func (i *Interpolater) valueUserVar(
+       scope *InterpolationScope,
+       n string,
+       v *config.UserVariable,
+       result map[string]ast.Variable) error {
+       i.VariableValuesLock.Lock()
+       defer i.VariableValuesLock.Unlock()
+       val, ok := i.VariableValues[v.Name]
+       if ok {
+               varValue, err := hil.InterfaceToVariable(val)
+               if err != nil {
+                       return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
+                               v.Name, val, err)
+               }
+               result[n] = varValue
+               return nil
+       }
+
+       if _, ok := result[n]; !ok && i.Operation == walkValidate {
+               result[n] = unknownVariable()
+               return nil
+       }
+
+       // Look up if we have any variables with this prefix because
+       // those are map overrides. Include those.
+       for k, val := range i.VariableValues {
+               if strings.HasPrefix(k, v.Name+".") {
+                       keyComponents := strings.Split(k, ".")
+                       overrideKey := keyComponents[len(keyComponents)-1]
+
+                       mapInterface, ok := result["var."+v.Name]
+                       if !ok {
+                               return fmt.Errorf("override for non-existent variable: %s", v.Name)
+                       }
+
+                       mapVariable := mapInterface.Value.(map[string]ast.Variable)
+
+                       varValue, err := hil.InterfaceToVariable(val)
+                       if err != nil {
+                               return fmt.Errorf("cannot convert %s value %q to an ast.Variable for interpolation: %s",
+                                       v.Name, val, err)
+                       }
+                       mapVariable[overrideKey] = varValue
+               }
+       }
+
+       return nil
+}
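A worked illustration of the override pass above, not part of the vendored diff; the variable names are invented.

// Hypothetical input:
//   i.VariableValues = map[string]interface{}{"amis.us-east-1": "ami-bbb"}
//   result["var.amis"] already holds the default map (seeded by Values()
//   from the variable's declared default, e.g. {"us-east-1": "ami-aaa"}).
// The prefix scan rewrites the overridden entry, so afterwards:
//   result["var.amis"].Value.(map[string]ast.Variable)["us-east-1"] // "ami-bbb"
// A plain "amis" key in VariableValues would instead win outright via the
// early return at the top of the function.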
+
+func (i *Interpolater) computeResourceVariable(
+       scope *InterpolationScope,
+       v *config.ResourceVariable) (*ast.Variable, error) {
+       id := v.ResourceId()
+       if v.Multi {
+               id = fmt.Sprintf("%s.%d", id, v.Index)
+       }
+
+       i.StateLock.RLock()
+       defer i.StateLock.RUnlock()
+
+       unknownVariable := unknownVariable()
+
+       // These variables must be declared early because of the use of GOTO
+       var isList bool
+       var isMap bool
+
+       // Get the information about this resource variable, and verify
+       // that it exists.
+       module, cr, err := i.resourceVariableInfo(scope, v)
+       if err != nil {
+               return nil, err
+       }
+
+       // If we're requesting "count", it's a special variable that we grab
+       // directly from the config itself.
+       if v.Field == "count" {
+               var count int
+               if cr != nil {
+                       count, err = cr.Count()
+               } else {
+                       count, err = i.resourceCountMax(module, cr, v)
+               }
+               if err != nil {
+                       return nil, fmt.Errorf(
+                               "Error reading %s count: %s",
+                               v.ResourceId(),
+                               err)
+               }
+
+               return &ast.Variable{Type: ast.TypeInt, Value: count}, nil
+       }
+
+       // Get the resource out from the state. We know the state exists
+       // at this point and if there is a state, we expect there to be a
+       // resource with the given name.
+       var r *ResourceState
+       if module != nil && len(module.Resources) > 0 {
+               var ok bool
+               r, ok = module.Resources[id]
+               if !ok && v.Multi && v.Index == 0 {
+                       r, ok = module.Resources[v.ResourceId()]
+               }
+               if !ok {
+                       r = nil
+               }
+       }
+       if r == nil || r.Primary == nil {
+               if i.Operation == walkApply || i.Operation == walkPlan {
+                       return nil, fmt.Errorf(
+                               "Resource '%s' not found for variable '%s'",
+                               v.ResourceId(),
+                               v.FullKey())
+               }
+
+               // If we have no module in the state yet, or no count, return empty.
+               // NOTE(@mitchellh): I actually don't know why this is here. During
+               // a refactor I kept this here to maintain the same behavior, but
+               // I'm not sure why it's here.
+               if module == nil || len(module.Resources) == 0 {
+                       return nil, nil
+               }
+
+               goto MISSING
+       }
+
+       if attr, ok := r.Primary.Attributes[v.Field]; ok {
+               v, err := hil.InterfaceToVariable(attr)
+               return &v, err
+       }
+
+       // computed list or map attribute
+       _, isList = r.Primary.Attributes[v.Field+".#"]
+       _, isMap = r.Primary.Attributes[v.Field+".%"]
+       if isList || isMap {
+               variable, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
+               return &variable, err
+       }
+
+       // At apply time, we can't do the "maybe has it" check below
+       // that we need for plans since parent elements might be computed.
+       // Therefore, it is an error and we're missing the key.
+       //
+       // TODO: test by creating a state and configuration that is referencing
+       // a non-existent variable "foo.bar" where the state only has "foo"
+       // and verify plan works, but apply doesn't.
+       if i.Operation == walkApply || i.Operation == walkDestroy {
+               goto MISSING
+       }
+
+       // We didn't find the exact field, so lets separate the dots
+       // and see if anything along the way is a computed set. i.e. if
+       // we have "foo.0.bar" as the field, check to see if "foo" is
+       // a computed list. If so, then the whole thing is computed.
+       if parts := strings.Split(v.Field, "."); len(parts) > 1 {
+               for i := 1; i < len(parts); i++ {
+                       // Lists and sets produce a ".#" count key at this prefix
+                       key := fmt.Sprintf("%s.#", strings.Join(parts[:i], "."))
+                       if attr, ok := r.Primary.Attributes[key]; ok {
+                               v, err := hil.InterfaceToVariable(attr)
+                               return &v, err
+                       }
+
+                       // Maps produce the bare prefix itself as a key
+                       key = strings.Join(parts[:i], ".")
+                       if attr, ok := r.Primary.Attributes[key]; ok {
+                               v, err := hil.InterfaceToVariable(attr)
+                               return &v, err
+                       }
+               }
+       }
+
+MISSING:
+       // Validation for missing interpolations should happen at a higher
+       // semantic level. If we reached this point and don't have variables,
+       // just return the computed value.
+       if scope == nil || scope.Resource == nil {
+               return &unknownVariable, nil
+       }
+
+       // If the operation is refresh, it isn't an error for a value to
+       // be unknown. Instead, we return that the value is computed so
+       // that the graph can continue to refresh other nodes. It doesn't
+       // matter because the config isn't interpolated anyway.
+       //
+       // For a Destroy, we're also fine with computed values, since our goal is
+       // only to get destroy nodes for existing resources.
+       //
+       // For an input walk, computed values are okay to return because we're only
+       // looking for missing variables to prompt the user for.
+       if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkInput {
+               return &unknownVariable, nil
+       }
+
+       return nil, fmt.Errorf(
+               "Resource '%s' does not have attribute '%s' "+
+                       "for variable '%s'",
+               id,
+               v.Field,
+               v.FullKey())
+}
+
+func (i *Interpolater) computeResourceMultiVariable(
+       scope *InterpolationScope,
+       v *config.ResourceVariable) (*ast.Variable, error) {
+       i.StateLock.RLock()
+       defer i.StateLock.RUnlock()
+
+       unknownVariable := unknownVariable()
+
+       // If we're only looking for input, we don't need to expand a
+       // multi-variable. This prevents us from encountering things that should be
+       // known but aren't because the state has yet to be refreshed.
+       if i.Operation == walkInput {
+               return &unknownVariable, nil
+       }
+
+       // Get the information about this resource variable, and verify
+       // that it exists.
+       module, cr, err := i.resourceVariableInfo(scope, v)
+       if err != nil {
+               return nil, err
+       }
+
+       // Get the keys for all the resources that are created for this resource
+       countMax, err := i.resourceCountMax(module, cr, v)
+       if err != nil {
+               return nil, err
+       }
+
+       // If count is zero, we return an empty list
+       if countMax == 0 {
+               return &ast.Variable{Type: ast.TypeList, Value: []ast.Variable{}}, nil
+       }
+
+       // If we have no module in the state yet, or no count, return unknown
+       if module == nil || len(module.Resources) == 0 {
+               return &unknownVariable, nil
+       }
+
+       var values []interface{}
+       for idx := 0; idx < countMax; idx++ {
+               id := fmt.Sprintf("%s.%d", v.ResourceId(), idx)
+
+               // A single-instance resource is stored in state without a trailing
+               // index, so we try both forms here, preferring the value without
+               // one. This choice is for legacy reasons: older versions of TF
+               // preferred it.
+               if id == v.ResourceId()+".0" {
+                       potential := v.ResourceId()
+                       if _, ok := module.Resources[potential]; ok {
+                               id = potential
+                       }
+               }
+
+               r, ok := module.Resources[id]
+               if !ok {
+                       continue
+               }
+
+               if r.Primary == nil {
+                       continue
+               }
+
+               if singleAttr, ok := r.Primary.Attributes[v.Field]; ok {
+                       values = append(values, singleAttr)
+                       continue
+               }
+
+               // computed list or map attribute
+               _, isList := r.Primary.Attributes[v.Field+".#"]
+               _, isMap := r.Primary.Attributes[v.Field+".%"]
+               if !(isList || isMap) {
+                       continue
+               }
+               multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes)
+               if err != nil {
+                       return nil, err
+               }
+
+               values = append(values, multiAttr)
+       }
+
+       if len(values) == 0 {
+               // If the operation is refresh, it isn't an error for a value to
+               // be unknown. Instead, we return that the value is computed so
+               // that the graph can continue to refresh other nodes. It doesn't
+               // matter because the config isn't interpolated anyway.
+               //
+               // For a Destroy, we're also fine with computed values, since our goal is
+               // only to get destroy nodes for existing resources.
+               //
+               // For an input walk, computed values are okay to return because we're only
+               // looking for missing variables to prompt the user for.
+               if i.Operation == walkRefresh || i.Operation == walkPlanDestroy || i.Operation == walkDestroy || i.Operation == walkInput {
+                       return &unknownVariable, nil
+               }
+
+               return nil, fmt.Errorf(
+                       "Resource '%s' does not have attribute '%s' "+
+                               "for variable '%s'",
+                       v.ResourceId(),
+                       v.Field,
+                       v.FullKey())
+       }
+
+       variable, err := hil.InterfaceToVariable(values)
+       return &variable, err
+}
+
+func (i *Interpolater) interpolateComplexTypeAttribute(
+       resourceID string,
+       attributes map[string]string) (ast.Variable, error) {
+
+       // We can now distinguish between lists and maps in state by the count field:
+       //    - lists (and by extension, sets) use the traditional .# notation
+       //    - maps use the newer .% notation
+       // Consequently here we can decide how to deal with the keys appropriately
+       // based on whether the type is a map or a list.
+       if lengthAttr, isList := attributes[resourceID+".#"]; isList {
+               log.Printf("[DEBUG] Interpolating computed list element attribute %s (%s)",
+                       resourceID, lengthAttr)
+
+               // In Terraform's internal dotted representation of list-like attributes, the
+               // ".#" count field is marked as unknown to indicate "this whole list is
+               // unknown". We must honor that meaning here so computed references can be
+               // treated properly during the plan phase.
+               if lengthAttr == config.UnknownVariableValue {
+                       return unknownVariable(), nil
+               }
+
+               expanded := flatmap.Expand(attributes, resourceID)
+               return hil.InterfaceToVariable(expanded)
+       }
+
+       if lengthAttr, isMap := attributes[resourceID+".%"]; isMap {
+               log.Printf("[DEBUG] Interpolating computed map element attribute %s (%s)",
+                       resourceID, lengthAttr)
+
+               // In Terraform's internal dotted representation of map attributes, the
+               // ".%" count field is marked as unknown to indicate "this whole list is
+               // unknown". We must honor that meaning here so computed references can be
+               // treated properly during the plan phase.
+               if lengthAttr == config.UnknownVariableValue {
+                       return unknownVariable(), nil
+               }
+
+               expanded := flatmap.Expand(attributes, resourceID)
+               return hil.InterfaceToVariable(expanded)
+       }
+
+       return ast.Variable{}, fmt.Errorf("No complex type %s found", resourceID)
+}
+
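+// For illustration only (hypothetical attribute names, not from the
+// original source): a computed list attribute "ips" with two elements is
+// stored in the flatmap as
+//
+//     "ips.#" = "2"
+//     "ips.0" = "10.0.0.1"
+//     "ips.1" = "10.0.0.2"
+//
+// while a map attribute "tags" uses the "%" count key:
+//
+//     "tags.%"    = "1"
+//     "tags.Name" = "web"
+//
+// flatmap.Expand reassembles either form into a value that
+// hil.InterfaceToVariable can convert into an ast.Variable.
+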
+func (i *Interpolater) resourceVariableInfo(
+       scope *InterpolationScope,
+       v *config.ResourceVariable) (*ModuleState, *config.Resource, error) {
+       // Get the module tree that contains our current path. This is
+       // either the current module (path is empty) or a child.
+       modTree := i.Module
+       if len(scope.Path) > 1 {
+               modTree = i.Module.Child(scope.Path[1:])
+       }
+
+       // Get the resource from the configuration so we can verify
+       // that the resource is in the configuration and so we can access
+       // the configuration if we need to.
+       var cr *config.Resource
+       for _, r := range modTree.Config().Resources {
+               if r.Id() == v.ResourceId() {
+                       cr = r
+                       break
+               }
+       }
+
+       // Get the relevant module
+       module := i.State.ModuleByPath(scope.Path)
+       return module, cr, nil
+}
+
+func (i *Interpolater) resourceCountMax(
+       ms *ModuleState,
+       cr *config.Resource,
+       v *config.ResourceVariable) (int, error) {
+       id := v.ResourceId()
+
+       // If we're NOT applying, then we assume we can read the count
+       // from the state. Plan and so on may not have any state yet so
+       // we do a full interpolation.
+       if i.Operation != walkApply {
+               if cr == nil {
+                       return 0, nil
+               }
+
+               count, err := cr.Count()
+               if err != nil {
+                       return 0, err
+               }
+
+               return count, nil
+       }
+
+       // We need to determine the list of resource keys to get values from.
+       // This needs to be sorted so the order is deterministic. We used to
+       // use "cr.Count()" but that doesn't work if the count is interpolated
+       // and we can't guarantee that so we instead depend on the state.
+       max := -1
+       for k := range ms.Resources {
+               // Get the index number for this resource
+               index := ""
+               if k == id {
+               // If the key is the id, then it's just 0 (no explicit index)
+                       index = "0"
+               } else if strings.HasPrefix(k, id+".") {
+                       // Grab the index number out of the state
+                       index = k[len(id+"."):]
+                       if idx := strings.IndexRune(index, '.'); idx >= 0 {
+                               index = index[:idx]
+                       }
+               }
+
+               // If there was no index then this resource didn't match
+               // the one we're looking for; skip it.
+               if index == "" {
+                       continue
+               }
+
+               // Turn the index into an int
+               raw, err := strconv.ParseInt(index, 0, 0)
+               if err != nil {
+                       return 0, fmt.Errorf(
+                               "%s: error parsing index %q as int: %s",
+                               id, index, err)
+               }
+
+               // Keep track of this index if it's the max
+               if new := int(raw); new > max {
+                       max = new
+               }
+       }
+
+       // If we never found any matching resources in the state, we
+       // have zero.
+       if max == -1 {
+               return 0, nil
+       }
+
+       // The result value is "max+1" because we're returning the
+       // max COUNT, not the max INDEX, and we zero-index.
+       return max + 1, nil
+}
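+
+// For illustration only (hypothetical state keys, not from the original
+// source): given state entries "aws_instance.foo" (implicit index 0) and
+// "aws_instance.foo.2", the maximum index seen is 2, so resourceCountMax
+// returns a count of 3.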
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/node_count_boundary.go
new file mode 100644 (file)
index 0000000..bd32c79
--- /dev/null
@@ -0,0 +1,14 @@
+package terraform
+
+// NodeCountBoundary fixes any "count boundaries" in the state: resources
+// that are named "foo.0" when they should be named "foo".
+type NodeCountBoundary struct{}
+
+func (n *NodeCountBoundary) Name() string {
+       return "meta.count-boundary (count boundary fixup)"
+}
+
+// GraphNodeEvalable
+func (n *NodeCountBoundary) EvalTree() EvalNode {
+       return &EvalCountFixZeroOneBoundaryGlobal{}
+}
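+
+// For illustration only (hypothetical address, not from the original
+// source): with count = 1, a resource left in state as "aws_instance.foo.0"
+// should be stored as "aws_instance.foo"; EvalCountFixZeroOneBoundaryGlobal
+// walks the entire state and performs that rename.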
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_destroy.go
new file mode 100644 (file)
index 0000000..e32cea8
--- /dev/null
@@ -0,0 +1,22 @@
+package terraform
+
+// NodeDestroyableDataResource represents a resource that is "plannable":
+// it is ready to be planned in order to create a diff.
+type NodeDestroyableDataResource struct {
+       *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableDataResource) EvalTree() EvalNode {
+       addr := n.NodeAbstractResource.Addr
+
+       // stateId is the ID to put into the state
+       stateId := addr.stateId()
+
+       // Just destroy it.
+       var state *InstanceState
+       return &EvalWriteState{
+               Name:  stateId,
+               State: &state, // state is nil here
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_data_refresh.go
new file mode 100644 (file)
index 0000000..d504c89
--- /dev/null
@@ -0,0 +1,198 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/dag"
+)
+
+// NodeRefreshableDataResource represents a resource that is "plannable":
+// it is ready to be planned in order to create a diff.
+type NodeRefreshableDataResource struct {
+       *NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeRefreshableDataResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+       // Grab the state which we read
+       state, lock := ctx.State()
+       lock.RLock()
+       defer lock.RUnlock()
+
+       // Expand the resource count which must be available by now from EvalTree
+       count, err := n.Config.Count()
+       if err != nil {
+               return nil, err
+       }
+
+       // The concrete resource factory we'll use
+       concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+               // Add the config and state since we don't do that via transforms
+               a.Config = n.Config
+
+               return &NodeRefreshableDataResourceInstance{
+                       NodeAbstractResource: a,
+               }
+       }
+
+       // Start creating the steps
+       steps := []GraphTransformer{
+               // Expand the count.
+               &ResourceCountTransformer{
+                       Concrete: concreteResource,
+                       Count:    count,
+                       Addr:     n.ResourceAddr(),
+               },
+
+               // Attach the state
+               &AttachStateTransformer{State: state},
+
+               // Targeting
+               &TargetsTransformer{ParsedTargets: n.Targets},
+
+               // Connect references so ordering is correct
+               &ReferenceTransformer{},
+
+               // Make sure there is a single root
+               &RootTransformer{},
+       }
+
+       // Build the graph
+       b := &BasicGraphBuilder{
+               Steps:    steps,
+               Validate: true,
+               Name:     "NodeRefreshableDataResource",
+       }
+
+       return b.Build(ctx.Path())
+}
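+
+// For illustration only (hypothetical address, not from the original
+// source): with count = 3 the builder above expands into three instance
+// nodes, "data.template_file.config.0" through "data.template_file.config.2",
+// each a NodeRefreshableDataResourceInstance wired up by the reference and
+// root transformers.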
+
+// NodeRefreshableDataResourceInstance represents a _single_ resource instance
+// that is refreshable.
+type NodeRefreshableDataResourceInstance struct {
+       *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeRefreshableDataResourceInstance) EvalTree() EvalNode {
+       addr := n.NodeAbstractResource.Addr
+
+       // stateId is the ID to put into the state
+       stateId := addr.stateId()
+
+       // Build the instance info. More of this will be populated during eval
+       info := &InstanceInfo{
+               Id:   stateId,
+               Type: addr.Type,
+       }
+
+       // Get the state if we have it, if not we build it
+       rs := n.ResourceState
+       if rs == nil {
+               rs = &ResourceState{}
+       }
+
+       // If the config isn't empty we update the state
+       if n.Config != nil {
+               rs = &ResourceState{
+                       Type:         n.Config.Type,
+                       Provider:     n.Config.Provider,
+                       Dependencies: n.StateReferences(),
+               }
+       }
+
+       // Build the resource for eval
+       resource := &Resource{
+               Name:       addr.Name,
+               Type:       addr.Type,
+               CountIndex: addr.Index,
+       }
+       if resource.CountIndex < 0 {
+               resource.CountIndex = 0
+       }
+
+       // Declare a bunch of variables that are used for state during
+       // evaluation. Most of these are written to by address below.
+       var config *ResourceConfig
+       var diff *InstanceDiff
+       var provider ResourceProvider
+       var state *InstanceState
+
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       // Always destroy the existing state first, since we must
+                       // make sure that values from a previous read will not
+                       // get interpolated if we end up needing to defer our
+                       // loading until apply time.
+                       &EvalWriteState{
+                               Name:         stateId,
+                               ResourceType: rs.Type,
+                               Provider:     rs.Provider,
+                               Dependencies: rs.Dependencies,
+                               State:        &state, // state is nil here
+                       },
+
+                       &EvalInterpolate{
+                               Config:   n.Config.RawConfig.Copy(),
+                               Resource: resource,
+                               Output:   &config,
+                       },
+
+                       // The rest of this pass can proceed only if there are no
+                       // computed values in our config.
+                       // (If there are, we'll deal with this during the plan and
+                       // apply phases.)
+                       &EvalIf{
+                               If: func(ctx EvalContext) (bool, error) {
+                                       if len(config.ComputedKeys) > 0 {
+                                               return true, EvalEarlyExitError{}
+                                       }
+
+                                       // If the config explicitly has a depends_on for this
+                                       // data source, assume the intention is to prevent
+                                       // refreshing ahead of that dependency.
+                                       if len(n.Config.DependsOn) > 0 {
+                                               return true, EvalEarlyExitError{}
+                                       }
+
+                                       return true, nil
+                               },
+
+                               Then: EvalNoop{},
+                       },
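+
+                       // Note: returning EvalEarlyExitError above aborts the
+                       // whole EvalSequence, so the plan/apply-style steps
+                       // below only run for fully-known configs with no
+                       // depends_on.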
+
+                       // The remainder of this pass is the same as running
+                       // a "plan" pass immediately followed by an "apply" pass,
+                       // populating the state early so it'll be available to
+                       // provider configurations that need this data during
+                       // refresh/plan.
+                       &EvalGetProvider{
+                               Name:   n.ProvidedBy()[0],
+                               Output: &provider,
+                       },
+
+                       &EvalReadDataDiff{
+                               Info:        info,
+                               Config:      &config,
+                               Provider:    &provider,
+                               Output:      &diff,
+                               OutputState: &state,
+                       },
+
+                       &EvalReadDataApply{
+                               Info:     info,
+                               Diff:     &diff,
+                               Provider: &provider,
+                               Output:   &state,
+                       },
+
+                       &EvalWriteState{
+                               Name:         stateId,
+                               ResourceType: rs.Type,
+                               Provider:     rs.Provider,
+                               Dependencies: rs.Dependencies,
+                               State:        &state,
+                       },
+
+                       &EvalUpdateStateHook{},
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_destroy.go
new file mode 100644 (file)
index 0000000..319df1e
--- /dev/null
@@ -0,0 +1,29 @@
+package terraform
+
+import (
+       "fmt"
+)
+
+// NodeDestroyableModuleVariable represents a module variable to be destroyed.
+type NodeDestroyableModuleVariable struct {
+       PathValue []string
+}
+
+func (n *NodeDestroyableModuleVariable) Name() string {
+       result := "plan-destroy"
+       if len(n.PathValue) > 1 {
+               result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+       }
+
+       return result
+}
+
+// GraphNodeSubPath
+func (n *NodeDestroyableModuleVariable) Path() []string {
+       return n.PathValue
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyableModuleVariable) EvalTree() EvalNode {
+       return &EvalDiffDestroyModule{Path: n.PathValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_module_variable.go
new file mode 100644 (file)
index 0000000..13fe8fc
--- /dev/null
@@ -0,0 +1,125 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+)
+
+// NodeApplyableModuleVariable represents a module variable input during
+// the apply step.
+type NodeApplyableModuleVariable struct {
+       PathValue []string
+       Config    *config.Variable  // Config is the var in the config
+       Value     *config.RawConfig // Value is the value that is set
+
+       Module *module.Tree // Antiquated, want to remove
+}
+
+func (n *NodeApplyableModuleVariable) Name() string {
+       result := fmt.Sprintf("var.%s", n.Config.Name)
+       if len(n.PathValue) > 1 {
+               result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+       }
+
+       return result
+}
+
+// GraphNodeSubPath
+func (n *NodeApplyableModuleVariable) Path() []string {
+       // We execute in the parent scope (above our own module) so that
+       // we can access the proper interpolations.
+       if len(n.PathValue) > 2 {
+               return n.PathValue[:len(n.PathValue)-1]
+       }
+
+       return rootModulePath
+}
+
+// RemovableIfNotTargeted
+func (n *NodeApplyableModuleVariable) RemoveIfNotTargeted() bool {
+       // We need to add this so that this node will be removed if
+       // it isn't targeted or a dependency of a target.
+       return true
+}
+
+// GraphNodeReferenceGlobal
+func (n *NodeApplyableModuleVariable) ReferenceGlobal() bool {
+       // We have to create fully qualified references because we cross
+       // boundaries here: our ReferenceableName is in one path and our
+       // References are from another path.
+       return true
+}
+
+// GraphNodeReferenceable
+func (n *NodeApplyableModuleVariable) ReferenceableName() []string {
+       return []string{n.Name()}
+}
+
+// GraphNodeReferencer
+func (n *NodeApplyableModuleVariable) References() []string {
+       // If we have no value set, we depend on nothing
+       if n.Value == nil {
+               return nil
+       }
+
+       // Can't depend on anything if we're in the root
+       if len(n.PathValue) < 2 {
+               return nil
+       }
+
+       // Otherwise, we depend on anything that is in our value, but
+       // specifically in the namespace of the parent path.
+       // Create the prefix based on the path
+       var prefix string
+       if p := n.Path(); len(p) > 0 {
+               prefix = modulePrefixStr(p)
+       }
+
+       result := ReferencesFromConfig(n.Value)
+       return modulePrefixList(result, prefix)
+}
+
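+// For illustration only (hypothetical names, not from the original source):
+// for a variable of module "b" nested under module "a" (PathValue =
+// ["root", "a", "b"]), Path() returns ["root", "a"], so a reference such as
+// "aws_instance.web" in the value is qualified to
+// "module.a.aws_instance.web" and resolves in the parent scope.
+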
+// GraphNodeEvalable
+func (n *NodeApplyableModuleVariable) EvalTree() EvalNode {
+       // If we have no value, do nothing
+       if n.Value == nil {
+               return &EvalNoop{}
+       }
+
+       // Otherwise, interpolate the value of this variable and set it
+       // within the variables mapping.
+       var config *ResourceConfig
+       variables := make(map[string]interface{})
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalInterpolate{
+                               Config: n.Value,
+                               Output: &config,
+                       },
+
+                       &EvalVariableBlock{
+                               Config:         &config,
+                               VariableValues: variables,
+                       },
+
+                       &EvalCoerceMapVariable{
+                               Variables:  variables,
+                               ModulePath: n.PathValue,
+                               ModuleTree: n.Module,
+                       },
+
+                       &EvalTypeCheckVariable{
+                               Variables:  variables,
+                               ModulePath: n.PathValue,
+                               ModuleTree: n.Module,
+                       },
+
+                       &EvalSetVariables{
+                               Module:    &n.PathValue[len(n.PathValue)-1],
+                               Variables: variables,
+                       },
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output.go b/vendor/github.com/hashicorp/terraform/terraform/node_output.go
new file mode 100644 (file)
index 0000000..e28e6f0
--- /dev/null
@@ -0,0 +1,76 @@
+package terraform
+
+import (
+       "fmt"
+       "strings"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// NodeApplyableOutput represents an output that is "applyable":
+// it is ready to be applied.
+type NodeApplyableOutput struct {
+       PathValue []string
+       Config    *config.Output // Config is the output in the config
+}
+
+func (n *NodeApplyableOutput) Name() string {
+       result := fmt.Sprintf("output.%s", n.Config.Name)
+       if len(n.PathValue) > 1 {
+               result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+       }
+
+       return result
+}
+
+// GraphNodeSubPath
+func (n *NodeApplyableOutput) Path() []string {
+       return n.PathValue
+}
+
+// RemovableIfNotTargeted
+func (n *NodeApplyableOutput) RemoveIfNotTargeted() bool {
+       // We need to add this so that this node will be removed if
+       // it isn't targeted or a dependency of a target.
+       return true
+}
+
+// GraphNodeReferenceable
+func (n *NodeApplyableOutput) ReferenceableName() []string {
+       name := fmt.Sprintf("output.%s", n.Config.Name)
+       return []string{name}
+}
+
+// GraphNodeReferencer
+func (n *NodeApplyableOutput) References() []string {
+       var result []string
+       result = append(result, n.Config.DependsOn...)
+       result = append(result, ReferencesFromConfig(n.Config.RawConfig)...)
+       for _, v := range result {
+               split := strings.Split(v, "/")
+               for i, s := range split {
+                       split[i] = s + ".destroy"
+               }
+
+               result = append(result, strings.Join(split, "/"))
+       }
+
+       return result
+}
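+
+// For illustration only (hypothetical reference, not from the original
+// source): a reference "aws_instance.web" in the output's config yields
+// both "aws_instance.web" and "aws_instance.web.destroy" above, so the
+// output is ordered after destroy nodes as well as create nodes.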
+
+// GraphNodeEvalable
+func (n *NodeApplyableOutput) EvalTree() EvalNode {
+       return &EvalOpFilter{
+               Ops: []walkOperation{walkRefresh, walkPlan, walkApply,
+                       walkDestroy, walkInput, walkValidate},
+               Node: &EvalSequence{
+                       Nodes: []EvalNode{
+                               &EvalWriteOutput{
+                                       Name:      n.Config.Name,
+                                       Sensitive: n.Config.Sensitive,
+                                       Value:     n.Config.RawConfig,
+                               },
+                       },
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_output_orphan.go
new file mode 100644 (file)
index 0000000..636a15d
--- /dev/null
@@ -0,0 +1,35 @@
+package terraform
+
+import (
+       "fmt"
+)
+
+// NodeOutputOrphan represents an output that is an orphan.
+type NodeOutputOrphan struct {
+       OutputName string
+       PathValue  []string
+}
+
+func (n *NodeOutputOrphan) Name() string {
+       result := fmt.Sprintf("output.%s (orphan)", n.OutputName)
+       if len(n.PathValue) > 1 {
+               result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+       }
+
+       return result
+}
+
+// GraphNodeSubPath
+func (n *NodeOutputOrphan) Path() []string {
+       return n.PathValue
+}
+
+// GraphNodeEvalable
+func (n *NodeOutputOrphan) EvalTree() EvalNode {
+       return &EvalOpFilter{
+               Ops: []walkOperation{walkRefresh, walkApply, walkDestroy},
+               Node: &EvalDeleteOutput{
+                       Name: n.OutputName,
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider.go
new file mode 100644 (file)
index 0000000..8e2c176
--- /dev/null
@@ -0,0 +1,11 @@
+package terraform
+
+// NodeApplyableProvider represents a provider during an apply.
+type NodeApplyableProvider struct {
+       *NodeAbstractProvider
+}
+
+// GraphNodeEvalable
+func (n *NodeApplyableProvider) EvalTree() EvalNode {
+       return ProviderEvalTree(n.NameValue, n.ProviderConfig())
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_abstract.go
new file mode 100644 (file)
index 0000000..6cc8365
--- /dev/null
@@ -0,0 +1,85 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteProviderNodeFunc is a callback type used to convert an
+// abstract provider to a concrete one of some type.
+type ConcreteProviderNodeFunc func(*NodeAbstractProvider) dag.Vertex
+
+// NodeAbstractProvider represents a provider that has no associated operations.
+// It registers all the common interfaces across operations for providers.
+type NodeAbstractProvider struct {
+       NameValue string
+       PathValue []string
+
+       // The fields below will be automatically set using the Attach
+       // interfaces if you're running those transforms, but may also be explicitly
+       // set if you already have that information.
+
+       Config *config.ProviderConfig
+}
+
+func (n *NodeAbstractProvider) Name() string {
+       result := fmt.Sprintf("provider.%s", n.NameValue)
+       if len(n.PathValue) > 1 {
+               result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+       }
+
+       return result
+}
+
+// GraphNodeSubPath
+func (n *NodeAbstractProvider) Path() []string {
+       return n.PathValue
+}
+
+// RemovableIfNotTargeted
+func (n *NodeAbstractProvider) RemoveIfNotTargeted() bool {
+       // We need to add this so that this node will be removed if
+       // it isn't targeted or a dependency of a target.
+       return true
+}
+
+// GraphNodeReferencer
+func (n *NodeAbstractProvider) References() []string {
+       if n.Config == nil {
+               return nil
+       }
+
+       return ReferencesFromConfig(n.Config.RawConfig)
+}
+
+// GraphNodeProvider
+func (n *NodeAbstractProvider) ProviderName() string {
+       return n.NameValue
+}
+
+// GraphNodeProvider
+func (n *NodeAbstractProvider) ProviderConfig() *config.RawConfig {
+       if n.Config == nil {
+               return nil
+       }
+
+       return n.Config.RawConfig
+}
+
+// GraphNodeAttachProvider
+func (n *NodeAbstractProvider) AttachProvider(c *config.ProviderConfig) {
+       n.Config = c
+}
+
+// GraphNodeDotter impl.
+func (n *NodeAbstractProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+       return &dag.DotNode{
+               Name: name,
+               Attrs: map[string]string{
+                       "label": n.Name(),
+                       "shape": "diamond",
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go b/vendor/github.com/hashicorp/terraform/terraform/node_provider_disabled.go
new file mode 100644 (file)
index 0000000..25e7e62
--- /dev/null
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+       "fmt"
+)
+
+// NodeDisabledProvider represents a provider that is disabled. A disabled
+// provider does nothing. It exists to properly set inheritance information
+// for child providers.
+type NodeDisabledProvider struct {
+       *NodeAbstractProvider
+}
+
+func (n *NodeDisabledProvider) Name() string {
+       return fmt.Sprintf("%s (disabled)", n.NodeAbstractProvider.Name())
+}
+
+// GraphNodeEvalable
+func (n *NodeDisabledProvider) EvalTree() EvalNode {
+       var resourceConfig *ResourceConfig
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalInterpolate{
+                               Config: n.ProviderConfig(),
+                               Output: &resourceConfig,
+                       },
+                       &EvalBuildProviderConfig{
+                               Provider: n.ProviderName(),
+                               Config:   &resourceConfig,
+                               Output:   &resourceConfig,
+                       },
+                       &EvalSetProviderConfig{
+                               Provider: n.ProviderName(),
+                               Config:   &resourceConfig,
+                       },
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/node_provisioner.go
new file mode 100644 (file)
index 0000000..bb117c1
--- /dev/null
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// NodeProvisioner represents a provisioner that has no associated operations.
+// It registers all the common interfaces across operations for provisioners.
+type NodeProvisioner struct {
+       NameValue string
+       PathValue []string
+
+       // The fields below will be automatically set using the Attach
+       // interfaces if you're running those transforms, but may also be explicitly
+       // set if you already have that information.
+
+       Config *config.ProviderConfig
+}
+
+func (n *NodeProvisioner) Name() string {
+       result := fmt.Sprintf("provisioner.%s", n.NameValue)
+       if len(n.PathValue) > 1 {
+               result = fmt.Sprintf("%s.%s", modulePrefixStr(n.PathValue), result)
+       }
+
+       return result
+}
+
+// GraphNodeSubPath
+func (n *NodeProvisioner) Path() []string {
+       return n.PathValue
+}
+
+// GraphNodeProvisioner
+func (n *NodeProvisioner) ProvisionerName() string {
+       return n.NameValue
+}
+
+// GraphNodeEvalable impl.
+func (n *NodeProvisioner) EvalTree() EvalNode {
+       return &EvalInitProvisioner{Name: n.NameValue}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract.go
new file mode 100644 (file)
index 0000000..50bb707
--- /dev/null
@@ -0,0 +1,240 @@
+package terraform
+
+import (
+       "fmt"
+       "strings"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// ConcreteResourceNodeFunc is a callback type used to convert an
+// abstract resource to a concrete one of some type.
+type ConcreteResourceNodeFunc func(*NodeAbstractResource) dag.Vertex
+
+// GraphNodeResource is implemented by any nodes that represent a resource.
+// The type of operation cannot be assumed, only that this node represents
+// the given resource.
+type GraphNodeResource interface {
+       ResourceAddr() *ResourceAddress
+}
+
+// NodeAbstractResource represents a resource that has no associated
+// operations. It registers all the interfaces for a resource that common
+// across multiple operation types.
+type NodeAbstractResource struct {
+       Addr *ResourceAddress // Addr is the address for this resource
+
+       // The fields below will be automatically set using the Attach
+       // interfaces if you're running those transforms, but may also be explicitly
+       // set if you already have that information.
+
+       Config        *config.Resource // Config is the resource in the config
+       ResourceState *ResourceState   // ResourceState is the ResourceState for this
+
+       Targets []ResourceAddress // Set from GraphNodeTargetable
+}
+
+func (n *NodeAbstractResource) Name() string {
+       return n.Addr.String()
+}
+
+// GraphNodeSubPath
+func (n *NodeAbstractResource) Path() []string {
+       return n.Addr.Path
+}
+
+// GraphNodeReferenceable
+func (n *NodeAbstractResource) ReferenceableName() []string {
+       // We always are referenceable as "type.name" as long as
+       // we have a config or address. Determine what that value is.
+       var id string
+       if n.Config != nil {
+               id = n.Config.Id()
+       } else if n.Addr != nil {
+               addrCopy := n.Addr.Copy()
+               addrCopy.Path = nil // ReferenceTransformer handles paths
+               addrCopy.Index = -1 // We handle indexes below
+               id = addrCopy.String()
+       } else {
+               // No way to determine our type.name, just return
+               return nil
+       }
+
+       var result []string
+
+       // Always include our own ID. This is primarily for backwards
+       // compatibility with states that didn't yet support the more
+       // specific dep string.
+       result = append(result, id)
+
+       // We can also be referenced in splat form for multi-access
+       result = append(result, fmt.Sprintf("%s.*", id))
+
+       // We represent either a specific number, or all numbers
+       suffix := "N"
+       if n.Addr != nil {
+               idx := n.Addr.Index
+               if idx == -1 {
+                       idx = 0
+               }
+
+               suffix = fmt.Sprintf("%d", idx)
+       }
+       result = append(result, fmt.Sprintf("%s.%s", id, suffix))
+
+       return result
+}
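+
+// For illustration only (hypothetical address, not from the original
+// source): for "aws_instance.web" at index 2 this returns
+//
+//     ["aws_instance.web", "aws_instance.web.*", "aws_instance.web.2"]
+//
+// and with no index (-1) the last entry falls back to "aws_instance.web.0".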
+
+// GraphNodeReferencer
+func (n *NodeAbstractResource) References() []string {
+       // If we have a config, that is our source of truth
+       if c := n.Config; c != nil {
+               // Grab all the references
+               var result []string
+               result = append(result, c.DependsOn...)
+               result = append(result, ReferencesFromConfig(c.RawCount)...)
+               result = append(result, ReferencesFromConfig(c.RawConfig)...)
+               for _, p := range c.Provisioners {
+                       if p.When == config.ProvisionerWhenCreate {
+                               result = append(result, ReferencesFromConfig(p.ConnInfo)...)
+                               result = append(result, ReferencesFromConfig(p.RawConfig)...)
+                       }
+               }
+
+               return uniqueStrings(result)
+       }
+
+       // If we have state, that is our next source
+       if s := n.ResourceState; s != nil {
+               return s.Dependencies
+       }
+
+       return nil
+}
+
+// StateReferences returns the dependencies to put into the state for
+// this resource.
+func (n *NodeAbstractResource) StateReferences() []string {
+       self := n.ReferenceableName()
+
+       // Determine what our "prefix" is for checking for references to
+       // ourself.
+       addrCopy := n.Addr.Copy()
+       addrCopy.Index = -1
+       selfPrefix := addrCopy.String() + "."
+
+       depsRaw := n.References()
+       deps := make([]string, 0, len(depsRaw))
+       for _, d := range depsRaw {
+               // Ignore any variable dependencies
+               if strings.HasPrefix(d, "var.") {
+                       continue
+               }
+
+               // If this has a backup ref, ignore those for now. The old state
+               // file never contained those and I'd rather store the rich types we
+               // add in the future.
+               if idx := strings.IndexRune(d, '/'); idx != -1 {
+                       d = d[:idx]
+               }
+
+               // If we're referencing ourself, then ignore it
+               found := false
+               for _, s := range self {
+                       if d == s {
+                               found = true
+                       }
+               }
+               if found {
+                       continue
+               }
+
+               // If this is a reference to ourself and a specific index, we keep
+               // it. For example, if this resource is "foo.bar" and the reference
+               // is "foo.bar.0" then we keep it exact. Otherwise, we strip it.
+               if strings.HasSuffix(d, ".0") && !strings.HasPrefix(d, selfPrefix) {
+                       d = d[:len(d)-2]
+               }
+
+               // This is sad. The dependencies are currently in the format of
+               // "module.foo.bar" (the full field). This strips the field off.
+               if strings.HasPrefix(d, "module.") {
+                       parts := strings.SplitN(d, ".", 3)
+                       d = strings.Join(parts[0:2], ".")
+               }
+
+               deps = append(deps, d)
+       }
+
+       return deps
+}
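+
+// For illustration only (hypothetical dependencies, not from the original
+// source): raw references
+//
+//     ["var.region", "aws_eip.lb.0", "module.net.subnet_id"]
+//
+// are stored as ["aws_eip.lb", "module.net"]: variables are dropped, a
+// trailing ".0" on a non-self reference is stripped, and module references
+// keep only the "module.<name>" prefix.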
+
+// GraphNodeProviderConsumer
+func (n *NodeAbstractResource) ProvidedBy() []string {
+       // If we have a config we prefer that above all else
+       if n.Config != nil {
+               return []string{resourceProvider(n.Config.Type, n.Config.Provider)}
+       }
+
+       // If we have state, then we will use the provider from there
+       if n.ResourceState != nil && n.ResourceState.Provider != "" {
+               return []string{n.ResourceState.Provider}
+       }
+
+       // Use our type
+       return []string{resourceProvider(n.Addr.Type, "")}
+}
+
+// GraphNodeProvisionerConsumer
+func (n *NodeAbstractResource) ProvisionedBy() []string {
+       // If we have no configuration, then we have no provisioners
+       if n.Config == nil {
+               return nil
+       }
+
+       // Build the list of provisioners we need based on the configuration.
+       // It is okay to have duplicates here.
+       result := make([]string, len(n.Config.Provisioners))
+       for i, p := range n.Config.Provisioners {
+               result[i] = p.Type
+       }
+
+       return result
+}
+
+// GraphNodeResource, GraphNodeAttachResourceState
+func (n *NodeAbstractResource) ResourceAddr() *ResourceAddress {
+       return n.Addr
+}
+
+// GraphNodeAddressable, TODO: remove, used by target, should unify
+func (n *NodeAbstractResource) ResourceAddress() *ResourceAddress {
+       return n.ResourceAddr()
+}
+
+// GraphNodeTargetable
+func (n *NodeAbstractResource) SetTargets(targets []ResourceAddress) {
+       n.Targets = targets
+}
+
+// GraphNodeAttachResourceState
+func (n *NodeAbstractResource) AttachResourceState(s *ResourceState) {
+       n.ResourceState = s
+}
+
+// GraphNodeAttachResourceConfig
+func (n *NodeAbstractResource) AttachResourceConfig(c *config.Resource) {
+       n.Config = c
+}
+
+// GraphNodeDotter impl.
+func (n *NodeAbstractResource) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+       return &dag.DotNode{
+               Name: name,
+               Attrs: map[string]string{
+                       "label": n.Name(),
+                       "shape": "box",
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_abstract_count.go
new file mode 100644 (file)
index 0000000..573570d
--- /dev/null
@@ -0,0 +1,50 @@
+package terraform
+
+// NodeAbstractCountResource should be embedded instead of NodeAbstractResource
+// if the resource has a `count` value that needs to be expanded.
+//
+// The embedder should implement `DynamicExpand` to process the count.
+type NodeAbstractCountResource struct {
+       *NodeAbstractResource
+
+       // Validate, if true, will perform the validation for the count.
+       // This should only be turned on for the "validate" operation.
+       Validate bool
+}
+
+// GraphNodeEvalable
+func (n *NodeAbstractCountResource) EvalTree() EvalNode {
+       // We only check if the count is computed if we're not validating.
+       // If we're validating we allow computed counts since they just turn
+       // into more computed values.
+       var evalCountCheckComputed EvalNode
+       if !n.Validate {
+               evalCountCheckComputed = &EvalCountCheckComputed{Resource: n.Config}
+       }
+
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       // The EvalTree for a plannable resource primarily involves
+                       // interpolating the count since it can contain variables
+                       // we only just received access to.
+                       //
+                       // With the interpolated count, we can then DynamicExpand
+                       // into the proper number of instances.
+                       &EvalInterpolate{Config: n.Config.RawCount},
+
+                       // Check if the count is computed
+                       evalCountCheckComputed,
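+                       // (When validating, evalCountCheckComputed is left nil
+                       // above; EvalSequence is expected to skip nil nodes,
+                       // so nothing runs in that case.)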
+
+                       // If validation is enabled, perform the validation
+                       &EvalIf{
+                               If: func(ctx EvalContext) (bool, error) {
+                                       return n.Validate, nil
+                               },
+
+                               Then: &EvalValidateCount{Resource: n.Config},
+                       },
+
+                       &EvalCountFixZeroOneBoundary{Resource: n.Config},
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_apply.go
new file mode 100644 (file)
index 0000000..3599782
--- /dev/null
@@ -0,0 +1,357 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// NodeApplyableResource represents a resource that is "applyable":
+// it is ready to be applied and is represented by a diff.
+type NodeApplyableResource struct {
+       *NodeAbstractResource
+}
+
+// GraphNodeCreator
+func (n *NodeApplyableResource) CreateAddr() *ResourceAddress {
+       return n.NodeAbstractResource.Addr
+}
+
+// GraphNodeReferencer, overriding NodeAbstractResource
+func (n *NodeApplyableResource) References() []string {
+       result := n.NodeAbstractResource.References()
+
+       // The "apply" side of a resource generally also depends on the
+       // destruction of its dependencies as well. For example, if a LB
+       // references a set of VMs with ${vm.foo.*.id}, then we must wait for
+       // the destruction so we get the newly updated list of VMs.
+       //
+       // The exception here is CBD. When CBD is set, we don't do this since
+       // it would create a cycle. By not creating a cycle, we require two
+       // applies: on the first apply the creation step uses the OLD
+       // values (pre-destroy), and the second apply updates them.
+       //
+       // This is how Terraform behaved with "legacy" graphs (TF <= 0.7.x).
+       // We mimic that behavior here now and can improve upon it in the future.
+       //
+       // This behavior is tested in graph_build_apply_test.go to test ordering.
+       cbd := n.Config != nil && n.Config.Lifecycle.CreateBeforeDestroy
+       if !cbd {
+               // The "apply" side of a resource always depends on the destruction
+               // of all its dependencies in addition to the creation.
+               for _, v := range result {
+                       result = append(result, v+".destroy")
+               }
+       }
+
+       return result
+}
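+
+// For illustration only (hypothetical resources, not from the original
+// source): if a load balancer references "aws_instance.vm", References()
+// returns both "aws_instance.vm" and "aws_instance.vm.destroy" (unless
+// create_before_destroy is set), so the apply is ordered after the old
+// instances are destroyed.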
+
+// GraphNodeEvalable
+func (n *NodeApplyableResource) EvalTree() EvalNode {
+       addr := n.NodeAbstractResource.Addr
+
+       // stateId is the ID to put into the state
+       stateId := addr.stateId()
+
+       // Build the instance info. More of this will be populated during eval
+       info := &InstanceInfo{
+               Id:   stateId,
+               Type: addr.Type,
+       }
+
+       // Build the resource for eval
+       resource := &Resource{
+               Name:       addr.Name,
+               Type:       addr.Type,
+               CountIndex: addr.Index,
+       }
+       if resource.CountIndex < 0 {
+               resource.CountIndex = 0
+       }
+
+       // Determine the dependencies for the state.
+       stateDeps := n.StateReferences()
+
+       // Eval info is different depending on what kind of resource this is
+       switch n.Config.Mode {
+       case config.ManagedResourceMode:
+               return n.evalTreeManagedResource(
+                       stateId, info, resource, stateDeps,
+               )
+       case config.DataResourceMode:
+               return n.evalTreeDataResource(
+                       stateId, info, resource, stateDeps)
+       default:
+               panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+       }
+}
+
+func (n *NodeApplyableResource) evalTreeDataResource(
+       stateId string, info *InstanceInfo,
+       resource *Resource, stateDeps []string) EvalNode {
+       var provider ResourceProvider
+       var config *ResourceConfig
+       var diff *InstanceDiff
+       var state *InstanceState
+
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       // Build the instance info
+                       &EvalInstanceInfo{
+                               Info: info,
+                       },
+
+                       // Get the saved diff for apply
+                       &EvalReadDiff{
+                               Name: stateId,
+                               Diff: &diff,
+                       },
+
+                       // Stop here if we don't actually have a diff
+                       &EvalIf{
+                               If: func(ctx EvalContext) (bool, error) {
+                                       if diff == nil {
+                                               return true, EvalEarlyExitError{}
+                                       }
+
+                                       if diff.GetAttributesLen() == 0 {
+                                               return true, EvalEarlyExitError{}
+                                       }
+
+                                       return true, nil
+                               },
+                               Then: EvalNoop{},
+                       },
+
+                       // We need to re-interpolate the config here, rather than
+                       // just using the diff's values directly, because we've
+                       // potentially learned more variable values during the
+                       // apply pass that weren't known when the diff was produced.
+                       &EvalInterpolate{
+                               Config:   n.Config.RawConfig.Copy(),
+                               Resource: resource,
+                               Output:   &config,
+                       },
+
+                       &EvalGetProvider{
+                               Name:   n.ProvidedBy()[0],
+                               Output: &provider,
+                       },
+
+                       // Make a new diff with our newly-interpolated config.
+                       &EvalReadDataDiff{
+                               Info:     info,
+                               Config:   &config,
+                               Previous: &diff,
+                               Provider: &provider,
+                               Output:   &diff,
+                       },
+
+                       &EvalReadDataApply{
+                               Info:     info,
+                               Diff:     &diff,
+                               Provider: &provider,
+                               Output:   &state,
+                       },
+
+                       &EvalWriteState{
+                               Name:         stateId,
+                               ResourceType: n.Config.Type,
+                               Provider:     n.Config.Provider,
+                               Dependencies: stateDeps,
+                               State:        &state,
+                       },
+
+                       // Clear the diff now that we've applied it, so
+                       // later nodes won't see a diff that's now a no-op.
+                       &EvalWriteDiff{
+                               Name: stateId,
+                               Diff: nil,
+                       },
+
+                       &EvalUpdateStateHook{},
+               },
+       }
+}
+
+func (n *NodeApplyableResource) evalTreeManagedResource(
+       stateId string, info *InstanceInfo,
+       resource *Resource, stateDeps []string) EvalNode {
+       // Declare a bunch of variables that are used for state during
+       // evaluation. Most of these are written to by address below.
+       var provider ResourceProvider
+       var diff, diffApply *InstanceDiff
+       var state *InstanceState
+       var resourceConfig *ResourceConfig
+       var err error
+       var createNew bool
+       var createBeforeDestroyEnabled bool
+
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       // Build the instance info
+                       &EvalInstanceInfo{
+                               Info: info,
+                       },
+
+                       // Get the saved diff for apply
+                       &EvalReadDiff{
+                               Name: stateId,
+                               Diff: &diffApply,
+                       },
+
+                       // We don't want to do any destroys here (the separate
+                       // destroy node handles those): exit early on a
+                       // destroy-only diff, and strip the destroy flag
+                       // otherwise.
+                       &EvalIf{
+                               If: func(ctx EvalContext) (bool, error) {
+                                       if diffApply == nil {
+                                               return true, EvalEarlyExitError{}
+                                       }
+
+                                       if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 {
+                                               return true, EvalEarlyExitError{}
+                                       }
+
+                                       diffApply.SetDestroy(false)
+                                       return true, nil
+                               },
+                               Then: EvalNoop{},
+                       },
+
+                       &EvalIf{
+                               If: func(ctx EvalContext) (bool, error) {
+                                       destroy := false
+                                       if diffApply != nil {
+                                               destroy = diffApply.GetDestroy() || diffApply.RequiresNew()
+                                       }
+
+                                       createBeforeDestroyEnabled =
+                                               n.Config.Lifecycle.CreateBeforeDestroy &&
+                                                       destroy
+
+                                       return createBeforeDestroyEnabled, nil
+                               },
+                               Then: &EvalDeposeState{
+                                       Name: stateId,
+                               },
+                       },
+
+                       &EvalInterpolate{
+                               Config:   n.Config.RawConfig.Copy(),
+                               Resource: resource,
+                               Output:   &resourceConfig,
+                       },
+                       &EvalGetProvider{
+                               Name:   n.ProvidedBy()[0],
+                               Output: &provider,
+                       },
+                       &EvalReadState{
+                               Name:   stateId,
+                               Output: &state,
+                       },
+                       // Re-run validation to catch any errors we missed, e.g. type
+                       // mismatches on computed values.
+                       &EvalValidateResource{
+                               Provider:       &provider,
+                               Config:         &resourceConfig,
+                               ResourceName:   n.Config.Name,
+                               ResourceType:   n.Config.Type,
+                               ResourceMode:   n.Config.Mode,
+                               IgnoreWarnings: true,
+                       },
+                       &EvalDiff{
+                               Info:       info,
+                               Config:     &resourceConfig,
+                               Resource:   n.Config,
+                               Provider:   &provider,
+                               Diff:       &diffApply,
+                               State:      &state,
+                               OutputDiff: &diffApply,
+                       },
+
+                       // Get the saved diff
+                       &EvalReadDiff{
+                               Name: stateId,
+                               Diff: &diff,
+                       },
+
+                       // Compare the diffs
+                       &EvalCompareDiff{
+                               Info: info,
+                               One:  &diff,
+                               Two:  &diffApply,
+                       },
+
+                       &EvalGetProvider{
+                               Name:   n.ProvidedBy()[0],
+                               Output: &provider,
+                       },
+                       &EvalReadState{
+                               Name:   stateId,
+                               Output: &state,
+                       },
+                       // Call pre-apply hook
+                       &EvalApplyPre{
+                               Info:  info,
+                               State: &state,
+                               Diff:  &diffApply,
+                       },
+                       &EvalApply{
+                               Info:      info,
+                               State:     &state,
+                               Diff:      &diffApply,
+                               Provider:  &provider,
+                               Output:    &state,
+                               Error:     &err,
+                               CreateNew: &createNew,
+                       },
+                       &EvalWriteState{
+                               Name:         stateId,
+                               ResourceType: n.Config.Type,
+                               Provider:     n.Config.Provider,
+                               Dependencies: stateDeps,
+                               State:        &state,
+                       },
+                       &EvalApplyProvisioners{
+                               Info:           info,
+                               State:          &state,
+                               Resource:       n.Config,
+                               InterpResource: resource,
+                               CreateNew:      &createNew,
+                               Error:          &err,
+                               When:           config.ProvisionerWhenCreate,
+                       },
+                       &EvalIf{
+                               If: func(ctx EvalContext) (bool, error) {
+                                       return createBeforeDestroyEnabled && err != nil, nil
+                               },
+                               Then: &EvalUndeposeState{
+                                       Name:  stateId,
+                                       State: &state,
+                               },
+                               Else: &EvalWriteState{
+                                       Name:         stateId,
+                                       ResourceType: n.Config.Type,
+                                       Provider:     n.Config.Provider,
+                                       Dependencies: stateDeps,
+                                       State:        &state,
+                               },
+                       },
+
+                       // We clear the diff out here so that future nodes
+                       // don't see a diff that is already complete. There
+                       // is no longer a diff!
+                       &EvalWriteDiff{
+                               Name: stateId,
+                               Diff: nil,
+                       },
+
+                       &EvalApplyPost{
+                               Info:  info,
+                               State: &state,
+                               Error: &err,
+                       },
+                       &EvalUpdateStateHook{},
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_destroy.go
new file mode 100644 (file)
index 0000000..c2efd2c
--- /dev/null
@@ -0,0 +1,288 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// NodeDestroyResource represents a resource that is to be destroyed.
+type NodeDestroyResource struct {
+       *NodeAbstractResource
+}
+
+func (n *NodeDestroyResource) Name() string {
+       return n.NodeAbstractResource.Name() + " (destroy)"
+}
+
+// GraphNodeDestroyer
+func (n *NodeDestroyResource) DestroyAddr() *ResourceAddress {
+       return n.Addr
+}
+
+// GraphNodeDestroyerCBD
+func (n *NodeDestroyResource) CreateBeforeDestroy() bool {
+       // If we have no config, we just assume no
+       if n.Config == nil {
+               return false
+       }
+
+       return n.Config.Lifecycle.CreateBeforeDestroy
+}
+
+// GraphNodeDestroyerCBD
+func (n *NodeDestroyResource) ModifyCreateBeforeDestroy(v bool) error {
+       // If we have no config, do nothing since it won't affect the
+       // create step anyway.
+       if n.Config == nil {
+               return nil
+       }
+
+       // Set CBD to true
+       n.Config.Lifecycle.CreateBeforeDestroy = true
+
+       return nil
+}
+
+// GraphNodeReferenceable, overriding NodeAbstractResource
+func (n *NodeDestroyResource) ReferenceableName() []string {
+       // We modify our referenceable name to have the suffix of ".destroy"
+       // since depending on the creation side doesn't necessarily mean
+       // depending on destruction.
+       suffix := ".destroy"
+
+       // If we're CBD, we also append "-cbd". This is because CBD will set up
+       // its own edges (in CBDEdgeTransformer). Depending on the "destroy"
+       // side generally doesn't mean depending on CBD as well. See GH-11349.
+       if n.CreateBeforeDestroy() {
+               suffix += "-cbd"
+       }
+
+       result := n.NodeAbstractResource.ReferenceableName()
+       for i, v := range result {
+               result[i] = v + suffix
+       }
+
+       return result
+}
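+
+// For illustration only (hypothetical address, not from the original
+// source): a non-CBD destroy node for "aws_instance.web" is referenceable
+// as "aws_instance.web.destroy", "aws_instance.web.*.destroy", and
+// "aws_instance.web.0.destroy"; with create_before_destroy the suffix
+// becomes ".destroy-cbd".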
+
+// GraphNodeReferencer, overriding NodeAbstractResource
+func (n *NodeDestroyResource) References() []string {
+       // If we have a config, then we need to include destroy-time dependencies
+       if c := n.Config; c != nil {
+               var result []string
+               for _, p := range c.Provisioners {
+                       // We include conn info and config for destroy time provisioners
+                       // as dependencies that we have.
+                       if p.When == config.ProvisionerWhenDestroy {
+                               result = append(result, ReferencesFromConfig(p.ConnInfo)...)
+                               result = append(result, ReferencesFromConfig(p.RawConfig)...)
+                       }
+               }
+
+               return result
+       }
+
+       return nil
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeDestroyResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+       // If we have no address, we do nothing
+       if n.Addr == nil {
+               return nil, nil
+       }
+
+       state, lock := ctx.State()
+       lock.RLock()
+       defer lock.RUnlock()
+
+       // Start creating the steps
+       steps := make([]GraphTransformer, 0, 5)
+
+       // We want deposed resources in the state to be destroyed
+       steps = append(steps, &DeposedTransformer{
+               State: state,
+               View:  n.Addr.stateId(),
+       })
+
+       // Target
+       steps = append(steps, &TargetsTransformer{
+               ParsedTargets: n.Targets,
+       })
+
+       // Always end with the root being added
+       steps = append(steps, &RootTransformer{})
+
+       // Build the graph
+       b := &BasicGraphBuilder{
+               Steps: steps,
+               Name:  "NodeResourceDestroy",
+       }
+       return b.Build(ctx.Path())
+}
+
+// GraphNodeEvalable
+func (n *NodeDestroyResource) EvalTree() EvalNode {
+       // stateId is the ID to put into the state
+       stateId := n.Addr.stateId()
+
+       // Build the instance info. More of this will be populated during eval
+       info := &InstanceInfo{
+               Id:          stateId,
+               Type:        n.Addr.Type,
+               uniqueExtra: "destroy",
+       }
+
+       // Build the resource for eval
+       addr := n.Addr
+       resource := &Resource{
+               Name:       addr.Name,
+               Type:       addr.Type,
+               CountIndex: addr.Index,
+       }
+       if resource.CountIndex < 0 {
+               resource.CountIndex = 0
+       }
+
+       // Get our state
+       rs := n.ResourceState
+       if rs == nil {
+               rs = &ResourceState{}
+       }
+
+       var diffApply *InstanceDiff
+       var provider ResourceProvider
+       var state *InstanceState
+       var err error
+       return &EvalOpFilter{
+               Ops: []walkOperation{walkApply, walkDestroy},
+               Node: &EvalSequence{
+                       Nodes: []EvalNode{
+                               // Get the saved diff for apply
+                               &EvalReadDiff{
+                                       Name: stateId,
+                                       Diff: &diffApply,
+                               },
+
+                               // Filter the diff so we only get the destroy
+                               &EvalFilterDiff{
+                                       Diff:    &diffApply,
+                                       Output:  &diffApply,
+                                       Destroy: true,
+                               },
+
+                               // Exit early if the diff contains no destroy; this node has nothing to do
+                               &EvalIf{
+                                       If: func(ctx EvalContext) (bool, error) {
+                                               if diffApply != nil && diffApply.GetDestroy() {
+                                                       return true, nil
+                                               }
+
+                                               return true, EvalEarlyExitError{}
+                                       },
+                                       Then: EvalNoop{},
+                               },
+
+                               // Load the instance info so we have the module path set
+                               &EvalInstanceInfo{Info: info},
+
+                               &EvalGetProvider{
+                                       Name:   n.ProvidedBy()[0],
+                                       Output: &provider,
+                               },
+                               &EvalReadState{
+                                       Name:   stateId,
+                                       Output: &state,
+                               },
+                               &EvalRequireState{
+                                       State: &state,
+                               },
+
+                               // Call pre-apply hook
+                               &EvalApplyPre{
+                                       Info:  info,
+                                       State: &state,
+                                       Diff:  &diffApply,
+                               },
+
+                               // Run destroy provisioners if not tainted
+                               &EvalIf{
+                                       If: func(ctx EvalContext) (bool, error) {
+                                               if state != nil && state.Tainted {
+                                                       return false, nil
+                                               }
+
+                                               return true, nil
+                                       },
+
+                                       Then: &EvalApplyProvisioners{
+                                               Info:           info,
+                                               State:          &state,
+                                               Resource:       n.Config,
+                                               InterpResource: resource,
+                                               Error:          &err,
+                                               When:           config.ProvisionerWhenDestroy,
+                                       },
+                               },
+
+                               // If we have a provisioning error, then we just call
+                               // the post-apply hook now.
+                               &EvalIf{
+                                       If: func(ctx EvalContext) (bool, error) {
+                                               return err != nil, nil
+                                       },
+
+                                       Then: &EvalApplyPost{
+                                               Info:  info,
+                                               State: &state,
+                                               Error: &err,
+                                       },
+                               },
+
+                               // Make sure we handle data sources properly.
+                               &EvalIf{
+                                       If: func(ctx EvalContext) (bool, error) {
+                                               if n.Addr == nil {
+                                                       return false, fmt.Errorf("nil address")
+                                               }
+
+                                               if n.Addr.Mode == config.DataResourceMode {
+                                                       return true, nil
+                                               }
+
+                                               return false, nil
+                                       },
+
+                                       Then: &EvalReadDataApply{
+                                               Info:     info,
+                                               Diff:     &diffApply,
+                                               Provider: &provider,
+                                               Output:   &state,
+                                       },
+                                       Else: &EvalApply{
+                                               Info:     info,
+                                               State:    &state,
+                                               Diff:     &diffApply,
+                                               Provider: &provider,
+                                               Output:   &state,
+                                               Error:    &err,
+                                       },
+                               },
+                               &EvalWriteState{
+                                       Name:         stateId,
+                                       ResourceType: n.Addr.Type,
+                                       Provider:     rs.Provider,
+                                       Dependencies: rs.Dependencies,
+                                       State:        &state,
+                               },
+                               &EvalApplyPost{
+                                       Info:  info,
+                                       State: &state,
+                                       Error: &err,
+                               },
+                               &EvalUpdateStateHook{},
+                       },
+               },
+       }
+}
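
The EvalIf near the top of this tree shows the eval framework's early-exit
idiom: returning EvalEarlyExitError from the If callback aborts the whole
EvalSequence without being reported as a failure. A minimal standalone sketch
of that control flow, with hypothetical stand-ins for the framework types:

package main

import (
	"errors"
	"fmt"
)

// errEarlyExit plays the role of EvalEarlyExitError: a sentinel that the
// sequence runner treats as "stop here, successfully".
var errEarlyExit = errors.New("early exit")

type step func() error

func runSequence(steps []step) error {
	for _, s := range steps {
		if err := s(); err != nil {
			if errors.Is(err, errEarlyExit) {
				return nil // not a failure; there was just nothing to do
			}
			return err
		}
	}
	return nil
}

func main() {
	hasDestroy := false // pretend the filtered diff contained no destroy
	err := runSequence([]step{
		func() error {
			if hasDestroy {
				return nil
			}
			return errEarlyExit // skip the remaining destroy steps
		},
		func() error { fmt.Println("destroying"); return nil },
	})
	fmt.Println("err:", err) // err: <nil>
}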
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan.go
new file mode 100644 (file)
index 0000000..52bbf88
--- /dev/null
@@ -0,0 +1,83 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/dag"
+)
+
+// NodePlannableResource represents a resource that is "plannable":
+// it is ready to be planned in order to create a diff.
+type NodePlannableResource struct {
+       *NodeAbstractCountResource
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+       // Grab the state which we read
+       state, lock := ctx.State()
+       lock.RLock()
+       defer lock.RUnlock()
+
+       // Expand the resource count which must be available by now from EvalTree
+       count, err := n.Config.Count()
+       if err != nil {
+               return nil, err
+       }
+
+       // The concrete resource factory we'll use
+       concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+               // Add the config and state since we don't do that via transforms
+               a.Config = n.Config
+
+               return &NodePlannableResourceInstance{
+                       NodeAbstractResource: a,
+               }
+       }
+
+       // The concrete resource factory we'll use for orphans
+       concreteResourceOrphan := func(a *NodeAbstractResource) dag.Vertex {
+               // Add the config and state since we don't do that via transforms
+               a.Config = n.Config
+
+               return &NodePlannableResourceOrphan{
+                       NodeAbstractResource: a,
+               }
+       }
+
+       // Start creating the steps
+       steps := []GraphTransformer{
+               // Expand the count.
+               &ResourceCountTransformer{
+                       Concrete: concreteResource,
+                       Count:    count,
+                       Addr:     n.ResourceAddr(),
+               },
+
+               // Add the count orphans
+               &OrphanResourceCountTransformer{
+                       Concrete: concreteResourceOrphan,
+                       Count:    count,
+                       Addr:     n.ResourceAddr(),
+                       State:    state,
+               },
+
+               // Attach the state
+               &AttachStateTransformer{State: state},
+
+               // Targeting
+               &TargetsTransformer{ParsedTargets: n.Targets},
+
+               // Connect references so ordering is correct
+               &ReferenceTransformer{},
+
+               // Make sure there is a single root
+               &RootTransformer{},
+       }
+
+       // Build the graph
+       b := &BasicGraphBuilder{
+               Steps:    steps,
+               Validate: true,
+               Name:     "NodePlannableResource",
+       }
+       return b.Build(ctx.Path())
+}
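
The two count transformers above are opposite halves of one idea:
ResourceCountTransformer adds one instance node per configured index, and
OrphanResourceCountTransformer adds destroy nodes for state entries at or
beyond the new count. A rough standalone sketch of that split, using a
hypothetical helper rather than this package's API:

package main

import "fmt"

// expand mirrors the two transformer steps: planned instances up to count,
// then orphan entries for any state indexes beyond it.
func expand(addr string, count, inState int) []string {
	var nodes []string
	for i := 0; i < count; i++ {
		nodes = append(nodes, fmt.Sprintf("%s[%d]", addr, i)) // planned instance
	}
	for i := count; i < inState; i++ {
		nodes = append(nodes, fmt.Sprintf("%s[%d] (orphan)", addr, i)) // to destroy
	}
	return nodes
}

func main() {
	// count lowered from 4 to 2: indexes 2 and 3 become orphans.
	fmt.Println(expand("aws_instance.web", 2, 4))
}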
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_destroy.go
new file mode 100644 (file)
index 0000000..9b02362
--- /dev/null
@@ -0,0 +1,53 @@
+package terraform
+
+// NodePlanDestroyableResource represents a resource that can be planned
+// for destruction: evaluating it produces a destroy diff for the plan.
+type NodePlanDestroyableResource struct {
+       *NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodePlanDestroyableResource) DestroyAddr() *ResourceAddress {
+       return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodePlanDestroyableResource) EvalTree() EvalNode {
+       addr := n.NodeAbstractResource.Addr
+
+       // stateId is the ID to put into the state
+       stateId := addr.stateId()
+
+       // Build the instance info. More of this will be populated during eval
+       info := &InstanceInfo{
+               Id:   stateId,
+               Type: addr.Type,
+       }
+
+       // Declare a bunch of variables that are used for state during
+       // evaluation. Most of these are written to by address below.
+       var diff *InstanceDiff
+       var state *InstanceState
+
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalReadState{
+                               Name:   stateId,
+                               Output: &state,
+                       },
+                       &EvalDiffDestroy{
+                               Info:   info,
+                               State:  &state,
+                               Output: &diff,
+                       },
+                       &EvalCheckPreventDestroy{
+                               Resource: n.Config,
+                               Diff:     &diff,
+                       },
+                       &EvalWriteDiff{
+                               Name: stateId,
+                               Diff: &diff,
+                       },
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_instance.go
new file mode 100644 (file)
index 0000000..b529569
--- /dev/null
@@ -0,0 +1,190 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// NodePlannableResourceInstance represents a _single_ resource
+// instance that is plannable. That is, it represents a single
+// count index, for example.
+type NodePlannableResourceInstance struct {
+       *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodePlannableResourceInstance) EvalTree() EvalNode {
+       addr := n.NodeAbstractResource.Addr
+
+       // stateId is the ID to put into the state
+       stateId := addr.stateId()
+
+       // Build the instance info. More of this will be populated during eval
+       info := &InstanceInfo{
+               Id:         stateId,
+               Type:       addr.Type,
+               ModulePath: normalizeModulePath(addr.Path),
+       }
+
+       // Build the resource for eval
+       resource := &Resource{
+               Name:       addr.Name,
+               Type:       addr.Type,
+               CountIndex: addr.Index,
+       }
+       if resource.CountIndex < 0 {
+               resource.CountIndex = 0
+       }
+
+       // Determine the dependencies for the state.
+       stateDeps := n.StateReferences()
+
+       // Eval info is different depending on what kind of resource this is
+       switch n.Config.Mode {
+       case config.ManagedResourceMode:
+               return n.evalTreeManagedResource(
+                       stateId, info, resource, stateDeps,
+               )
+       case config.DataResourceMode:
+               return n.evalTreeDataResource(
+                       stateId, info, resource, stateDeps)
+       default:
+               panic(fmt.Errorf("unsupported resource mode %s", n.Config.Mode))
+       }
+}
+
+func (n *NodePlannableResourceInstance) evalTreeDataResource(
+       stateId string, info *InstanceInfo,
+       resource *Resource, stateDeps []string) EvalNode {
+       var provider ResourceProvider
+       var config *ResourceConfig
+       var diff *InstanceDiff
+       var state *InstanceState
+
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalReadState{
+                               Name:   stateId,
+                               Output: &state,
+                       },
+
+                       // We need to re-interpolate the config here because some
+                       // of the attributes may have become computed during
+                       // earlier planning, due to other resources having
+                       // "requires new resource" diffs.
+                       &EvalInterpolate{
+                               Config:   n.Config.RawConfig.Copy(),
+                               Resource: resource,
+                               Output:   &config,
+                       },
+
+                       &EvalIf{
+                               If: func(ctx EvalContext) (bool, error) {
+                                       computed := config.ComputedKeys != nil && len(config.ComputedKeys) > 0
+
+                                       // If the configuration is complete and we
+                                       // already have a state then we don't need to
+                                       // do any further work during apply, because we
+                                       // already populated the state during refresh.
+                                       if !computed && state != nil {
+                                               return true, EvalEarlyExitError{}
+                                       }
+
+                                       return true, nil
+                               },
+                               Then: EvalNoop{},
+                       },
+
+                       &EvalGetProvider{
+                               Name:   n.ProvidedBy()[0],
+                               Output: &provider,
+                       },
+
+                       &EvalReadDataDiff{
+                               Info:        info,
+                               Config:      &config,
+                               Provider:    &provider,
+                               Output:      &diff,
+                               OutputState: &state,
+                       },
+
+                       &EvalWriteState{
+                               Name:         stateId,
+                               ResourceType: n.Config.Type,
+                               Provider:     n.Config.Provider,
+                               Dependencies: stateDeps,
+                               State:        &state,
+                       },
+
+                       &EvalWriteDiff{
+                               Name: stateId,
+                               Diff: &diff,
+                       },
+               },
+       }
+}
+
+func (n *NodePlannableResourceInstance) evalTreeManagedResource(
+       stateId string, info *InstanceInfo,
+       resource *Resource, stateDeps []string) EvalNode {
+       // Declare a bunch of variables that are used for state during
+       // evaluation. Most of these are written to by address below.
+       var provider ResourceProvider
+       var diff *InstanceDiff
+       var state *InstanceState
+       var resourceConfig *ResourceConfig
+
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalInterpolate{
+                               Config:   n.Config.RawConfig.Copy(),
+                               Resource: resource,
+                               Output:   &resourceConfig,
+                       },
+                       &EvalGetProvider{
+                               Name:   n.ProvidedBy()[0],
+                               Output: &provider,
+                       },
+                       // Re-run validation to catch any errors we missed, e.g. type
+                       // mismatches on computed values.
+                       &EvalValidateResource{
+                               Provider:       &provider,
+                               Config:         &resourceConfig,
+                               ResourceName:   n.Config.Name,
+                               ResourceType:   n.Config.Type,
+                               ResourceMode:   n.Config.Mode,
+                               IgnoreWarnings: true,
+                       },
+                       &EvalReadState{
+                               Name:   stateId,
+                               Output: &state,
+                       },
+                       &EvalDiff{
+                               Name:        stateId,
+                               Info:        info,
+                               Config:      &resourceConfig,
+                               Resource:    n.Config,
+                               Provider:    &provider,
+                               State:       &state,
+                               OutputDiff:  &diff,
+                               OutputState: &state,
+                       },
+                       &EvalCheckPreventDestroy{
+                               Resource: n.Config,
+                               Diff:     &diff,
+                       },
+                       &EvalWriteState{
+                               Name:         stateId,
+                               ResourceType: n.Config.Type,
+                               Provider:     n.Config.Provider,
+                               Dependencies: stateDeps,
+                               State:        &state,
+                       },
+                       &EvalWriteDiff{
+                               Name: stateId,
+                               Diff: &diff,
+                       },
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_plan_orphan.go
new file mode 100644 (file)
index 0000000..73d6e41
--- /dev/null
@@ -0,0 +1,54 @@
+package terraform
+
+// NodePlannableResourceOrphan represents a resource instance that is present
+// in the state but no longer in the configuration; planning it produces a
+// destroy diff.
+type NodePlannableResourceOrphan struct {
+       *NodeAbstractResource
+}
+
+func (n *NodePlannableResourceOrphan) Name() string {
+       return n.NodeAbstractResource.Name() + " (orphan)"
+}
+
+// GraphNodeEvalable
+func (n *NodePlannableResourceOrphan) EvalTree() EvalNode {
+       addr := n.NodeAbstractResource.Addr
+
+       // stateId is the ID to put into the state
+       stateId := addr.stateId()
+
+       // Build the instance info. More of this will be populated during eval
+       info := &InstanceInfo{
+               Id:         stateId,
+               Type:       addr.Type,
+               ModulePath: normalizeModulePath(addr.Path),
+       }
+
+       // Declare a bunch of variables that are used for state during
+       // evaluation. Most of these are written to by address below.
+       var diff *InstanceDiff
+       var state *InstanceState
+
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalReadState{
+                               Name:   stateId,
+                               Output: &state,
+                       },
+                       &EvalDiffDestroy{
+                               Info:   info,
+                               State:  &state,
+                               Output: &diff,
+                       },
+                       &EvalCheckPreventDestroy{
+                               Resource:   n.Config,
+                               ResourceId: stateId,
+                               Diff:       &diff,
+                       },
+                       &EvalWriteDiff{
+                               Name: stateId,
+                               Diff: &diff,
+                       },
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_refresh.go
new file mode 100644 (file)
index 0000000..3a44926
--- /dev/null
@@ -0,0 +1,100 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// NodeRefreshableResource represents a resource whose state can be
+// refreshed from the real infrastructure during a refresh walk.
+type NodeRefreshableResource struct {
+       *NodeAbstractResource
+}
+
+// GraphNodeDestroyer
+func (n *NodeRefreshableResource) DestroyAddr() *ResourceAddress {
+       return n.Addr
+}
+
+// GraphNodeEvalable
+func (n *NodeRefreshableResource) EvalTree() EvalNode {
+       // Eval info is different depending on what kind of resource this is
+       switch mode := n.Addr.Mode; mode {
+       case config.ManagedResourceMode:
+               return n.evalTreeManagedResource()
+
+       case config.DataResourceMode:
+               // Get the data source node. If we don't have a configuration
+               // then it is an orphan so we destroy it (remove it from the state).
+               var dn GraphNodeEvalable
+               if n.Config != nil {
+                       dn = &NodeRefreshableDataResourceInstance{
+                               NodeAbstractResource: n.NodeAbstractResource,
+                       }
+               } else {
+                       dn = &NodeDestroyableDataResource{
+                               NodeAbstractResource: n.NodeAbstractResource,
+                       }
+               }
+
+               return dn.EvalTree()
+       default:
+               panic(fmt.Errorf("unsupported resource mode %s", mode))
+       }
+}
+
+func (n *NodeRefreshableResource) evalTreeManagedResource() EvalNode {
+       addr := n.NodeAbstractResource.Addr
+
+       // stateId is the ID to put into the state
+       stateId := addr.stateId()
+
+       // Build the instance info. More of this will be populated during eval
+       info := &InstanceInfo{
+               Id:   stateId,
+               Type: addr.Type,
+       }
+
+       // Declare a bunch of variables that are used for state during
+       // evaluation. Most of these are written to by address below.
+       var provider ResourceProvider
+       var state *InstanceState
+
+       // A nil resource state here happened during initial development. All
+       // known cases were fixed and tested, but as a sanity check let's
+       // assert here.
+       if n.ResourceState == nil {
+               err := fmt.Errorf(
+                       "No resource state attached for addr: %s\n\n"+
+                               "This is a bug. Please report this to Terraform with your configuration\n"+
+                               "and state attached. Please be careful to scrub any sensitive information.",
+                       addr)
+               return &EvalReturnError{Error: &err}
+       }
+
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalGetProvider{
+                               Name:   n.ProvidedBy()[0],
+                               Output: &provider,
+                       },
+                       &EvalReadState{
+                               Name:   stateId,
+                               Output: &state,
+                       },
+                       &EvalRefresh{
+                               Info:     info,
+                               Provider: &provider,
+                               State:    &state,
+                               Output:   &state,
+                       },
+                       &EvalWriteState{
+                               Name:         stateId,
+                               ResourceType: n.ResourceState.Type,
+                               Provider:     n.ResourceState.Provider,
+                               Dependencies: n.ResourceState.Dependencies,
+                               State:        &state,
+                       },
+               },
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go b/vendor/github.com/hashicorp/terraform/terraform/node_resource_validate.go
new file mode 100644 (file)
index 0000000..f528f24
--- /dev/null
@@ -0,0 +1,158 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/dag"
+)
+
+// NodeValidatableResource represents a resource that is used for validation
+// only.
+type NodeValidatableResource struct {
+       *NodeAbstractCountResource
+}
+
+// GraphNodeEvalable
+func (n *NodeValidatableResource) EvalTree() EvalNode {
+       // Ensure we're validating
+       c := n.NodeAbstractCountResource
+       c.Validate = true
+       return c.EvalTree()
+}
+
+// GraphNodeDynamicExpandable
+func (n *NodeValidatableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+       // Grab the state which we read
+       state, lock := ctx.State()
+       lock.RLock()
+       defer lock.RUnlock()
+
+       // Expand the resource count which must be available by now from EvalTree
+       count := 1
+       if n.Config.RawCount.Value() != unknownValue() {
+               var err error
+               count, err = n.Config.Count()
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       // The concrete resource factory we'll use
+       concreteResource := func(a *NodeAbstractResource) dag.Vertex {
+               // Add the config and state since we don't do that via transforms
+               a.Config = n.Config
+
+               return &NodeValidatableResourceInstance{
+                       NodeAbstractResource: a,
+               }
+       }
+
+       // Start creating the steps
+       steps := []GraphTransformer{
+               // Expand the count.
+               &ResourceCountTransformer{
+                       Concrete: concreteResource,
+                       Count:    count,
+                       Addr:     n.ResourceAddr(),
+               },
+
+               // Attach the state
+               &AttachStateTransformer{State: state},
+
+               // Targeting
+               &TargetsTransformer{ParsedTargets: n.Targets},
+
+               // Connect references so ordering is correct
+               &ReferenceTransformer{},
+
+               // Make sure there is a single root
+               &RootTransformer{},
+       }
+
+       // Build the graph
+       b := &BasicGraphBuilder{
+               Steps:    steps,
+               Validate: true,
+               Name:     "NodeValidatableResource",
+       }
+
+       return b.Build(ctx.Path())
+}
+
+// NodeValidatableResourceInstance represents a _single_ resource instance
+// to validate.
+type NodeValidatableResourceInstance struct {
+       *NodeAbstractResource
+}
+
+// GraphNodeEvalable
+func (n *NodeValidatableResourceInstance) EvalTree() EvalNode {
+       addr := n.NodeAbstractResource.Addr
+
+       // Build the resource for eval
+       resource := &Resource{
+               Name:       addr.Name,
+               Type:       addr.Type,
+               CountIndex: addr.Index,
+       }
+       if resource.CountIndex < 0 {
+               resource.CountIndex = 0
+       }
+
+       // Declare a bunch of variables that are used for state during
+       // evaluation. Most of these are written to by address below.
+       var config *ResourceConfig
+       var provider ResourceProvider
+
+       seq := &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalValidateResourceSelfRef{
+                               Addr:   &addr,
+                               Config: &n.Config.RawConfig,
+                       },
+                       &EvalGetProvider{
+                               Name:   n.ProvidedBy()[0],
+                               Output: &provider,
+                       },
+                       &EvalInterpolate{
+                               Config:   n.Config.RawConfig.Copy(),
+                               Resource: resource,
+                               Output:   &config,
+                       },
+                       &EvalValidateResource{
+                               Provider:     &provider,
+                               Config:       &config,
+                               ResourceName: n.Config.Name,
+                               ResourceType: n.Config.Type,
+                               ResourceMode: n.Config.Mode,
+                       },
+               },
+       }
+
+       // Validate all the provisioners
+       for _, p := range n.Config.Provisioners {
+               var provisioner ResourceProvisioner
+               var connConfig *ResourceConfig
+               seq.Nodes = append(
+                       seq.Nodes,
+                       &EvalGetProvisioner{
+                               Name:   p.Type,
+                               Output: &provisioner,
+                       },
+                       &EvalInterpolate{
+                               Config:   p.RawConfig.Copy(),
+                               Resource: resource,
+                               Output:   &config,
+                       },
+                       &EvalInterpolate{
+                               Config:   p.ConnInfo.Copy(),
+                               Resource: resource,
+                               Output:   &connConfig,
+                       },
+                       &EvalValidateProvisioner{
+                               Provisioner: &provisioner,
+                               Config:      &config,
+                               ConnConfig:  &connConfig,
+                       },
+               )
+       }
+
+       return seq
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go b/vendor/github.com/hashicorp/terraform/terraform/node_root_variable.go
new file mode 100644 (file)
index 0000000..cb61a4e
--- /dev/null
@@ -0,0 +1,22 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// NodeRootVariable represents a root variable input.
+type NodeRootVariable struct {
+       Config *config.Variable
+}
+
+func (n *NodeRootVariable) Name() string {
+       result := fmt.Sprintf("var.%s", n.Config.Name)
+       return result
+}
+
+// GraphNodeReferenceable
+func (n *NodeRootVariable) ReferenceableName() []string {
+       return []string{n.Name()}
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/path.go b/vendor/github.com/hashicorp/terraform/terraform/path.go
new file mode 100644 (file)
index 0000000..ca99685
--- /dev/null
@@ -0,0 +1,24 @@
+package terraform
+
+import (
+       "crypto/md5"
+       "encoding/hex"
+)
+
+// PathCacheKey returns a cache key for a module path.
+//
+// TODO: test
+func PathCacheKey(path []string) string {
+       // There is probably a better way to do this, but this is working for now.
+       // We just create an MD5 hash of all the MD5 hashes of all the path
+       // elements. This gets us the property that it is unique per ordering.
+       hash := md5.New()
+       for _, p := range path {
+               single := md5.Sum([]byte(p))
+               if _, err := hash.Write(single[:]); err != nil {
+                       panic(err)
+               }
+       }
+
+       return hex.EncodeToString(hash.Sum(nil))
+}
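
Because each element is hashed individually before being folded into the
outer hash, equal paths always produce the same key and reorderings of the
same elements produce different keys. A quick standalone check of that
property, re-implementing the same hashing outside this package:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

// pathCacheKey re-implements PathCacheKey's hash-of-hashes scheme.
func pathCacheKey(path []string) string {
	hash := md5.New()
	for _, p := range path {
		single := md5.Sum([]byte(p))
		hash.Write(single[:]) // hash.Hash.Write never returns an error
	}
	return hex.EncodeToString(hash.Sum(nil))
}

func main() {
	fmt.Println(pathCacheKey([]string{"root", "child"}))
	fmt.Println(pathCacheKey([]string{"child", "root"})) // differs: order matters
}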
diff --git a/vendor/github.com/hashicorp/terraform/terraform/plan.go b/vendor/github.com/hashicorp/terraform/terraform/plan.go
new file mode 100644 (file)
index 0000000..ea08845
--- /dev/null
@@ -0,0 +1,153 @@
+package terraform
+
+import (
+       "bytes"
+       "encoding/gob"
+       "errors"
+       "fmt"
+       "io"
+       "sync"
+
+       "github.com/hashicorp/terraform/config/module"
+)
+
+func init() {
+       gob.Register(make([]interface{}, 0))
+       gob.Register(make([]map[string]interface{}, 0))
+       gob.Register(make(map[string]interface{}))
+       gob.Register(make(map[string]string))
+}
+
+// Plan represents a single Terraform execution plan, which contains
+// all the information necessary to make an infrastructure change.
+//
+// A plan has to contain basically the entire state of the world
+// necessary to make a change: the state, diff, config, backend config, etc.
+// This is so that it can run alone without any other data.
+type Plan struct {
+       Diff    *Diff
+       Module  *module.Tree
+       State   *State
+       Vars    map[string]interface{}
+       Targets []string
+
+       // Backend is the backend that this plan should use and store data with.
+       Backend *BackendState
+
+       once sync.Once
+}
+
+// Context returns a Context with the data encapsulated in this plan.
+//
+// The following fields in opts are overridden by the plan: Module,
+// Diff, State, Targets, Variables.
+func (p *Plan) Context(opts *ContextOpts) (*Context, error) {
+       opts.Diff = p.Diff
+       opts.Module = p.Module
+       opts.State = p.State
+       opts.Targets = p.Targets
+
+       opts.Variables = make(map[string]interface{})
+       for k, v := range p.Vars {
+               opts.Variables[k] = v
+       }
+
+       return NewContext(opts)
+}
+
+func (p *Plan) String() string {
+       buf := new(bytes.Buffer)
+       buf.WriteString("DIFF:\n\n")
+       buf.WriteString(p.Diff.String())
+       buf.WriteString("\n\nSTATE:\n\n")
+       buf.WriteString(p.State.String())
+       return buf.String()
+}
+
+func (p *Plan) init() {
+       p.once.Do(func() {
+               if p.Diff == nil {
+                       p.Diff = new(Diff)
+                       p.Diff.init()
+               }
+
+               if p.State == nil {
+                       p.State = new(State)
+                       p.State.init()
+               }
+
+               if p.Vars == nil {
+                       p.Vars = make(map[string]interface{})
+               }
+       })
+}
+
+// A format version byte is embedded in the plan file so that the file
+// format can be changed in the future if needed.
+const planFormatMagic = "tfplan"
+const planFormatVersion byte = 1
+
+// ReadPlan reads a plan structure out of a reader in the format that
+// was written by WritePlan.
+func ReadPlan(src io.Reader) (*Plan, error) {
+       var result *Plan
+       var err error
+       n := 0
+
+       // Verify the magic bytes; io.ReadFull accumulates short reads.
+       magic := make([]byte, len(planFormatMagic))
+       if _, err = io.ReadFull(src, magic); err != nil {
+               return nil, fmt.Errorf("error while reading magic bytes: %s", err)
+       }
+       if string(magic) != planFormatMagic {
+               return nil, fmt.Errorf("not a valid plan file")
+       }
+
+       // Verify the version is something we can read
+       var formatByte [1]byte
+       n, err = src.Read(formatByte[:])
+       if err != nil {
+               return nil, err
+       }
+       if n != len(formatByte) {
+               return nil, errors.New("failed to read plan version byte")
+       }
+
+       if formatByte[0] != planFormatVersion {
+               return nil, fmt.Errorf("unknown plan file version: %d", formatByte[0])
+       }
+
+       dec := gob.NewDecoder(src)
+       if err := dec.Decode(&result); err != nil {
+               return nil, err
+       }
+
+       return result, nil
+}
+
+// WritePlan writes a plan to the given writer in a binary format.
+func WritePlan(d *Plan, dst io.Writer) error {
+       // Write the magic bytes so we can determine the file format later
+       n, err := dst.Write([]byte(planFormatMagic))
+       if err != nil {
+               return err
+       }
+       if n != len(planFormatMagic) {
+               return errors.New("failed to write plan format magic bytes")
+       }
+
+       // Write a version byte so we can iterate on version at some point
+       n, err = dst.Write([]byte{planFormatVersion})
+       if err != nil {
+               return err
+       }
+       if n != 1 {
+               return errors.New("failed to write plan version byte")
+       }
+
+       return gob.NewEncoder(dst).Encode(d)
+}
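
On disk the format is simply the ASCII magic "tfplan", a single version byte,
and then a gob stream. A round-trip sketch of that framing against an
in-memory buffer, using a stand-in payload type instead of the real Plan:

package main

import (
	"bytes"
	"encoding/gob"
	"errors"
	"fmt"
	"io"
)

const magic = "tfplan"
const version byte = 1

// fakePlan stands in for the real Plan payload in this sketch.
type fakePlan struct{ Targets []string }

func write(w io.Writer, p *fakePlan) error {
	if _, err := w.Write([]byte(magic)); err != nil {
		return err
	}
	if _, err := w.Write([]byte{version}); err != nil {
		return err
	}
	return gob.NewEncoder(w).Encode(p)
}

func read(r io.Reader) (*fakePlan, error) {
	hdr := make([]byte, len(magic)+1)
	if _, err := io.ReadFull(r, hdr); err != nil {
		return nil, err
	}
	if string(hdr[:len(magic)]) != magic {
		return nil, errors.New("not a valid plan file")
	}
	if hdr[len(magic)] != version {
		return nil, fmt.Errorf("unknown plan file version: %d", hdr[len(magic)])
	}
	var p *fakePlan
	if err := gob.NewDecoder(r).Decode(&p); err != nil {
		return nil, err
	}
	return p, nil
}

func main() {
	var buf bytes.Buffer
	if err := write(&buf, &fakePlan{Targets: []string{"aws_instance.web"}}); err != nil {
		panic(err)
	}
	p, err := read(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Targets) // [aws_instance.web]
}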
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go
new file mode 100644 (file)
index 0000000..0acf0be
--- /dev/null
@@ -0,0 +1,360 @@
+package terraform
+
+import (
+       "fmt"
+       "reflect"
+       "sort"
+       "strconv"
+       "strings"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/mitchellh/copystructure"
+       "github.com/mitchellh/reflectwalk"
+)
+
+// ResourceProvisionerConfig is used to pair a provisioner
+// with its provided configuration. This allows us to use singleton
+// instances of each ResourceProvisioner and to keep the relevant
+// configuration instead of instantiating a new Provisioner for each
+// resource.
+type ResourceProvisionerConfig struct {
+       Type        string
+       Provisioner ResourceProvisioner
+       Config      *ResourceConfig
+       RawConfig   *config.RawConfig
+       ConnInfo    *config.RawConfig
+}
+
+// Resource encapsulates a resource, its configuration, its provider,
+// its current state, and potentially a desired diff from the state it
+// wants to reach.
+type Resource struct {
+       // These are all used by the new EvalNode stuff.
+       Name       string
+       Type       string
+       CountIndex int
+
+       // These aren't really used anymore anywhere, but we keep them around
+       // since we haven't done a proper cleanup yet.
+       Id           string
+       Info         *InstanceInfo
+       Config       *ResourceConfig
+       Dependencies []string
+       Diff         *InstanceDiff
+       Provider     ResourceProvider
+       State        *InstanceState
+       Provisioners []*ResourceProvisionerConfig
+       Flags        ResourceFlag
+}
+
+// ResourceFlag specifies what kind of instance we're working with, whether
+// it's a primary instance, a tainted instance, or an orphan.
+type ResourceFlag byte
+
+// InstanceInfo is used to hold information about the instance and/or
+// resource being modified.
+type InstanceInfo struct {
+       // Id is a unique name to represent this instance. This is not related
+       // to InstanceState.ID in any way.
+       Id string
+
+       // ModulePath is the complete path of the module containing this
+       // instance.
+       ModulePath []string
+
+       // Type is the resource type of this instance
+       Type string
+
+       // uniqueExtra is an internal field that can be populated to supply
+       // extra metadata that is used to identify a unique instance in
+       // the graph walk. This will be appended to HumanId when uniqueId
+       // is called.
+       uniqueExtra string
+}
+
+// HumanId is a unique Id that is human-friendly and useful for UI elements.
+func (i *InstanceInfo) HumanId() string {
+       if i == nil {
+               return "<nil>"
+       }
+
+       if len(i.ModulePath) <= 1 {
+               return i.Id
+       }
+
+       return fmt.Sprintf(
+               "module.%s.%s",
+               strings.Join(i.ModulePath[1:], "."),
+               i.Id)
+}
+
+func (i *InstanceInfo) uniqueId() string {
+       prefix := i.HumanId()
+       if v := i.uniqueExtra; v != "" {
+               prefix += " " + v
+       }
+
+       return prefix
+}
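+
+// For illustration, with hypothetical values: an InstanceInfo with Id
+// "aws_instance.web" and ModulePath ["root", "child"] has the HumanId
+// "module.child.aws_instance.web"; with uniqueExtra set to "destroy", its
+// uniqueId is "module.child.aws_instance.web destroy".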
+
+// ResourceConfig holds the configuration given for a resource. This is
+// done instead of a raw `map[string]interface{}` type so that rich
+// methods can be added to it to make dealing with it easier.
+type ResourceConfig struct {
+       ComputedKeys []string
+       Raw          map[string]interface{}
+       Config       map[string]interface{}
+
+       raw *config.RawConfig
+}
+
+// NewResourceConfig creates a new ResourceConfig from a config.RawConfig.
+func NewResourceConfig(c *config.RawConfig) *ResourceConfig {
+       result := &ResourceConfig{raw: c}
+       result.interpolateForce()
+       return result
+}
+
+// DeepCopy performs a deep copy of the configuration. This makes it safe
+// to modify any of the structures that are part of the resource config without
+// affecting the original configuration.
+func (c *ResourceConfig) DeepCopy() *ResourceConfig {
+       // DeepCopying a nil should return a nil to avoid panics
+       if c == nil {
+               return nil
+       }
+
+       // Copy, this will copy all the exported attributes
+       copy, err := copystructure.Config{Lock: true}.Copy(c)
+       if err != nil {
+               panic(err)
+       }
+
+       // Force the type
+       result := copy.(*ResourceConfig)
+
+       // For the raw configuration, we can just use its own copy method
+       result.raw = c.raw.Copy()
+
+       return result
+}
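+
+// For illustration (hypothetical usage): mutating a deep copy leaves the
+// original untouched:
+//
+//     c2 := c.DeepCopy()
+//     c2.Config["ami"] = "ami-other" // c.Config is unchanged
+//     c2.ComputedKeys = append(c2.ComputedKeys, "x") // likewise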
+
+// Equal checks the equality of two resource configs.
+func (c *ResourceConfig) Equal(c2 *ResourceConfig) bool {
+       // If either are nil, then they're only equal if they're both nil
+       if c == nil || c2 == nil {
+               return c == c2
+       }
+
+       // Sort the computed keys so they're deterministic
+       sort.Strings(c.ComputedKeys)
+       sort.Strings(c2.ComputedKeys)
+
+       // Two resource configs are equal if their exported properties are equal.
+       // We don't compare "raw" because it is never used again after
+       // initialization and for all intents and purposes they are equal
+       // if the exported properties are equal.
+       check := [][2]interface{}{
+               {c.ComputedKeys, c2.ComputedKeys},
+               {c.Raw, c2.Raw},
+               {c.Config, c2.Config},
+       }
+       for _, pair := range check {
+               if !reflect.DeepEqual(pair[0], pair[1]) {
+                       return false
+               }
+       }
+
+       return true
+}
+
+// CheckSet checks that the given list of configuration keys is
+// properly set. If not, errors are returned for each unset key.
+//
+// This is useful to be called in the Validate method of a ResourceProvider.
+func (c *ResourceConfig) CheckSet(keys []string) []error {
+       var errs []error
+
+       for _, k := range keys {
+               if !c.IsSet(k) {
+                       errs = append(errs, fmt.Errorf("%s must be set", k))
+               }
+       }
+
+       return errs
+}
+
+// Get looks up a configuration value by key and returns the value.
+//
+// The second return value is true if the get was successful. Get will
+// return the raw value if the key is computed, so you should pair this
+// with IsComputed.
+func (c *ResourceConfig) Get(k string) (interface{}, bool) {
+       // Read from the interpolated configuration; if the key is computed,
+       // return the pure raw (uninterpolated) value instead.
+       source := c.Config
+       if c.IsComputed(k) {
+               source = c.Raw
+       }
+
+       return c.get(k, source)
+}
+
+// GetRaw looks up a configuration value by key and returns the value,
+// from the raw, uninterpolated config.
+//
+// The second return value is true if the get was successful. Get will
+// not succeed if the value is being computed.
+func (c *ResourceConfig) GetRaw(k string) (interface{}, bool) {
+       return c.get(k, c.Raw)
+}
+
+// IsComputed returns whether the given key is computed or not.
+func (c *ResourceConfig) IsComputed(k string) bool {
+       // Check whether the config yields a computed value for this key.
+       v, ok := c.get(k, c.Config)
+       if !ok {
+               return false
+       }
+
+       // If value is nil, then it isn't computed
+       if v == nil {
+               return false
+       }
+
+       // Test if the value contains an unknown value
+       var w unknownCheckWalker
+       if err := reflectwalk.Walk(v, &w); err != nil {
+               panic(err)
+       }
+
+       return w.Unknown
+}
+
+// IsSet checks if the key in the configuration is set. A key is set if
+// it has a value or the value is being computed (is unknown currently).
+//
+// This function should be used rather than checking the keys of the
+// raw configuration itself, since a key may be omitted from the raw
+// configuration if it is being computed.
+func (c *ResourceConfig) IsSet(k string) bool {
+       if c == nil {
+               return false
+       }
+
+       if c.IsComputed(k) {
+               return true
+       }
+
+       if _, ok := c.Get(k); ok {
+               return true
+       }
+
+       return false
+}
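+
+// For illustration, with a hypothetical config where "ami" has the concrete
+// value "ami-123" and "id" is still being computed:
+//
+//     c.IsSet("ami")      // true: it has a value
+//     c.IsSet("id")       // true: set, though still unknown
+//     c.IsComputed("id")  // true, so Get("id") returns the raw value
+//     c.IsSet("missing")  // false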
+
+func (c *ResourceConfig) get(
+       k string, raw map[string]interface{}) (interface{}, bool) {
+       parts := strings.Split(k, ".")
+       if len(parts) == 1 && parts[0] == "" {
+               parts = nil
+       }
+
+       var current interface{} = raw
+       var previous interface{} = nil
+       for i, part := range parts {
+               if current == nil {
+                       return nil, false
+               }
+
+               cv := reflect.ValueOf(current)
+               switch cv.Kind() {
+               case reflect.Map:
+                       previous = current
+                       v := cv.MapIndex(reflect.ValueOf(part))
+                       if !v.IsValid() {
+                               if i > 0 && i != (len(parts)-1) {
+                                       tryKey := strings.Join(parts[i:], ".")
+                                       v := cv.MapIndex(reflect.ValueOf(tryKey))
+                                       if !v.IsValid() {
+                                               return nil, false
+                                       }
+
+                                       return v.Interface(), true
+                               }
+
+                               return nil, false
+                       }
+
+                       current = v.Interface()
+               case reflect.Slice:
+                       previous = current
+
+                       if part == "#" {
+                               // If any value in a list is computed, this whole thing
+                               // is computed and we can't read any part of it.
+                               for i := 0; i < cv.Len(); i++ {
+                                       if v := cv.Index(i).Interface(); v == unknownValue() {
+                                               return v, true
+                                       }
+                               }
+
+                               current = cv.Len()
+                       } else {
+                               i, err := strconv.ParseInt(part, 0, 0)
+                               if err != nil {
+                                       return nil, false
+                               }
+                               if i >= int64(cv.Len()) {
+                                       return nil, false
+                               }
+                               current = cv.Index(int(i)).Interface()
+                       }
+               case reflect.String:
+                       // This happens when map keys contain "." and have a common
+                       // prefix, so they were split into path components above.
+                       actualKey := strings.Join(parts[i-1:], ".")
+                       if prevMap, ok := previous.(map[string]interface{}); ok {
+                               v, ok := prevMap[actualKey]
+                               return v, ok
+                       }
+
+                       return nil, false
+               default:
+                       panic(fmt.Sprintf("Unknown kind: %s", cv.Kind()))
+               }
+       }
+
+       return current, true
+}
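+
+// For illustration, with hypothetical raw data
+// {"tags": {"Name": "web"}, "ports": [80, 443]}:
+//
+//     c.Get("tags.Name") // "web", true
+//     c.Get("ports.#")   // 2, true (list length)
+//     c.Get("ports.0")   // 80, true
+//     c.Get("ports.9")   // nil, false (index out of range)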
+
+// interpolateForce is a temporary thing. We want to get rid of interpolate
+// above and likewise this, but it can only be done after the f-ast-graph
+// refactor is complete.
+func (c *ResourceConfig) interpolateForce() {
+       if c.raw == nil {
+               var err error
+               c.raw, err = config.NewRawConfig(make(map[string]interface{}))
+               if err != nil {
+                       panic(err)
+               }
+       }
+
+       c.ComputedKeys = c.raw.UnknownKeys()
+       c.Raw = c.raw.RawMap()
+       c.Config = c.raw.Config()
+}
+
+// unknownCheckWalker is a reflectwalk walker that records whether any
+// primitive value it visits equals the unknown (computed) sentinel.
+type unknownCheckWalker struct {
+       Unknown bool
+}
+
+func (w *unknownCheckWalker) Primitive(v reflect.Value) error {
+       if v.Interface() == unknownValue() {
+               w.Unknown = true
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_address.go b/vendor/github.com/hashicorp/terraform/terraform/resource_address.go
new file mode 100644 (file)
index 0000000..a8a0c95
--- /dev/null
@@ -0,0 +1,301 @@
+package terraform
+
+import (
+       "fmt"
+       "reflect"
+       "regexp"
+       "strconv"
+       "strings"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// ResourceAddress is a way of identifying an individual resource (or,
+// eventually, a subset of resources) within the state. It is used for Targets.
+type ResourceAddress struct {
+       // Addresses a resource falling somewhere in the module path
+       // When specified alone, addresses all resources within a module path
+       Path []string
+
+       // Addresses a specific resource that occurs in a list
+       Index int
+
+       InstanceType    InstanceType
+       InstanceTypeSet bool
+       Name            string
+       Type            string
+       Mode            config.ResourceMode // significant only if InstanceTypeSet
+}
+
+// Copy returns a copy of this ResourceAddress
+func (r *ResourceAddress) Copy() *ResourceAddress {
+       if r == nil {
+               return nil
+       }
+
+       n := &ResourceAddress{
+               Path:         make([]string, 0, len(r.Path)),
+               Index:        r.Index,
+               InstanceType: r.InstanceType,
+               Name:         r.Name,
+               Type:         r.Type,
+               Mode:         r.Mode,
+       }
+       n.Path = append(n.Path, r.Path...)
+       return n
+}
+
+// String outputs the address that parses into this address.
+func (r *ResourceAddress) String() string {
+       var result []string
+       for _, p := range r.Path {
+               result = append(result, "module", p)
+       }
+
+       switch r.Mode {
+       case config.ManagedResourceMode:
+               // nothing to do
+       case config.DataResourceMode:
+               result = append(result, "data")
+       default:
+               panic(fmt.Errorf("unsupported resource mode %s", r.Mode))
+       }
+
+       if r.Type != "" {
+               result = append(result, r.Type)
+       }
+
+       if r.Name != "" {
+               name := r.Name
+               if r.InstanceTypeSet {
+                       switch r.InstanceType {
+                       case TypePrimary:
+                               name += ".primary"
+                       case TypeDeposed:
+                               name += ".deposed"
+                       case TypeTainted:
+                               name += ".tainted"
+                       }
+               }
+
+               if r.Index >= 0 {
+                       name += fmt.Sprintf("[%d]", r.Index)
+               }
+               result = append(result, name)
+       }
+
+       return strings.Join(result, ".")
+}
+
+// stateId returns the ID that this resource should be entered with
+// in the state. This is also used for diffs. In the future, we'd like to
+// move away from this string field so I don't export this.
+func (r *ResourceAddress) stateId() string {
+       result := fmt.Sprintf("%s.%s", r.Type, r.Name)
+       switch r.Mode {
+       case config.ManagedResourceMode:
+               // Done
+       case config.DataResourceMode:
+               result = fmt.Sprintf("data.%s", result)
+       default:
+               panic(fmt.Errorf("unknown resource mode: %s", r.Mode))
+       }
+       if r.Index >= 0 {
+               result += fmt.Sprintf(".%d", r.Index)
+       }
+
+       return result
+}
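+
+// For illustration, with hypothetical addresses: {Type: "aws_instance",
+// Name: "web", Index: -1} has the state id "aws_instance.web"; with Index 2
+// it becomes "aws_instance.web.2"; and a data resource {Type: "aws_ami",
+// Name: "base", Index: -1} becomes "data.aws_ami.base".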
+
+// parseResourceAddressConfig creates a resource address from a config.Resource
+func parseResourceAddressConfig(r *config.Resource) (*ResourceAddress, error) {
+       return &ResourceAddress{
+               Type:         r.Type,
+               Name:         r.Name,
+               Index:        -1,
+               InstanceType: TypePrimary,
+               Mode:         r.Mode,
+       }, nil
+}
+
+// parseResourceAddressInternal parses the somewhat bespoke resource
+// identifier used in states and diffs, such as "instance.name.0".
+func parseResourceAddressInternal(s string) (*ResourceAddress, error) {
+       // Split based on ".". Every resource address should have at least two
+       // elements (type and name).
+       parts := strings.Split(s, ".")
+       if len(parts) < 2 || len(parts) > 4 {
+               return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
+       }
+
+       // Data resource if we have at least 3 parts and the first one is data
+       mode := config.ManagedResourceMode
+       if len(parts) > 2 && parts[0] == "data" {
+               mode = config.DataResourceMode
+               parts = parts[1:]
+       }
+
+       // If we're not a data resource and we have more than 3 parts, it's an error
+       if len(parts) > 3 && mode != config.DataResourceMode {
+               return nil, fmt.Errorf("Invalid internal resource address format: %s", s)
+       }
+
+       // Build the parts of the resource address that are guaranteed to exist
+       addr := &ResourceAddress{
+               Type:         parts[0],
+               Name:         parts[1],
+               Index:        -1,
+               InstanceType: TypePrimary,
+               Mode:         mode,
+       }
+
+       // If we have more parts, then we have an index. Parse that.
+       if len(parts) > 2 {
+               idx, err := strconv.ParseInt(parts[2], 0, 0)
+               if err != nil {
+                       return nil, fmt.Errorf("Error parsing resource address %q: %s", s, err)
+               }
+
+               addr.Index = int(idx)
+       }
+
+       return addr, nil
+}
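As an in-package sketch (hypothetical test-style code; parseResourceAddressInternal is unexported, so this would only compile inside package terraform), the accepted internal formats look like this:

// Hypothetical illustration of the internal formats accepted above:
//   "aws_instance.web"      -> managed mode, Index -1
//   "aws_instance.web.0"    -> managed mode, Index 0
//   "data.aws_ami.ubuntu.2" -> data mode, Index 2
addr, err := parseResourceAddressInternal("data.aws_ami.ubuntu.2")
if err != nil {
        panic(err)
}
fmt.Println(addr.Mode == config.DataResourceMode) // true
fmt.Println(addr.Type, addr.Name, addr.Index)     // aws_ami ubuntu 2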
+
+func ParseResourceAddress(s string) (*ResourceAddress, error) {
+       matches, err := tokenizeResourceAddress(s)
+       if err != nil {
+               return nil, err
+       }
+       mode := config.ManagedResourceMode
+       if matches["data_prefix"] != "" {
+               mode = config.DataResourceMode
+       }
+       resourceIndex, err := ParseResourceIndex(matches["index"])
+       if err != nil {
+               return nil, err
+       }
+       instanceType, err := ParseInstanceType(matches["instance_type"])
+       if err != nil {
+               return nil, err
+       }
+       path := ParseResourcePath(matches["path"])
+
+       // not allowed to say "data." without a type following
+       if mode == config.DataResourceMode && matches["type"] == "" {
+               return nil, fmt.Errorf("must target specific data instance")
+       }
+
+       return &ResourceAddress{
+               Path:            path,
+               Index:           resourceIndex,
+               InstanceType:    instanceType,
+               InstanceTypeSet: matches["instance_type"] != "",
+               Name:            matches["name"],
+               Type:            matches["type"],
+               Mode:            mode,
+       }, nil
+}
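A usage sketch for ParseResourceAddress (hypothetical caller code; the field values follow from the tokenizer at the bottom of this file):

addr, err := ParseResourceAddress("module.foo.aws_instance.web[1]")
if err != nil {
        panic(err)
}
// addr.Path == []string{"foo"}
// addr.Type == "aws_instance", addr.Name == "web"
// addr.Index == 1; InstanceType defaults to TypePrimary, InstanceTypeSet == false
fmt.Println(addr.String()) // module.foo.aws_instance.web[1]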
+
+func (addr *ResourceAddress) Equals(raw interface{}) bool {
+       other, ok := raw.(*ResourceAddress)
+       if !ok {
+               return false
+       }
+
+       pathMatch := len(addr.Path) == 0 && len(other.Path) == 0 ||
+               reflect.DeepEqual(addr.Path, other.Path)
+
+       indexMatch := addr.Index == -1 ||
+               other.Index == -1 ||
+               addr.Index == other.Index
+
+       nameMatch := addr.Name == "" ||
+               other.Name == "" ||
+               addr.Name == other.Name
+
+       typeMatch := addr.Type == "" ||
+               other.Type == "" ||
+               addr.Type == other.Type
+
+       // mode is significant only when type is set
+       modeMatch := addr.Type == "" ||
+               other.Type == "" ||
+               addr.Mode == other.Mode
+
+       return pathMatch &&
+               indexMatch &&
+               addr.InstanceType == other.InstanceType &&
+               nameMatch &&
+               typeMatch &&
+               modeMatch
+}
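Equals is deliberately fuzzy: an unset Name or Type, or an Index of -1, acts as a wildcard on either side. A brief sketch (hypothetical values):

a, _ := ParseResourceAddress("aws_instance.web")    // Index -1: any index
b, _ := ParseResourceAddress("aws_instance.web[3]") // Index 3

fmt.Println(a.Equals(b)) // true, because a.Index == -1
fmt.Println(b.Equals(a)) // true, the wildcard rule is symmetric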
+
+func ParseResourceIndex(s string) (int, error) {
+       if s == "" {
+               return -1, nil
+       }
+       return strconv.Atoi(s)
+}
+
+func ParseResourcePath(s string) []string {
+       if s == "" {
+               return nil
+       }
+       parts := strings.Split(s, ".")
+       path := make([]string, 0, len(parts))
+       for _, s := range parts {
+               // Due to the limitations of the regexp match below, the path match
+               // has some noise in it that we have to filter out.
+               if s == "" || s == "module" {
+                       continue
+               }
+               path = append(path, s)
+       }
+       return path
+}
+
+func ParseInstanceType(s string) (InstanceType, error) {
+       switch s {
+       case "", "primary":
+               return TypePrimary, nil
+       case "deposed":
+               return TypeDeposed, nil
+       case "tainted":
+               return TypeTainted, nil
+       default:
+               return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s)
+       }
+}
+
+func tokenizeResourceAddress(s string) (map[string]string, error) {
+       // Example of portions of the regexp below using the
+       // string "aws_instance.web.tainted[1]"
+       re := regexp.MustCompile(`\A` +
+               // "module.foo.module.bar" (optional)
+               `(?P<path>(?:module\.[^.]+\.?)*)` +
+               // possibly "data.", if targeting is a data resource
+               `(?P<data_prefix>(?:data\.)?)` +
+               // "aws_instance.web" (optional when module path specified)
+               `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
+               // "tainted" (optional, omission implies: "primary")
+               `(?:\.(?P<instance_type>\w+))?` +
+               // "1" (optional, omission implies: "0")
+               `(?:\[(?P<index>\d+)\])?` +
+               `\z`)
+
+       groupNames := re.SubexpNames()
+       rawMatches := re.FindAllStringSubmatch(s, -1)
+       if len(rawMatches) != 1 {
+               return nil, fmt.Errorf("Problem parsing address: %q", s)
+       }
+
+       matches := make(map[string]string)
+       for i, m := range rawMatches[0] {
+               matches[groupNames[i]] = m
+       }
+
+       return matches, nil
+}
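To make the capture groups concrete, here is a self-contained replica of the pattern above run on the example string from the comment (for illustration only; not part of the vendored code):

package main

import (
        "fmt"
        "regexp"
)

func main() {
        // Same pattern as tokenizeResourceAddress above.
        re := regexp.MustCompile(`\A` +
                `(?P<path>(?:module\.[^.]+\.?)*)` +
                `(?P<data_prefix>(?:data\.)?)` +
                `(?:(?P<type>[^.]+)\.(?P<name>[^.[]+))?` +
                `(?:\.(?P<instance_type>\w+))?` +
                `(?:\[(?P<index>\d+)\])?` +
                `\z`)

        m := re.FindStringSubmatch("aws_instance.web.tainted[1]")
        for i, name := range re.SubexpNames() {
                if name != "" {
                        fmt.Printf("%-13s = %q\n", name, m[i])
                }
        }
        // path          = ""
        // data_prefix   = ""
        // type          = "aws_instance"
        // name          = "web"
        // instance_type = "tainted"
        // index         = "1"
}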
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider.go
new file mode 100644 (file)
index 0000000..1a68c86
--- /dev/null
@@ -0,0 +1,204 @@
+package terraform
+
+// ResourceProvider is an interface that must be implemented by any
+// resource provider: the thing that creates and manages the resources in
+// a Terraform configuration.
+//
+// Important implementation note: All returned pointers, such as
+// *ResourceConfig, *InstanceState, *InstanceDiff, etc. must not point to
+// shared data. Terraform is highly parallel and assumes that this data is
+// safe to read/write in parallel, so returned values must be unique
+// references. Note that it is safe to return arguments as results, however.
+type ResourceProvider interface {
+       /*********************************************************************
+       * Functions related to the provider
+       *********************************************************************/
+
+       // Input is called to ask the provider to prompt the user for input
+       // needed to complete the configuration, if necessary.
+       //
+       // This may or may not be called, so resource provider writers shouldn't
+       // rely on this being available to set some default values for validate
+       // later. Example of a situation where this wouldn't be called is if
+       // the user is not using a TTY.
+       Input(UIInput, *ResourceConfig) (*ResourceConfig, error)
+
+       // Validate is called once at the beginning with the raw configuration
+       // (no interpolation done) and can return a list of warnings and/or
+       // errors.
+       //
+       // This is called once with the provider configuration only. It may not
+       // be called at all if no provider configuration is given.
+       //
+       // This should not assume that any values of the configurations are valid.
+       // The primary use case of this call is to check that required keys are
+       // set.
+       Validate(*ResourceConfig) ([]string, []error)
+
+       // Configure configures the provider itself with the configuration
+       // given. This is useful for setting things like access keys.
+       //
+       // This won't be called at all if no provider configuration is given.
+       //
+       // Configure returns an error if one occurred.
+       Configure(*ResourceConfig) error
+
+       // Resources returns all the available resource types that this provider
+       // knows how to manage.
+       Resources() []ResourceType
+
+       // Stop is called when the provider should halt any in-flight actions.
+       //
+       // This can be used to make a nicer Ctrl-C experience for Terraform.
+       // Even if this isn't implemented to do anything (just returns nil),
+       // Terraform will still cleanly stop after the currently executing
+       // graph node is complete. However, this API can be used to make more
+       // efficient halts.
+       //
+       // Stop doesn't have to and shouldn't block waiting for in-flight actions
+       // to complete. It should take any action it wants and return immediately
+       // acknowledging it has received the stop request. Terraform core will
+       // stop making further API calls to the provider shortly after Stop is
+       // called (technically, once the currently executing graph nodes are
+       // complete).
+       //
+       // The error returned, if non-nil, is assumed to mean that signaling the
+       // stop somehow failed and that the user should expect potentially waiting
+       // a longer period of time.
+       Stop() error
+
+       /*********************************************************************
+       * Functions related to individual resources
+       *********************************************************************/
+
+       // ValidateResource is called once at the beginning with the raw
+       // configuration (no interpolation done) and can return a list of warnings
+       // and/or errors.
+       //
+       // This is called once per resource.
+       //
+       // This should not assume any of the values in the resource configuration
+       // are valid since it is possible they have to be interpolated still.
+       // The primary use case of this call is to check that the required keys
+       // are set and that the general structure is correct.
+       ValidateResource(string, *ResourceConfig) ([]string, []error)
+
+       // Apply applies a diff to a specific resource and returns the new
+       // resource state along with an error.
+       //
+       // If the resource state given has an empty ID, then a new resource
+       // is expected to be created.
+       Apply(
+               *InstanceInfo,
+               *InstanceState,
+               *InstanceDiff) (*InstanceState, error)
+
+       // Diff diffs a resource versus a desired state and returns
+       // a diff.
+       Diff(
+               *InstanceInfo,
+               *InstanceState,
+               *ResourceConfig) (*InstanceDiff, error)
+
+       // Refresh refreshes a resource and updates all of its attributes
+       // with the latest information.
+       Refresh(*InstanceInfo, *InstanceState) (*InstanceState, error)
+
+       /*********************************************************************
+       * Functions related to importing
+       *********************************************************************/
+
+       // ImportState requests that the given resource be imported.
+       //
+       // The returned InstanceState only requires the ID to be set. Importing
+       // will always call Refresh on the returned state to complete it.
+       //
+       // IMPORTANT: InstanceState doesn't have the resource type attached
+       // to it. A type must be specified on the state via the Ephemeral
+       // field on the state.
+       //
+       // This function can return multiple states. Normally, an import
+       // will map 1:1 to a physical resource. However, some resources map
+       // to multiple. For example, an AWS security group may contain many rules.
+       // Each rule is represented by a separate resource in Terraform,
+       // therefore multiple states are returned.
+       ImportState(*InstanceInfo, string) ([]*InstanceState, error)
+
+       /*********************************************************************
+       * Functions related to data resources
+       *********************************************************************/
+
+       // ValidateDataSource is called once at the beginning with the raw
+       // configuration (no interpolation done) and can return a list of warnings
+       // and/or errors.
+       //
+       // This is called once per data source instance.
+       //
+       // This should not assume any of the values in the resource configuration
+       // are valid since it is possible they have to be interpolated still.
+       // The primary use case of this call is to check that the required keys
+       // are set and that the general structure is correct.
+       ValidateDataSource(string, *ResourceConfig) ([]string, []error)
+
+       // DataSources returns all of the available data sources that this
+       // provider implements.
+       DataSources() []DataSource
+
+       // ReadDataDiff produces a diff that represents the state that will
+       // be produced when the given data source is read using a later call
+       // to ReadDataApply.
+       ReadDataDiff(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
+
+       // ReadDataApply initializes a data instance using the configuration
+       // in a diff produced by ReadDataDiff.
+       ReadDataApply(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
+}
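Implementing every method of this interface is verbose. A common pattern in tests (a hedged sketch, assuming the MockResourceProvider defined later in this commit) is to embed the mock and override only what you need:

// Hypothetical sketch; *MockResourceProvider (resource_provider_mock.go,
// below) already satisfies ResourceProvider.
type countingProvider struct {
        *MockResourceProvider
        applies int
}

// Apply overrides the embedded mock to count calls before delegating.
func (p *countingProvider) Apply(
        info *InstanceInfo,
        state *InstanceState,
        diff *InstanceDiff) (*InstanceState, error) {
        p.applies++
        return p.MockResourceProvider.Apply(info, state, diff)
}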
+
+// ResourceProviderCloser is an interface that providers that can close
+// connections that aren't needed anymore must implement.
+type ResourceProviderCloser interface {
+       Close() error
+}
+
+// ResourceType is a type of resource that a resource provider can manage.
+type ResourceType struct {
+       Name       string // Name of the resource, example "instance" (no provider prefix)
+       Importable bool   // Whether this resource supports importing
+}
+
+// DataSource is a data source that a resource provider implements.
+type DataSource struct {
+       Name string
+}
+
+// ResourceProviderFactory is a function type that creates a new instance
+// of a resource provider.
+type ResourceProviderFactory func() (ResourceProvider, error)
+
+// ResourceProviderFactoryFixed is a helper that creates a
+// ResourceProviderFactory that just returns some fixed provider.
+func ResourceProviderFactoryFixed(p ResourceProvider) ResourceProviderFactory {
+       return func() (ResourceProvider, error) {
+               return p, nil
+       }
+}
+
+func ProviderHasResource(p ResourceProvider, n string) bool {
+       for _, rt := range p.Resources() {
+               if rt.Name == n {
+                       return true
+               }
+       }
+
+       return false
+}
+
+func ProviderHasDataSource(p ResourceProvider, n string) bool {
+       for _, rt := range p.DataSources() {
+               if rt.Name == n {
+                       return true
+               }
+       }
+
+       return false
+}
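A usage sketch tying these helpers together (hypothetical test-style code; MockResourceProvider is defined in resource_provider_mock.go below, and "statuscake_test" is just an example name):

mock := new(MockResourceProvider)
mock.ResourcesReturn = []ResourceType{{Name: "statuscake_test", Importable: false}}

factory := ResourceProviderFactoryFixed(mock)
p, _ := factory() // always returns (mock, nil)

fmt.Println(ProviderHasResource(p, "statuscake_test"))   // true
fmt.Println(ProviderHasDataSource(p, "statuscake_test")) // false: no data sources registered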
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go
new file mode 100644 (file)
index 0000000..f531533
--- /dev/null
@@ -0,0 +1,297 @@
+package terraform
+
+import "sync"
+
+// MockResourceProvider implements ResourceProvider but mocks out all the
+// calls for testing purposes.
+type MockResourceProvider struct {
+       sync.Mutex
+
+       // Anything you want, in case you need to store extra data with the mock.
+       Meta interface{}
+
+       CloseCalled                    bool
+       CloseError                     error
+       InputCalled                    bool
+       InputInput                     UIInput
+       InputConfig                    *ResourceConfig
+       InputReturnConfig              *ResourceConfig
+       InputReturnError               error
+       InputFn                        func(UIInput, *ResourceConfig) (*ResourceConfig, error)
+       ApplyCalled                    bool
+       ApplyInfo                      *InstanceInfo
+       ApplyState                     *InstanceState
+       ApplyDiff                      *InstanceDiff
+       ApplyFn                        func(*InstanceInfo, *InstanceState, *InstanceDiff) (*InstanceState, error)
+       ApplyReturn                    *InstanceState
+       ApplyReturnError               error
+       ConfigureCalled                bool
+       ConfigureConfig                *ResourceConfig
+       ConfigureFn                    func(*ResourceConfig) error
+       ConfigureReturnError           error
+       DiffCalled                     bool
+       DiffInfo                       *InstanceInfo
+       DiffState                      *InstanceState
+       DiffDesired                    *ResourceConfig
+       DiffFn                         func(*InstanceInfo, *InstanceState, *ResourceConfig) (*InstanceDiff, error)
+       DiffReturn                     *InstanceDiff
+       DiffReturnError                error
+       RefreshCalled                  bool
+       RefreshInfo                    *InstanceInfo
+       RefreshState                   *InstanceState
+       RefreshFn                      func(*InstanceInfo, *InstanceState) (*InstanceState, error)
+       RefreshReturn                  *InstanceState
+       RefreshReturnError             error
+       ResourcesCalled                bool
+       ResourcesReturn                []ResourceType
+       ReadDataApplyCalled            bool
+       ReadDataApplyInfo              *InstanceInfo
+       ReadDataApplyDiff              *InstanceDiff
+       ReadDataApplyFn                func(*InstanceInfo, *InstanceDiff) (*InstanceState, error)
+       ReadDataApplyReturn            *InstanceState
+       ReadDataApplyReturnError       error
+       ReadDataDiffCalled             bool
+       ReadDataDiffInfo               *InstanceInfo
+       ReadDataDiffDesired            *ResourceConfig
+       ReadDataDiffFn                 func(*InstanceInfo, *ResourceConfig) (*InstanceDiff, error)
+       ReadDataDiffReturn             *InstanceDiff
+       ReadDataDiffReturnError        error
+       StopCalled                     bool
+       StopFn                         func() error
+       StopReturnError                error
+       DataSourcesCalled              bool
+       DataSourcesReturn              []DataSource
+       ValidateCalled                 bool
+       ValidateConfig                 *ResourceConfig
+       ValidateFn                     func(*ResourceConfig) ([]string, []error)
+       ValidateReturnWarns            []string
+       ValidateReturnErrors           []error
+       ValidateResourceFn             func(string, *ResourceConfig) ([]string, []error)
+       ValidateResourceCalled         bool
+       ValidateResourceType           string
+       ValidateResourceConfig         *ResourceConfig
+       ValidateResourceReturnWarns    []string
+       ValidateResourceReturnErrors   []error
+       ValidateDataSourceFn           func(string, *ResourceConfig) ([]string, []error)
+       ValidateDataSourceCalled       bool
+       ValidateDataSourceType         string
+       ValidateDataSourceConfig       *ResourceConfig
+       ValidateDataSourceReturnWarns  []string
+       ValidateDataSourceReturnErrors []error
+
+       ImportStateCalled      bool
+       ImportStateInfo        *InstanceInfo
+       ImportStateID          string
+       ImportStateReturn      []*InstanceState
+       ImportStateReturnError error
+       ImportStateFn          func(*InstanceInfo, string) ([]*InstanceState, error)
+}
+
+func (p *MockResourceProvider) Close() error {
+       p.CloseCalled = true
+       return p.CloseError
+}
+
+func (p *MockResourceProvider) Input(
+       input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+       p.InputCalled = true
+       p.InputInput = input
+       p.InputConfig = c
+       if p.InputFn != nil {
+               return p.InputFn(input, c)
+       }
+       return p.InputReturnConfig, p.InputReturnError
+}
+
+func (p *MockResourceProvider) Validate(c *ResourceConfig) ([]string, []error) {
+       p.Lock()
+       defer p.Unlock()
+
+       p.ValidateCalled = true
+       p.ValidateConfig = c
+       if p.ValidateFn != nil {
+               return p.ValidateFn(c)
+       }
+       return p.ValidateReturnWarns, p.ValidateReturnErrors
+}
+
+func (p *MockResourceProvider) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
+       p.Lock()
+       defer p.Unlock()
+
+       p.ValidateResourceCalled = true
+       p.ValidateResourceType = t
+       p.ValidateResourceConfig = c
+
+       if p.ValidateResourceFn != nil {
+               return p.ValidateResourceFn(t, c)
+       }
+
+       return p.ValidateResourceReturnWarns, p.ValidateResourceReturnErrors
+}
+
+func (p *MockResourceProvider) Configure(c *ResourceConfig) error {
+       p.Lock()
+       defer p.Unlock()
+
+       p.ConfigureCalled = true
+       p.ConfigureConfig = c
+
+       if p.ConfigureFn != nil {
+               return p.ConfigureFn(c)
+       }
+
+       return p.ConfigureReturnError
+}
+
+func (p *MockResourceProvider) Stop() error {
+       p.Lock()
+       defer p.Unlock()
+
+       p.StopCalled = true
+       if p.StopFn != nil {
+               return p.StopFn()
+       }
+
+       return p.StopReturnError
+}
+
+func (p *MockResourceProvider) Apply(
+       info *InstanceInfo,
+       state *InstanceState,
+       diff *InstanceDiff) (*InstanceState, error) {
+       // We only lock while writing data. Reading is fine
+       p.Lock()
+       p.ApplyCalled = true
+       p.ApplyInfo = info
+       p.ApplyState = state
+       p.ApplyDiff = diff
+       p.Unlock()
+
+       if p.ApplyFn != nil {
+               return p.ApplyFn(info, state, diff)
+       }
+
+       return p.ApplyReturn.DeepCopy(), p.ApplyReturnError
+}
+
+func (p *MockResourceProvider) Diff(
+       info *InstanceInfo,
+       state *InstanceState,
+       desired *ResourceConfig) (*InstanceDiff, error) {
+       p.Lock()
+       defer p.Unlock()
+
+       p.DiffCalled = true
+       p.DiffInfo = info
+       p.DiffState = state
+       p.DiffDesired = desired
+       if p.DiffFn != nil {
+               return p.DiffFn(info, state, desired)
+       }
+
+       return p.DiffReturn.DeepCopy(), p.DiffReturnError
+}
+
+func (p *MockResourceProvider) Refresh(
+       info *InstanceInfo,
+       s *InstanceState) (*InstanceState, error) {
+       p.Lock()
+       defer p.Unlock()
+
+       p.RefreshCalled = true
+       p.RefreshInfo = info
+       p.RefreshState = s
+
+       if p.RefreshFn != nil {
+               return p.RefreshFn(info, s)
+       }
+
+       return p.RefreshReturn.DeepCopy(), p.RefreshReturnError
+}
+
+func (p *MockResourceProvider) Resources() []ResourceType {
+       p.Lock()
+       defer p.Unlock()
+
+       p.ResourcesCalled = true
+       return p.ResourcesReturn
+}
+
+func (p *MockResourceProvider) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
+       p.Lock()
+       defer p.Unlock()
+
+       p.ImportStateCalled = true
+       p.ImportStateInfo = info
+       p.ImportStateID = id
+       if p.ImportStateFn != nil {
+               return p.ImportStateFn(info, id)
+       }
+
+       var result []*InstanceState
+       if p.ImportStateReturn != nil {
+               result = make([]*InstanceState, len(p.ImportStateReturn))
+               for i, v := range p.ImportStateReturn {
+                       result[i] = v.DeepCopy()
+               }
+       }
+
+       return result, p.ImportStateReturnError
+}
+
+func (p *MockResourceProvider) ValidateDataSource(t string, c *ResourceConfig) ([]string, []error) {
+       p.Lock()
+       defer p.Unlock()
+
+       p.ValidateDataSourceCalled = true
+       p.ValidateDataSourceType = t
+       p.ValidateDataSourceConfig = c
+
+       if p.ValidateDataSourceFn != nil {
+               return p.ValidateDataSourceFn(t, c)
+       }
+
+       return p.ValidateDataSourceReturnWarns, p.ValidateDataSourceReturnErrors
+}
+
+func (p *MockResourceProvider) ReadDataDiff(
+       info *InstanceInfo,
+       desired *ResourceConfig) (*InstanceDiff, error) {
+       p.Lock()
+       defer p.Unlock()
+
+       p.ReadDataDiffCalled = true
+       p.ReadDataDiffInfo = info
+       p.ReadDataDiffDesired = desired
+       if p.ReadDataDiffFn != nil {
+               return p.ReadDataDiffFn(info, desired)
+       }
+
+       return p.ReadDataDiffReturn.DeepCopy(), p.ReadDataDiffReturnError
+}
+
+func (p *MockResourceProvider) ReadDataApply(
+       info *InstanceInfo,
+       d *InstanceDiff) (*InstanceState, error) {
+       p.Lock()
+       defer p.Unlock()
+
+       p.ReadDataApplyCalled = true
+       p.ReadDataApplyInfo = info
+       p.ReadDataApplyDiff = d
+
+       if p.ReadDataApplyFn != nil {
+               return p.ReadDataApplyFn(info, d)
+       }
+
+       return p.ReadDataApplyReturn.DeepCopy(), p.ReadDataApplyReturnError
+}
+
+func (p *MockResourceProvider) DataSources() []DataSource {
+       p.Lock()
+       defer p.Unlock()
+
+       p.DataSourcesCalled = true
+       return p.DataSourcesReturn
+}
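A sketch of exercising the mock in a test (hypothetical test function, assuming the standard testing package):

func TestMockResourceProvider_configure(t *testing.T) {
        p := new(MockResourceProvider)

        if err := p.Configure(&ResourceConfig{}); err != nil {
                t.Fatalf("err: %s", err)
        }
        if !p.ConfigureCalled {
                t.Fatal("Configure should have been called")
        }
        if p.ConfigureConfig == nil {
                t.Fatal("config should have been recorded")
        }
}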
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner.go
new file mode 100644 (file)
index 0000000..361ec1e
--- /dev/null
@@ -0,0 +1,54 @@
+package terraform
+
+// ResourceProvisioner is an interface that must be implemented by any
+// resource provisioner: the thing that initializes resources in
+// a Terraform configuration.
+type ResourceProvisioner interface {
+       // Validate is called once at the beginning with the raw
+       // configuration (no interpolation done) and can return a list of warnings
+       // and/or errors.
+       //
+       // This is called once per resource.
+       //
+       // This should not assume any of the values in the resource configuration
+       // are valid since it is possible they have to be interpolated still.
+       // The primary use case of this call is to check that the required keys
+       // are set and that the general structure is correct.
+       Validate(*ResourceConfig) ([]string, []error)
+
+       // Apply runs the provisioner on a specific resource, returning an error
+       // if one occurs. Instead of a diff, the ResourceConfig
+       // is provided since provisioners only run after a resource has been
+       // newly created.
+       Apply(UIOutput, *InstanceState, *ResourceConfig) error
+
+       // Stop is called when the provisioner should halt any in-flight actions.
+       //
+       // This can be used to make a nicer Ctrl-C experience for Terraform.
+       // Even if this isn't implemented to do anything (just returns nil),
+       // Terraform will still cleanly stop after the currently executing
+       // graph node is complete. However, this API can be used to make more
+       // efficient halts.
+       //
+       // Stop doesn't have to and shouldn't block waiting for in-flight actions
+       // to complete. It should take any action it wants and return immediately
+       // acknowledging it has received the stop request. Terraform core will
+       // stop making further API calls to the provisioner shortly after Stop
+       // is called (technically, once the currently executing graph nodes are
+       // complete).
+       //
+       // The error returned, if non-nil, is assumed to mean that signaling the
+       // stop somehow failed and that the user should expect potentially waiting
+       // a longer period of time.
+       Stop() error
+}
+
+// ResourceProvisionerCloser is an interface that provisioners that can close
+// connections that aren't needed anymore must implement.
+type ResourceProvisionerCloser interface {
+       Close() error
+}
+
+// ResourceProvisionerFactory is a function type that creates a new instance
+// of a resource provisioner.
+type ResourceProvisionerFactory func() (ResourceProvisioner, error)
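As with providers, a fixed factory is a one-liner. A hypothetical analogue of ResourceProviderFactoryFixed (not part of this commit, shown for illustration):

// resourceProvisionerFactoryFixed returns a factory that always yields
// the given provisioner (hypothetical helper).
func resourceProvisionerFactoryFixed(p ResourceProvisioner) ResourceProvisionerFactory {
        return func() (ResourceProvisioner, error) {
                return p, nil
        }
}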
diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provisioner_mock.go
new file mode 100644 (file)
index 0000000..f471a51
--- /dev/null
@@ -0,0 +1,72 @@
+package terraform
+
+import "sync"
+
+// MockResourceProvisioner implements ResourceProvisioner but mocks out all the
+// calls for testing purposes.
+type MockResourceProvisioner struct {
+       sync.Mutex
+       // Anything you want, in case you need to store extra data with the mock.
+       Meta interface{}
+
+       ApplyCalled      bool
+       ApplyOutput      UIOutput
+       ApplyState       *InstanceState
+       ApplyConfig      *ResourceConfig
+       ApplyFn          func(*InstanceState, *ResourceConfig) error
+       ApplyReturnError error
+
+       ValidateCalled       bool
+       ValidateConfig       *ResourceConfig
+       ValidateFn           func(c *ResourceConfig) ([]string, []error)
+       ValidateReturnWarns  []string
+       ValidateReturnErrors []error
+
+       StopCalled      bool
+       StopFn          func() error
+       StopReturnError error
+}
+
+func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) {
+       p.Lock()
+       defer p.Unlock()
+
+       p.ValidateCalled = true
+       p.ValidateConfig = c
+       if p.ValidateFn != nil {
+               return p.ValidateFn(c)
+       }
+       return p.ValidateReturnWarns, p.ValidateReturnErrors
+}
+
+func (p *MockResourceProvisioner) Apply(
+       output UIOutput,
+       state *InstanceState,
+       c *ResourceConfig) error {
+       p.Lock()
+
+       p.ApplyCalled = true
+       p.ApplyOutput = output
+       p.ApplyState = state
+       p.ApplyConfig = c
+       if p.ApplyFn != nil {
+               fn := p.ApplyFn
+               p.Unlock()
+               return fn(state, c)
+       }
+
+       defer p.Unlock()
+       return p.ApplyReturnError
+}
+
+func (p *MockResourceProvisioner) Stop() error {
+       p.Lock()
+       defer p.Unlock()
+
+       p.StopCalled = true
+       if p.StopFn != nil {
+               return p.StopFn()
+       }
+
+       return p.StopReturnError
+}
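A usage sketch (hypothetical test-style code) showing how ApplyFn takes precedence over the canned return value:

p := new(MockResourceProvisioner)
p.ApplyFn = func(s *InstanceState, c *ResourceConfig) error {
        return nil // custom behavior wins over ApplyReturnError
}

err := p.Apply(nil, &InstanceState{}, &ResourceConfig{})
// err == nil; p.ApplyCalled == true; p.ApplyState and p.ApplyConfig recorded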
diff --git a/vendor/github.com/hashicorp/terraform/terraform/semantics.go b/vendor/github.com/hashicorp/terraform/terraform/semantics.go
new file mode 100644 (file)
index 0000000..20f1d8a
--- /dev/null
@@ -0,0 +1,132 @@
+package terraform
+
+import (
+       "fmt"
+       "strings"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphSemanticChecker is the interface that semantic checks across
+// the entire Terraform graph implement.
+//
+// The graph should NOT be modified by the semantic checker.
+type GraphSemanticChecker interface {
+       Check(*dag.Graph) error
+}
+
+// UnorderedSemanticCheckRunner is an implementation of GraphSemanticChecker
+// that runs a list of SemanticCheckers against the vertices of the graph
+// in no specified order.
+type UnorderedSemanticCheckRunner struct {
+       Checks []SemanticChecker
+}
+
+func (sc *UnorderedSemanticCheckRunner) Check(g *dag.Graph) error {
+       var err error
+       for _, v := range g.Vertices() {
+               for _, check := range sc.Checks {
+                       if e := check.Check(g, v); e != nil {
+                               err = multierror.Append(err, e)
+                       }
+               }
+       }
+
+       return err
+}
+
+// SemanticChecker is the interface that semantic checks across the
+// Terraform graph implement. Errors are accumulated. Even after an error
+// is returned, child vertices in the graph will still be visited.
+//
+// The graph should NOT be modified by the semantic checker.
+//
+// The order in which vertices are visited is left unspecified, so the
+// semantic checks should not rely on that.
+type SemanticChecker interface {
+       Check(*dag.Graph, dag.Vertex) error
+}
+
+// smcUserVariables does all the semantic checks to verify that the
+// variables given satisfy the configuration itself.
+func smcUserVariables(c *config.Config, vs map[string]interface{}) []error {
+       var errs []error
+
+       cvs := make(map[string]*config.Variable)
+       for _, v := range c.Variables {
+               cvs[v.Name] = v
+       }
+
+       // Check that all required variables are present
+       required := make(map[string]struct{})
+       for _, v := range c.Variables {
+               if v.Required() {
+                       required[v.Name] = struct{}{}
+               }
+       }
+       for k := range vs {
+               delete(required, k)
+       }
+       if len(required) > 0 {
+               for k := range required {
+                       errs = append(errs, fmt.Errorf(
+                               "Required variable not set: %s", k))
+               }
+       }
+
+       // Check that types match up
+       for name, proposedValue := range vs {
+               // Check for "map.key" fields. These stopped working with Terraform
+               // 0.7, but we check for them here to surface a better error message
+               // informing the user what happened.
+               if idx := strings.Index(name, "."); idx > 0 {
+                       key := name[:idx]
+                       if _, ok := cvs[key]; ok {
+                               errs = append(errs, fmt.Errorf(
+                                       "%s: Overriding map keys with the format `name.key` is no "+
+                                               "longer allowed. You may still override keys by setting "+
+                                               "`name = { key = value }`. The maps will be merged. This "+
+                                               "behavior appeared in 0.7.0.", name))
+                               continue
+                       }
+               }
+
+               schema, ok := cvs[name]
+               if !ok {
+                       continue
+               }
+
+               declaredType := schema.Type()
+
+               switch declaredType {
+               case config.VariableTypeString:
+                       switch proposedValue.(type) {
+                       case string:
+                               continue
+                       }
+               case config.VariableTypeMap:
+                       switch v := proposedValue.(type) {
+                       case map[string]interface{}:
+                               continue
+                       case []map[string]interface{}:
+                               // if we have a list of 1 map, it will get coerced later as needed
+                               if len(v) == 1 {
+                                       continue
+                               }
+                       }
+               case config.VariableTypeList:
+                       switch proposedValue.(type) {
+                       case []interface{}:
+                               continue
+                       }
+               }
+               errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s",
+                       name, declaredType.Printable(), hclTypeName(proposedValue)))
+       }
+
+       // TODO(mitchellh): variables that are unknown
+
+       return errs
+}
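The required-variable check above is a small set difference. A self-contained replica for illustration (a sketch mirroring, not reusing, the vendored logic):

package main

import "fmt"

// missingRequired mirrors the check in smcUserVariables: every declared
// required variable that is absent from the user-supplied map is reported.
func missingRequired(required []string, supplied map[string]interface{}) []string {
        set := make(map[string]struct{})
        for _, name := range required {
                set[name] = struct{}{}
        }
        for name := range supplied {
                delete(set, name)
        }
        var missing []string
        for name := range set {
                missing = append(missing, name)
        }
        return missing
}

func main() {
        fmt.Println(missingRequired(
                []string{"region", "ami"},
                map[string]interface{}{"region": "us-east-1"},
        )) // [ami]
}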
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow.go b/vendor/github.com/hashicorp/terraform/terraform/shadow.go
new file mode 100644 (file)
index 0000000..4632559
--- /dev/null
@@ -0,0 +1,28 @@
+package terraform
+
+// Shadow is the interface that any "shadow" structures must implement.
+//
+// A shadow structure is an interface implementation (typically) that
+// shadows a real implementation and verifies that the same behavior occurs
+// on both. The semantics of this behavior are up to the interface itself.
+//
+// A shadow NEVER modifies real values or state. It must always be safe to use.
+//
+// For example, a ResourceProvider shadow ensures that the same operations
+// are done on the same resources with the same configurations.
+//
+// The typical usage of a shadow following this interface is to complete
+// the real operations, then call CloseShadow which tells the shadow that
+// the real side is done. Then, once the shadow is also complete, call
+// ShadowError to find any errors that may have been caught.
+type Shadow interface {
+       // CloseShadow tells the shadow that the REAL implementation is
+       // complete. Therefore, any calls that would block should now return
+       // immediately since no more changes will happen to the real side.
+       CloseShadow() error
+
+       // ShadowError returns the errors that the shadow has found.
+       // This should be called AFTER CloseShadow and AFTER the shadow is
+       // known to be complete (no more calls to it).
+       ShadowError() error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_components.go
new file mode 100644 (file)
index 0000000..116cf84
--- /dev/null
@@ -0,0 +1,273 @@
+package terraform
+
+import (
+       "fmt"
+       "sync"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// newShadowComponentFactory creates a shadowed contextComponentFactory
+// so that requests to create new components result in both a real and
+// shadow side.
+func newShadowComponentFactory(
+       f contextComponentFactory) (contextComponentFactory, *shadowComponentFactory) {
+       // Create the shared data
+       shared := &shadowComponentFactoryShared{contextComponentFactory: f}
+
+       // Create the real side
+       real := &shadowComponentFactory{
+               shadowComponentFactoryShared: shared,
+       }
+
+       // Create the shadow
+       shadow := &shadowComponentFactory{
+               shadowComponentFactoryShared: shared,
+               Shadow:                       true,
+       }
+
+       return real, shadow
+}
+
+// shadowComponentFactory is the shadow side. Any components created
+// with this factory are fake and will not cause real work to happen.
+//
+// Unlike other shadowers, the shadow component factory will allow the
+// shadow to create _any_ component even if it is never requested on the
+// real side. This is because errors will happen later downstream as function
+// calls are made to the shadows that are never matched on the real side.
+type shadowComponentFactory struct {
+       *shadowComponentFactoryShared
+
+       Shadow bool // True if this should return the shadow
+       lock   sync.Mutex
+}
+
+func (f *shadowComponentFactory) ResourceProvider(
+       n, uid string) (ResourceProvider, error) {
+       f.lock.Lock()
+       defer f.lock.Unlock()
+
+       real, shadow, err := f.shadowComponentFactoryShared.ResourceProvider(n, uid)
+       var result ResourceProvider = real
+       if f.Shadow {
+               result = shadow
+       }
+
+       return result, err
+}
+
+func (f *shadowComponentFactory) ResourceProvisioner(
+       n, uid string) (ResourceProvisioner, error) {
+       f.lock.Lock()
+       defer f.lock.Unlock()
+
+       real, shadow, err := f.shadowComponentFactoryShared.ResourceProvisioner(n, uid)
+       var result ResourceProvisioner = real
+       if f.Shadow {
+               result = shadow
+       }
+
+       return result, err
+}
+
+// CloseShadow is called when the _real_ side is complete. This will cause
+// all future blocking operations to return immediately on the shadow to
+// ensure the shadow also completes.
+func (f *shadowComponentFactory) CloseShadow() error {
+       // If we aren't the shadow, just return
+       if !f.Shadow {
+               return nil
+       }
+
+       // Lock ourselves so we don't modify state
+       f.lock.Lock()
+       defer f.lock.Unlock()
+
+       // Grab our shared state
+       shared := f.shadowComponentFactoryShared
+
+       // If we're already closed, it's an error
+       if shared.closed {
+               return fmt.Errorf("component factory shadow already closed")
+       }
+
+       // Close all the providers and provisioners and return the error
+       var result error
+       for _, n := range shared.providerKeys {
+               _, shadow, err := shared.ResourceProvider(n, n)
+               if err == nil && shadow != nil {
+                       if err := shadow.CloseShadow(); err != nil {
+                               result = multierror.Append(result, err)
+                       }
+               }
+       }
+
+       for _, n := range shared.provisionerKeys {
+               _, shadow, err := shared.ResourceProvisioner(n, n)
+               if err == nil && shadow != nil {
+                       if err := shadow.CloseShadow(); err != nil {
+                               result = multierror.Append(result, err)
+                       }
+               }
+       }
+
+       // Mark ourselves as closed
+       shared.closed = true
+
+       return result
+}
+
+func (f *shadowComponentFactory) ShadowError() error {
+       // If we aren't the shadow, just return
+       if !f.Shadow {
+               return nil
+       }
+
+       // Lock ourselves so we don't modify state
+       f.lock.Lock()
+       defer f.lock.Unlock()
+
+       // Grab our shared state
+       shared := f.shadowComponentFactoryShared
+
+       // If we're not closed, it's an error
+       if !shared.closed {
+               return fmt.Errorf("component factory must be closed to retrieve errors")
+       }
+
+       // Close all the providers and provisioners and return the error
+       var result error
+       for _, n := range shared.providerKeys {
+               _, shadow, err := shared.ResourceProvider(n, n)
+               if err == nil && shadow != nil {
+                       if err := shadow.ShadowError(); err != nil {
+                               result = multierror.Append(result, err)
+                       }
+               }
+       }
+
+       for _, n := range shared.provisionerKeys {
+               _, shadow, err := shared.ResourceProvisioner(n, n)
+               if err == nil && shadow != nil {
+                       if err := shadow.ShadowError(); err != nil {
+                               result = multierror.Append(result, err)
+                       }
+               }
+       }
+
+       return result
+}
+
+// shadowComponentFactoryShared is shared data between the two factories.
+//
+// It is NOT SAFE to run any function on this struct in parallel. Lock
+// access to this struct.
+type shadowComponentFactoryShared struct {
+       contextComponentFactory
+
+       closed          bool
+       providers       shadow.KeyedValue
+       providerKeys    []string
+       provisioners    shadow.KeyedValue
+       provisionerKeys []string
+}
+
+// shadowResourceProviderFactoryEntry is the entry that is stored in
+// the Shadows key/value for a provider.
+type shadowComponentFactoryProviderEntry struct {
+       Real   ResourceProvider
+       Shadow shadowResourceProvider
+       Err    error
+}
+
+type shadowComponentFactoryProvisionerEntry struct {
+       Real   ResourceProvisioner
+       Shadow shadowResourceProvisioner
+       Err    error
+}
+
+func (f *shadowComponentFactoryShared) ResourceProvider(
+       n, uid string) (ResourceProvider, shadowResourceProvider, error) {
+       // Determine if we already have a value
+       raw, ok := f.providers.ValueOk(uid)
+       if !ok {
+               // Build the entry
+               var entry shadowComponentFactoryProviderEntry
+
+               // No value, initialize. Create the original
+               p, err := f.contextComponentFactory.ResourceProvider(n, uid)
+               if err != nil {
+                       entry.Err = err
+                       p = nil // Just to be sure
+               }
+
+               if p != nil {
+                       // Create the shadow
+                       real, shadow := newShadowResourceProvider(p)
+                       entry.Real = real
+                       entry.Shadow = shadow
+
+                       if f.closed {
+                               shadow.CloseShadow()
+                       }
+               }
+
+               // Store the value
+               f.providers.SetValue(uid, &entry)
+               f.providerKeys = append(f.providerKeys, uid)
+               raw = &entry
+       }
+
+       // Read the entry
+       entry, ok := raw.(*shadowComponentFactoryProviderEntry)
+       if !ok {
+               return nil, nil, fmt.Errorf("Unknown value for shadow provider: %#v", raw)
+       }
+
+       // Return
+       return entry.Real, entry.Shadow, entry.Err
+}
+
+func (f *shadowComponentFactoryShared) ResourceProvisioner(
+       n, uid string) (ResourceProvisioner, shadowResourceProvisioner, error) {
+       // Determine if we already have a value
+       raw, ok := f.provisioners.ValueOk(uid)
+       if !ok {
+               // Build the entry
+               var entry shadowComponentFactoryProvisionerEntry
+
+               // No value, initialize. Create the original
+               p, err := f.contextComponentFactory.ResourceProvisioner(n, uid)
+               if err != nil {
+                       entry.Err = err
+                       p = nil // Just to be sure
+               }
+
+               if p != nil {
+                       // For now, just create a mock since we don't support provisioners yet
+                       real, shadow := newShadowResourceProvisioner(p)
+                       entry.Real = real
+                       entry.Shadow = shadow
+
+                       if f.closed {
+                               shadow.CloseShadow()
+                       }
+               }
+
+               // Store the value
+               f.provisioners.SetValue(uid, &entry)
+               f.provisionerKeys = append(f.provisionerKeys, uid)
+               raw = &entry
+       }
+
+       // Read the entry
+       entry, ok := raw.(*shadowComponentFactoryProvisionerEntry)
+       if !ok {
+               return nil, nil, fmt.Errorf("Unknown value for shadow provisioner: %#v", raw)
+       }
+
+       // Return
+       return entry.Real, entry.Shadow, entry.Err
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_context.go
new file mode 100644 (file)
index 0000000..5588af2
--- /dev/null
@@ -0,0 +1,158 @@
+package terraform
+
+import (
+       "fmt"
+       "strings"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/mitchellh/copystructure"
+)
+
+// newShadowContext creates a new context that will shadow the given context
+// when walking the graph. The resulting context should be used _only once_
+// for a graph walk.
+//
+// The returned Shadow should be closed after the graph walk with the
+// real context is complete. Errors from the shadow can be retrieved there.
+//
+// Most importantly, any operations done on the shadow context (the returned
+// context) will NEVER affect the real context. All structures are deep
+// copied, no real providers or resources are used, etc.
+func newShadowContext(c *Context) (*Context, *Context, Shadow) {
+       // Copy the targets
+       targetRaw, err := copystructure.Copy(c.targets)
+       if err != nil {
+               panic(err)
+       }
+
+       // Copy the variables
+       varRaw, err := copystructure.Copy(c.variables)
+       if err != nil {
+               panic(err)
+       }
+
+       // Copy the provider inputs
+       providerInputRaw, err := copystructure.Copy(c.providerInputConfig)
+       if err != nil {
+               panic(err)
+       }
+
+       // The factories
+       componentsReal, componentsShadow := newShadowComponentFactory(c.components)
+
+       // Create the shadow
+       shadow := &Context{
+               components: componentsShadow,
+               destroy:    c.destroy,
+               diff:       c.diff.DeepCopy(),
+               hooks:      nil,
+               meta:       c.meta,
+               module:     c.module,
+               state:      c.state.DeepCopy(),
+               targets:    targetRaw.([]string),
+               variables:  varRaw.(map[string]interface{}),
+
+               // NOTE(mitchellh): This is not going to work for shadows that are
+               // testing that input results in the proper end state. At the time
+               // of writing, input is not used in any state-changing graph
+               // walks anyway, so this checks nothing. We set it to a mock to
+               // avoid any panics, but even a "nil" value worked here.
+               uiInput: new(MockUIInput),
+
+               // Hardcoded to 4 since parallelism in the shadow doesn't matter
+               // a ton since we're doing far less compared to the real side
+               // and our operations are MUCH faster.
+               parallelSem:         NewSemaphore(4),
+               providerInputConfig: providerInputRaw.(map[string]map[string]interface{}),
+       }
+
+       // Create the real context. This is effectively just a copy of
+       // the context given except we need to modify some of the values
+       // to point to the real side of a shadow so the shadow can compare values.
+       real := &Context{
+               // The fields below are changed.
+               components: componentsReal,
+
+               // The fields below are direct copies
+               destroy: c.destroy,
+               diff:    c.diff,
+               // diffLock - no copy
+               hooks:  c.hooks,
+               meta:   c.meta,
+               module: c.module,
+               sh:     c.sh,
+               state:  c.state,
+               // stateLock - no copy
+               targets:   c.targets,
+               uiInput:   c.uiInput,
+               variables: c.variables,
+
+               // l - no copy
+               parallelSem:         c.parallelSem,
+               providerInputConfig: c.providerInputConfig,
+               runContext:          c.runContext,
+               runContextCancel:    c.runContextCancel,
+               shadowErr:           c.shadowErr,
+       }
+
+       return real, shadow, &shadowContextCloser{
+               Components: componentsShadow,
+       }
+}
+
+// shadowContextVerify takes the real and shadow context and verifies they
+// have equal diffs and states.
+func shadowContextVerify(real, shadow *Context) error {
+       var result error
+
+       // The states compared must be pruned so they're minimal/clean
+       real.state.prune()
+       shadow.state.prune()
+
+       // Compare the states
+       if !real.state.Equal(shadow.state) {
+               result = multierror.Append(result, fmt.Errorf(
+                       "Real and shadow states do not match! "+
+                               "Real state:\n\n%s\n\n"+
+                               "Shadow state:\n\n%s\n\n",
+                       real.state, shadow.state))
+       }
+
+       // Compare the diffs
+       if !real.diff.Equal(shadow.diff) {
+               result = multierror.Append(result, fmt.Errorf(
+                       "Real and shadow diffs do not match! "+
+                               "Real diff:\n\n%s\n\n"+
+                               "Shadow diff:\n\n%s\n\n",
+                       real.diff, shadow.diff))
+       }
+
+       return result
+}
+
+// shadowContextCloser is the io.Closer returned by newShadowContext that
+// closes all the shadows and returns the results.
+type shadowContextCloser struct {
+       Components *shadowComponentFactory
+}
+
+// Close closes the shadow context.
+func (c *shadowContextCloser) CloseShadow() error {
+       return c.Components.CloseShadow()
+}
+
+func (c *shadowContextCloser) ShadowError() error {
+       err := c.Components.ShadowError()
+       if err == nil {
+               return nil
+       }
+
+       // This is a sad edge case: if the configuration contains uuid() at
+       // any point, we cannot reason about the shadow execution. Tested
+       // with Context2Plan_shadowUuid.
+       if strings.Contains(err.Error(), "uuid()") {
+               err = nil
+       }
+
+       return err
+}
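Putting the lifecycle together, a hedged in-package sketch (hypothetical caller; ctx and the actual graph walks are elided):

// real and shadowCtx are walked over the same graph; only the real
// context does actual work.
real, shadowCtx, closer := newShadowContext(ctx)
_, _ = real, shadowCtx // graph walks elided

// Once the real walk is done, unblock the shadow, then collect errors.
if err := closer.CloseShadow(); err != nil {
        log.Printf("[ERROR] closing shadow: %s", err)
}
if err := closer.ShadowError(); err != nil {
        log.Printf("[WARN] shadow found differences: %s", err)
}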
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provider.go
new file mode 100644 (file)
index 0000000..9741d7e
--- /dev/null
@@ -0,0 +1,815 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "sync"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// shadowResourceProvider implements ResourceProvider for the shadow
+// eval context defined in eval_context_shadow.go.
+//
+// This is used to verify behavior with a real provider. This shouldn't
+// be used directly.
+type shadowResourceProvider interface {
+       ResourceProvider
+       Shadow
+}
+
+// newShadowResourceProvider creates a new shadowed ResourceProvider.
+//
+// This will assume a well behaved real ResourceProvider. For example,
+// it assumes that the `Resources` call underneath doesn't change values:
+// once it is called on the real provider, the result is cached and
+// returned by the shadow, since the number of calls shouldn't affect
+// actual behavior.
+//
+// However, with calls like Apply, call order is taken into account,
+// parameters are checked for equality, etc.
+func newShadowResourceProvider(p ResourceProvider) (ResourceProvider, shadowResourceProvider) {
+       // Create the shared data
+       shared := shadowResourceProviderShared{}
+
+       // Create the real provider that does actual work
+       real := &shadowResourceProviderReal{
+               ResourceProvider: p,
+               Shared:           &shared,
+       }
+
+       // Create the shadow that watches the real value
+       shadow := &shadowResourceProviderShadow{
+               Shared: &shared,
+
+               resources:   p.Resources(),
+               dataSources: p.DataSources(),
+       }
+
+       return real, shadow
+}
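A hedged sketch of the intended call pattern (hypothetical; in practice the wiring happens through the shadow component factory, and the shadow side replays calls made by a separate graph walk):

// upstream is some real ResourceProvider and cfg a *ResourceConfig
// (both hypothetical).
real, shadow := newShadowResourceProvider(upstream)

// The real side does actual work and records inputs/outputs...
real.Validate(cfg)

// ...the shadow side replays the same call for comparison.
shadow.Validate(cfg)

// After the real side is finished:
shadow.CloseShadow()
if err := shadow.ShadowError(); err != nil {
        log.Printf("[ERROR] shadow mismatch: %s", err)
}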
+
+// shadowResourceProviderReal is the real resource provider. Function calls
+// to this will perform real work. This records the parameters and return
+// values and call order for the shadow to reproduce.
+type shadowResourceProviderReal struct {
+       ResourceProvider
+
+       Shared *shadowResourceProviderShared
+}
+
+func (p *shadowResourceProviderReal) Close() error {
+       var result error
+       if c, ok := p.ResourceProvider.(ResourceProviderCloser); ok {
+               result = c.Close()
+       }
+
+       p.Shared.CloseErr.SetValue(result)
+       return result
+}
+
+func (p *shadowResourceProviderReal) Input(
+       input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+       cCopy := c.DeepCopy()
+
+       result, err := p.ResourceProvider.Input(input, c)
+       p.Shared.Input.SetValue(&shadowResourceProviderInput{
+               Config:    cCopy,
+               Result:    result.DeepCopy(),
+               ResultErr: err,
+       })
+
+       return result, err
+}
+
+func (p *shadowResourceProviderReal) Validate(c *ResourceConfig) ([]string, []error) {
+       warns, errs := p.ResourceProvider.Validate(c)
+       p.Shared.Validate.SetValue(&shadowResourceProviderValidate{
+               Config:     c.DeepCopy(),
+               ResultWarn: warns,
+               ResultErr:  errs,
+       })
+
+       return warns, errs
+}
+
+func (p *shadowResourceProviderReal) Configure(c *ResourceConfig) error {
+       cCopy := c.DeepCopy()
+
+       err := p.ResourceProvider.Configure(c)
+       p.Shared.Configure.SetValue(&shadowResourceProviderConfigure{
+               Config: cCopy,
+               Result: err,
+       })
+
+       return err
+}
+
+func (p *shadowResourceProviderReal) Stop() error {
+       return p.ResourceProvider.Stop()
+}
+
+func (p *shadowResourceProviderReal) ValidateResource(
+       t string, c *ResourceConfig) ([]string, []error) {
+       key := t
+       configCopy := c.DeepCopy()
+
+       // Real operation
+       warns, errs := p.ResourceProvider.ValidateResource(t, c)
+
+       // Initialize to ensure we always have a wrapper with a lock
+       p.Shared.ValidateResource.Init(
+               key, &shadowResourceProviderValidateResourceWrapper{})
+
+       // Get the result
+       raw := p.Shared.ValidateResource.Value(key)
+       wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
+       if !ok {
+               // If this fails then we just continue with our day... the shadow
+               // will fail too, but there isn't much we can do.
+               log.Printf(
+                       "[ERROR] unknown value in ValidateResource shadow value: %#v", raw)
+               return warns, errs
+       }
+
+       // Lock the wrapper for writing and record our call
+       wrapper.Lock()
+       defer wrapper.Unlock()
+
+       wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateResource{
+               Config: configCopy,
+               Warns:  warns,
+               Errors: errs,
+       })
+
+       // With it locked, call SetValue again so that it triggers WaitForChange
+       p.Shared.ValidateResource.SetValue(key, wrapper)
+
+       // Return the result
+       return warns, errs
+}
+
+func (p *shadowResourceProviderReal) Apply(
+       info *InstanceInfo,
+       state *InstanceState,
+       diff *InstanceDiff) (*InstanceState, error) {
+       // These have to be copied before the call, since the call can modify them.
+       stateCopy := state.DeepCopy()
+       diffCopy := diff.DeepCopy()
+
+       result, err := p.ResourceProvider.Apply(info, state, diff)
+       p.Shared.Apply.SetValue(info.uniqueId(), &shadowResourceProviderApply{
+               State:     stateCopy,
+               Diff:      diffCopy,
+               Result:    result.DeepCopy(),
+               ResultErr: err,
+       })
+
+       return result, err
+}
+
+func (p *shadowResourceProviderReal) Diff(
+       info *InstanceInfo,
+       state *InstanceState,
+       desired *ResourceConfig) (*InstanceDiff, error) {
+       // These have to be copied before the call, since the call can modify them.
+       stateCopy := state.DeepCopy()
+       desiredCopy := desired.DeepCopy()
+
+       result, err := p.ResourceProvider.Diff(info, state, desired)
+       p.Shared.Diff.SetValue(info.uniqueId(), &shadowResourceProviderDiff{
+               State:     stateCopy,
+               Desired:   desiredCopy,
+               Result:    result.DeepCopy(),
+               ResultErr: err,
+       })
+
+       return result, err
+}
+
+func (p *shadowResourceProviderReal) Refresh(
+       info *InstanceInfo,
+       state *InstanceState) (*InstanceState, error) {
+       // These have to be copied before the call, since the call can modify them.
+       stateCopy := state.DeepCopy()
+
+       result, err := p.ResourceProvider.Refresh(info, state)
+       p.Shared.Refresh.SetValue(info.uniqueId(), &shadowResourceProviderRefresh{
+               State:     stateCopy,
+               Result:    result.DeepCopy(),
+               ResultErr: err,
+       })
+
+       return result, err
+}
+
+func (p *shadowResourceProviderReal) ValidateDataSource(
+       t string, c *ResourceConfig) ([]string, []error) {
+       key := t
+       configCopy := c.DeepCopy()
+
+       // Real operation
+       warns, errs := p.ResourceProvider.ValidateDataSource(t, c)
+
+       // Initialize
+       p.Shared.ValidateDataSource.Init(
+               key, &shadowResourceProviderValidateDataSourceWrapper{})
+
+       // Get the result
+       raw := p.Shared.ValidateDataSource.Value(key)
+       wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
+       if !ok {
+               // If this fails then we just continue with our day... the shadow
+               // will fail too, but there isn't much we can do.
+               log.Printf(
+                       "[ERROR] unknown value in ValidateDataSource shadow value: %#v", raw)
+               return warns, errs
+       }
+
+       // Lock the wrapper for writing and record our call
+       wrapper.Lock()
+       defer wrapper.Unlock()
+
+       wrapper.Calls = append(wrapper.Calls, &shadowResourceProviderValidateDataSource{
+               Config: configCopy,
+               Warns:  warns,
+               Errors: errs,
+       })
+
+       // Set it
+       p.Shared.ValidateDataSource.SetValue(key, wrapper)
+
+       // Return the result
+       return warns, errs
+}
+
+func (p *shadowResourceProviderReal) ReadDataDiff(
+       info *InstanceInfo,
+       desired *ResourceConfig) (*InstanceDiff, error) {
+       // These have to be copied before the call since call can modify
+       desiredCopy := desired.DeepCopy()
+
+       result, err := p.ResourceProvider.ReadDataDiff(info, desired)
+       p.Shared.ReadDataDiff.SetValue(info.uniqueId(), &shadowResourceProviderReadDataDiff{
+               Desired:   desiredCopy,
+               Result:    result.DeepCopy(),
+               ResultErr: err,
+       })
+
+       return result, err
+}
+
+func (p *shadowResourceProviderReal) ReadDataApply(
+       info *InstanceInfo,
+       diff *InstanceDiff) (*InstanceState, error) {
+       // These have to be copied before the call, since the call can modify them.
+       diffCopy := diff.DeepCopy()
+
+       result, err := p.ResourceProvider.ReadDataApply(info, diff)
+       p.Shared.ReadDataApply.SetValue(info.uniqueId(), &shadowResourceProviderReadDataApply{
+               Diff:      diffCopy,
+               Result:    result.DeepCopy(),
+               ResultErr: err,
+       })
+
+       return result, err
+}
+
+// shadowResourceProviderShadow is the shadow resource provider. Function
+// calls never affect real resources. This is paired with the "real" side
+// which must be called properly to enable recording.
+type shadowResourceProviderShadow struct {
+       Shared *shadowResourceProviderShared
+
+       // Cached values that are expected to not change
+       resources   []ResourceType
+       dataSources []DataSource
+
+       Error     error // Error is the list of errors from the shadow
+       ErrorLock sync.Mutex
+}
+
+type shadowResourceProviderShared struct {
+       // NOTE: Anytime a value is added here, be sure to add it to
+       // the Close() method so that it is closed.
+
+       CloseErr           shadow.Value
+       Input              shadow.Value
+       Validate           shadow.Value
+       Configure          shadow.Value
+       ValidateResource   shadow.KeyedValue
+       Apply              shadow.KeyedValue
+       Diff               shadow.KeyedValue
+       Refresh            shadow.KeyedValue
+       ValidateDataSource shadow.KeyedValue
+       ReadDataDiff       shadow.KeyedValue
+       ReadDataApply      shadow.KeyedValue
+}
+
+func (p *shadowResourceProviderShared) Close() error {
+       return shadow.Close(p)
+}
+
+func (p *shadowResourceProviderShadow) CloseShadow() error {
+       err := p.Shared.Close()
+       if err != nil {
+               err = fmt.Errorf("close error: %s", err)
+       }
+
+       return err
+}
+
+func (p *shadowResourceProviderShadow) ShadowError() error {
+       return p.Error
+}
+
+func (p *shadowResourceProviderShadow) Resources() []ResourceType {
+       return p.resources
+}
+
+func (p *shadowResourceProviderShadow) DataSources() []DataSource {
+       return p.dataSources
+}
+
+func (p *shadowResourceProviderShadow) Close() error {
+       v := p.Shared.CloseErr.Value()
+       if v == nil {
+               return nil
+       }
+
+       return v.(error)
+}
+
+func (p *shadowResourceProviderShadow) Input(
+       input UIInput, c *ResourceConfig) (*ResourceConfig, error) {
+       // Get the result of the input call
+       raw := p.Shared.Input.Value()
+       if raw == nil {
+               return nil, nil
+       }
+
+       result, ok := raw.(*shadowResourceProviderInput)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'input' shadow value: %#v", raw))
+               return nil, nil
+       }
+
+       // Compare the parameters, which should be identical
+       if !c.Equal(result.Config) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Input had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+                       result.Config, c))
+               p.ErrorLock.Unlock()
+       }
+
+       // Return the results
+       return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Validate(c *ResourceConfig) ([]string, []error) {
+       // Get the result of the validate call
+       raw := p.Shared.Validate.Value()
+       if raw == nil {
+               return nil, nil
+       }
+
+       result, ok := raw.(*shadowResourceProviderValidate)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'validate' shadow value: %#v", raw))
+               return nil, nil
+       }
+
+       // Compare the parameters, which should be identical
+       if !c.Equal(result.Config) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Validate had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+                       result.Config, c))
+               p.ErrorLock.Unlock()
+       }
+
+       // Return the results
+       return result.ResultWarn, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Configure(c *ResourceConfig) error {
+       // Get the result of the call
+       raw := p.Shared.Configure.Value()
+       if raw == nil {
+               return nil
+       }
+
+       result, ok := raw.(*shadowResourceProviderConfigure)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'configure' shadow value: %#v", raw))
+               return nil
+       }
+
+       // Compare the parameters, which should be identical
+       if !c.Equal(result.Config) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Configure had unequal configurations (real, then shadow):\n\n%#v\n\n%#v",
+                       result.Config, c))
+               p.ErrorLock.Unlock()
+       }
+
+       // Return the results
+       return result.Result
+}
+
+// Stop returns immediately.
+func (p *shadowResourceProviderShadow) Stop() error {
+       return nil
+}
+
+func (p *shadowResourceProviderShadow) ValidateResource(t string, c *ResourceConfig) ([]string, []error) {
+       // Unique key
+       key := t
+
+       // Get the initial value
+       raw := p.Shared.ValidateResource.Value(key)
+
+       // Find a validation with our configuration
+       var result *shadowResourceProviderValidateResource
+       for {
+               // Get the value
+               if raw == nil {
+                       p.ErrorLock.Lock()
+                       defer p.ErrorLock.Unlock()
+                       p.Error = multierror.Append(p.Error, fmt.Errorf(
+                               "Unknown 'ValidateResource' call for %q:\n\n%#v",
+                               key, c))
+                       return nil, nil
+               }
+
+               wrapper, ok := raw.(*shadowResourceProviderValidateResourceWrapper)
+               if !ok {
+                       p.ErrorLock.Lock()
+                       defer p.ErrorLock.Unlock()
+                       p.Error = multierror.Append(p.Error, fmt.Errorf(
+                               "Unknown 'ValidateResource' shadow value for %q: %#v", key, raw))
+                       return nil, nil
+               }
+
+               // Look for the matching call with our configuration
+               wrapper.RLock()
+               for _, call := range wrapper.Calls {
+                       if call.Config.Equal(c) {
+                               result = call
+                               break
+                       }
+               }
+               wrapper.RUnlock()
+
+               // If we found a result, exit
+               if result != nil {
+                       break
+               }
+
+               // Wait for a change so we can get the wrapper again
+               raw = p.Shared.ValidateResource.WaitForChange(key)
+       }
+
+       return result.Warns, result.Errors
+}
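+
+// The loop above is the shadow half of a record/replay handshake with the
+// real provider. A rough sequence (illustrative only):
+//
+//     real:   wrapper.Calls = append(wrapper.Calls, call)
+//     real:   Shared.ValidateResource.SetValue(key, wrapper)  // wakes waiters
+//     shadow: raw = Shared.ValidateResource.WaitForChange(key)
+//     shadow: // re-scan wrapper.Calls for a matching config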
+
+func (p *shadowResourceProviderShadow) Apply(
+       info *InstanceInfo,
+       state *InstanceState,
+       diff *InstanceDiff) (*InstanceState, error) {
+       // Unique key
+       key := info.uniqueId()
+       raw := p.Shared.Apply.Value(key)
+       if raw == nil {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'apply' call for %q:\n\n%#v\n\n%#v",
+                       key, state, diff))
+               return nil, nil
+       }
+
+       result, ok := raw.(*shadowResourceProviderApply)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'apply' shadow value for %q: %#v", key, raw))
+               return nil, nil
+       }
+
+       // Compare the parameters, which should be identical
+       if !state.Equal(result.State) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Apply %q: state had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+                       key, result.State, state))
+               p.ErrorLock.Unlock()
+       }
+
+       if !diff.Equal(result.Diff) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Apply %q: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
+                       key, result.Diff, diff))
+               p.ErrorLock.Unlock()
+       }
+
+       return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Diff(
+       info *InstanceInfo,
+       state *InstanceState,
+       desired *ResourceConfig) (*InstanceDiff, error) {
+       // Unique key
+       key := info.uniqueId()
+       raw := p.Shared.Diff.Value(key)
+       if raw == nil {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'diff' call for %q:\n\n%#v\n\n%#v",
+                       key, state, desired))
+               return nil, nil
+       }
+
+       result, ok := raw.(*shadowResourceProviderDiff)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'diff' shadow value for %q: %#v", key, raw))
+               return nil, nil
+       }
+
+       // Compare the parameters, which should be identical
+       if !state.Equal(result.State) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+                       key, result.State, state))
+               p.ErrorLock.Unlock()
+       }
+       if !desired.Equal(result.Desired) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Diff %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+                       key, result.Desired, desired))
+               p.ErrorLock.Unlock()
+       }
+
+       return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) Refresh(
+       info *InstanceInfo,
+       state *InstanceState) (*InstanceState, error) {
+       // Unique key
+       key := info.uniqueId()
+       raw := p.Shared.Refresh.Value(key)
+       if raw == nil {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'refresh' call for %q:\n\n%#v",
+                       key, state))
+               return nil, nil
+       }
+
+       result, ok := raw.(*shadowResourceProviderRefresh)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'refresh' shadow value: %#v", raw))
+               return nil, nil
+       }
+
+       // Compare the parameters, which should be identical
+       if !state.Equal(result.State) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Refresh %q had unequal states (real, then shadow):\n\n%#v\n\n%#v",
+                       key, result.State, state))
+               p.ErrorLock.Unlock()
+       }
+
+       return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ValidateDataSource(
+       t string, c *ResourceConfig) ([]string, []error) {
+       // Unique key
+       key := t
+
+       // Get the initial value
+       raw := p.Shared.ValidateDataSource.Value(key)
+
+       // Find a validation with our configuration
+       var result *shadowResourceProviderValidateDataSource
+       for {
+               // Get the value
+               if raw == nil {
+                       p.ErrorLock.Lock()
+                       defer p.ErrorLock.Unlock()
+                       p.Error = multierror.Append(p.Error, fmt.Errorf(
+                               "Unknown 'ValidateDataSource' call for %q:\n\n%#v",
+                               key, c))
+                       return nil, nil
+               }
+
+               wrapper, ok := raw.(*shadowResourceProviderValidateDataSourceWrapper)
+               if !ok {
+                       p.ErrorLock.Lock()
+                       defer p.ErrorLock.Unlock()
+                       p.Error = multierror.Append(p.Error, fmt.Errorf(
+                               "Unknown 'ValidateDataSource' shadow value: %#v", raw))
+                       return nil, nil
+               }
+
+               // Look for the matching call with our configuration
+               wrapper.RLock()
+               for _, call := range wrapper.Calls {
+                       if call.Config.Equal(c) {
+                               result = call
+                               break
+                       }
+               }
+               wrapper.RUnlock()
+
+               // If we found a result, exit
+               if result != nil {
+                       break
+               }
+
+               // Wait for a change so we can get the wrapper again
+               raw = p.Shared.ValidateDataSource.WaitForChange(key)
+       }
+
+       return result.Warns, result.Errors
+}
+
+func (p *shadowResourceProviderShadow) ReadDataDiff(
+       info *InstanceInfo,
+       desired *ResourceConfig) (*InstanceDiff, error) {
+       // Unique key
+       key := info.uniqueId()
+       raw := p.Shared.ReadDataDiff.Value(key)
+       if raw == nil {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'ReadDataDiff' call for %q:\n\n%#v",
+                       key, desired))
+               return nil, nil
+       }
+
+       result, ok := raw.(*shadowResourceProviderReadDataDiff)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'ReadDataDiff' shadow value for %q: %#v", key, raw))
+               return nil, nil
+       }
+
+       // Compare the parameters, which should be identical
+       if !desired.Equal(result.Desired) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "ReadDataDiff %q had unequal configs (real, then shadow):\n\n%#v\n\n%#v",
+                       key, result.Desired, desired))
+               p.ErrorLock.Unlock()
+       }
+
+       return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ReadDataApply(
+       info *InstanceInfo,
+       d *InstanceDiff) (*InstanceState, error) {
+       // Unique key
+       key := info.uniqueId()
+       raw := p.Shared.ReadDataApply.Value(key)
+       if raw == nil {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'ReadDataApply' call for %q:\n\n%#v",
+                       key, d))
+               return nil, nil
+       }
+
+       result, ok := raw.(*shadowResourceProviderReadDataApply)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'ReadDataApply' shadow value for %q: %#v", key, raw))
+               return nil, nil
+       }
+
+       // Compare the parameters, which should be identical
+       if !d.Equal(result.Diff) {
+               p.ErrorLock.Lock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "ReadDataApply: unequal diffs (real, then shadow):\n\n%#v\n\n%#v",
+                       result.Diff, d))
+               p.ErrorLock.Unlock()
+       }
+
+       return result.Result, result.ResultErr
+}
+
+func (p *shadowResourceProviderShadow) ImportState(info *InstanceInfo, id string) ([]*InstanceState, error) {
+       panic("import not supported by shadow graph")
+}
+
+// The structs for the various function calls are put below. These structs
+// are used to carry call information across the real/shadow boundaries.
+
+type shadowResourceProviderInput struct {
+       Config    *ResourceConfig
+       Result    *ResourceConfig
+       ResultErr error
+}
+
+type shadowResourceProviderValidate struct {
+       Config     *ResourceConfig
+       ResultWarn []string
+       ResultErr  []error
+}
+
+type shadowResourceProviderConfigure struct {
+       Config *ResourceConfig
+       Result error
+}
+
+type shadowResourceProviderValidateResourceWrapper struct {
+       sync.RWMutex
+
+       Calls []*shadowResourceProviderValidateResource
+}
+
+type shadowResourceProviderValidateResource struct {
+       Config *ResourceConfig
+       Warns  []string
+       Errors []error
+}
+
+type shadowResourceProviderApply struct {
+       State     *InstanceState
+       Diff      *InstanceDiff
+       Result    *InstanceState
+       ResultErr error
+}
+
+type shadowResourceProviderDiff struct {
+       State     *InstanceState
+       Desired   *ResourceConfig
+       Result    *InstanceDiff
+       ResultErr error
+}
+
+type shadowResourceProviderRefresh struct {
+       State     *InstanceState
+       Result    *InstanceState
+       ResultErr error
+}
+
+type shadowResourceProviderValidateDataSourceWrapper struct {
+       sync.RWMutex
+
+       Calls []*shadowResourceProviderValidateDataSource
+}
+
+type shadowResourceProviderValidateDataSource struct {
+       Config *ResourceConfig
+       Warns  []string
+       Errors []error
+}
+
+type shadowResourceProviderReadDataDiff struct {
+       Desired   *ResourceConfig
+       Result    *InstanceDiff
+       ResultErr error
+}
+
+type shadowResourceProviderReadDataApply struct {
+       Diff      *InstanceDiff
+       Result    *InstanceState
+       ResultErr error
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/shadow_resource_provisioner.go
new file mode 100644 (file)
index 0000000..60a4908
--- /dev/null
@@ -0,0 +1,282 @@
+package terraform
+
+import (
+       "fmt"
+       "io"
+       "log"
+       "sync"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/helper/shadow"
+)
+
+// shadowResourceProvisioner implements ResourceProvisioner for the shadow
+// eval context defined in eval_context_shadow.go.
+//
+// This is used to verify behavior with a real provisioner. This shouldn't
+// be used directly.
+type shadowResourceProvisioner interface {
+       ResourceProvisioner
+       Shadow
+}
+
+// newShadowResourceProvisioner creates a new shadowed ResourceProvisioner.
+func newShadowResourceProvisioner(
+       p ResourceProvisioner) (ResourceProvisioner, shadowResourceProvisioner) {
+       // Create the shared data
+       shared := shadowResourceProvisionerShared{
+               Validate: shadow.ComparedValue{
+                       Func: shadowResourceProvisionerValidateCompare,
+               },
+       }
+
+       // Create the real provisioner that does actual work
+       real := &shadowResourceProvisionerReal{
+               ResourceProvisioner: p,
+               Shared:              &shared,
+       }
+
+       // Create the shadow that watches the real value
+       shadow := &shadowResourceProvisionerShadow{
+               Shared: &shared,
+       }
+
+       return real, shadow
+}
+
+// shadowResourceProvisionerReal is the real resource provisioner. Function calls
+// to this will perform real work. This records the parameters, return
+// values, and call order for the shadow to reproduce.
+type shadowResourceProvisionerReal struct {
+       ResourceProvisioner
+
+       Shared *shadowResourceProvisionerShared
+}
+
+func (p *shadowResourceProvisionerReal) Close() error {
+       var result error
+       if c, ok := p.ResourceProvisioner.(ResourceProvisionerCloser); ok {
+               result = c.Close()
+       }
+
+       p.Shared.CloseErr.SetValue(result)
+       return result
+}
+
+func (p *shadowResourceProvisionerReal) Validate(c *ResourceConfig) ([]string, []error) {
+       warns, errs := p.ResourceProvisioner.Validate(c)
+       p.Shared.Validate.SetValue(&shadowResourceProvisionerValidate{
+               Config:     c,
+               ResultWarn: warns,
+               ResultErr:  errs,
+       })
+
+       return warns, errs
+}
+
+func (p *shadowResourceProvisionerReal) Apply(
+       output UIOutput, s *InstanceState, c *ResourceConfig) error {
+       err := p.ResourceProvisioner.Apply(output, s, c)
+
+       // Write the result, grabbing a lock for writing. This should never
+       // block for long since the operations below don't block.
+       p.Shared.ApplyLock.Lock()
+       defer p.Shared.ApplyLock.Unlock()
+
+       key := s.ID
+       raw, ok := p.Shared.Apply.ValueOk(key)
+       if !ok {
+               // Setup a new value
+               raw = &shadow.ComparedValue{
+                       Func: shadowResourceProvisionerApplyCompare,
+               }
+
+               // Set it
+               p.Shared.Apply.SetValue(key, raw)
+       }
+
+       compareVal, ok := raw.(*shadow.ComparedValue)
+       if !ok {
+               // Just log and return so that we don't cause the real side
+               // any side effects.
+               log.Printf("[ERROR] unknown value in 'apply': %#v", raw)
+               return err
+       }
+
+       // Write the resulting value
+       compareVal.SetValue(&shadowResourceProvisionerApply{
+               Config:    c,
+               ResultErr: err,
+       })
+
+       return err
+}
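+
+// Apply results are stored two levels deep: a KeyedValue indexed by the
+// instance ID whose values are ComparedValues keyed on the ResourceConfig.
+// The shadow's lookup path mirrors this (sketch, not executable on its own):
+//
+//     raw := shared.Apply.Value(s.ID)   // per-instance bucket
+//     cv := raw.(*shadow.ComparedValue) // matches configs via its Func
+//     result := cv.Value(c)             // the recorded call, if c matches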
+
+func (p *shadowResourceProvisionerReal) Stop() error {
+       return p.ResourceProvisioner.Stop()
+}
+
+// shadowResourceProvisionerShadow is the shadow resource provisioner. Function
+// calls never affect real resources. This is paired with the "real" side
+// which must be called properly to enable recording.
+type shadowResourceProvisionerShadow struct {
+       Shared *shadowResourceProvisionerShared
+
+       Error     error // Error is the list of errors from the shadow
+       ErrorLock sync.Mutex
+}
+
+type shadowResourceProvisionerShared struct {
+       // NOTE: Anytime a value is added here, be sure to add it to
+       // the Close() method so that it is closed.
+
+       CloseErr  shadow.Value
+       Validate  shadow.ComparedValue
+       Apply     shadow.KeyedValue
+       ApplyLock sync.Mutex // For writing only
+}
+
+func (p *shadowResourceProvisionerShared) Close() error {
+       closers := []io.Closer{
+               &p.CloseErr,
+       }
+
+       for _, c := range closers {
+               // This should never happen, but we don't panic because a panic
+               // could affect the real behavior of Terraform and a shadow should
+               // never be able to do that.
+               if err := c.Close(); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+func (p *shadowResourceProvisionerShadow) CloseShadow() error {
+       err := p.Shared.Close()
+       if err != nil {
+               err = fmt.Errorf("close error: %s", err)
+       }
+
+       return err
+}
+
+func (p *shadowResourceProvisionerShadow) ShadowError() error {
+       return p.Error
+}
+
+func (p *shadowResourceProvisionerShadow) Close() error {
+       v := p.Shared.CloseErr.Value()
+       if v == nil {
+               return nil
+       }
+
+       return v.(error)
+}
+
+func (p *shadowResourceProvisionerShadow) Validate(c *ResourceConfig) ([]string, []error) {
+       // Get the result of the validate call
+       raw := p.Shared.Validate.Value(c)
+       if raw == nil {
+               return nil, nil
+       }
+
+       result, ok := raw.(*shadowResourceProvisionerValidate)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'validate' shadow value: %#v", raw))
+               return nil, nil
+       }
+
+       // We don't need to compare configurations because we key on the
+       // configuration, so just return right away.
+       return result.ResultWarn, result.ResultErr
+}
+
+func (p *shadowResourceProvisionerShadow) Apply(
+       output UIOutput, s *InstanceState, c *ResourceConfig) error {
+       // Get the value based on the key
+       key := s.ID
+       raw := p.Shared.Apply.Value(key)
+       if raw == nil {
+               return nil
+       }
+
+       compareVal, ok := raw.(*shadow.ComparedValue)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'apply' shadow value: %#v", raw))
+               return nil
+       }
+
+       // With the compared value, we compare against our config
+       raw = compareVal.Value(c)
+       if raw == nil {
+               return nil
+       }
+
+       result, ok := raw.(*shadowResourceProvisionerApply)
+       if !ok {
+               p.ErrorLock.Lock()
+               defer p.ErrorLock.Unlock()
+               p.Error = multierror.Append(p.Error, fmt.Errorf(
+                       "Unknown 'apply' shadow value: %#v", raw))
+               return nil
+       }
+
+       return result.ResultErr
+}
+
+func (p *shadowResourceProvisionerShadow) Stop() error {
+       // For the shadow, we always just return nil since a Stop indicates
+       // that we were interrupted and shadows are disabled during interrupts
+       // anyway.
+       return nil
+}
+
+// The structs for the various function calls are put below. These structs
+// are used to carry call information across the real/shadow boundaries.
+
+type shadowResourceProvisionerValidate struct {
+       Config     *ResourceConfig
+       ResultWarn []string
+       ResultErr  []error
+}
+
+type shadowResourceProvisionerApply struct {
+       Config    *ResourceConfig
+       ResultErr error
+}
+
+func shadowResourceProvisionerValidateCompare(k, v interface{}) bool {
+       c, ok := k.(*ResourceConfig)
+       if !ok {
+               return false
+       }
+
+       result, ok := v.(*shadowResourceProvisionerValidate)
+       if !ok {
+               return false
+       }
+
+       return c.Equal(result.Config)
+}
+
+func shadowResourceProvisionerApplyCompare(k, v interface{}) bool {
+       c, ok := k.(*ResourceConfig)
+       if !ok {
+               return false
+       }
+
+       result, ok := v.(*shadowResourceProvisionerApply)
+       if !ok {
+               return false
+       }
+
+       return c.Equal(result.Config)
+}
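+
+// Both compare functions above serve as the Func of a shadow.ComparedValue:
+// given a lookup key k (the shadow's *ResourceConfig) and a stored value v
+// (a recorded call), they report whether the configurations match. A toy
+// round trip of the same shape (assumed inputs, illustrative only):
+//
+//     cv := &shadow.ComparedValue{Func: shadowResourceProvisionerApplyCompare}
+//     cv.SetValue(&shadowResourceProvisionerApply{Config: c})
+//     raw := cv.Value(c) // returns the stored call once c.Equal(Config)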
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go
new file mode 100644 (file)
index 0000000..074b682
--- /dev/null
@@ -0,0 +1,2118 @@
+package terraform
+
+import (
+       "bufio"
+       "bytes"
+       "encoding/json"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "reflect"
+       "sort"
+       "strconv"
+       "strings"
+       "sync"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/go-version"
+       "github.com/hashicorp/terraform/config"
+       "github.com/mitchellh/copystructure"
+       "github.com/satori/go.uuid"
+)
+
+const (
+       // StateVersion is the current version for our state file
+       StateVersion = 3
+)
+
+// rootModulePath is the path of the root module
+var rootModulePath = []string{"root"}
+
+// normalizeModulePath takes a raw module path and returns a path that is
+// guaranteed to have the rootModulePath prefix. If I could go back in time I
+// would've never had a rootModulePath (empty path would be root). We can
+// still fix this, but that's a big refactor that my branch doesn't make
+// sense for. Instead, this function normalizes paths.
+func normalizeModulePath(p []string) []string {
+       k := len(rootModulePath)
+
+       // If we already have a root module prefix, we're done
+       if len(p) >= len(rootModulePath) {
+               if reflect.DeepEqual(p[:k], rootModulePath) {
+                       return p
+               }
+       }
+
+       // None? Prefix it
+       result := make([]string, len(rootModulePath)+len(p))
+       copy(result, rootModulePath)
+       copy(result[k:], p)
+       return result
+}
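+
+// Worked examples (illustrative):
+//
+//     normalizeModulePath([]string{"child"})         // ["root", "child"]
+//     normalizeModulePath([]string{"root", "child"}) // unchanged
+//     normalizeModulePath(nil)                       // ["root"]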
+
+// State keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+type State struct {
+       // Version is the state file protocol version.
+       Version int `json:"version"`
+
+       // TFVersion is the version of Terraform that wrote this state.
+       TFVersion string `json:"terraform_version,omitempty"`
+
+       // Serial is incremented on any operation that modifies
+       // the State file. It is used to detect potentially conflicting
+       // updates.
+       Serial int64 `json:"serial"`
+
+       // Lineage is set when a new, blank state is created and then
+       // never updated. This allows us to determine whether the serials
+       // of two states can be meaningfully compared.
+       // Apart from the guarantee that collisions between two lineages
+       // are very unlikely, this value is opaque and external callers
+       // should only compare lineage strings byte-for-byte for equality.
+       Lineage string `json:"lineage"`
+
+       // Remote is used to track the metadata required to
+       // pull and push state files from a remote storage endpoint.
+       Remote *RemoteState `json:"remote,omitempty"`
+
+       // Backend tracks the configuration for the backend in use with
+       // this state. This is used to track any changes in the backend
+       // configuration.
+       Backend *BackendState `json:"backend,omitempty"`
+
+       // Modules contains all the modules in a breadth-first order
+       Modules []*ModuleState `json:"modules"`
+
+       mu sync.Mutex
+}
+
+func (s *State) Lock()   { s.mu.Lock() }
+func (s *State) Unlock() { s.mu.Unlock() }
+
+// NewState is used to initialize a blank state
+func NewState() *State {
+       s := &State{}
+       s.init()
+       return s
+}
+
+// Children returns the ModuleStates that are direct children of
+// the given path. If the path is "root", for example, then children
+// returned might be "root.child", but not "root.child.grandchild".
+func (s *State) Children(path []string) []*ModuleState {
+       s.Lock()
+       defer s.Unlock()
+       // TODO: test
+
+       return s.children(path)
+}
+
+func (s *State) children(path []string) []*ModuleState {
+       result := make([]*ModuleState, 0)
+       for _, m := range s.Modules {
+               if m == nil {
+                       continue
+               }
+
+               if len(m.Path) != len(path)+1 {
+                       continue
+               }
+               if !reflect.DeepEqual(path, m.Path[:len(path)]) {
+                       continue
+               }
+
+               result = append(result, m)
+       }
+
+       return result
+}
+
+// AddModule adds the module with the given path to the state.
+//
+// This should be the preferred method to add module states since it
+// allows us to optimize lookups later as well as control sorting.
+func (s *State) AddModule(path []string) *ModuleState {
+       s.Lock()
+       defer s.Unlock()
+
+       return s.addModule(path)
+}
+
+func (s *State) addModule(path []string) *ModuleState {
+       // check if the module exists first
+       m := s.moduleByPath(path)
+       if m != nil {
+               return m
+       }
+
+       m = &ModuleState{Path: path}
+       m.init()
+       s.Modules = append(s.Modules, m)
+       s.sort()
+       return m
+}
+
+// ModuleByPath is used to lookup the module state for the given path.
+// This should be the preferred lookup mechanism as it allows for future
+// lookup optimizations.
+func (s *State) ModuleByPath(path []string) *ModuleState {
+       if s == nil {
+               return nil
+       }
+       s.Lock()
+       defer s.Unlock()
+
+       return s.moduleByPath(path)
+}
+
+func (s *State) moduleByPath(path []string) *ModuleState {
+       for _, mod := range s.Modules {
+               if mod == nil {
+                       continue
+               }
+               if mod.Path == nil {
+                       panic("missing module path")
+               }
+               if reflect.DeepEqual(mod.Path, path) {
+                       return mod
+               }
+       }
+       return nil
+}
+
+// ModuleOrphans returns all the module orphans in this state by
+// returning their full paths. These paths can be used with ModuleByPath
+// to return the actual state.
+func (s *State) ModuleOrphans(path []string, c *config.Config) [][]string {
+       s.Lock()
+       defer s.Unlock()
+
+       return s.moduleOrphans(path, c)
+}
+
+func (s *State) moduleOrphans(path []string, c *config.Config) [][]string {
+       // direct keeps track of what direct children we have both in our config
+       // and in our state. childrenKeys keeps track of what isn't an orphan.
+       direct := make(map[string]struct{})
+       childrenKeys := make(map[string]struct{})
+       if c != nil {
+               for _, m := range c.Modules {
+                       childrenKeys[m.Name] = struct{}{}
+                       direct[m.Name] = struct{}{}
+               }
+       }
+
+       // Go over the direct children and find any that aren't in our keys.
+       var orphans [][]string
+       for _, m := range s.children(path) {
+               key := m.Path[len(m.Path)-1]
+
+               // Record that we found this key as a direct child. We use this
+               // later to find orphan nested modules.
+               direct[key] = struct{}{}
+
+               // If we have a direct child still in our config, it is not an orphan
+               if _, ok := childrenKeys[key]; ok {
+                       continue
+               }
+
+               orphans = append(orphans, m.Path)
+       }
+
+       // Find the orphans that are nested...
+       for _, m := range s.Modules {
+               if m == nil {
+                       continue
+               }
+
+               // We only want modules that are at least grandchildren
+               if len(m.Path) < len(path)+2 {
+                       continue
+               }
+
+               // If it isn't part of our tree, continue
+               if !reflect.DeepEqual(path, m.Path[:len(path)]) {
+                       continue
+               }
+
+               // If we have the direct child, then just skip it.
+               key := m.Path[len(path)]
+               if _, ok := direct[key]; ok {
+                       continue
+               }
+
+               orphanPath := m.Path[:len(path)+1]
+
+               // Don't double-add if we've already added this orphan (which can happen if
+               // there are multiple nested sub-modules that get orphaned together).
+               alreadyAdded := false
+               for _, o := range orphans {
+                       if reflect.DeepEqual(o, orphanPath) {
+                               alreadyAdded = true
+                               break
+                       }
+               }
+               if alreadyAdded {
+                       continue
+               }
+
+               // Add this orphan
+               orphans = append(orphans, orphanPath)
+       }
+
+       return orphans
+}
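+
+// For example (illustrative): with state modules at root, root.a, root.b,
+// and root.b.c, and a config that declares only module "a":
+//
+//     s.moduleOrphans([]string{"root"}, c)
+//     // => [["root", "b"]]; root.b.c is subsumed by the root.b orphan
+//     //    rather than reported separately.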
+
+// Empty returns true if the state is empty.
+func (s *State) Empty() bool {
+       if s == nil {
+               return true
+       }
+       s.Lock()
+       defer s.Unlock()
+
+       return len(s.Modules) == 0
+}
+
+// HasResources returns true if the state contains any resources.
+//
+// This is similar to !s.Empty, but returns true also in the case where the
+// state has modules but all of them are devoid of resources.
+func (s *State) HasResources() bool {
+       if s.Empty() {
+               return false
+       }
+
+       for _, mod := range s.Modules {
+               if len(mod.Resources) > 0 {
+                       return true
+               }
+       }
+
+       return false
+}
+
+// IsRemote returns true if State represents a state that exists and is
+// remote.
+func (s *State) IsRemote() bool {
+       if s == nil {
+               return false
+       }
+       s.Lock()
+       defer s.Unlock()
+
+       if s.Remote == nil {
+               return false
+       }
+       if s.Remote.Type == "" {
+               return false
+       }
+
+       return true
+}
+
+// Validate validates the integrity of this state file.
+//
+// Certain properties of the statefile are expected by Terraform in order
+// to behave properly. The core of Terraform will assume that once it
+// receives a State structure that it has been validated. This validation
+// check should be called to ensure that.
+//
+// If this returns an error, then the user should be notified. The error
+// response will include detailed information on the nature of the error.
+func (s *State) Validate() error {
+       s.Lock()
+       defer s.Unlock()
+
+       var result error
+
+       // !!!! FOR DEVELOPERS !!!!
+       //
+       // Any errors returned from this Validate function will BLOCK TERRAFORM
+       // from loading a state file. Therefore, this should only contain checks
+       // that are only resolvable through manual intervention.
+       //
+       // !!!! FOR DEVELOPERS !!!!
+
+       // Make sure there are no duplicate module states. We open a new
+       // block here so we can use basic variable names and future validations
+       // can do the same.
+       {
+               found := make(map[string]struct{})
+               for _, ms := range s.Modules {
+                       if ms == nil {
+                               continue
+                       }
+
+                       key := strings.Join(ms.Path, ".")
+                       if _, ok := found[key]; ok {
+                               result = multierror.Append(result, fmt.Errorf(
+                                       strings.TrimSpace(stateValidateErrMultiModule), key))
+                               continue
+                       }
+
+                       found[key] = struct{}{}
+               }
+       }
+
+       return result
+}
+
+// Remove removes the item in the state at the given address, returning
+// any errors that may have occurred.
+//
+// If the address references a module state or resource, it will delete
+// all children as well. To check what will be deleted, use a StateFilter
+// first.
+func (s *State) Remove(addr ...string) error {
+       s.Lock()
+       defer s.Unlock()
+
+       // Filter out what we need to delete
+       filter := &StateFilter{State: s}
+       results, err := filter.Filter(addr...)
+       if err != nil {
+               return err
+       }
+
+       // If we have no results, just exit early; we're not going to do anything.
+       // While what happens below is fairly fast, this is an important early
+       // exit since the prune below might modify the state further, and we
+       // don't want to modify the state if we don't have to.
+       if len(results) == 0 {
+               return nil
+       }
+
+       // Go through each result and grab what we need
+       removed := make(map[interface{}]struct{})
+       for _, r := range results {
+               // Convert the path to our own type
+               path := append([]string{"root"}, r.Path...)
+
+               // If we removed this already, then ignore
+               if _, ok := removed[r.Value]; ok {
+                       continue
+               }
+
+               // If we removed the parent already, then ignore
+               if r.Parent != nil {
+                       if _, ok := removed[r.Parent.Value]; ok {
+                               continue
+                       }
+               }
+
+               // Add this to the removed list
+               removed[r.Value] = struct{}{}
+
+               switch v := r.Value.(type) {
+               case *ModuleState:
+                       s.removeModule(path, v)
+               case *ResourceState:
+                       s.removeResource(path, v)
+               case *InstanceState:
+                       s.removeInstance(path, r.Parent.Value.(*ResourceState), v)
+               default:
+                       return fmt.Errorf("unknown type to delete: %T", r.Value)
+               }
+       }
+
+       // Prune since the removal functions often do the bare minimum to
+       // remove a thing and may leave around dangling empty modules, resources,
+       // etc. Prune will clean that all up.
+       s.prune()
+
+       return nil
+}
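+
+// For example (illustrative; the address form follows StateFilter syntax):
+//
+//     if err := s.Remove("module.child.aws_instance.web"); err != nil {
+//             log.Printf("[ERROR] state remove failed: %s", err)
+//     }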
+
+func (s *State) removeModule(path []string, v *ModuleState) {
+       for i, m := range s.Modules {
+               if m == v {
+                       s.Modules, s.Modules[len(s.Modules)-1] = append(s.Modules[:i], s.Modules[i+1:]...), nil
+                       return
+               }
+       }
+}
+
+func (s *State) removeResource(path []string, v *ResourceState) {
+       // Get the module this resource lives in. If it doesn't exist, we're done.
+       mod := s.moduleByPath(path)
+       if mod == nil {
+               return
+       }
+
+       // Find this resource. This is an O(N) lookup; if we had the key it
+       // could be O(1), but even with thousands of resources this shouldn't
+       // matter right now. We can easily improve performance here when the
+       // time comes.
+       for k, r := range mod.Resources {
+               if r == v {
+                       // Found it
+                       delete(mod.Resources, k)
+                       return
+               }
+       }
+}
+
+func (s *State) removeInstance(path []string, r *ResourceState, v *InstanceState) {
+       // Go through the resource and find the instance that matches this
+       // (if any) and remove it.
+
+       // Check primary
+       if r.Primary == v {
+               r.Primary = nil
+               return
+       }
+
+       // Check lists
+       lists := [][]*InstanceState{r.Deposed}
+       for _, is := range lists {
+               for i, instance := range is {
+                       if instance == v {
+                               // Found it, remove it
+                               is, is[len(is)-1] = append(is[:i], is[i+1:]...), nil
+
+                               // Done
+                               return
+                       }
+               }
+       }
+}
+
+// RootModule returns the ModuleState for the root module
+func (s *State) RootModule() *ModuleState {
+       root := s.ModuleByPath(rootModulePath)
+       if root == nil {
+               panic("missing root module")
+       }
+       return root
+}
+
+// Equal tests if one state is equal to another.
+func (s *State) Equal(other *State) bool {
+       // If one is nil, we do a direct check
+       if s == nil || other == nil {
+               return s == other
+       }
+
+       s.Lock()
+       defer s.Unlock()
+       return s.equal(other)
+}
+
+func (s *State) equal(other *State) bool {
+       if s == nil || other == nil {
+               return s == other
+       }
+
+       // If the versions are different, they're certainly not equal
+       if s.Version != other.Version {
+               return false
+       }
+
+       // If any of the modules are not equal, then this state isn't equal
+       if len(s.Modules) != len(other.Modules) {
+               return false
+       }
+       for _, m := range s.Modules {
+               // This isn't very optimal currently but works.
+               otherM := other.moduleByPath(m.Path)
+               if otherM == nil {
+                       return false
+               }
+
+               // If they're not equal, then we're not equal!
+               if !m.Equal(otherM) {
+                       return false
+               }
+       }
+
+       return true
+}
+
+type StateAgeComparison int
+
+const (
+       StateAgeEqual         StateAgeComparison = 0
+       StateAgeReceiverNewer StateAgeComparison = 1
+       StateAgeReceiverOlder StateAgeComparison = -1
+)
+
+// CompareAges compares one state with another for which is "older".
+//
+// This is a simple check using the state's serial, and is thus only as
+// reliable as the serial itself. In the normal case, only one state
+// exists for a given combination of lineage/serial, but Terraform
+// does not guarantee this and so the result of this method should be
+// used with care.
+//
+// Returns an integer that is negative if the receiver is older than
+// the argument, positive if the converse, and zero if they are equal.
+// An error is returned if the two states are not of the same lineage,
+// in which case the integer returned has no meaning.
+func (s *State) CompareAges(other *State) (StateAgeComparison, error) {
+       // nil states are "older" than actual states
+       switch {
+       case s != nil && other == nil:
+               return StateAgeReceiverNewer, nil
+       case s == nil && other != nil:
+               return StateAgeReceiverOlder, nil
+       case s == nil && other == nil:
+               return StateAgeEqual, nil
+       }
+
+       if !s.SameLineage(other) {
+               return StateAgeEqual, fmt.Errorf(
+                       "can't compare two states of differing lineage",
+               )
+       }
+
+       s.Lock()
+       defer s.Unlock()
+
+       switch {
+       case s.Serial < other.Serial:
+               return StateAgeReceiverOlder, nil
+       case s.Serial > other.Serial:
+               return StateAgeReceiverNewer, nil
+       default:
+               return StateAgeEqual, nil
+       }
+}
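+
+// A typical call (illustrative), where local and remote share a lineage:
+//
+//     switch cmp, err := local.CompareAges(remote); {
+//     case err != nil:
+//             // differing lineages; the comparison is meaningless
+//     case cmp == StateAgeReceiverOlder:
+//             // remote has the higher serial; prefer it
+//     }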
+
+// SameLineage returns true only if the state given in argument belongs
+// to the same "lineage" of states as the receiver.
+func (s *State) SameLineage(other *State) bool {
+       s.Lock()
+       defer s.Unlock()
+
+       // If one of the states has no lineage then it is assumed to predate
+       // this concept, and so we'll accept it as belonging to any lineage
+       // so that a lineage string can be assigned to newer versions
+       // without breaking compatibility with older versions.
+       if s.Lineage == "" || other.Lineage == "" {
+               return true
+       }
+
+       return s.Lineage == other.Lineage
+}
+
+// DeepCopy performs a deep copy of the state structure and returns
+// a new structure.
+func (s *State) DeepCopy() *State {
+       copy, err := copystructure.Config{Lock: true}.Copy(s)
+       if err != nil {
+               panic(err)
+       }
+
+       return copy.(*State)
+}
+
+// IncrementSerialMaybe increments the serial number of this state
+// if it is different from the other state.
+func (s *State) IncrementSerialMaybe(other *State) {
+       if s == nil {
+               return
+       }
+       if other == nil {
+               return
+       }
+       s.Lock()
+       defer s.Unlock()
+
+       if s.Serial > other.Serial {
+               return
+       }
+       if other.TFVersion != s.TFVersion || !s.equal(other) {
+               if other.Serial > s.Serial {
+                       s.Serial = other.Serial
+               }
+
+               s.Serial++
+       }
+}
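+
+// For example (illustrative): with s.Serial == 3 and a differing other at
+// Serial == 5, s.Serial is first raised to 5 and then incremented to 6, so
+// the updated state always sorts newer than the state it diverged from.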
+
+// FromFutureTerraform checks if this state was written by a Terraform
+// version from the future.
+func (s *State) FromFutureTerraform() bool {
+       s.Lock()
+       defer s.Unlock()
+
+       // No TF version means it is certainly from the past
+       if s.TFVersion == "" {
+               return false
+       }
+
+       v := version.Must(version.NewVersion(s.TFVersion))
+       return SemVersion.LessThan(v)
+}
+
+func (s *State) Init() {
+       s.Lock()
+       defer s.Unlock()
+       s.init()
+}
+
+func (s *State) init() {
+       if s.Version == 0 {
+               s.Version = StateVersion
+       }
+       if s.moduleByPath(rootModulePath) == nil {
+               s.addModule(rootModulePath)
+       }
+       s.ensureHasLineage()
+
+       for _, mod := range s.Modules {
+               if mod != nil {
+                       mod.init()
+               }
+       }
+
+       if s.Remote != nil {
+               s.Remote.init()
+       }
+}
+
+func (s *State) EnsureHasLineage() {
+       s.Lock()
+       defer s.Unlock()
+
+       s.ensureHasLineage()
+}
+
+func (s *State) ensureHasLineage() {
+       if s.Lineage == "" {
+               s.Lineage = uuid.NewV4().String()
+               log.Printf("[DEBUG] New state was assigned lineage %q\n", s.Lineage)
+       } else {
+               log.Printf("[TRACE] Preserving existing state lineage %q\n", s.Lineage)
+       }
+}
+
+// AddModuleState inserts this module state and overrides any existing ModuleState
+func (s *State) AddModuleState(mod *ModuleState) {
+       mod.init()
+       s.Lock()
+       defer s.Unlock()
+
+       s.addModuleState(mod)
+}
+
+func (s *State) addModuleState(mod *ModuleState) {
+       for i, m := range s.Modules {
+               if reflect.DeepEqual(m.Path, mod.Path) {
+                       s.Modules[i] = mod
+                       return
+               }
+       }
+
+       s.Modules = append(s.Modules, mod)
+       s.sort()
+}
+
+// prune is used to remove any resources that are no longer required
+func (s *State) prune() {
+       if s == nil {
+               return
+       }
+
+       // Filter out empty modules.
+       // A module is always assumed to have a path, and its length isn't always
+       // bounds checked later on. Modules may be "emptied" during destroy, but we
+       // never want to store those in the state.
+       for i := 0; i < len(s.Modules); i++ {
+               if s.Modules[i] == nil || len(s.Modules[i].Path) == 0 {
+                       s.Modules = append(s.Modules[:i], s.Modules[i+1:]...)
+                       i--
+               }
+       }
+
+       for _, mod := range s.Modules {
+               mod.prune()
+       }
+       if s.Remote != nil && s.Remote.Empty() {
+               s.Remote = nil
+       }
+}
+
+// sort sorts the modules
+func (s *State) sort() {
+       sort.Sort(moduleStateSort(s.Modules))
+
+       // Allow modules to be sorted
+       for _, m := range s.Modules {
+               if m != nil {
+                       m.sort()
+               }
+       }
+}
+
+func (s *State) String() string {
+       if s == nil {
+               return "<nil>"
+       }
+       s.Lock()
+       defer s.Unlock()
+
+       var buf bytes.Buffer
+       for _, m := range s.Modules {
+               mStr := m.String()
+
+               // If we're the root module, we just write the output directly.
+               if reflect.DeepEqual(m.Path, rootModulePath) {
+                       buf.WriteString(mStr + "\n")
+                       continue
+               }
+
+               buf.WriteString(fmt.Sprintf("module.%s:\n", strings.Join(m.Path[1:], ".")))
+
+               scanner := bufio.NewScanner(strings.NewReader(mStr))
+               for scanner.Scan() {
+                       text := scanner.Text()
+                       if text != "" {
+                               text = "  " + text
+                       }
+
+                       buf.WriteString(fmt.Sprintf("%s\n", text))
+               }
+       }
+
+       return strings.TrimSpace(buf.String())
+}
+
+// BackendState stores the configuration to connect to a remote backend.
+type BackendState struct {
+       Type   string                 `json:"type"`   // Backend type
+       Config map[string]interface{} `json:"config"` // Backend raw config
+
+       // Hash is the hash code to uniquely identify the original source
+       // configuration. We use this to detect when there is a change in
+       // configuration even when "type" isn't changed.
+       Hash uint64 `json:"hash"`
+}
+
+// Empty returns true if BackendState has no state.
+func (s *BackendState) Empty() bool {
+       return s == nil || s.Type == ""
+}
+
+// Rehash returns a unique content hash for this backend's configuration
+// as a uint64 value.
+// The Hash stored in the backend state needs to match the config itself, but
+// we need to compare the backend config after it has been combined with all
+// options.
+// This function must match the implementation used by config.Backend.
+func (s *BackendState) Rehash() uint64 {
+       if s == nil {
+               return 0
+       }
+
+       cfg := config.Backend{
+               Type: s.Type,
+               RawConfig: &config.RawConfig{
+                       Raw: s.Config,
+               },
+       }
+
+       return cfg.Rehash()
+}
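+
+// exampleBackendChanged is an editor's illustrative sketch (not part of the
+// upstream source): comparing the stored Hash against a freshly computed
+// Rehash detects a backend configuration change even when "type" is the same.
+func exampleBackendChanged(bs *BackendState) bool {
+       // A mismatch means the raw config no longer matches what was in
+       // effect when the state was written.
+       return bs.Rehash() != bs.Hash
+}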
+
+// RemoteState is used to track the information about a remote
+// state store that we push/pull state to.
+type RemoteState struct {
+       // Type controls the client we use for the remote state
+       Type string `json:"type"`
+
+       // Config is used to store arbitrary configuration that
+       // is type specific
+       Config map[string]string `json:"config"`
+
+       mu sync.Mutex
+}
+
+func (s *RemoteState) Lock()   { s.mu.Lock() }
+func (s *RemoteState) Unlock() { s.mu.Unlock() }
+
+func (r *RemoteState) init() {
+       r.Lock()
+       defer r.Unlock()
+
+       if r.Config == nil {
+               r.Config = make(map[string]string)
+       }
+}
+
+func (r *RemoteState) deepcopy() *RemoteState {
+       r.Lock()
+       defer r.Unlock()
+
+       confCopy := make(map[string]string, len(r.Config))
+       for k, v := range r.Config {
+               confCopy[k] = v
+       }
+       return &RemoteState{
+               Type:   r.Type,
+               Config: confCopy,
+       }
+}
+
+func (r *RemoteState) Empty() bool {
+       if r == nil {
+               return true
+       }
+       r.Lock()
+       defer r.Unlock()
+
+       return r.Type == ""
+}
+
+func (r *RemoteState) Equals(other *RemoteState) bool {
+       r.Lock()
+       defer r.Unlock()
+
+       if r.Type != other.Type {
+               return false
+       }
+       if len(r.Config) != len(other.Config) {
+               return false
+       }
+       for k, v := range r.Config {
+               if other.Config[k] != v {
+                       return false
+               }
+       }
+       return true
+}
+
+// OutputState is used to track the state relevant to a single output.
+type OutputState struct {
+       // Sensitive describes whether the output is considered sensitive,
+       // which may lead to masking the value on screen in some cases.
+       Sensitive bool `json:"sensitive"`
+       // Type describes the structure of Value. Valid values are "string",
+       // "map" and "list"
+       Type string `json:"type"`
+       // Value contains the value of the output, in the structure described
+       // by the Type field.
+       Value interface{} `json:"value"`
+
+       mu sync.Mutex
+}
+
+func (s *OutputState) Lock()   { s.mu.Lock() }
+func (s *OutputState) Unlock() { s.mu.Unlock() }
+
+func (s *OutputState) String() string {
+       return fmt.Sprintf("%#v", s.Value)
+}
+
+// Equal compares two OutputState structures for equality. nil values are
+// considered equal.
+func (s *OutputState) Equal(other *OutputState) bool {
+       if s == nil && other == nil {
+               return true
+       }
+
+       if s == nil || other == nil {
+               return false
+       }
+       s.Lock()
+       defer s.Unlock()
+
+       if s.Type != other.Type {
+               return false
+       }
+
+       if s.Sensitive != other.Sensitive {
+               return false
+       }
+
+       if !reflect.DeepEqual(s.Value, other.Value) {
+               return false
+       }
+
+       return true
+}
+
+func (s *OutputState) deepcopy() *OutputState {
+       if s == nil {
+               return nil
+       }
+
+       stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
+       if err != nil {
+               panic(fmt.Errorf("Error copying output value: %s", err))
+       }
+
+       return stateCopy.(*OutputState)
+}
+
+// ModuleState is used to track all the state relevant to a single
+// module. Previous to Terraform 0.3, all state belonged to the "root"
+// module.
+type ModuleState struct {
+       // Path is the import path from the root module. Module imports are
+       // always disjoint, so the path represents a module tree.
+       Path []string `json:"path"`
+
+       // Outputs declared by the module and maintained for each module
+       // even though only the root module technically needs to be kept.
+       // This allows operators to inspect values at the boundaries.
+       Outputs map[string]*OutputState `json:"outputs"`
+
+       // Resources is a mapping of the logically named resource to
+       // the state of the resource. Each resource may actually have
+       // N instances underneath, although a user only needs to think
+       // about the 1:1 case.
+       Resources map[string]*ResourceState `json:"resources"`
+
+       // Dependencies are a list of things that this module relies on
+       // existing to remain intact. For example: a module may depend
+       // on a VPC ID given by an aws_vpc resource.
+       //
+       // Terraform uses this information to build valid destruction
+       // orders and to warn the user if they're destroying a module that
+       // another resource depends on.
+       //
+       // Things can be put into this list that may not be managed by
+       // Terraform. If Terraform doesn't find a matching ID in the
+       // overall state, then it assumes it isn't managed and doesn't
+       // worry about it.
+       Dependencies []string `json:"depends_on"`
+
+       mu sync.Mutex
+}
+
+func (s *ModuleState) Lock()   { s.mu.Lock() }
+func (s *ModuleState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether one module state is equal to another.
+func (m *ModuleState) Equal(other *ModuleState) bool {
+       m.Lock()
+       defer m.Unlock()
+
+       // Paths must be equal
+       if !reflect.DeepEqual(m.Path, other.Path) {
+               return false
+       }
+
+       // Outputs must be equal
+       if len(m.Outputs) != len(other.Outputs) {
+               return false
+       }
+       for k, v := range m.Outputs {
+               if !other.Outputs[k].Equal(v) {
+                       return false
+               }
+       }
+
+       // Dependencies must be equal. This sorts these in place but
+       // this shouldn't cause any problems.
+       sort.Strings(m.Dependencies)
+       sort.Strings(other.Dependencies)
+       if len(m.Dependencies) != len(other.Dependencies) {
+               return false
+       }
+       for i, d := range m.Dependencies {
+               if other.Dependencies[i] != d {
+                       return false
+               }
+       }
+
+       // Resources must be equal
+       if len(m.Resources) != len(other.Resources) {
+               return false
+       }
+       for k, r := range m.Resources {
+               otherR, ok := other.Resources[k]
+               if !ok {
+                       return false
+               }
+
+               if !r.Equal(otherR) {
+                       return false
+               }
+       }
+
+       return true
+}
+
+// IsRoot reports whether this module state is for the root module.
+func (m *ModuleState) IsRoot() bool {
+       m.Lock()
+       defer m.Unlock()
+       return reflect.DeepEqual(m.Path, rootModulePath)
+}
+
+// IsDescendent returns true if other is a descendent of this module.
+func (m *ModuleState) IsDescendent(other *ModuleState) bool {
+       m.Lock()
+       defer m.Unlock()
+
+       i := len(m.Path)
+       return len(other.Path) > i && reflect.DeepEqual(other.Path[:i], m.Path)
+}
+
+// Orphans returns a list of keys of resources that are in the State
+// but aren't present in the configuration itself. Hence, these keys
+// represent the state of resources that are orphans.
+func (m *ModuleState) Orphans(c *config.Config) []string {
+       m.Lock()
+       defer m.Unlock()
+
+       keys := make(map[string]struct{})
+       for k := range m.Resources {
+               keys[k] = struct{}{}
+       }
+
+       if c != nil {
+               for _, r := range c.Resources {
+                       delete(keys, r.Id())
+
+                       for k := range keys {
+                               if strings.HasPrefix(k, r.Id()+".") {
+                                       delete(keys, k)
+                               }
+                       }
+               }
+       }
+
+       result := make([]string, 0, len(keys))
+       for k := range keys {
+               result = append(result, k)
+       }
+
+       return result
+}
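+
+// exampleOrphans is an editor's illustrative sketch (not part of the upstream
+// source): with a nil configuration, every resource key is reported as an
+// orphan, since nothing in the (absent) config claims it. The resource name
+// is hypothetical.
+func exampleOrphans() {
+       m := &ModuleState{
+               Resources: map[string]*ResourceState{
+                       "aws_instance.web": {},
+               },
+       }
+       fmt.Println(m.Orphans(nil)) // [aws_instance.web]
+}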
+
+// View returns a view with the given resource prefix.
+func (m *ModuleState) View(id string) *ModuleState {
+       if m == nil {
+               return m
+       }
+
+       r := m.deepcopy()
+       for k := range r.Resources {
+               if id == k || strings.HasPrefix(k, id+".") {
+                       continue
+               }
+
+               delete(r.Resources, k)
+       }
+
+       return r
+}
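+
+// exampleView is an editor's illustrative sketch (not part of the upstream
+// source): View keeps the resource matching the prefix, including its
+// indexed instances, and drops everything else. The names are hypothetical.
+func exampleView() {
+       m := &ModuleState{
+               Resources: map[string]*ResourceState{
+                       "aws_instance.a":   {},
+                       "aws_instance.a.0": {},
+                       "aws_instance.b":   {},
+               },
+       }
+       v := m.View("aws_instance.a")
+       fmt.Println(len(v.Resources)) // 2: "aws_instance.a" and "aws_instance.a.0"
+}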
+
+func (m *ModuleState) init() {
+       m.Lock()
+       defer m.Unlock()
+
+       if m.Path == nil {
+               m.Path = []string{}
+       }
+       if m.Outputs == nil {
+               m.Outputs = make(map[string]*OutputState)
+       }
+       if m.Resources == nil {
+               m.Resources = make(map[string]*ResourceState)
+       }
+
+       if m.Dependencies == nil {
+               m.Dependencies = make([]string, 0)
+       }
+
+       for _, rs := range m.Resources {
+               rs.init()
+       }
+}
+
+func (m *ModuleState) deepcopy() *ModuleState {
+       if m == nil {
+               return nil
+       }
+
+       stateCopy, err := copystructure.Config{Lock: true}.Copy(m)
+       if err != nil {
+               panic(err)
+       }
+
+       return stateCopy.(*ModuleState)
+}
+
+// prune is used to remove any resources that are no longer required
+func (m *ModuleState) prune() {
+       m.Lock()
+       defer m.Unlock()
+
+       for k, v := range m.Resources {
+               if v == nil || ((v.Primary == nil || v.Primary.ID == "") && len(v.Deposed) == 0) {
+                       delete(m.Resources, k)
+                       continue
+               }
+
+               v.prune()
+       }
+
+       for k, v := range m.Outputs {
+               if v.Value == config.UnknownVariableValue {
+                       delete(m.Outputs, k)
+               }
+       }
+
+       m.Dependencies = uniqueStrings(m.Dependencies)
+}
+
+func (m *ModuleState) sort() {
+       for _, v := range m.Resources {
+               v.sort()
+       }
+}
+
+func (m *ModuleState) String() string {
+       m.Lock()
+       defer m.Unlock()
+
+       var buf bytes.Buffer
+
+       if len(m.Resources) == 0 {
+               buf.WriteString("<no state>")
+       }
+
+       names := make([]string, 0, len(m.Resources))
+       for name := range m.Resources {
+               names = append(names, name)
+       }
+
+       sort.Sort(resourceNameSort(names))
+
+       for _, k := range names {
+               rs := m.Resources[k]
+               var id string
+               if rs.Primary != nil {
+                       id = rs.Primary.ID
+               }
+               if id == "" {
+                       id = "<not created>"
+               }
+
+               taintStr := ""
+               if rs.Primary != nil && rs.Primary.Tainted {
+                       taintStr = " (tainted)"
+               }
+
+               deposedStr := ""
+               if len(rs.Deposed) > 0 {
+                       deposedStr = fmt.Sprintf(" (%d deposed)", len(rs.Deposed))
+               }
+
+               buf.WriteString(fmt.Sprintf("%s:%s%s\n", k, taintStr, deposedStr))
+               buf.WriteString(fmt.Sprintf("  ID = %s\n", id))
+               if rs.Provider != "" {
+                       buf.WriteString(fmt.Sprintf("  provider = %s\n", rs.Provider))
+               }
+
+               var attributes map[string]string
+               if rs.Primary != nil {
+                       attributes = rs.Primary.Attributes
+               }
+               attrKeys := make([]string, 0, len(attributes))
+               for ak := range attributes {
+                       if ak == "id" {
+                               continue
+                       }
+
+                       attrKeys = append(attrKeys, ak)
+               }
+
+               sort.Strings(attrKeys)
+
+               for _, ak := range attrKeys {
+                       av := attributes[ak]
+                       buf.WriteString(fmt.Sprintf("  %s = %s\n", ak, av))
+               }
+
+               for idx, t := range rs.Deposed {
+                       taintStr := ""
+                       if t.Tainted {
+                               taintStr = " (tainted)"
+                       }
+                       buf.WriteString(fmt.Sprintf("  Deposed ID %d = %s%s\n", idx+1, t.ID, taintStr))
+               }
+
+               if len(rs.Dependencies) > 0 {
+                       buf.WriteString("\n  Dependencies:\n")
+                       for _, dep := range rs.Dependencies {
+                               buf.WriteString(fmt.Sprintf("    %s\n", dep))
+                       }
+               }
+       }
+
+       if len(m.Outputs) > 0 {
+               buf.WriteString("\nOutputs:\n\n")
+
+               ks := make([]string, 0, len(m.Outputs))
+               for k := range m.Outputs {
+                       ks = append(ks, k)
+               }
+
+               sort.Strings(ks)
+
+               for _, k := range ks {
+                       v := m.Outputs[k]
+                       switch vTyped := v.Value.(type) {
+                       case string:
+                               buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+                       case []interface{}:
+                               buf.WriteString(fmt.Sprintf("%s = %s\n", k, vTyped))
+                       case map[string]interface{}:
+                               var mapKeys []string
+                               for key := range vTyped {
+                                       mapKeys = append(mapKeys, key)
+                               }
+                               sort.Strings(mapKeys)
+
+                               var mapBuf bytes.Buffer
+                               mapBuf.WriteString("{")
+                               for _, key := range mapKeys {
+                                       mapBuf.WriteString(fmt.Sprintf("%s:%s ", key, vTyped[key]))
+                               }
+                               mapBuf.WriteString("}")
+
+                               buf.WriteString(fmt.Sprintf("%s = %s\n", k, mapBuf.String()))
+                       }
+               }
+       }
+
+       return buf.String()
+}
+
+// ResourceStateKey is a structured representation of the key used for the
+// ModuleState.Resources mapping
+type ResourceStateKey struct {
+       Name  string
+       Type  string
+       Mode  config.ResourceMode
+       Index int
+}
+
+// Equal determines whether two ResourceStateKeys are the same
+func (rsk *ResourceStateKey) Equal(other *ResourceStateKey) bool {
+       if rsk == nil || other == nil {
+               return false
+       }
+       if rsk.Mode != other.Mode {
+               return false
+       }
+       if rsk.Type != other.Type {
+               return false
+       }
+       if rsk.Name != other.Name {
+               return false
+       }
+       if rsk.Index != other.Index {
+               return false
+       }
+       return true
+}
+
+func (rsk *ResourceStateKey) String() string {
+       if rsk == nil {
+               return ""
+       }
+       var prefix string
+       switch rsk.Mode {
+       case config.ManagedResourceMode:
+               prefix = ""
+       case config.DataResourceMode:
+               prefix = "data."
+       default:
+               panic(fmt.Errorf("unknown resource mode %s", rsk.Mode))
+       }
+       if rsk.Index == -1 {
+               return fmt.Sprintf("%s%s.%s", prefix, rsk.Type, rsk.Name)
+       }
+       return fmt.Sprintf("%s%s.%s.%d", prefix, rsk.Type, rsk.Name, rsk.Index)
+}
+
+// ParseResourceStateKey accepts a key in the format used by
+// ModuleState.Resources and returns a resource name and resource index. In the
+// state, a resource has the format "type.name.index" or "type.name". In the
+// latter case, the index is returned as -1.
+func ParseResourceStateKey(k string) (*ResourceStateKey, error) {
+       parts := strings.Split(k, ".")
+       mode := config.ManagedResourceMode
+       if len(parts) > 0 && parts[0] == "data" {
+               mode = config.DataResourceMode
+               // Don't need the constant "data" prefix for parsing
+               // now that we've figured out the mode.
+               parts = parts[1:]
+       }
+       if len(parts) < 2 || len(parts) > 3 {
+               return nil, fmt.Errorf("Malformed resource state key: %s", k)
+       }
+       rsk := &ResourceStateKey{
+               Mode:  mode,
+               Type:  parts[0],
+               Name:  parts[1],
+               Index: -1,
+       }
+       if len(parts) == 3 {
+               index, err := strconv.Atoi(parts[2])
+               if err != nil {
+                       return nil, fmt.Errorf("Malformed resource state key index: %s", k)
+               }
+               rsk.Index = index
+       }
+       return rsk, nil
+}
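+
+// exampleResourceStateKey is an editor's illustrative sketch (not part of the
+// upstream source): parsing a data-source key with a count index and printing
+// it back out via String. The key is hypothetical.
+func exampleResourceStateKey() {
+       rsk, err := ParseResourceStateKey("data.aws_ami.ubuntu.2")
+       if err != nil {
+               panic(err)
+       }
+       // rsk.Mode == config.DataResourceMode, rsk.Type == "aws_ami",
+       // rsk.Name == "ubuntu", rsk.Index == 2
+       fmt.Println(rsk) // data.aws_ami.ubuntu.2
+}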
+
+// ResourceState holds the state of a resource that is used so that
+// a provider can find and manage an existing resource as well as for
+// storing attributes that are used to populate variables of child
+// resources.
+//
+// Attributes has attributes about the created resource that are
+// queryable in interpolation: "${type.id.attr}"
+//
+// Extra is just extra data that a provider can return that we store
+// for later, but is not exposed in any way to the user.
+//
+type ResourceState struct {
+       // This is filled in and managed by Terraform, and is the resource
+       // type itself such as "mycloud_instance". If a resource provider sets
+       // this value, it won't be persisted.
+       Type string `json:"type"`
+
+       // Dependencies are a list of things that this resource relies on
+       // existing to remain intact. For example: an AWS instance might
+       // depend on a subnet (which itself might depend on a VPC, and so
+       // on).
+       //
+       // Terraform uses this information to build valid destruction
+       // orders and to warn the user if they're destroying a resource that
+       // another resource depends on.
+       //
+       // Things can be put into this list that may not be managed by
+       // Terraform. If Terraform doesn't find a matching ID in the
+       // overall state, then it assumes it isn't managed and doesn't
+       // worry about it.
+       Dependencies []string `json:"depends_on"`
+
+       // Primary is the current active instance for this resource.
+       // It can be replaced but only after a successful creation.
+       // This is the instance on which providers will act.
+       Primary *InstanceState `json:"primary"`
+
+       // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+       // Primary is Deposed to get it out of the way for the replacement Primary to
+       // be created by Apply. If the replacement Primary creates successfully, the
+       // Deposed instance is cleaned up.
+       //
+       // If there were problems creating the replacement Primary, the Deposed
+       // instance and the (now tainted) replacement Primary will be swapped so the
+       // tainted replacement will be cleaned up instead.
+       //
+       // An instance will remain in the Deposed list until it is successfully
+       // destroyed and purged.
+       Deposed []*InstanceState `json:"deposed"`
+
+       // Provider is used when a resource is connected to a provider with an alias.
+       // If this string is empty, the resource is connected to the default provider,
+       // e.g. "aws_instance" goes with the "aws" provider.
+       // If the resource block contained a "provider" key, that value will be set here.
+       Provider string `json:"provider"`
+
+       mu sync.Mutex
+}
+
+func (s *ResourceState) Lock()   { s.mu.Lock() }
+func (s *ResourceState) Unlock() { s.mu.Unlock() }
+
+// Equal tests whether two ResourceStates are equal.
+func (s *ResourceState) Equal(other *ResourceState) bool {
+       s.Lock()
+       defer s.Unlock()
+
+       if s.Type != other.Type {
+               return false
+       }
+
+       if s.Provider != other.Provider {
+               return false
+       }
+
+       // Dependencies must be equal
+       sort.Strings(s.Dependencies)
+       sort.Strings(other.Dependencies)
+       if len(s.Dependencies) != len(other.Dependencies) {
+               return false
+       }
+       for i, d := range s.Dependencies {
+               if other.Dependencies[i] != d {
+                       return false
+               }
+       }
+
+       // States must be equal
+       if !s.Primary.Equal(other.Primary) {
+               return false
+       }
+
+       return true
+}
+
+// Taint marks a resource as tainted.
+func (s *ResourceState) Taint() {
+       s.Lock()
+       defer s.Unlock()
+
+       if s.Primary != nil {
+               s.Primary.Tainted = true
+       }
+}
+
+// Untaint unmarks a resource as tainted.
+func (s *ResourceState) Untaint() {
+       s.Lock()
+       defer s.Unlock()
+
+       if s.Primary != nil {
+               s.Primary.Tainted = false
+       }
+}
+
+func (s *ResourceState) init() {
+       s.Lock()
+       defer s.Unlock()
+
+       if s.Primary == nil {
+               s.Primary = &InstanceState{}
+       }
+       s.Primary.init()
+
+       if s.Dependencies == nil {
+               s.Dependencies = []string{}
+       }
+
+       if s.Deposed == nil {
+               s.Deposed = make([]*InstanceState, 0)
+       }
+}
+
+func (s *ResourceState) deepcopy() *ResourceState {
+       stateCopy, err := copystructure.Config{Lock: true}.Copy(s)
+       if err != nil {
+               panic(err)
+       }
+
+       return stateCopy.(*ResourceState)
+}
+
+// prune is used to remove any instances that are no longer required
+func (s *ResourceState) prune() {
+       s.Lock()
+       defer s.Unlock()
+
+       n := len(s.Deposed)
+       for i := 0; i < n; i++ {
+               inst := s.Deposed[i]
+               if inst == nil || inst.ID == "" {
+                       copy(s.Deposed[i:], s.Deposed[i+1:])
+                       s.Deposed[n-1] = nil
+                       n--
+                       i--
+               }
+       }
+       s.Deposed = s.Deposed[:n]
+
+       s.Dependencies = uniqueStrings(s.Dependencies)
+}
+
+func (s *ResourceState) sort() {
+       s.Lock()
+       defer s.Unlock()
+
+       sort.Strings(s.Dependencies)
+}
+
+func (s *ResourceState) String() string {
+       s.Lock()
+       defer s.Unlock()
+
+       var buf bytes.Buffer
+       buf.WriteString(fmt.Sprintf("Type = %s", s.Type))
+       return buf.String()
+}
+
+// InstanceState is used to track the unique state information belonging
+// to a given instance.
+type InstanceState struct {
+       // A unique ID for this resource. This is opaque to Terraform
+       // and is only meant as a lookup mechanism for the providers.
+       ID string `json:"id"`
+
+       // Attributes are basic information about the resource. Any keys here
+       // are accessible in variable format within Terraform configurations:
+       // ${resourcetype.name.attribute}.
+       Attributes map[string]string `json:"attributes"`
+
+       // Ephemeral is used to store any state associated with this instance
+       // that is necessary for the Terraform run to complete, but is not
+       // persisted to a state file.
+       Ephemeral EphemeralState `json:"-"`
+
+       // Meta is a simple K/V map that is persisted to the State but otherwise
+       // ignored by Terraform core. It's meant to be used for accounting by
+       // external client code. The value here must only contain Go primitives
+       // and collections.
+       Meta map[string]interface{} `json:"meta"`
+
+       // Tainted is used to mark a resource for recreation.
+       Tainted bool `json:"tainted"`
+
+       mu sync.Mutex
+}
+
+func (s *InstanceState) Lock()   { s.mu.Lock() }
+func (s *InstanceState) Unlock() { s.mu.Unlock() }
+
+func (s *InstanceState) init() {
+       s.Lock()
+       defer s.Unlock()
+
+       if s.Attributes == nil {
+               s.Attributes = make(map[string]string)
+       }
+       if s.Meta == nil {
+               s.Meta = make(map[string]interface{})
+       }
+       s.Ephemeral.init()
+}
+
+// Set copies all the fields from another InstanceState.
+func (s *InstanceState) Set(from *InstanceState) {
+       s.Lock()
+       defer s.Unlock()
+
+       from.Lock()
+       defer from.Unlock()
+
+       s.ID = from.ID
+       s.Attributes = from.Attributes
+       s.Ephemeral = from.Ephemeral
+       s.Meta = from.Meta
+       s.Tainted = from.Tainted
+}
+
+func (s *InstanceState) DeepCopy() *InstanceState {
+       instCopy, err := copystructure.Config{Lock: true}.Copy(s)
+       if err != nil {
+               panic(err)
+       }
+
+       return instCopy.(*InstanceState)
+}
+
+func (s *InstanceState) Empty() bool {
+       if s == nil {
+               return true
+       }
+       s.Lock()
+       defer s.Unlock()
+
+       return s.ID == ""
+}
+
+func (s *InstanceState) Equal(other *InstanceState) bool {
+       // Short circuit some nil checks
+       if s == nil || other == nil {
+               return s == other
+       }
+       s.Lock()
+       defer s.Unlock()
+
+       // IDs must be equal
+       if s.ID != other.ID {
+               return false
+       }
+
+       // Attributes must be equal
+       if len(s.Attributes) != len(other.Attributes) {
+               return false
+       }
+       for k, v := range s.Attributes {
+               otherV, ok := other.Attributes[k]
+               if !ok {
+                       return false
+               }
+
+               if v != otherV {
+                       return false
+               }
+       }
+
+       // Meta must be equal
+       if len(s.Meta) != len(other.Meta) {
+               return false
+       }
+       if s.Meta != nil && other.Meta != nil {
+               // We only do the deep check if both are non-nil. If one is nil
+               // we treat it as equal since their lengths are both zero (check
+               // above).
+               if !reflect.DeepEqual(s.Meta, other.Meta) {
+                       return false
+               }
+       }
+
+       if s.Tainted != other.Tainted {
+               return false
+       }
+
+       return true
+}
+
+// MergeDiff takes a ResourceDiff and merges the attributes into
+// this resource state in order to generate a new state. This new
+// state can be used to provide updated attribute lookups for
+// variable interpolation.
+//
+// If the diff attribute requires computing the value, and hence
+// won't be available until apply, the value is replaced with the
+// computeID.
+func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState {
+       result := s.DeepCopy()
+       if result == nil {
+               result = new(InstanceState)
+       }
+       result.init()
+
+       if s != nil {
+               s.Lock()
+               defer s.Unlock()
+               for k, v := range s.Attributes {
+                       result.Attributes[k] = v
+               }
+       }
+       if d != nil {
+               for k, diff := range d.CopyAttributes() {
+                       if diff.NewRemoved {
+                               delete(result.Attributes, k)
+                               continue
+                       }
+                       if diff.NewComputed {
+                               result.Attributes[k] = config.UnknownVariableValue
+                               continue
+                       }
+
+                       result.Attributes[k] = diff.New
+               }
+       }
+
+       return result
+}
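+
+// exampleMergeDiff is an editor's illustrative sketch (not part of the
+// upstream source). It assumes the InstanceDiff and ResourceAttrDiff types
+// from this package's diff.go: a computed attribute merges in as the unknown
+// placeholder, while an ordinary change merges in as its new value.
+func exampleMergeDiff() {
+       s := &InstanceState{ID: "i-abc123", Attributes: map[string]string{"ami": "ami-1"}}
+       d := &InstanceDiff{Attributes: map[string]*ResourceAttrDiff{
+               "ami":        {Old: "ami-1", New: "ami-2"},
+               "private_ip": {NewComputed: true},
+       }}
+
+       merged := s.MergeDiff(d)
+       fmt.Println(merged.Attributes["ami"]) // ami-2
+       fmt.Println(merged.Attributes["private_ip"] == config.UnknownVariableValue) // true
+}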
+
+func (s *InstanceState) String() string {
+       // Check for nil before locking: calling Lock on a nil receiver would panic.
+       if s == nil {
+               return "<not created>"
+       }
+       s.Lock()
+       defer s.Unlock()
+
+       var buf bytes.Buffer
+
+       if s.ID == "" {
+               return "<not created>"
+       }
+
+       buf.WriteString(fmt.Sprintf("ID = %s\n", s.ID))
+
+       attributes := s.Attributes
+       attrKeys := make([]string, 0, len(attributes))
+       for ak := range attributes {
+               if ak == "id" {
+                       continue
+               }
+
+               attrKeys = append(attrKeys, ak)
+       }
+       sort.Strings(attrKeys)
+
+       for _, ak := range attrKeys {
+               av := attributes[ak]
+               buf.WriteString(fmt.Sprintf("%s = %s\n", ak, av))
+       }
+
+       buf.WriteString(fmt.Sprintf("Tainted = %t\n", s.Tainted))
+
+       return buf.String()
+}
+
+// EphemeralState is used for transient state that is only kept in-memory
+type EphemeralState struct {
+       // ConnInfo is used for the providers to export information which is
+       // used to connect to the resource for provisioning. For example,
+       // this could contain SSH or WinRM credentials.
+       ConnInfo map[string]string `json:"-"`
+
+       // Type is used to specify the resource type for this instance. This is only
+       // required for import operations (as documented). If the documentation
+       // doesn't state that you need to set this, then don't worry about
+       // setting it.
+       Type string `json:"-"`
+}
+
+func (e *EphemeralState) init() {
+       if e.ConnInfo == nil {
+               e.ConnInfo = make(map[string]string)
+       }
+}
+
+func (e *EphemeralState) DeepCopy() *EphemeralState {
+       stateCopy, err := copystructure.Config{Lock: true}.Copy(e)
+       if err != nil {
+               panic(err)
+       }
+
+       return stateCopy.(*EphemeralState)
+}
+
+type jsonStateVersionIdentifier struct {
+       Version int `json:"version"`
+}
+
+// Check if this is a V0 format - the magic bytes at the start of the file
+// should be "tfstate" if so. We no longer support upgrading this type of
+// state but return an error message explaining to a user how they can
+// upgrade via the 0.6.x series.
+func testForV0State(buf *bufio.Reader) error {
+       start, err := buf.Peek(len("tfstate"))
+       if err != nil {
+               return fmt.Errorf("Failed to check for magic bytes: %v", err)
+       }
+       if string(start) == "tfstate" {
+               return fmt.Errorf("Terraform 0.7 no longer supports upgrading the binary state\n" +
+                       "format which was used prior to Terraform 0.3. Please upgrade\n" +
+                       "this state file using Terraform 0.6.16 prior to using it with\n" +
+                       "Terraform 0.7.")
+       }
+
+       return nil
+}
+
+// ErrNoState is returned by ReadState when the io.Reader contains no data
+var ErrNoState = errors.New("no state")
+
+// ReadState reads a state structure out of a reader in the format that
+// was written by WriteState.
+func ReadState(src io.Reader) (*State, error) {
+       buf := bufio.NewReader(src)
+       if _, err := buf.Peek(1); err != nil {
+               // the error is either io.EOF or "invalid argument", and both are from
+               // an empty state.
+               return nil, ErrNoState
+       }
+
+       if err := testForV0State(buf); err != nil {
+               return nil, err
+       }
+
+       // The remaining formats are all JSON, so buffer the whole thing in
+       // memory so we can read it twice. This is suboptimal, but will work for now.
+       jsonBytes, err := ioutil.ReadAll(buf)
+       if err != nil {
+               return nil, fmt.Errorf("Reading state file failed: %v", err)
+       }
+
+       versionIdentifier := &jsonStateVersionIdentifier{}
+       if err := json.Unmarshal(jsonBytes, versionIdentifier); err != nil {
+               return nil, fmt.Errorf("Decoding state file version failed: %v", err)
+       }
+
+       var result *State
+       switch versionIdentifier.Version {
+       case 0:
+               return nil, fmt.Errorf("State version 0 is not supported as JSON.")
+       case 1:
+               v1State, err := ReadStateV1(jsonBytes)
+               if err != nil {
+                       return nil, err
+               }
+
+               v2State, err := upgradeStateV1ToV2(v1State)
+               if err != nil {
+                       return nil, err
+               }
+
+               v3State, err := upgradeStateV2ToV3(v2State)
+               if err != nil {
+                       return nil, err
+               }
+
+               // increment the Serial whenever we upgrade state
+               v3State.Serial++
+               result = v3State
+       case 2:
+               v2State, err := ReadStateV2(jsonBytes)
+               if err != nil {
+                       return nil, err
+               }
+               v3State, err := upgradeStateV2ToV3(v2State)
+               if err != nil {
+                       return nil, err
+               }
+
+               v3State.Serial++
+               result = v3State
+       case 3:
+               v3State, err := ReadStateV3(jsonBytes)
+               if err != nil {
+                       return nil, err
+               }
+
+               result = v3State
+       default:
+               return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+                       SemVersion.String(), versionIdentifier.Version)
+       }
+
+       // If we reached this point we must have a result set
+       if result == nil {
+               panic("resulting state in load not set, assertion failed")
+       }
+
+       // Prune the state when we read it. It's possible to write unpruned states
+       // or for a user to make a state unpruned (nil-ing a module state, for example).
+       result.prune()
+
+       // Validate the state file is valid
+       if err := result.Validate(); err != nil {
+               return nil, err
+       }
+
+       return result, nil
+}
+
+func ReadStateV1(jsonBytes []byte) (*stateV1, error) {
+       v1State := &stateV1{}
+       if err := json.Unmarshal(jsonBytes, v1State); err != nil {
+               return nil, fmt.Errorf("Decoding state file failed: %v", err)
+       }
+
+       if v1State.Version != 1 {
+               return nil, fmt.Errorf("Decoded state version did not match the decoder selection: "+
+                       "read %d, expected 1", v1State.Version)
+       }
+
+       return v1State, nil
+}
+
+func ReadStateV2(jsonBytes []byte) (*State, error) {
+       state := &State{}
+       if err := json.Unmarshal(jsonBytes, state); err != nil {
+               return nil, fmt.Errorf("Decoding state file failed: %v", err)
+       }
+
+       // Check the version; this ensures we don't read a future
+       // version that we don't understand.
+       if state.Version > StateVersion {
+               return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+                       SemVersion.String(), state.Version)
+       }
+
+       // Make sure the version is semantic
+       if state.TFVersion != "" {
+               if _, err := version.NewVersion(state.TFVersion); err != nil {
+                       return nil, fmt.Errorf(
+                               "State contains invalid version: %s\n\n"+
+                                       "Terraform validates the version format prior to writing it. This\n"+
+                                       "means that this is invalid of the state becoming corrupted through\n"+
+                                       "some external means. Please manually modify the Terraform version\n"+
+                                       "field to be a proper semantic version.",
+                               state.TFVersion)
+               }
+       }
+
+       // catch any uninitialized fields in the state
+       state.init()
+
+       // Sort it
+       state.sort()
+
+       return state, nil
+}
+
+func ReadStateV3(jsonBytes []byte) (*State, error) {
+       state := &State{}
+       if err := json.Unmarshal(jsonBytes, state); err != nil {
+               return nil, fmt.Errorf("Decoding state file failed: %v", err)
+       }
+
+       // Check the version; this ensures we don't read a future
+       // version that we don't understand.
+       if state.Version > StateVersion {
+               return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.",
+                       SemVersion.String(), state.Version)
+       }
+
+       // Make sure the version is semantic
+       if state.TFVersion != "" {
+               if _, err := version.NewVersion(state.TFVersion); err != nil {
+                       return nil, fmt.Errorf(
+                               "State contains invalid version: %s\n\n"+
+                                       "Terraform validates the version format prior to writing it. This\n"+
+                                       "means that this is invalid of the state becoming corrupted through\n"+
+                                       "some external means. Please manually modify the Terraform version\n"+
+                                       "field to be a proper semantic version.",
+                               state.TFVersion)
+               }
+       }
+
+       // catch any uninitialized fields in the state
+       state.init()
+
+       // Sort it
+       state.sort()
+
+       // Now we write the state back out to detect any changes in normalization.
+       // If our state is now written out differently, bump the serial number to
+       // prevent conflicts.
+       var buf bytes.Buffer
+       err := WriteState(state, &buf)
+       if err != nil {
+               return nil, err
+       }
+
+       if !bytes.Equal(jsonBytes, buf.Bytes()) {
+               log.Println("[INFO] state modified during read or write. incrementing serial number")
+               state.Serial++
+       }
+
+       return state, nil
+}
+
+// WriteState writes a state to the given writer in the standard JSON
+// state format.
+func WriteState(d *State, dst io.Writer) error {
+       // writing a nil state is a noop.
+       if d == nil {
+               return nil
+       }
+
+       // make sure we have no uninitialized fields
+       d.init()
+
+       // Make sure it is sorted
+       d.sort()
+
+       // Ensure the version is set
+       d.Version = StateVersion
+
+       // If the TFVersion is set, verify it. We used to just set the version
+       // here, but this isn't safe since it changes the MD5 sum on some remote
+       // state storage backends such as Atlas. We now leave it unchanged if set.
+       if d.TFVersion != "" {
+               if _, err := version.NewVersion(d.TFVersion); err != nil {
+                       return fmt.Errorf(
+                               "Error writing state, invalid version: %s\n\n"+
+                                       "The Terraform version when writing the state must be a semantic\n"+
+                                       "version.",
+                               d.TFVersion)
+               }
+       }
+
+       // Encode the data in a human-friendly way
+       data, err := json.MarshalIndent(d, "", "    ")
+       if err != nil {
+               return fmt.Errorf("Failed to encode state: %s", err)
+       }
+
+       // We append a newline to the data because MarshalIndent doesn't add one.
+       data = append(data, '\n')
+
+       // Write the data out to the dst
+       if _, err := io.Copy(dst, bytes.NewReader(data)); err != nil {
+               return fmt.Errorf("Failed to write state: %v", err)
+       }
+
+       return nil
+}
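+
+// exampleRoundTrip is an editor's illustrative sketch (not part of the
+// upstream source): an initialized state survives a WriteState/ReadState
+// round trip, preserving its lineage (ReadState may bump the serial while
+// normalizing).
+func exampleRoundTrip() {
+       orig := &State{}
+       orig.Init()
+
+       var buf bytes.Buffer
+       if err := WriteState(orig, &buf); err != nil {
+               panic(err)
+       }
+
+       restored, err := ReadState(&buf)
+       if err != nil {
+               panic(err)
+       }
+       fmt.Println(restored.Lineage == orig.Lineage) // true
+}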
+
+// resourceNameSort implements sort.Interface to sort resource names, comparing
+// name parts lexically for strings and numerically for integer indexes.
+type resourceNameSort []string
+
+func (r resourceNameSort) Len() int      { return len(r) }
+func (r resourceNameSort) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
+
+func (r resourceNameSort) Less(i, j int) bool {
+       iParts := strings.Split(r[i], ".")
+       jParts := strings.Split(r[j], ".")
+
+       end := len(iParts)
+       if len(jParts) < end {
+               end = len(jParts)
+       }
+
+       for idx := 0; idx < end; idx++ {
+               if iParts[idx] == jParts[idx] {
+                       continue
+               }
+
+               // sort on the first non-matching part
+               iInt, iIntErr := strconv.Atoi(iParts[idx])
+               jInt, jIntErr := strconv.Atoi(jParts[idx])
+
+               switch {
+               case iIntErr == nil && jIntErr == nil:
+                       // sort numerically if both parts are integers
+                       return iInt < jInt
+               case iIntErr == nil:
+                       // numbers sort before strings
+                       return true
+               case jIntErr == nil:
+                       return false
+               default:
+                       return iParts[idx] < jParts[idx]
+               }
+       }
+
+       return r[i] < r[j]
+}
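+
+// exampleResourceNameSort is an editor's illustrative sketch (not part of the
+// upstream source): numeric index parts compare numerically, so index 2
+// sorts before index 10, unlike a plain lexical sort of the full strings.
+func exampleResourceNameSort() {
+       names := []string{"aws_instance.foo.10", "aws_instance.foo.2"}
+       sort.Sort(resourceNameSort(names))
+       fmt.Println(names) // [aws_instance.foo.2 aws_instance.foo.10]
+}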
+
+// moduleStateSort implements sort.Interface to sort module states
+type moduleStateSort []*ModuleState
+
+func (s moduleStateSort) Len() int {
+       return len(s)
+}
+
+func (s moduleStateSort) Less(i, j int) bool {
+       a := s[i]
+       b := s[j]
+
+       // If either is nil, then the nil one sorts first ("less" than the other)
+       if a == nil || b == nil {
+               return a == nil
+       }
+
+       // If the lengths are different, then the shorter one always wins
+       if len(a.Path) != len(b.Path) {
+               return len(a.Path) < len(b.Path)
+       }
+
+       // Otherwise, compare lexically
+       return strings.Join(a.Path, ".") < strings.Join(b.Path, ".")
+}
+
+func (s moduleStateSort) Swap(i, j int) {
+       s[i], s[j] = s[j], s[i]
+}
+
+const stateValidateErrMultiModule = `
+Multiple modules with the same path: %s
+
+This means that there are multiple entries in the "modules" field
+in your state file that point to the same module. This will cause Terraform
+to behave in unexpected and error-prone ways and is invalid. Please back up
+and modify your state file manually to resolve this.
+`
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_add.go b/vendor/github.com/hashicorp/terraform/terraform/state_add.go
new file mode 100644 (file)
index 0000000..1163730
--- /dev/null
@@ -0,0 +1,374 @@
+package terraform
+
+import "fmt"
+
+// Add adds the item in the state at the given address.
+//
+// The item can be a ModuleState, ResourceState, or InstanceState. Depending
+// on the item type, the address may or may not be valid. For example, a
+// module cannot be moved to a resource address; however, a resource can be
+// moved to a module address (it retains the same name, under that module).
+//
+// The item can also be a []*ModuleState, which is the case for nested
+// modules. In this case, Add will expect the zero-index to be the top-most
+// module to add and will only nest children from there. For semantics, this
+// is equivalent to module => module.
+//
+// The full semantics of Add:
+//
+//                      ┌───────────────────┬───────────────────┬───────────────────┐
+//                      │  Module Address   │ Resource Address  │ Instance Address  │
+//    ┌─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+//    │   ModuleState   │         ✓         │         x         │         x         │
+//    ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+//    │  ResourceState  │         ✓         │         ✓         │      maybe*       │
+//    ├─────────────────┼───────────────────┼───────────────────┼───────────────────┤
+//    │ Instance State  │         ✓         │         ✓         │         ✓         │
+//    └─────────────────┴───────────────────┴───────────────────┴───────────────────┘
+//
+// *maybe - Resources can be added at an instance address only if the resource
+//          represents a single instance (primary). Example:
+//          "aws_instance.foo" can be moved to "aws_instance.bar.tainted"
+//
+func (s *State) Add(fromAddrRaw string, toAddrRaw string, raw interface{}) error {
+       // Parse the to address
+       toAddr, err := ParseResourceAddress(toAddrRaw)
+       if err != nil {
+               return err
+       }
+
+       // Parse the from address
+       fromAddr, err := ParseResourceAddress(fromAddrRaw)
+       if err != nil {
+               return err
+       }
+
+       // Determine the types
+       from := detectValueAddLoc(raw)
+       to := detectAddrAddLoc(toAddr)
+
+       // Find the function to do this
+       fromMap, ok := stateAddFuncs[from]
+       if !ok {
+               return fmt.Errorf("invalid source to add to state: %T", raw)
+       }
+       f, ok := fromMap[to]
+       if !ok {
+               return fmt.Errorf("invalid destination: %s (%d)", toAddr, to)
+       }
+
+       // Call the migrator
+       if err := f(s, fromAddr, toAddr, raw); err != nil {
+               return err
+       }
+
+       // Prune the state
+       s.prune()
+       return nil
+}
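+
+// exampleStateAdd is an editor's illustrative sketch (not part of the
+// upstream source): moving a single resource under a (new) child module
+// address, the same mechanics "terraform state mv" relies on. The addresses
+// and resource are hypothetical.
+func exampleStateAdd() {
+       s := &State{}
+       s.Init()
+
+       rs := &ResourceState{
+               Type:    "aws_instance",
+               Primary: &InstanceState{ID: "i-abc123"},
+       }
+       if err := s.Add("aws_instance.web", "module.app.aws_instance.web", rs); err != nil {
+               panic(err)
+       }
+       // s now contains module.app with aws_instance.web inside it.
+}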
+
+func stateAddFunc_Module_Module(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+       // raw can be either *ModuleState or []*ModuleState. The former means
+       // we're moving just one module. The latter means we're moving a module
+       // and children.
+       root := raw
+       var rest []*ModuleState
+       if list, ok := raw.([]*ModuleState); ok {
+               // We need at least one item
+               if len(list) == 0 {
+                       return fmt.Errorf("module move with no value to: %s", addr)
+               }
+
+               // The first item is always the root
+               root = list[0]
+               if len(list) > 1 {
+                       rest = list[1:]
+               }
+       }
+
+       // Get the actual module state
+       src := root.(*ModuleState).deepcopy()
+
+       // If the target module exists, it is an error
+       path := append([]string{"root"}, addr.Path...)
+       if s.ModuleByPath(path) != nil {
+               return fmt.Errorf("module target is not empty: %s", addr)
+       }
+
+       // Create it and copy our outputs and dependencies
+       mod := s.AddModule(path)
+       mod.Outputs = src.Outputs
+       mod.Dependencies = src.Dependencies
+
+       // Go through the resources and perform an add for each of them
+       for k, v := range src.Resources {
+               resourceKey, err := ParseResourceStateKey(k)
+               if err != nil {
+                       return err
+               }
+
+               // Update the resource address for this
+               addrCopy := *addr
+               addrCopy.Type = resourceKey.Type
+               addrCopy.Name = resourceKey.Name
+               addrCopy.Index = resourceKey.Index
+               addrCopy.Mode = resourceKey.Mode
+
+               // Perform an add
+               if err := s.Add(fromAddr.String(), addrCopy.String(), v); err != nil {
+                       return err
+               }
+       }
+
+       // Add all the children if we have them
+       for _, item := range rest {
+               // If item isn't a descendent of our root, then ignore it
+               if !src.IsDescendent(item) {
+                       continue
+               }
+
+               // It is! Strip the leading prefix and attach that to our address
+               extra := item.Path[len(src.Path):]
+               addrCopy := addr.Copy()
+               addrCopy.Path = append(addrCopy.Path, extra...)
+
+               // Add it
+               s.Add(fromAddr.String(), addrCopy.String(), item)
+       }
+
+       return nil
+}
+
+func stateAddFunc_Resource_Module(
+       s *State, from, to *ResourceAddress, raw interface{}) error {
+       // Build the more specific to addr
+       addr := *to
+       addr.Type = from.Type
+       addr.Name = from.Name
+
+       return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Resource_Resource(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+       // raw can be either *ResourceState or []*ResourceState. The former means
+       // we're moving just one resource. The latter means we're moving a count
+       // of resources.
+       if list, ok := raw.([]*ResourceState); ok {
+               // We need at least one item
+               if len(list) == 0 {
+                       return fmt.Errorf("resource move with no value to: %s", addr)
+               }
+
+               // If there is an index, this is an error since we can't assign
+               // a set of resources to a single index
+               if addr.Index >= 0 && len(list) > 1 {
+                       return fmt.Errorf(
+                               "multiple resources can't be moved to a single index: "+
+                                       "%s => %s", fromAddr, addr)
+               }
+
+               // Add each with a specific index
+               for i, rs := range list {
+                       addrCopy := addr.Copy()
+                       addrCopy.Index = i
+
+                       if err := s.Add(fromAddr.String(), addrCopy.String(), rs); err != nil {
+                               return err
+                       }
+               }
+
+               return nil
+       }
+
+       src := raw.(*ResourceState).deepcopy()
+
+       // Initialize the resource
+       resourceRaw, exists := stateAddInitAddr(s, addr)
+       if exists {
+               return fmt.Errorf("resource exists and not empty: %s", addr)
+       }
+       resource := resourceRaw.(*ResourceState)
+       resource.Type = src.Type
+       resource.Dependencies = src.Dependencies
+       resource.Provider = src.Provider
+
+       // Move the primary
+       if src.Primary != nil {
+               addrCopy := *addr
+               addrCopy.InstanceType = TypePrimary
+               addrCopy.InstanceTypeSet = true
+               if err := s.Add(fromAddr.String(), addrCopy.String(), src.Primary); err != nil {
+                       return err
+               }
+       }
+
+       // Move all deposed
+       if len(src.Deposed) > 0 {
+               resource.Deposed = src.Deposed
+       }
+
+       return nil
+}
+
+func stateAddFunc_Instance_Instance(s *State, fromAddr, addr *ResourceAddress, raw interface{}) error {
+       src := raw.(*InstanceState).DeepCopy()
+
+       // Create the instance
+       instanceRaw, _ := stateAddInitAddr(s, addr)
+       instance := instanceRaw.(*InstanceState)
+
+       // Set it
+       instance.Set(src)
+
+       return nil
+}
+
+func stateAddFunc_Instance_Module(
+       s *State, from, to *ResourceAddress, raw interface{}) error {
+       addr := *to
+       addr.Type = from.Type
+       addr.Name = from.Name
+
+       return s.Add(from.String(), addr.String(), raw)
+}
+
+func stateAddFunc_Instance_Resource(
+       s *State, from, to *ResourceAddress, raw interface{}) error {
+       addr := *to
+       addr.InstanceType = TypePrimary
+       addr.InstanceTypeSet = true
+
+       return s.Add(from.String(), addr.String(), raw)
+}
+
+// stateAddFunc is the type of function for adding an item to a state
+type stateAddFunc func(s *State, from, to *ResourceAddress, item interface{}) error
+
+// stateAddFuncs has the full matrix mapping of the state adders.
+var stateAddFuncs map[stateAddLoc]map[stateAddLoc]stateAddFunc
+
+func init() {
+       stateAddFuncs = map[stateAddLoc]map[stateAddLoc]stateAddFunc{
+               stateAddModule: {
+                       stateAddModule: stateAddFunc_Module_Module,
+               },
+               stateAddResource: {
+                       stateAddModule:   stateAddFunc_Resource_Module,
+                       stateAddResource: stateAddFunc_Resource_Resource,
+               },
+               stateAddInstance: {
+                       stateAddInstance: stateAddFunc_Instance_Instance,
+                       stateAddModule:   stateAddFunc_Instance_Module,
+                       stateAddResource: stateAddFunc_Instance_Resource,
+               },
+       }
+}
+
+// stateAddLoc is an enum to represent the location where state is being
+// moved from/to. We use this for quick lookups in a function map.
+type stateAddLoc uint
+
+const (
+       stateAddInvalid stateAddLoc = iota
+       stateAddModule
+       stateAddResource
+       stateAddInstance
+)
+
+// detectAddrAddLoc detects the state type for the given address. This
+// function is specifically not unit tested since we consider the State.Add
+// tests comprehensive enough to cover it.
+func detectAddrAddLoc(addr *ResourceAddress) stateAddLoc {
+       if addr.Name == "" {
+               return stateAddModule
+       }
+
+       if !addr.InstanceTypeSet {
+               return stateAddResource
+       }
+
+       return stateAddInstance
+}
+
+// detectValueAddLoc determines the stateAddLoc value from the raw value
+// that is some State structure.
+func detectValueAddLoc(raw interface{}) stateAddLoc {
+       switch raw.(type) {
+       case *ModuleState:
+               return stateAddModule
+       case []*ModuleState:
+               return stateAddModule
+       case *ResourceState:
+               return stateAddResource
+       case []*ResourceState:
+               return stateAddResource
+       case *InstanceState:
+               return stateAddInstance
+       default:
+               return stateAddInvalid
+       }
+}
+
+// stateAddInitAddr takes a ResourceAddress and creates the non-existing
+// resources up to that point, returning the empty (or existing) interface
+// at that address.
+func stateAddInitAddr(s *State, addr *ResourceAddress) (interface{}, bool) {
+       addType := detectAddrAddLoc(addr)
+
+       // Get the module
+       path := append([]string{"root"}, addr.Path...)
+       exists := true
+       mod := s.ModuleByPath(path)
+       if mod == nil {
+               mod = s.AddModule(path)
+               exists = false
+       }
+       if addType == stateAddModule {
+               return mod, exists
+       }
+
+       // Add the resource
+       resourceKey := (&ResourceStateKey{
+               Name:  addr.Name,
+               Type:  addr.Type,
+               Index: addr.Index,
+               Mode:  addr.Mode,
+       }).String()
+       exists = true
+       resource, ok := mod.Resources[resourceKey]
+       if !ok {
+               resource = &ResourceState{Type: addr.Type}
+               resource.init()
+               mod.Resources[resourceKey] = resource
+               exists = false
+       }
+       if addType == stateAddResource {
+               return resource, exists
+       }
+
+       // Get the instance
+       exists = true
+       instance := &InstanceState{}
+       switch addr.InstanceType {
+       case TypePrimary, TypeTainted:
+               if v := resource.Primary; v != nil {
+                       instance = v
+               } else {
+                       exists = false
+               }
+       case TypeDeposed:
+               idx := addr.Index
+               if addr.Index < 0 {
+                       idx = 0
+               }
+               if len(resource.Deposed) > idx {
+                       instance = resource.Deposed[idx]
+               } else {
+                       resource.Deposed = append(resource.Deposed, instance)
+                       exists = false
+               }
+       }
+
+       return instance, exists
+}
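
For orientation, a minimal in-package sketch of how State.Add might consult this adder matrix. dispatchAdd is a hypothetical helper, and the real entry point performs more validation than shown here:

func dispatchAdd(s *State, fromAddr, toAddr *ResourceAddress, raw interface{}) error {
    from := detectValueAddLoc(raw) // location type of the value being added
    to := detectAddrAddLoc(toAddr) // location type of the target address

    fromMap, ok := stateAddFuncs[from]
    if !ok {
        return fmt.Errorf("cannot add %T to state", raw)
    }
    f, ok := fromMap[to]
    if !ok {
        return fmt.Errorf("cannot add %T at %s", raw, toAddr)
    }
    return f(s, fromAddr, toAddr, raw)
}
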
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_filter.go b/vendor/github.com/hashicorp/terraform/terraform/state_filter.go
new file mode 100644 (file)
index 0000000..2dcb11b
--- /dev/null
@@ -0,0 +1,267 @@
+package terraform
+
+import (
+       "fmt"
+       "sort"
+)
+
+// StateFilter is responsible for filtering and searching a state.
+//
+// This is a separate struct from State rather than a method on State
+// because StateFilter might create sidecar data structures to optimize
+// filtering on the state.
+//
+// If you change the State, the filter created is invalid and a new one
+// should be allocated. StateFilter will not watch State for changes
+// and do this for you. If you filter after changing the State, the
+// behavior is not defined.
+type StateFilter struct {
+       State *State
+}
+
+// Filter takes the addresses specified by fs and finds all the matches.
+// The values of fs are resource addressing syntax that can be parsed by
+// ParseResourceAddress.
+func (f *StateFilter) Filter(fs ...string) ([]*StateFilterResult, error) {
+       // Parse all the addresses
+       as := make([]*ResourceAddress, len(fs))
+       for i, v := range fs {
+               a, err := ParseResourceAddress(v)
+               if err != nil {
+                       return nil, fmt.Errorf("Error parsing address '%s': %s", v, err)
+               }
+
+               as[i] = a
+       }
+
+       // If we weren't given any filters, then we list all
+       if len(fs) == 0 {
+               as = append(as, &ResourceAddress{Index: -1})
+       }
+
+       // Filter each of the addresses. We keep track of this in a map to
+       // strip duplicates.
+       resultSet := make(map[string]*StateFilterResult)
+       for _, a := range as {
+               for _, r := range f.filterSingle(a) {
+                       resultSet[r.String()] = r
+               }
+       }
+
+       // Make the result list
+       results := make([]*StateFilterResult, 0, len(resultSet))
+       for _, v := range resultSet {
+               results = append(results, v)
+       }
+
+       // Sort them and return
+       sort.Sort(StateFilterResultSlice(results))
+       return results, nil
+}
+
+func (f *StateFilter) filterSingle(a *ResourceAddress) []*StateFilterResult {
+       // The slice to keep track of results
+       var results []*StateFilterResult
+
+       // Go through modules first.
+       modules := make([]*ModuleState, 0, len(f.State.Modules))
+       for _, m := range f.State.Modules {
+               if f.relevant(a, m) {
+                       modules = append(modules, m)
+
+                       // Only add the module to the results if we haven't specified a type.
+                       // We also ignore the root module.
+                       if a.Type == "" && len(m.Path) > 1 {
+                               results = append(results, &StateFilterResult{
+                                       Path:    m.Path[1:],
+                                       Address: (&ResourceAddress{Path: m.Path[1:]}).String(),
+                                       Value:   m,
+                               })
+                       }
+               }
+       }
+
+       // With the modules set, go through all the resources within
+       // the modules to find relevant resources.
+       for _, m := range modules {
+               for n, r := range m.Resources {
+                       // The name in the state contains valuable information. Parse.
+                       key, err := ParseResourceStateKey(n)
+                       if err != nil {
+                       // If we get an error parsing, just skip this
+                       // entry in the state.
+                               continue
+                       }
+
+                       // Older states and test fixtures often don't contain the
+                       // type directly on the ResourceState. We add this so StateFilter
+                       // is a bit more robust.
+                       if r.Type == "" {
+                               r.Type = key.Type
+                       }
+
+                       if f.relevant(a, r) {
+                               if a.Name != "" && a.Name != key.Name {
+                                       // Name doesn't match
+                                       continue
+                               }
+
+                               if a.Index >= 0 && key.Index != a.Index {
+                                       // Index doesn't match
+                                       continue
+                               }
+
+                               // Build the address for this resource
+                               addr := &ResourceAddress{
+                                       Path:  m.Path[1:],
+                                       Name:  key.Name,
+                                       Type:  key.Type,
+                                       Index: key.Index,
+                               }
+
+                               // Add the resource level result
+                               resourceResult := &StateFilterResult{
+                                       Path:    addr.Path,
+                                       Address: addr.String(),
+                                       Value:   r,
+                               }
+                               if !a.InstanceTypeSet {
+                                       results = append(results, resourceResult)
+                               }
+
+                               // Add the instances
+                               if r.Primary != nil {
+                                       addr.InstanceType = TypePrimary
+                                       // InstanceTypeSet stays false so the
+                                       // primary's address renders the same
+                                       // as the resource address.
+                                       addr.InstanceTypeSet = false
+                                       results = append(results, &StateFilterResult{
+                                               Path:    addr.Path,
+                                               Address: addr.String(),
+                                               Parent:  resourceResult,
+                                               Value:   r.Primary,
+                                       })
+                               }
+
+                               for _, instance := range r.Deposed {
+                                       if f.relevant(a, instance) {
+                                               addr.InstanceType = TypeDeposed
+                                               addr.InstanceTypeSet = true
+                                               results = append(results, &StateFilterResult{
+                                                       Path:    addr.Path,
+                                                       Address: addr.String(),
+                                                       Parent:  resourceResult,
+                                                       Value:   instance,
+                                               })
+                                       }
+                               }
+                       }
+               }
+       }
+
+       return results
+}
+
+// relevant checks for relevance of this address against the given value.
+func (f *StateFilter) relevant(addr *ResourceAddress, raw interface{}) bool {
+       switch v := raw.(type) {
+       case *ModuleState:
+               path := v.Path[1:]
+
+               if len(addr.Path) > len(path) {
+                       // Longer path in address means there is no way we match.
+                       return false
+               }
+
+               // Check for a prefix match
+               for i, p := range addr.Path {
+                       if path[i] != p {
+                               // Any mismatch means no match.
+                               return false
+                       }
+               }
+
+               return true
+       case *ResourceState:
+               if addr.Type == "" {
+                       // If we have no resource type, then we're interested in all!
+                       return true
+               }
+
+               // If the type doesn't match we fail immediately
+               if v.Type != addr.Type {
+                       return false
+               }
+
+               return true
+       default:
+               // If we don't know about it, let's just say no
+               return false
+       }
+}
+
+// StateFilterResult is a single result from a filter operation. Filter
+// can match multiple things within a state (module, resource, instance, etc.)
+// and this unifies that.
+type StateFilterResult struct {
+       // Module path of the result
+       Path []string
+
+       // Address is the address that can be used to reference this exact result.
+       Address string
+
+       // Parent, if non-nil, is a parent of this result. For instances, the
+       // parent would be a resource. For resources, the parent would be
+       // a module. For modules, this is currently nil.
+       Parent *StateFilterResult
+
+       // Value is the actual value. This must be type switched on. It can be
+       // any data structure that `State` can hold: `ModuleState`,
+       // `ResourceState`, or `InstanceState`.
+       Value interface{}
+}
+
+func (r *StateFilterResult) String() string {
+       return fmt.Sprintf("%T: %s", r.Value, r.Address)
+}
+
+func (r *StateFilterResult) sortedType() int {
+       switch r.Value.(type) {
+       case *ModuleState:
+               return 0
+       case *ResourceState:
+               return 1
+       case *InstanceState:
+               return 2
+       default:
+               return 50
+       }
+}
+
+// StateFilterResultSlice is a slice of results that implements
+// sort.Interface. The sorting goal is the ordering that reads best
+// in human-facing output.
+type StateFilterResultSlice []*StateFilterResult
+
+func (s StateFilterResultSlice) Len() int      { return len(s) }
+func (s StateFilterResultSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s StateFilterResultSlice) Less(i, j int) bool {
+       a, b := s[i], s[j]
+
+       // If these addresses contain an index, sort by index rather than name
+       addrA, errA := ParseResourceAddress(a.Address)
+       addrB, errB := ParseResourceAddress(b.Address)
+       if errA == nil && errB == nil && addrA.Name == addrB.Name && addrA.Index != addrB.Index {
+               return addrA.Index < addrB.Index
+       }
+
+       // If the addresses differ, sorting is simply lexicographic
+       if a.Address != b.Address {
+               return a.Address < b.Address
+       }
+
+       // Addresses are the same, so ordering depends on the value type
+       return a.sortedType() < b.sortedType()
+}
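
A brief in-package usage sketch of StateFilter; the address string and the wrapper function are illustrative:

func printMatches(s *State) error {
    filter := &StateFilter{State: s}
    results, err := filter.Filter("aws_instance.web")
    if err != nil {
        return err
    }
    for _, result := range results {
        // String() renders as "<type>: <address>", e.g.
        // "*terraform.ResourceState: aws_instance.web"
        fmt.Println(result)
    }
    return nil
}
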
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v1_to_v2.go
new file mode 100644 (file)
index 0000000..aa13cce
--- /dev/null
@@ -0,0 +1,189 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/mitchellh/copystructure"
+)
+
+// upgradeStateV1ToV2 is used to upgrade a V1 state representation
+// into a V2 state representation
+func upgradeStateV1ToV2(old *stateV1) (*State, error) {
+       if old == nil {
+               return nil, nil
+       }
+
+       remote, err := old.Remote.upgradeToV2()
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+       }
+
+       modules := make([]*ModuleState, len(old.Modules))
+       for i, module := range old.Modules {
+               upgraded, err := module.upgradeToV2()
+               if err != nil {
+                       return nil, fmt.Errorf("Error upgrading State V1: %v", err)
+               }
+               modules[i] = upgraded
+       }
+       if len(modules) == 0 {
+               modules = nil
+       }
+
+       newState := &State{
+               Version: 2,
+               Serial:  old.Serial,
+               Remote:  remote,
+               Modules: modules,
+       }
+
+       newState.sort()
+       newState.init()
+
+       return newState, nil
+}
+
+func (old *remoteStateV1) upgradeToV2() (*RemoteState, error) {
+       if old == nil {
+               return nil, nil
+       }
+
+       config, err := copystructure.Copy(old.Config)
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading RemoteState V1: %v", err)
+       }
+
+       return &RemoteState{
+               Type:   old.Type,
+               Config: config.(map[string]string),
+       }, nil
+}
+
+func (old *moduleStateV1) upgradeToV2() (*ModuleState, error) {
+       if old == nil {
+               return nil, nil
+       }
+
+       pathRaw, err := copystructure.Copy(old.Path)
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+       }
+       path, ok := pathRaw.([]string)
+       if !ok {
+               return nil, fmt.Errorf("Error upgrading ModuleState V1: path is not a list of strings")
+       }
+       if len(path) == 0 {
+               // We found some V1 states with a nil path. Assume root and catch
+               // duplicate path errors later (as part of Validate).
+               path = rootModulePath
+       }
+
+       // Outputs need upgrading to use the new structure
+       outputs := make(map[string]*OutputState)
+       for key, output := range old.Outputs {
+               outputs[key] = &OutputState{
+                       Type:      "string",
+                       Value:     output,
+                       Sensitive: false,
+               }
+       }
+
+       resources := make(map[string]*ResourceState)
+       for key, oldResource := range old.Resources {
+               upgraded, err := oldResource.upgradeToV2()
+               if err != nil {
+                       return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+               }
+               resources[key] = upgraded
+       }
+
+       dependencies, err := copystructure.Copy(old.Dependencies)
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading ModuleState V1: %v", err)
+       }
+
+       return &ModuleState{
+               Path:         path,
+               Outputs:      outputs,
+               Resources:    resources,
+               Dependencies: dependencies.([]string),
+       }, nil
+}
+
+func (old *resourceStateV1) upgradeToV2() (*ResourceState, error) {
+       if old == nil {
+               return nil, nil
+       }
+
+       dependencies, err := copystructure.Copy(old.Dependencies)
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
+       }
+
+       primary, err := old.Primary.upgradeToV2()
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
+       }
+
+       deposed := make([]*InstanceState, len(old.Deposed))
+       for i, v := range old.Deposed {
+               upgraded, err := v.upgradeToV2()
+               if err != nil {
+                       return nil, fmt.Errorf("Error upgrading ResourceState V1: %v", err)
+               }
+               deposed[i] = upgraded
+       }
+       if len(deposed) == 0 {
+               deposed = nil
+       }
+
+       return &ResourceState{
+               Type:         old.Type,
+               Dependencies: dependencies.([]string),
+               Primary:      primary,
+               Deposed:      deposed,
+               Provider:     old.Provider,
+       }, nil
+}
+
+func (old *instanceStateV1) upgradeToV2() (*InstanceState, error) {
+       if old == nil {
+               return nil, nil
+       }
+
+       attributes, err := copystructure.Copy(old.Attributes)
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
+       }
+       ephemeral, err := old.Ephemeral.upgradeToV2()
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
+       }
+
+       meta, err := copystructure.Copy(old.Meta)
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading InstanceState V1: %v", err)
+       }
+
+       newMeta := make(map[string]interface{})
+       for k, v := range meta.(map[string]string) {
+               newMeta[k] = v
+       }
+
+       return &InstanceState{
+               ID:         old.ID,
+               Attributes: attributes.(map[string]string),
+               Ephemeral:  *ephemeral,
+               Meta:       newMeta,
+       }, nil
+}
+
+func (old *ephemeralStateV1) upgradeToV2() (*EphemeralState, error) {
+       connInfo, err := copystructure.Copy(old.ConnInfo)
+       if err != nil {
+               return nil, fmt.Errorf("Error upgrading EphemeralState V1: %v", err)
+       }
+       return &EphemeralState{
+               ConnInfo: connInfo.(map[string]string),
+       }, nil
+}
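
The most visible change in this upgrade is that flat string outputs become OutputState values. An in-package sketch with illustrative values:

func exampleUpgradeModule() (*ModuleState, error) {
    old := &moduleStateV1{
        Path:    []string{"root"},
        Outputs: map[string]string{"address": "10.0.0.5"},
    }

    mod, err := old.upgradeToV2()
    if err != nil {
        return nil, err
    }

    // mod.Outputs["address"] is now an *OutputState:
    //   &OutputState{Type: "string", Value: "10.0.0.5", Sensitive: false}
    return mod, nil
}
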
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go b/vendor/github.com/hashicorp/terraform/terraform/state_upgrade_v2_to_v3.go
new file mode 100644 (file)
index 0000000..e52d35f
--- /dev/null
@@ -0,0 +1,142 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "regexp"
+       "sort"
+       "strconv"
+       "strings"
+)
+
+// The upgrade process from V2 to V3 state does not affect the structure,
+// so we do not need to redeclare all of the structs involved - we just
+// take a deep copy of the old structure and assert the version number is
+// as we expect.
+func upgradeStateV2ToV3(old *State) (*State, error) {
+       upgraded := old.DeepCopy()
+
+       // Ensure the copied version is v2 before attempting to upgrade
+       if upgraded.Version != 2 {
+               return nil, fmt.Errorf("Cannot apply v2->v3 state upgrade to " +
+                       "a state which is not version 2.")
+       }
+
+       // Set the new version number
+       upgraded.Version = 3
+
+       // Change the counts for things which look like maps to use the %
+       // syntax. Remove counts for empty collections - they will be added
+       // back in later.
+       for _, module := range upgraded.Modules {
+               for _, resource := range module.Resources {
+                       // Upgrade Primary
+                       if resource.Primary != nil {
+                               upgradeAttributesV2ToV3(resource.Primary)
+                       }
+
+                       // Upgrade Deposed (ranging over a nil slice is a no-op)
+                       for _, deposed := range resource.Deposed {
+                               upgradeAttributesV2ToV3(deposed)
+                       }
+               }
+       }
+
+       return upgraded, nil
+}
+
+func upgradeAttributesV2ToV3(instanceState *InstanceState) error {
+       collectionKeyRegexp := regexp.MustCompile(`^(.*\.)#$`)
+       collectionSubkeyRegexp := regexp.MustCompile(`^([^\.]+)\..*`)
+
+       // Identify the key prefix of anything which is a collection
+       var collectionKeyPrefixes []string
+       for key := range instanceState.Attributes {
+               if submatches := collectionKeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
+                       collectionKeyPrefixes = append(collectionKeyPrefixes, submatches[0][1])
+               }
+       }
+       sort.Strings(collectionKeyPrefixes)
+
+       log.Printf("[STATE UPGRADE] Detected the following collections in state: %v", collectionKeyPrefixes)
+
+       // This could be rolled into fewer loops, but it is somewhat clearer this way, and will not
+       // run very often.
+       for _, prefix := range collectionKeyPrefixes {
+               // First get the actual keys that belong to this prefix
+               var potentialKeysMatching []string
+               for key := range instanceState.Attributes {
+                       if strings.HasPrefix(key, prefix) {
+                               potentialKeysMatching = append(potentialKeysMatching, strings.TrimPrefix(key, prefix))
+                       }
+               }
+               sort.Strings(potentialKeysMatching)
+
+               var actualKeysMatching []string
+               for _, key := range potentialKeysMatching {
+                       if submatches := collectionSubkeyRegexp.FindAllStringSubmatch(key, -1); len(submatches) > 0 {
+                               actualKeysMatching = append(actualKeysMatching, submatches[0][1])
+                       } else {
+                               if key != "#" {
+                                       actualKeysMatching = append(actualKeysMatching, key)
+                               }
+                       }
+               }
+               actualKeysMatching = uniqueSortedStrings(actualKeysMatching)
+
+               // Now inspect the keys in order to determine whether this is most likely to be
+               // a map, list or set. There is room for error here, so we log in each case. If
+               // there is no method of telling, we remove the key from the InstanceState in
+               // order that it will be recreated. Again, this could be rolled into fewer loops
+               // but we prefer clarity.
+
+               oldCountKey := fmt.Sprintf("%s#", prefix)
+
+               // First, detect "obvious" maps - which have non-numeric keys (mostly).
+               hasNonNumericKeys := false
+               for _, key := range actualKeysMatching {
+                       if _, err := strconv.Atoi(key); err != nil {
+                               hasNonNumericKeys = true
+                       }
+               }
+               if hasNonNumericKeys {
+                       newCountKey := fmt.Sprintf("%s%%", prefix)
+
+                       instanceState.Attributes[newCountKey] = instanceState.Attributes[oldCountKey]
+                       delete(instanceState.Attributes, oldCountKey)
+                       log.Printf("[STATE UPGRADE] Detected %s as a map. Replaced count = %s",
+                               strings.TrimSuffix(prefix, "."), instanceState.Attributes[newCountKey])
+               }
+
+               // Now detect empty collections and remove them from state.
+               if len(actualKeysMatching) == 0 {
+                       delete(instanceState.Attributes, oldCountKey)
+                       log.Printf("[STATE UPGRADE] Detected %s as an empty collection. Removed from state.",
+                               strings.TrimSuffix(prefix, "."))
+               }
+       }
+
+       return nil
+}
+
+// uniqueSortedStrings removes duplicates from a slice of strings and returns
+// a sorted slice of the unique strings.
+func uniqueSortedStrings(input []string) []string {
+       uniquemap := make(map[string]struct{})
+       for _, str := range input {
+               uniquemap[str] = struct{}{}
+       }
+
+       output := make([]string, len(uniquemap))
+
+       i := 0
+       for key := range uniquemap {
+               output[i] = key
+               i++
+       }
+
+       sort.Strings(output)
+       return output
+}
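
A concrete in-package sketch of the map detection above, with illustrative attribute values:

func exampleUpgradeAttrs() map[string]string {
    is := &InstanceState{
        Attributes: map[string]string{
            "tags.#":    "1",
            "tags.Name": "web",
        },
    }

    // "Name" is non-numeric, so "tags" is detected as a map and the
    // count key is rewritten from "tags.#" to "tags.%".
    upgradeAttributesV2ToV3(is)

    return is.Attributes // {"tags.%": "1", "tags.Name": "web"}
}
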
diff --git a/vendor/github.com/hashicorp/terraform/terraform/state_v1.go b/vendor/github.com/hashicorp/terraform/terraform/state_v1.go
new file mode 100644 (file)
index 0000000..68cffb4
--- /dev/null
@@ -0,0 +1,145 @@
+package terraform
+
+// stateV1 keeps track of a snapshot state-of-the-world that Terraform
+// can use to keep track of what real world resources it is actually
+// managing.
+//
+// stateV1 is _only_ used for the purposes of backwards compatibility
+// and is no longer used in Terraform.
+//
+// For the upgrade process, see state_upgrade_v1_to_v2.go
+type stateV1 struct {
+       // Version is the protocol version. "1" for a StateV1.
+       Version int `json:"version"`
+
+       // Serial is incremented on any operation that modifies
+       // the State file. It is used to detect potentially conflicting
+       // updates.
+       Serial int64 `json:"serial"`
+
+       // Remote is used to track the metadata required to
+       // pull and push state files from a remote storage endpoint.
+       Remote *remoteStateV1 `json:"remote,omitempty"`
+
+       // Modules contains all the modules in a breadth-first order
+       Modules []*moduleStateV1 `json:"modules"`
+}
+
+type remoteStateV1 struct {
+       // Type controls the client we use for the remote state
+       Type string `json:"type"`
+
+       // Config is used to store arbitrary configuration that
+       // is type specific
+       Config map[string]string `json:"config"`
+}
+
+type moduleStateV1 struct {
+       // Path is the import path from the root module. Module imports are
+       // always disjoint, so the path represents a module tree
+       Path []string `json:"path"`
+
+       // Outputs declared by the module and maintained for each module
+       // even though only the root module technically needs to be kept.
+       // This allows operators to inspect values at the boundaries.
+       Outputs map[string]string `json:"outputs"`
+
+       // Resources is a mapping of the logically named resource to
+       // the state of the resource. Each resource may actually have
+       // N instances underneath, although a user only needs to think
+       // about the 1:1 case.
+       Resources map[string]*resourceStateV1 `json:"resources"`
+
+       // Dependencies are a list of things that this module relies on
+       // existing to remain intact. For example: a module may depend
+       // on a VPC ID given by an aws_vpc resource.
+       //
+       // Terraform uses this information to build valid destruction
+       // orders and to warn the user if they're destroying a module that
+       // another resource depends on.
+       //
+       // Things can be put into this list that may not be managed by
+       // Terraform. If Terraform doesn't find a matching ID in the
+       // overall state, then it assumes it isn't managed and doesn't
+       // worry about it.
+       Dependencies []string `json:"depends_on,omitempty"`
+}
+
+type resourceStateV1 struct {
+       // This is filled in and managed by Terraform, and is the resource
+       // type itself such as "mycloud_instance". If a resource provider sets
+       // this value, it won't be persisted.
+       Type string `json:"type"`
+
+       // Dependencies are a list of things that this resource relies on
+       // existing to remain intact. For example: an AWS instance might
+       // depend on a subnet (which itself might depend on a VPC, and so
+       // on).
+       //
+       // Terraform uses this information to build valid destruction
+       // orders and to warn the user if they're destroying a resource that
+       // another resource depends on.
+       //
+       // Things can be put into this list that may not be managed by
+       // Terraform. If Terraform doesn't find a matching ID in the
+       // overall state, then it assumes it isn't managed and doesn't
+       // worry about it.
+       Dependencies []string `json:"depends_on,omitempty"`
+
+       // Primary is the current active instance for this resource.
+       // It can be replaced but only after a successful creation.
+       // This is the instance on which providers will act.
+       Primary *instanceStateV1 `json:"primary"`
+
+       // Tainted is used to track any underlying instances that
+       // have been created but are in a bad or unknown state and
+       // need to be cleaned up subsequently.  In the
+       // standard case, there is only at most a single instance.
+       // However, in pathological cases, it is possible for the number
+       // of instances to accumulate.
+       Tainted []*instanceStateV1 `json:"tainted,omitempty"`
+
+       // Deposed is used in the mechanics of CreateBeforeDestroy: the existing
+       // Primary is Deposed to get it out of the way for the replacement Primary to
+       // be created by Apply. If the replacement Primary creates successfully, the
+       // Deposed instance is cleaned up. If there were problems creating the
+       // replacement, the instance remains in the Deposed list so it can be
+       // destroyed in a future run. Functionally, Deposed instances are very
+       // similar to Tainted instances in that Terraform is only tracking them in
+       // order to remember to destroy them.
+       Deposed []*instanceStateV1 `json:"deposed,omitempty"`
+
+       // Provider is used when a resource is connected to a provider with an alias.
+       // If this string is empty, the resource is connected to the default provider,
+       // e.g. "aws_instance" goes with the "aws" provider.
+       // If the resource block contained a "provider" key, that value will be set here.
+       Provider string `json:"provider,omitempty"`
+}
+
+type instanceStateV1 struct {
+       // A unique ID for this resource. This is opaque to Terraform
+       // and is only meant as a lookup mechanism for the providers.
+       ID string `json:"id"`
+
+       // Attributes are basic information about the resource. Any keys here
+       // are accessible in variable format within Terraform configurations:
+       // ${resourcetype.name.attribute}.
+       Attributes map[string]string `json:"attributes,omitempty"`
+
+       // Ephemeral is used to store any state associated with this instance
+       // that is necessary for the Terraform run to complete, but is not
+       // persisted to a state file.
+       Ephemeral ephemeralStateV1 `json:"-"`
+
+       // Meta is a simple K/V map that is persisted to the State but otherwise
+       // ignored by Terraform core. It's meant to be used for accounting by
+       // external client code.
+       Meta map[string]string `json:"meta,omitempty"`
+}
+
+type ephemeralStateV1 struct {
+       // ConnInfo is used for the providers to export information which is
+       // used to connect to the resource for provisioning. For example,
+       // this could contain SSH or WinRM credentials.
+       ConnInfo map[string]string `json:"-"`
+}
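
For reference, a minimal V1 document matching the struct tags above, decoded in-package; the values are illustrative and encoding/json is assumed to be imported:

func exampleDecodeV1() (*stateV1, error) {
    raw := []byte(`{
        "version": 1,
        "serial": 3,
        "modules": [{
            "path": ["root"],
            "outputs": {"ip": "10.0.0.5"},
            "resources": {}
        }]
    }`)

    var s stateV1
    if err := json.Unmarshal(raw, &s); err != nil {
        return nil, err
    }
    return &s, nil
}
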
diff --git a/vendor/github.com/hashicorp/terraform/terraform/testing.go b/vendor/github.com/hashicorp/terraform/terraform/testing.go
new file mode 100644 (file)
index 0000000..3f0418d
--- /dev/null
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+       "os"
+       "testing"
+)
+
+// TestStateFile writes the given state to the path.
+func TestStateFile(t *testing.T, path string, state *State) {
+       f, err := os.Create(path)
+       if err != nil {
+               t.Fatalf("err: %s", err)
+       }
+       defer f.Close()
+
+       if err := WriteState(state, f); err != nil {
+               t.Fatalf("err: %s", err)
+       }
+}
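
A sketch of calling this helper from a test in the same package, assuming the package's NewState constructor and the standard io/ioutil, os, and path/filepath imports:

func TestStateFileRoundTrip(t *testing.T) {
    dir, err := ioutil.TempDir("", "tfstate")
    if err != nil {
        t.Fatal(err)
    }
    defer os.RemoveAll(dir)

    path := filepath.Join(dir, "terraform.tfstate")
    TestStateFile(t, path, NewState())
}
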
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform.go b/vendor/github.com/hashicorp/terraform/terraform/transform.go
new file mode 100644 (file)
index 0000000..f4a431a
--- /dev/null
@@ -0,0 +1,52 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphTransformer is the interface that transformers implement. This
+// interface is only for transforms that need entire graph visibility.
+type GraphTransformer interface {
+       Transform(*Graph) error
+}
+
+// GraphVertexTransformer is an interface that transforms a single
+// Vertex within the graph. This is a specialization of GraphTransformer
+// that makes it easy to do vertex replacement.
+//
+// The GraphTransformer that runs through the GraphVertexTransformers is
+// VertexTransformer.
+type GraphVertexTransformer interface {
+       Transform(dag.Vertex) (dag.Vertex, error)
+}
+
+// GraphTransformIf is a helper function that conditionally returns the
+// given GraphTransformer, or nil when the condition is false. This is
+// useful for calling inline a sequence of transforms without having to
+// split it up into multiple append() calls.
+func GraphTransformIf(f func() bool, then GraphTransformer) GraphTransformer {
+       if f() {
+               return then
+       }
+
+       return nil
+}
+
+type graphTransformerMulti struct {
+       Transforms []GraphTransformer
+}
+
+func (t *graphTransformerMulti) Transform(g *Graph) error {
+       for _, t := range t.Transforms {
+               if err := t.Transform(g); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+// GraphTransformMulti combines multiple graph transformers into a single
+// GraphTransformer that runs all the individual graph transformers.
+func GraphTransformMulti(ts ...GraphTransformer) GraphTransformer {
+       return &graphTransformerMulti{Transforms: ts}
+}
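
A sketch of composing transformers with these helpers. GraphTransformIf returns nil when its condition is false, so the (illustrative) runner below skips nil steps:

func runSteps(g *Graph, mod *module.Tree, destroy bool) error {
    steps := []GraphTransformer{
        &AttachResourceConfigTransformer{Module: mod},
        GraphTransformIf(func() bool { return destroy }, &CountBoundaryTransformer{}),
    }

    for _, step := range steps {
        if step == nil {
            // GraphTransformIf returned nil: the condition was false.
            continue
        }
        if err := step.Transform(g); err != nil {
            return err
        }
    }
    return nil
}
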
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_provider.go
new file mode 100644 (file)
index 0000000..10506ea
--- /dev/null
@@ -0,0 +1,80 @@
+package terraform
+
+import (
+       "log"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+)
+
+// GraphNodeAttachProvider is an interface that must be implemented by nodes
+// that want provider configurations attached.
+type GraphNodeAttachProvider interface {
+       // Must be implemented to determine the path for the configuration
+       GraphNodeSubPath
+
+       // ProviderName with no module prefix. Example: "aws".
+       ProviderName() string
+
+       // Sets the configuration
+       AttachProvider(*config.ProviderConfig)
+}
+
+// AttachProviderConfigTransformer goes through the graph and attaches
+// provider configuration structures to nodes that implement the interfaces
+// above.
+//
+// The attached configuration structures are directly from the configuration.
+// If they're going to be modified, a copy should be made.
+type AttachProviderConfigTransformer struct {
+       Module *module.Tree // Module is the root module for the config
+}
+
+func (t *AttachProviderConfigTransformer) Transform(g *Graph) error {
+       if err := t.attachProviders(g); err != nil {
+               return err
+       }
+
+       return nil
+}
+
+func (t *AttachProviderConfigTransformer) attachProviders(g *Graph) error {
+       // Go through and find GraphNodeAttachProvider
+       for _, v := range g.Vertices() {
+               // Only care about GraphNodeAttachProvider implementations
+               apn, ok := v.(GraphNodeAttachProvider)
+               if !ok {
+                       continue
+               }
+
+               // Determine what we're looking for
+               path := normalizeModulePath(apn.Path())
+               path = path[1:]
+               name := apn.ProviderName()
+               log.Printf("[TRACE] Attach provider request: %#v %s", path, name)
+
+               // Get the configuration.
+               tree := t.Module.Child(path)
+               if tree == nil {
+                       continue
+               }
+
+               // Go through the provider configs to find the matching config
+               for _, p := range tree.Config().ProviderConfigs {
+                       // Build the name, which is "name.alias" if an alias exists
+                       current := p.Name
+                       if p.Alias != "" {
+                               current += "." + p.Alias
+                       }
+
+                       // If the configs match then attach!
+                       if current == name {
+                               log.Printf("[TRACE] Attaching provider config: %#v", p)
+                               apn.AttachProvider(p)
+                               break
+                       }
+               }
+       }
+
+       return nil
+}
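
A minimal in-package sketch of a vertex satisfying GraphNodeAttachProvider; the type and provider name here are hypothetical, and the real implementations are the provider node types elsewhere in this package:

type exampleProviderNode struct {
    PathValue []string               // module path, e.g. []string{"root"}
    Config    *config.ProviderConfig // set by AttachProvider
}

func (n *exampleProviderNode) Path() []string       { return n.PathValue } // GraphNodeSubPath
func (n *exampleProviderNode) ProviderName() string { return "aws" }

func (n *exampleProviderNode) AttachProvider(c *config.ProviderConfig) {
    n.Config = c
}
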
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_config_resource.go
new file mode 100644 (file)
index 0000000..f2ee37e
--- /dev/null
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+)
+
+// GraphNodeAttachResourceConfig is an interface that must be implemented by nodes
+// that want resource configurations attached.
+type GraphNodeAttachResourceConfig interface {
+       // ResourceAddr is the address to the resource
+       ResourceAddr() *ResourceAddress
+
+       // Sets the configuration
+       AttachResourceConfig(*config.Resource)
+}
+
+// AttachResourceConfigTransformer goes through the graph and attaches
+// resource configuration structures to nodes that implement the interfaces
+// above.
+//
+// The attached configuration structures are directly from the configuration.
+// If they're going to be modified, a copy should be made.
+type AttachResourceConfigTransformer struct {
+       Module *module.Tree // Module is the root module for the config
+}
+
+func (t *AttachResourceConfigTransformer) Transform(g *Graph) error {
+       log.Printf("[TRACE] AttachResourceConfigTransformer: Beginning...")
+
+       // Go through and find GraphNodeAttachResource
+       for _, v := range g.Vertices() {
+               // Only care about GraphNodeAttachResource implementations
+               arn, ok := v.(GraphNodeAttachResourceConfig)
+               if !ok {
+                       continue
+               }
+
+               // Determine what we're looking for
+               addr := arn.ResourceAddr()
+               log.Printf(
+                       "[TRACE] AttachResourceConfigTransformer: Attach resource "+
+                               "config request: %s", addr)
+
+               // Get the configuration.
+               path := normalizeModulePath(addr.Path)
+               path = path[1:]
+               tree := t.Module.Child(path)
+               if tree == nil {
+                       continue
+               }
+
+               // Go through the resource configs to find the matching config
+               for _, r := range tree.Config().Resources {
+                       // Get a resource address so we can compare
+                       a, err := parseResourceAddressConfig(r)
+                       if err != nil {
+                               panic(fmt.Sprintf(
+                                       "Error parsing config address, this is a bug: %#v", r))
+                       }
+                       a.Path = addr.Path
+
+                       // If this is not the same resource, then continue
+                       if !a.Equals(addr) {
+                               continue
+                       }
+
+                       log.Printf("[TRACE] Attaching resource config: %#v", r)
+                       arn.AttachResourceConfig(r)
+                       break
+               }
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_attach_state.go
new file mode 100644 (file)
index 0000000..564ff08
--- /dev/null
@@ -0,0 +1,68 @@
+package terraform
+
+import (
+       "log"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeAttachResourceState is an interface that can be implemented
+// to request that a ResourceState is attached to the node.
+type GraphNodeAttachResourceState interface {
+       // The address to the resource for the state
+       ResourceAddr() *ResourceAddress
+
+       // Sets the state
+       AttachResourceState(*ResourceState)
+}
+
+// AttachStateTransformer goes through the graph and attaches
+// state to nodes that implement the interfaces above.
+type AttachStateTransformer struct {
+       State *State // State is the root state
+}
+
+func (t *AttachStateTransformer) Transform(g *Graph) error {
+       // If no state, then nothing to do
+       if t.State == nil {
+               log.Printf("[DEBUG] Not attaching any state: state is nil")
+               return nil
+       }
+
+       filter := &StateFilter{State: t.State}
+       for _, v := range g.Vertices() {
+               // Only care about nodes that request resource state be attached
+               an, ok := v.(GraphNodeAttachResourceState)
+               if !ok {
+                       continue
+               }
+               addr := an.ResourceAddr()
+
+               // Look up the state for this address
+               results, err := filter.Filter(addr.String())
+               if err != nil {
+                       return err
+               }
+
+               // Attach the first resource state we get
+               found := false
+               for _, result := range results {
+                       if rs, ok := result.Value.(*ResourceState); ok {
+                               log.Printf(
+                                       "[DEBUG] Attaching resource state to %q: %#v",
+                                       dag.VertexName(v), rs)
+                               an.AttachResourceState(rs)
+                               found = true
+                               break
+                       }
+               }
+
+               if !found {
+                       log.Printf(
+                               "[DEBUG] Resource state not found for %q: %s",
+                               dag.VertexName(v), addr)
+               }
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config.go
new file mode 100644 (file)
index 0000000..61bce85
--- /dev/null
@@ -0,0 +1,135 @@
+package terraform
+
+import (
+       "errors"
+       "fmt"
+       "log"
+       "sync"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// ConfigTransformer is a GraphTransformer that adds all the resources
+// from the configuration to the graph.
+//
+// The module used to configure this transformer must be the root module.
+//
+// Only resources are added to the graph. Variables, outputs, and
+// providers must be added via other transforms.
+//
+// Unlike ConfigTransformerOld, this transformer creates a graph with
+// all resources including module resources, rather than creating module
+// nodes that are then "flattened".
+type ConfigTransformer struct {
+       Concrete ConcreteResourceNodeFunc
+
+       // Module is the module to add resources from.
+       Module *module.Tree
+
+       // Unique will only add resources that aren't already present in the graph.
+       Unique bool
+
+       // ModeFilter, if set, restricts the transform to resources matching Mode
+       ModeFilter bool
+       Mode       config.ResourceMode
+
+       l         sync.Mutex
+       uniqueMap map[string]struct{}
+}
+
+func (t *ConfigTransformer) Transform(g *Graph) error {
+       // Lock since we use some internal state
+       t.l.Lock()
+       defer t.l.Unlock()
+
+       // If no module is given, we don't do anything
+       if t.Module == nil {
+               return nil
+       }
+
+       // If the module isn't loaded, that is simply an error
+       if !t.Module.Loaded() {
+               return errors.New("module must be loaded for ConfigTransformer")
+       }
+
+       // Reset the uniqueness map. If we're tracking uniques, then populate
+       // it with addresses.
+       t.uniqueMap = make(map[string]struct{})
+       defer func() { t.uniqueMap = nil }()
+       if t.Unique {
+               for _, v := range g.Vertices() {
+                       if rn, ok := v.(GraphNodeResource); ok {
+                               t.uniqueMap[rn.ResourceAddr().String()] = struct{}{}
+                       }
+               }
+       }
+
+       // Start the transformation process
+       return t.transform(g, t.Module)
+}
+
+func (t *ConfigTransformer) transform(g *Graph, m *module.Tree) error {
+       // If no config, do nothing
+       if m == nil {
+               return nil
+       }
+
+       // Add our resources
+       if err := t.transformSingle(g, m); err != nil {
+               return err
+       }
+
+       // Transform all the children.
+       for _, c := range m.Children() {
+               if err := t.transform(g, c); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+func (t *ConfigTransformer) transformSingle(g *Graph, m *module.Tree) error {
+       log.Printf("[TRACE] ConfigTransformer: Starting for path: %v", m.Path())
+
+       // Get the configuration for this module
+       conf := m.Config()
+
+       // Build the path we're at
+       path := m.Path()
+
+       // Write all the resources out
+       for _, r := range conf.Resources {
+               // Build the resource address
+               addr, err := parseResourceAddressConfig(r)
+               if err != nil {
+                       panic(fmt.Sprintf(
+                               "Error parsing config address, this is a bug: %#v", r))
+               }
+               addr.Path = path
+
+               // If this is already in our uniqueness map, don't add it again
+               if _, ok := t.uniqueMap[addr.String()]; ok {
+                       continue
+               }
+
+               // Remove non-matching modes
+               if t.ModeFilter && addr.Mode != t.Mode {
+                       continue
+               }
+
+               // Build the abstract node and the concrete one
+               abstract := &NodeAbstractResource{Addr: addr}
+               var node dag.Vertex = abstract
+               if f := t.Concrete; f != nil {
+                       node = f(abstract)
+               }
+
+               // Add it to the graph
+               g.Add(node)
+       }
+
+       return nil
+}
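
A short in-package sketch of wiring this transformer up for managed (non-data) resources only; the wrapper function is illustrative:

func addManagedResources(g *Graph, root *module.Tree) error {
    tf := &ConfigTransformer{
        Module:     root,
        ModeFilter: true,
        Mode:       config.ManagedResourceMode,
    }
    return tf.Transform(g)
}
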
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_flat.go
new file mode 100644 (file)
index 0000000..92f9888
--- /dev/null
@@ -0,0 +1,80 @@
+package terraform
+
+import (
+       "errors"
+
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// FlatConfigTransformer is a GraphTransformer that adds the configuration
+// to the graph. The module used to configure this transformer must be
+// the root module.
+//
+// This transform adds the nodes but doesn't connect any of the references.
+// The ReferenceTransformer should be used for that.
+//
+// NOTE: In relation to ConfigTransformer: this is a newer generation config
+// transformer. It puts the _entire_ config into the graph (there is no
+// "flattening" step as before).
+type FlatConfigTransformer struct {
+       Concrete ConcreteResourceNodeFunc // What to turn resources into
+
+       Module *module.Tree
+}
+
+func (t *FlatConfigTransformer) Transform(g *Graph) error {
+       // If no module, we do nothing
+       if t.Module == nil {
+               return nil
+       }
+
+       // If the module is not loaded, that is an error
+       if !t.Module.Loaded() {
+               return errors.New("module must be loaded")
+       }
+
+       return t.transform(g, t.Module)
+}
+
+func (t *FlatConfigTransformer) transform(g *Graph, m *module.Tree) error {
+       // If no module, no problem
+       if m == nil {
+               return nil
+       }
+
+       // Transform all the children.
+       for _, c := range m.Children() {
+               if err := t.transform(g, c); err != nil {
+                       return err
+               }
+       }
+
+       // Get the configuration for this module
+       config := m.Config()
+
+       // Write all the resources out
+       for _, r := range config.Resources {
+               // Grab the address for this resource
+               addr, err := parseResourceAddressConfig(r)
+               if err != nil {
+                       return err
+               }
+               addr.Path = m.Path()
+
+               // Build the abstract resource. We have the config already so
+               // we'll just pre-populate that.
+               abstract := &NodeAbstractResource{
+                       Addr:   addr,
+                       Config: r,
+               }
+               var node dag.Vertex = abstract
+               if f := t.Concrete; f != nil {
+                       node = f(abstract)
+               }
+
+               g.Add(node)
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go b/vendor/github.com/hashicorp/terraform/terraform/transform_config_old.go
new file mode 100644 (file)
index 0000000..ec41258
--- /dev/null
@@ -0,0 +1,23 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/config"
+)
+
+// varNameForVar returns the VarName value for an interpolated variable.
+// This value is compared to the VarName() value for the nodes within the
+// graph to build the graph edges.
+func varNameForVar(raw config.InterpolatedVariable) string {
+       switch v := raw.(type) {
+       case *config.ModuleVariable:
+               return fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)
+       case *config.ResourceVariable:
+               return v.ResourceId()
+       case *config.UserVariable:
+               return fmt.Sprintf("var.%s", v.Name)
+       default:
+               return ""
+       }
+}
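
A sketch of the names this produces for each variable kind, parsed with config.NewInterpolatedVariable; the input strings are illustrative:

func exampleVarNames() ([]string, error) {
    inputs := []string{"var.region", "module.vpc.id", "aws_instance.web.id"}

    var names []string
    for _, in := range inputs {
        v, err := config.NewInterpolatedVariable(in)
        if err != nil {
            return nil, err
        }
        names = append(names, varNameForVar(v))
    }

    // names: ["var.region", "module.vpc.output.id", "aws_instance.web"]
    return names, nil
}
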
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go b/vendor/github.com/hashicorp/terraform/terraform/transform_count_boundary.go
new file mode 100644 (file)
index 0000000..83415f3
--- /dev/null
@@ -0,0 +1,28 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/dag"
+)
+
+// CountBoundaryTransformer adds a node that depends on everything else
+// so that it runs last, cleaning up state for nodes on the "count
+// boundary": an instance tracked as "foo.0" becomes "foo" when only one exists
+type CountBoundaryTransformer struct{}
+
+func (t *CountBoundaryTransformer) Transform(g *Graph) error {
+       node := &NodeCountBoundary{}
+       g.Add(node)
+
+       // Depends on everything
+       for _, v := range g.Vertices() {
+               // Don't connect to ourselves
+               if v == node {
+                       continue
+               }
+
+               // Connect!
+               g.Connect(dag.BasicEdge(node, v))
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go b/vendor/github.com/hashicorp/terraform/terraform/transform_deposed.go
new file mode 100644 (file)
index 0000000..2148cef
--- /dev/null
@@ -0,0 +1,168 @@
+package terraform
+
+import "fmt"
+
+// DeposedTransformer is a GraphTransformer that adds deposed resources
+// to the graph.
+type DeposedTransformer struct {
+       // State is the global state. We'll automatically find the correct
+       // ModuleState based on the Graph.Path that is being transformed.
+       State *State
+
+       // View, if non-empty, is the ModuleState.View used around the state
+       // to find deposed resources.
+       View string
+}
+
+func (t *DeposedTransformer) Transform(g *Graph) error {
+       state := t.State.ModuleByPath(g.Path)
+       if state == nil {
+               // If there is no state for our module there can't be any deposed
+               // resources, since they live in the state.
+               return nil
+       }
+
+       // If we have a view, apply it now
+       if t.View != "" {
+               state = state.View(t.View)
+       }
+
+       // Go through all the resources in our state to look for deposed resources
+       for k, rs := range state.Resources {
+               // If we have no deposed resources, then move on
+               if len(rs.Deposed) == 0 {
+                       continue
+               }
+               deposed := rs.Deposed
+
+               for i := range deposed {
+                       g.Add(&graphNodeDeposedResource{
+                               Index:        i,
+                               ResourceName: k,
+                               ResourceType: rs.Type,
+                               Provider:     rs.Provider,
+                       })
+               }
+       }
+
+       return nil
+}
+
+// graphNodeDeposedResource is the graph vertex representing a deposed resource.
+type graphNodeDeposedResource struct {
+       Index        int
+       ResourceName string
+       ResourceType string
+       Provider     string
+}
+
+func (n *graphNodeDeposedResource) Name() string {
+       return fmt.Sprintf("%s (deposed #%d)", n.ResourceName, n.Index)
+}
+
+func (n *graphNodeDeposedResource) ProvidedBy() []string {
+       return []string{resourceProvider(n.ResourceName, n.Provider)}
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeDeposedResource) EvalTree() EvalNode {
+       var provider ResourceProvider
+       var state *InstanceState
+
+       seq := &EvalSequence{Nodes: make([]EvalNode, 0, 5)}
+
+       // Build instance info
+       info := &InstanceInfo{Id: n.Name(), Type: n.ResourceType}
+       seq.Nodes = append(seq.Nodes, &EvalInstanceInfo{Info: info})
+
+       // Refresh the resource
+       seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+               Ops: []walkOperation{walkRefresh},
+               Node: &EvalSequence{
+                       Nodes: []EvalNode{
+                               &EvalGetProvider{
+                                       Name:   n.ProvidedBy()[0],
+                                       Output: &provider,
+                               },
+                               &EvalReadStateDeposed{
+                                       Name:   n.ResourceName,
+                                       Output: &state,
+                                       Index:  n.Index,
+                               },
+                               &EvalRefresh{
+                                       Info:     info,
+                                       Provider: &provider,
+                                       State:    &state,
+                                       Output:   &state,
+                               },
+                               &EvalWriteStateDeposed{
+                                       Name:         n.ResourceName,
+                                       ResourceType: n.ResourceType,
+                                       Provider:     n.Provider,
+                                       State:        &state,
+                                       Index:        n.Index,
+                               },
+                       },
+               },
+       })
+
+       // Apply
+       var diff *InstanceDiff
+       var err error
+       seq.Nodes = append(seq.Nodes, &EvalOpFilter{
+               Ops: []walkOperation{walkApply, walkDestroy},
+               Node: &EvalSequence{
+                       Nodes: []EvalNode{
+                               &EvalGetProvider{
+                                       Name:   n.ProvidedBy()[0],
+                                       Output: &provider,
+                               },
+                               &EvalReadStateDeposed{
+                                       Name:   n.ResourceName,
+                                       Output: &state,
+                                       Index:  n.Index,
+                               },
+                               &EvalDiffDestroy{
+                                       Info:   info,
+                                       State:  &state,
+                                       Output: &diff,
+                               },
+                               // Call pre-apply hook
+                               &EvalApplyPre{
+                                       Info:  info,
+                                       State: &state,
+                                       Diff:  &diff,
+                               },
+                               &EvalApply{
+                                       Info:     info,
+                                       State:    &state,
+                                       Diff:     &diff,
+                                       Provider: &provider,
+                                       Output:   &state,
+                                       Error:    &err,
+                               },
+                               // Always write the resource back to the state deposed... if it
+                               // was successfully destroyed it will be pruned. If it was not, it will
+                               // be caught on the next run.
+                               &EvalWriteStateDeposed{
+                                       Name:         n.ResourceName,
+                                       ResourceType: n.ResourceType,
+                                       Provider:     n.Provider,
+                                       State:        &state,
+                                       Index:        n.Index,
+                               },
+                               &EvalApplyPost{
+                                       Info:  info,
+                                       State: &state,
+                                       Error: &err,
+                               },
+                               &EvalReturnError{
+                                       Error: &err,
+                               },
+                               &EvalUpdateStateHook{},
+                       },
+               },
+       })
+
+       return seq
+}
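
The EvalTree above is a flat sequence whose steps are gated by EvalOpFilter on the current walk operation: the refresh steps run only during walkRefresh, the destroy steps only during apply/destroy. A minimal standalone sketch of that op-filter pattern, with toy types standing in for Terraform's EvalNode machinery:

package main

import "fmt"

type walkOp int

// Toy stand-ins for the walk operations named above.
const (
	walkRefresh walkOp = iota
	walkApply
)

// step pairs some work with the operations it applies to.
type step struct {
	ops []walkOp
	run func()
}

// runSequence executes only the steps whose filter matches the current op.
func runSequence(op walkOp, seq []step) {
	for _, s := range seq {
		for _, o := range s.ops {
			if o == op {
				s.run()
				break
			}
		}
	}
}

func main() {
	seq := []step{
		{ops: []walkOp{walkRefresh}, run: func() { fmt.Println("refresh deposed state") }},
		{ops: []walkOp{walkApply}, run: func() { fmt.Println("destroy deposed object") }},
	}
	runSequence(walkApply, seq) // only the apply/destroy step runs
}
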
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_cbd.go
new file mode 100644 (file)
index 0000000..edfb460
--- /dev/null
@@ -0,0 +1,257 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyerCBD must be implemented by nodes that might be
+// create-before-destroy destroyers.
+type GraphNodeDestroyerCBD interface {
+       GraphNodeDestroyer
+
+       // CreateBeforeDestroy returns true if this node represents a node
+       // that is doing a CBD.
+       CreateBeforeDestroy() bool
+
+       // ModifyCreateBeforeDestroy is called when the CBD state of a node
+       // is changed dynamically. This can return an error if this isn't
+       // allowed.
+       ModifyCreateBeforeDestroy(bool) error
+}
+
+// CBDEdgeTransformer modifies the edges of CBD nodes that went through
+// the DestroyEdgeTransformer to have the right dependencies. There are
+// two real tasks here:
+//
+//   1. With CBD, the destroy edge is inverted: the destroy depends on
+//      the creation.
+//
+//   2. A_d must depend on resources that depend on A. This is to enable
+//      the destroy to only happen once nodes that depend on A successfully
+//      update to A. Example: adding a web server updates the load balancer
+//      before deleting the old web server.
+//
+type CBDEdgeTransformer struct {
+       // Module and State are only needed to look up dependencies in
+       // any way possible. Either can be nil if not available.
+       Module *module.Tree
+       State  *State
+}
+
+func (t *CBDEdgeTransformer) Transform(g *Graph) error {
+       log.Printf("[TRACE] CBDEdgeTransformer: Beginning CBD transformation...")
+
+       // Go through and reverse any destroy edges
+       destroyMap := make(map[string][]dag.Vertex)
+       for _, v := range g.Vertices() {
+               dn, ok := v.(GraphNodeDestroyerCBD)
+               if !ok {
+                       continue
+               }
+
+               if !dn.CreateBeforeDestroy() {
+                       // If there are no CBD ancestors (dependent nodes), then we
+                       // do nothing here.
+                       if !t.hasCBDAncestor(g, v) {
+                               continue
+                       }
+
+                       // If this isn't naturally a CBD node, this means that an ancestor is
+                       // and we need to auto-upgrade this node to CBD. We do this because
+                       // a CBD node depending on non-CBD will result in cycles. To avoid this,
+                       // we always attempt to upgrade it.
+                       if err := dn.ModifyCreateBeforeDestroy(true); err != nil {
+                               return fmt.Errorf(
+                                       "%s: must have create before destroy enabled because "+
+                                               "a dependent resource has CBD enabled. However, when "+
+                                               "attempting to automatically do this, an error occurred: %s",
+                                       dag.VertexName(v), err)
+                       }
+               }
+
+               // Find the destroy edge. There should only be one.
+               for _, e := range g.EdgesTo(v) {
+                       // Not a destroy edge, ignore it
+                       de, ok := e.(*DestroyEdge)
+                       if !ok {
+                               continue
+                       }
+
+                       log.Printf("[TRACE] CBDEdgeTransformer: inverting edge: %s => %s",
+                               dag.VertexName(de.Source()), dag.VertexName(de.Target()))
+
+                       // Found it! Invert.
+                       g.RemoveEdge(de)
+                       g.Connect(&DestroyEdge{S: de.Target(), T: de.Source()})
+               }
+
+               // If the address has an index, we strip that. Our depMap creation
+               // graph doesn't expand counts so we don't currently get _exact_
+               // dependencies. One day when we limit dependencies more exactly
+               // this will have to change. We have a test case covering this
+               // (depNonCBDCountBoth) so it'll be caught.
+               addr := dn.DestroyAddr()
+               if addr.Index >= 0 {
+                       addr = addr.Copy() // Copy so that we don't modify any pointers
+                       addr.Index = -1
+               }
+
+               // Add this to the list of nodes that we need to fix up
+               // the edges for (step 2 above in the docs).
+               key := addr.String()
+               destroyMap[key] = append(destroyMap[key], v)
+       }
+
+       // If we have no CBD nodes, then our work here is done
+       if len(destroyMap) == 0 {
+               return nil
+       }
+
+       // We have CBD nodes. We now have to move on to the much more difficult
+       // task of connecting dependencies of the creation side of the destroy
+       // to the destruction node. The easiest way to explain this is an example:
+       //
+       // Given a pre-destroy dependence of: A => B
+       //   And A has CBD set.
+       //
+       // The resulting graph should be: A => B => A_d
+       //
+       // The key here is that B happens before A is destroyed. This is to
+       // facilitate the primary purpose for CBD: making sure that downstreams
+       // are properly updated to avoid downtime before the resource is destroyed.
+       //
+       // We can't trust that the resource being destroyed or anything that
+       // depends on it is actually in our current graph so we make a new
+       // graph in order to determine those dependencies and add them in.
+       log.Printf("[TRACE] CBDEdgeTransformer: building graph to find dependencies...")
+       depMap, err := t.depMap(destroyMap)
+       if err != nil {
+               return err
+       }
+
+       // We now have the mapping of resource addresses to the destroy
+       // nodes they need to depend on. We now go through our own vertices to
+       // find any matching these addresses and make the connection.
+       for _, v := range g.Vertices() {
+               // We're looking for creators
+               rn, ok := v.(GraphNodeCreator)
+               if !ok {
+                       continue
+               }
+
+               // Get the address
+               addr := rn.CreateAddr()
+
+               // If the address has an index, we strip that. Our depMap creation
+               // graph doesn't expand counts so we don't currently get _exact_
+               // dependencies. One day when we limit dependencies more exactly
+               // this will have to change. We have a test case covering this
+               // (depNonCBDCount) so it'll be caught.
+               if addr.Index >= 0 {
+                       addr = addr.Copy() // Copy so that we don't modify any pointers
+                       addr.Index = -1
+               }
+
+               // If there is nothing this resource should depend on, ignore it
+               key := addr.String()
+               dns, ok := depMap[key]
+               if !ok {
+                       continue
+               }
+
+               // We have nodes! Make the connection
+               for _, dn := range dns {
+                       log.Printf("[TRACE] CBDEdgeTransformer: destroy depends on dependence: %s => %s",
+                               dag.VertexName(dn), dag.VertexName(v))
+                       g.Connect(dag.BasicEdge(dn, v))
+               }
+       }
+
+       return nil
+}
+
+func (t *CBDEdgeTransformer) depMap(
+       destroyMap map[string][]dag.Vertex) (map[string][]dag.Vertex, error) {
+       // Build the graph of our config, this ensures that all resources
+       // are present in the graph.
+       g, err := (&BasicGraphBuilder{
+               Steps: []GraphTransformer{
+                       &FlatConfigTransformer{Module: t.Module},
+                       &AttachResourceConfigTransformer{Module: t.Module},
+                       &AttachStateTransformer{State: t.State},
+                       &ReferenceTransformer{},
+               },
+               Name: "CBDEdgeTransformer",
+       }).Build(nil)
+       if err != nil {
+               return nil, err
+       }
+
+       // Using this graph, build the list of destroy nodes that each resource
+       // address should depend on. For example, when we find B, we map the
+       // address of B to A_d in the "depMap" variable below.
+       depMap := make(map[string][]dag.Vertex)
+       for _, v := range g.Vertices() {
+               // We're looking for resources.
+               rn, ok := v.(GraphNodeResource)
+               if !ok {
+                       continue
+               }
+
+               // Get the address
+               addr := rn.ResourceAddr()
+               key := addr.String()
+
+               // Get the destroy nodes that are destroying this resource.
+               // If there aren't any, then we don't need to worry about
+               // any connections.
+               dns, ok := destroyMap[key]
+               if !ok {
+                       continue
+               }
+
+               // Get the nodes that depend on this one. In the example above:
+               // finding B in A => B.
+               for _, v := range g.UpEdges(v).List() {
+                       // We're looking for resources.
+                       rn, ok := v.(GraphNodeResource)
+                       if !ok {
+                               continue
+                       }
+
+                       // Keep track of the destroy nodes that this address
+                       // needs to depend on.
+                       key := rn.ResourceAddr().String()
+                       depMap[key] = append(depMap[key], dns...)
+               }
+       }
+
+       return depMap, nil
+}
+
+// hasCBDAncestor returns true if any ancestor (node that depends on this)
+// has CBD set.
+func (t *CBDEdgeTransformer) hasCBDAncestor(g *Graph, v dag.Vertex) bool {
+       s, _ := g.Ancestors(v)
+       if s == nil {
+               return true
+       }
+
+       for _, v := range s.List() {
+               dn, ok := v.(GraphNodeDestroyerCBD)
+               if !ok {
+                       continue
+               }
+
+               if dn.CreateBeforeDestroy() {
+                       // some ancestor is CreateBeforeDestroy, so we need to follow suit
+                       return true
+               }
+       }
+
+       return false
+}
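
In miniature, step 1 above amounts to flipping the source and target of each destroy edge; a toy sketch of that inversion (illustrative edge type, not Terraform's dag package):

package main

import "fmt"

// edge is a toy directed edge: "from" waits on "to".
type edge struct{ from, to string }

func main() {
	// Default replacement order: create the new instance only after the
	// old one is destroyed.
	edges := []edge{{from: "A (create)", to: "A (destroy)"}}

	// create_before_destroy inverts the edge: the destroy now waits for the
	// replacement to be created (and for dependents to switch over).
	for i, e := range edges {
		edges[i] = edge{from: e.to, to: e.from}
	}
	fmt.Println(edges) // [{A (destroy) A (create)}]
}
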
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go b/vendor/github.com/hashicorp/terraform/terraform/transform_destroy_edge.go
new file mode 100644 (file)
index 0000000..22be1ab
--- /dev/null
@@ -0,0 +1,269 @@
+package terraform
+
+import (
+       "log"
+
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeDestroyer must be implemented by nodes that destroy resources.
+type GraphNodeDestroyer interface {
+       dag.Vertex
+
+       // DestroyAddr is the address of the resource that is being
+       // destroyed by this node. If this returns nil, then this node
+       // is not destroying anything.
+       DestroyAddr() *ResourceAddress
+}
+
+// GraphNodeCreator must be implemented by nodes that create OR update resources.
+type GraphNodeCreator interface {
+       // CreateAddr is the address of the resource being created or updated
+       CreateAddr() *ResourceAddress
+}
+
+// DestroyEdgeTransformer is a GraphTransformer that creates the proper
+// references for destroy resources. Destroy resources are more complex
+// in that they must depend on the destruction of resources that
+// in turn depend on the CREATION of the node being destroyed.
+//
+// That is complicated. Visually:
+//
+//   B_d -> A_d -> A -> B
+//
+// Notice that A destroy depends on B destroy, while B create depends on
+// A create. They're inverted. This must be done for example because often
+// dependent resources will block parent resources from deleting. Concrete
+// example: VPC with subnets, the VPC can't be deleted while there are
+// still subnets.
+type DestroyEdgeTransformer struct {
+       // These are needed to properly build the graph of dependencies
+       // to determine what a destroy node depends on. Any of these can be nil.
+       Module *module.Tree
+       State  *State
+}
+
+func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
+       log.Printf("[TRACE] DestroyEdgeTransformer: Beginning destroy edge transformation...")
+
+       // Build a map of what is being destroyed (by address string) to
+       // the list of destroyers. In general there will only be one destroyer
+       // but to make it more robust we support multiple.
+       destroyers := make(map[string][]GraphNodeDestroyer)
+       for _, v := range g.Vertices() {
+               dn, ok := v.(GraphNodeDestroyer)
+               if !ok {
+                       continue
+               }
+
+               addr := dn.DestroyAddr()
+               if addr == nil {
+                       continue
+               }
+
+               key := addr.String()
+               log.Printf(
+                       "[TRACE] DestroyEdgeTransformer: %s destroying %q",
+                       dag.VertexName(dn), key)
+               destroyers[key] = append(destroyers[key], dn)
+       }
+
+       // If we aren't destroying anything, there will be no edges to make
+       // so just exit early and avoid future work.
+       if len(destroyers) == 0 {
+               return nil
+       }
+
+       // Go through and connect creators to destroyers. Going along with
+       // our example, this makes: A_d => A
+       for _, v := range g.Vertices() {
+               cn, ok := v.(GraphNodeCreator)
+               if !ok {
+                       continue
+               }
+
+               addr := cn.CreateAddr()
+               if addr == nil {
+                       continue
+               }
+
+               key := addr.String()
+               ds := destroyers[key]
+               if len(ds) == 0 {
+                       continue
+               }
+
+               for _, d := range ds {
+                       // For illustrating our example
+                       a_d := d.(dag.Vertex)
+                       a := v
+
+                       log.Printf(
+                               "[TRACE] DestroyEdgeTransformer: connecting creator/destroyer: %s, %s",
+                               dag.VertexName(a), dag.VertexName(a_d))
+
+                       g.Connect(&DestroyEdge{S: a, T: a_d})
+               }
+       }
+
+       // This is strange but is the easiest way to get the dependencies
+       // of a node that is being destroyed. We use another graph to make sure
+       // the resource is in the graph and ask for references. We have to do this
+       // because the node that is being destroyed may NOT be in the graph.
+       //
+       // Example: resource A is force new, then destroy A AND create A are
+       // in the graph. BUT if resource A is just pure destroy, then only
+       // destroy A is in the graph, and create A is not.
+       providerFn := func(a *NodeAbstractProvider) dag.Vertex {
+               return &NodeApplyableProvider{NodeAbstractProvider: a}
+       }
+       steps := []GraphTransformer{
+               // Add outputs and metadata
+               &OutputTransformer{Module: t.Module},
+               &AttachResourceConfigTransformer{Module: t.Module},
+               &AttachStateTransformer{State: t.State},
+
+               // Add providers since they can affect destroy order as well
+               &MissingProviderTransformer{AllowAny: true, Concrete: providerFn},
+               &ProviderTransformer{},
+               &DisableProviderTransformer{},
+               &ParentProviderTransformer{},
+               &AttachProviderConfigTransformer{Module: t.Module},
+
+               // Add all the variables. We can depend on resources through
+               // variables due to module parameters, and we need to properly
+               // determine that.
+               &RootVariableTransformer{Module: t.Module},
+               &ModuleVariableTransformer{Module: t.Module},
+
+               &ReferenceTransformer{},
+       }
+
+       // Go through all the nodes being destroyed and create a graph.
+       // The resulting graph is only of things being CREATED. For example,
+       // following our example, the resulting graph would be:
+       //
+       //   A, B (with no edges)
+       //
+       var tempG Graph
+       var tempDestroyed []dag.Vertex
+       for d := range destroyers {
+               // d is what is being destroyed. We parse the resource address
+               // it came from; it is a panic if this fails.
+               addr, err := ParseResourceAddress(d)
+               if err != nil {
+                       panic(err)
+               }
+
+               // This part is a little bit weird, but it is the best way to
+               // find the dependencies we need: build a graph, run the attach
+               // config and state transformers, then ask for references.
+               abstract := &NodeAbstractResource{Addr: addr}
+               tempG.Add(abstract)
+               tempDestroyed = append(tempDestroyed, abstract)
+
+               // We also add the destroy version here since the destroy can
+               // depend on things that the creation doesn't (destroy provisioners).
+               destroy := &NodeDestroyResource{NodeAbstractResource: abstract}
+               tempG.Add(destroy)
+               tempDestroyed = append(tempDestroyed, destroy)
+       }
+
+       // Run the graph transforms so we have the information we need to
+       // build references.
+       for _, s := range steps {
+               if err := s.Transform(&tempG); err != nil {
+                       return err
+               }
+       }
+
+       log.Printf("[TRACE] DestroyEdgeTransformer: reference graph: %s", tempG.String())
+
+       // Go through all the nodes in the graph and determine what they
+       // depend on.
+       for _, v := range tempDestroyed {
+               // Find all ancestors of this to determine the edges we'll depend on
+               vs, err := tempG.Ancestors(v)
+               if err != nil {
+                       return err
+               }
+
+               refs := make([]dag.Vertex, 0, vs.Len())
+               for _, raw := range vs.List() {
+                       refs = append(refs, raw.(dag.Vertex))
+               }
+
+               refNames := make([]string, len(refs))
+               for i, ref := range refs {
+                       refNames[i] = dag.VertexName(ref)
+               }
+               log.Printf(
+                       "[TRACE] DestroyEdgeTransformer: creation node %q references %s",
+                       dag.VertexName(v), refNames)
+
+               // If we have no references, then we won't need to do anything
+               if len(refs) == 0 {
+                       continue
+               }
+
+               // Get the destroy node for this. In our example above,
+               // we are currently at B and we're looking for B_d.
+               rn, ok := v.(GraphNodeResource)
+               if !ok {
+                       continue
+               }
+
+               addr := rn.ResourceAddr()
+               if addr == nil {
+                       continue
+               }
+
+               dns := destroyers[addr.String()]
+
+               // We have dependencies, check if any are being destroyed
+               // to build the list of things that we must depend on!
+               //
+               // In our example above, if we have:
+               //
+               //   B_d => A_d => A => B
+               //
+               // Then at this point in the algorithm we started with B_d,
+               // we built B (to get dependencies), and we found A. We're now looking
+               // to see if A_d exists.
+               var depDestroyers []dag.Vertex
+               for _, v := range refs {
+                       rn, ok := v.(GraphNodeResource)
+                       if !ok {
+                               continue
+                       }
+
+                       addr := rn.ResourceAddr()
+                       if addr == nil {
+                               continue
+                       }
+
+                       key := addr.String()
+                       if ds, ok := destroyers[key]; ok {
+                               for _, d := range ds {
+                                       depDestroyers = append(depDestroyers, d.(dag.Vertex))
+                                       log.Printf(
+                                               "[TRACE] DestroyEdgeTransformer: destruction of %q depends on %s",
+                                               key, dag.VertexName(d))
+                               }
+                       }
+               }
+
+               // Go through and make the connections. Use the variable
+               // names "a_d" and "b_d" to reference our example.
+               for _, a_d := range dns {
+                       for _, b_d := range depDestroyers {
+                               if b_d != a_d {
+                                       g.Connect(dag.BasicEdge(b_d, a_d))
+                               }
+                       }
+               }
+       }
+
+       return nil
+}
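
The net effect, using the VPC/subnet example from the doc comment: destroy ordering is creation ordering inverted. A simplified sketch that derives destroy dependencies by flipping create dependencies (plain maps here, not the real graph types):

package main

import "fmt"

func main() {
	// Creation dependencies: the subnet depends on the VPC existing first.
	createDeps := map[string][]string{"subnet": {"vpc"}}

	// Destroy dependencies invert that: the VPC's destroy must wait for the
	// subnet's destroy, since lingering subnets block VPC deletion.
	destroyDeps := map[string][]string{}
	for res, deps := range createDeps {
		for _, dep := range deps {
			key := dep + " (destroy)"
			destroyDeps[key] = append(destroyDeps[key], res+" (destroy)")
		}
	}
	fmt.Println(destroyDeps) // map[vpc (destroy):[subnet (destroy)]]
}
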
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go b/vendor/github.com/hashicorp/terraform/terraform/transform_diff.go
new file mode 100644 (file)
index 0000000..ad46d3c
--- /dev/null
@@ -0,0 +1,86 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// DiffTransformer is a GraphTransformer that adds the elements of
+// the diff to the graph.
+//
+// This transform is used for example by the ApplyGraphBuilder to ensure
+// that only resources that are being modified are represented in the graph.
+//
+// Module and State are still required for the DiffTransformer for annotations
+// since the Diff doesn't contain all the information required to build the
+// complete graph (such as create-before-destroy information). The graph
+// is built based on the diff first, though, ensuring that only resources
+// that are being modified are present in the graph.
+type DiffTransformer struct {
+       Concrete ConcreteResourceNodeFunc
+
+       Diff   *Diff
+       Module *module.Tree
+       State  *State
+}
+
+func (t *DiffTransformer) Transform(g *Graph) error {
+       // If the diff is nil or empty (nil is empty) then do nothing
+       if t.Diff.Empty() {
+               return nil
+       }
+
+       // Go through all the modules in the diff.
+       log.Printf("[TRACE] DiffTransformer: starting")
+       var nodes []dag.Vertex
+       for _, m := range t.Diff.Modules {
+               log.Printf("[TRACE] DiffTransformer: Module: %s", m)
+               // TODO: If this is a destroy diff then add a module destroy node
+
+               // Go through all the resources in this module.
+               for name, inst := range m.Resources {
+                       log.Printf("[TRACE] DiffTransformer: Resource %q: %#v", name, inst)
+
+                       // We have changes! This is a create or update operation.
+                       // First grab the address so we have a unique way to
+                       // reference this resource.
+                       addr, err := parseResourceAddressInternal(name)
+                       if err != nil {
+                               panic(fmt.Sprintf(
+                                       "Error parsing internal name, this is a bug: %q", name))
+                       }
+
+                       // Very important: add the module path for this resource to
+                       // the address. Remove "root" from it.
+                       addr.Path = m.Path[1:]
+
+                       // If we're destroying, add the destroy node
+                       if inst.Destroy || inst.GetDestroyDeposed() {
+                               abstract := &NodeAbstractResource{Addr: addr}
+                               g.Add(&NodeDestroyResource{NodeAbstractResource: abstract})
+                       }
+
+                       // If we have changes, then add the applyable version
+                       if len(inst.Attributes) > 0 {
+                               // Add the resource to the graph
+                               abstract := &NodeAbstractResource{Addr: addr}
+                               var node dag.Vertex = abstract
+                               if f := t.Concrete; f != nil {
+                                       node = f(abstract)
+                               }
+
+                               nodes = append(nodes, node)
+                       }
+               }
+       }
+
+       // Add all the nodes to the graph
+       for _, n := range nodes {
+               g.Add(n)
+       }
+
+       return nil
+}
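
A compact sketch of the node-selection rule applied per diffed resource above: a destroy flag yields a destroy node, attribute changes yield an apply node, and a force-new change yields both. Toy types only; the real code parses addresses and applies the Concrete node func:

package main

import (
	"fmt"
	"sort"
)

// instanceDiff is a toy stand-in for Terraform's InstanceDiff.
type instanceDiff struct {
	Destroy    bool
	Attributes map[string]string
}

func main() {
	diff := map[string]instanceDiff{
		// force-new change: yields both a destroy node and an apply node
		"aws_instance.web": {Destroy: true, Attributes: map[string]string{"ami": "new"}},
		// in-place update: yields only an apply node
		"aws_instance.db": {Attributes: map[string]string{"size": "large"}},
	}

	var nodes []string
	for name, d := range diff {
		if d.Destroy {
			nodes = append(nodes, name+" (destroy)")
		}
		if len(d.Attributes) > 0 {
			nodes = append(nodes, name+" (apply)")
		}
	}
	sort.Strings(nodes) // map order is random; sort for a stable listing
	fmt.Println(nodes)
}
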
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go b/vendor/github.com/hashicorp/terraform/terraform/transform_expand.go
new file mode 100644 (file)
index 0000000..982c098
--- /dev/null
@@ -0,0 +1,48 @@
+package terraform
+
+import (
+       "log"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeExpandable is an interface that nodes can implement to
+// signal that they can be expanded. Expanded nodes turn into
+// GraphNodeSubgraph nodes within the graph.
+type GraphNodeExpandable interface {
+       Expand(GraphBuilder) (GraphNodeSubgraph, error)
+}
+
+// GraphNodeDynamicExpandable is an interface that nodes can implement
+// to signal that they can be expanded at eval-time (hence dynamic).
+// These nodes are given the eval context and are expected to return
+// a new subgraph.
+type GraphNodeDynamicExpandable interface {
+       DynamicExpand(EvalContext) (*Graph, error)
+}
+
+// GraphNodeSubgraph is an interface a node can implement if it has
+// a larger subgraph that should be walked.
+type GraphNodeSubgraph interface {
+       Subgraph() dag.Grapher
+}
+
+// ExpandTransform is a transformer that does a subgraph expansion
+// at graph transform time (vs. at eval time). The benefit of earlier
+// subgraph expansion is that errors with the graph build can be detected
+// at an earlier stage.
+type ExpandTransform struct {
+       Builder GraphBuilder
+}
+
+func (t *ExpandTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
+       ev, ok := v.(GraphNodeExpandable)
+       if !ok {
+               // This isn't an expandable vertex, so just ignore it.
+               return v, nil
+       }
+
+       // Expand the subgraph!
+       log.Printf("[DEBUG] vertex %q: static expanding", dag.VertexName(ev))
+       return ev.Expand(t.Builder)
+}
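
The dispatch above is a plain optional-interface type assertion; sketched in isolation with a hypothetical expandable interface (not the real GraphNodeExpandable):

package main

import "fmt"

// expandable is a hypothetical stand-in for GraphNodeExpandable.
type expandable interface{ expand() []string }

type static string

type counted struct {
	name string
	n    int
}

func (c counted) expand() []string {
	out := make([]string, c.n)
	for i := range out {
		out[i] = fmt.Sprintf("%s[%d]", c.name, i)
	}
	return out
}

func main() {
	vertices := []interface{}{static("aws_vpc.main"), counted{name: "aws_subnet.a", n: 2}}
	for _, v := range vertices {
		if ev, ok := v.(expandable); ok {
			fmt.Println(ev.expand()) // expanded into subgraph nodes
			continue
		}
		fmt.Println(v) // not expandable: passes through unchanged
	}
}
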
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_provider.go
new file mode 100644 (file)
index 0000000..3673771
--- /dev/null
@@ -0,0 +1,38 @@
+package terraform
+
+import (
+       "fmt"
+       "strings"
+)
+
+// ImportProviderValidateTransformer is a GraphTransformer that goes through
+// the providers in the graph and validates that they only depend on variables.
+type ImportProviderValidateTransformer struct{}
+
+func (t *ImportProviderValidateTransformer) Transform(g *Graph) error {
+       for _, v := range g.Vertices() {
+               // We only care about providers
+               pv, ok := v.(GraphNodeProvider)
+               if !ok {
+                       continue
+               }
+
+               // We only care about providers that reference things
+               rn, ok := pv.(GraphNodeReferencer)
+               if !ok {
+                       continue
+               }
+
+               for _, ref := range rn.References() {
+                       if !strings.HasPrefix(ref, "var.") {
+                               return fmt.Errorf(
+                                       "Provider %q depends on non-var %q. Providers for import can currently\n"+
+                                               "only depend on variables or must be hardcoded. You can stop import\n"+
+                                               "from loading configurations by specifying `-config=\"\"`.",
+                                       pv.ProviderName(), ref)
+                       }
+               }
+       }
+
+       return nil
+}
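
The rule enforced above, stated on its own: at import time only "var." references can be resolved, because no resource state exists yet. A self-contained sketch of the check, using a hypothetical helper around the same strings.HasPrefix test:

package main

import (
	"fmt"
	"strings"
)

// validateProviderRefs applies the same rule: every reference a provider
// makes must be a variable ("var." prefix).
func validateProviderRefs(refs []string) error {
	for _, ref := range refs {
		if !strings.HasPrefix(ref, "var.") {
			return fmt.Errorf("provider depends on non-variable reference %q", ref)
		}
	}
	return nil
}

func main() {
	fmt.Println(validateProviderRefs([]string{"var.region"}))      // <nil>
	fmt.Println(validateProviderRefs([]string{"aws_vpc.main.id"})) // error
}
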
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_import_state.go
new file mode 100644 (file)
index 0000000..081df2f
--- /dev/null
@@ -0,0 +1,241 @@
+package terraform
+
+import (
+       "fmt"
+)
+
+// ImportStateTransformer is a GraphTransformer that adds nodes to the
+// graph to represent the imports we want to do for resources.
+type ImportStateTransformer struct {
+       Targets []*ImportTarget
+}
+
+func (t *ImportStateTransformer) Transform(g *Graph) error {
+       nodes := make([]*graphNodeImportState, 0, len(t.Targets))
+       for _, target := range t.Targets {
+               addr, err := ParseResourceAddress(target.Addr)
+               if err != nil {
+                       return fmt.Errorf(
+                               "failed to parse resource address '%s': %s",
+                               target.Addr, err)
+               }
+
+               nodes = append(nodes, &graphNodeImportState{
+                       Addr:     addr,
+                       ID:       target.ID,
+                       Provider: target.Provider,
+               })
+       }
+
+       // Build the graph vertices
+       for _, n := range nodes {
+               g.Add(n)
+       }
+
+       return nil
+}
+
+type graphNodeImportState struct {
+       Addr     *ResourceAddress // Addr is the resource address to import to
+       ID       string           // ID is the ID to import as
+       Provider string           // Provider is the provider name to use
+
+       states []*InstanceState
+}
+
+func (n *graphNodeImportState) Name() string {
+       return fmt.Sprintf("%s (import id: %s)", n.Addr, n.ID)
+}
+
+func (n *graphNodeImportState) ProvidedBy() []string {
+       return []string{resourceProvider(n.Addr.Type, n.Provider)}
+}
+
+// GraphNodeSubPath
+func (n *graphNodeImportState) Path() []string {
+       return normalizeModulePath(n.Addr.Path)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeImportState) EvalTree() EvalNode {
+       var provider ResourceProvider
+       info := &InstanceInfo{
+               Id:         fmt.Sprintf("%s.%s", n.Addr.Type, n.Addr.Name),
+               ModulePath: n.Path(),
+               Type:       n.Addr.Type,
+       }
+
+       // Reset our states
+       n.states = nil
+
+       // Return our sequence
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalGetProvider{
+                               Name:   n.ProvidedBy()[0],
+                               Output: &provider,
+                       },
+                       &EvalImportState{
+                               Provider: &provider,
+                               Info:     info,
+                               Id:       n.ID,
+                               Output:   &n.states,
+                       },
+               },
+       }
+}
+
+// GraphNodeDynamicExpandable impl.
+//
+// We use DynamicExpand as a way to generate the subgraph of refreshes
+// and state inserts we need to do for our import state. Since they're new
+// resources, they don't depend on anything else, and refreshes are isolated,
+// so this is nearly a perfect use case for dynamic expand.
+func (n *graphNodeImportState) DynamicExpand(ctx EvalContext) (*Graph, error) {
+       g := &Graph{Path: ctx.Path()}
+
+       // nameCounter is used to de-dup names in the state.
+       nameCounter := make(map[string]int)
+
+       // Compile the list of addresses that we'll be inserting into the state.
+       // We do this ahead of time so we can verify that we aren't importing
+       // something that already exists.
+       addrs := make([]*ResourceAddress, len(n.states))
+       for i, state := range n.states {
+               addr := *n.Addr
+               if t := state.Ephemeral.Type; t != "" {
+                       addr.Type = t
+               }
+
+               // Determine if we need to suffix the name to de-dup
+               key := addr.String()
+               count, ok := nameCounter[key]
+               if ok {
+                       count++
+                       addr.Name += fmt.Sprintf("-%d", count)
+               }
+               nameCounter[key] = count
+
+               // Add it to our list
+               addrs[i] = &addr
+       }
+
+       // Verify that all the addresses are clear
+       state, lock := ctx.State()
+       lock.RLock()
+       defer lock.RUnlock()
+       filter := &StateFilter{State: state}
+       for _, addr := range addrs {
+               result, err := filter.Filter(addr.String())
+               if err != nil {
+                       return nil, fmt.Errorf("Error verifying address %s: %s", addr, err)
+               }
+
+               // Go through the filter results; it is an error if we find
+               // a matching InstanceState, since that means a collision.
+               for _, r := range result {
+                       if _, ok := r.Value.(*InstanceState); ok {
+                               return nil, fmt.Errorf(
+                                       "Can't import %s, would collide with an existing resource.\n\n"+
+                                               "Please remove or rename this resource before continuing.",
+                                       addr)
+                       }
+               }
+       }
+
+       // For each of the states, we add a node to handle the refresh/add to state.
+       // "n.states" is populated by our own EvalTree with the result of
+       // ImportState. Since DynamicExpand is always called after EvalTree, this
+       // is safe.
+       for i, state := range n.states {
+               g.Add(&graphNodeImportStateSub{
+                       Target:   addrs[i],
+                       Path_:    n.Path(),
+                       State:    state,
+                       Provider: n.Provider,
+               })
+       }
+
+       // Root transform for a single root
+       t := &RootTransformer{}
+       if err := t.Transform(g); err != nil {
+               return nil, err
+       }
+
+       // Done!
+       return g, nil
+}
+
+// graphNodeImportStateSub is the sub-node of graphNodeImportState
+// and is part of the subgraph. This node is responsible for refreshing
+// and adding a resource to the state once it is imported.
+type graphNodeImportStateSub struct {
+       Target   *ResourceAddress
+       State    *InstanceState
+       Path_    []string
+       Provider string
+}
+
+func (n *graphNodeImportStateSub) Name() string {
+       return fmt.Sprintf("import %s result: %s", n.Target, n.State.ID)
+}
+
+func (n *graphNodeImportStateSub) Path() []string {
+       return n.Path_
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeImportStateSub) EvalTree() EvalNode {
+       // If the Ephemeral type isn't set, then it is an error
+       if n.State.Ephemeral.Type == "" {
+               err := fmt.Errorf(
+                       "import of %s didn't set type for %s",
+                       n.Target.String(), n.State.ID)
+               return &EvalReturnError{Error: &err}
+       }
+
+       // DeepCopy so we're only modifying our local copy
+       state := n.State.DeepCopy()
+
+       // Build the resource info
+       info := &InstanceInfo{
+               Id:         fmt.Sprintf("%s.%s", n.Target.Type, n.Target.Name),
+               ModulePath: n.Path_,
+               Type:       n.State.Ephemeral.Type,
+       }
+
+       // Key is the resource key
+       key := &ResourceStateKey{
+               Name:  n.Target.Name,
+               Type:  info.Type,
+               Index: n.Target.Index,
+       }
+
+       // The eval sequence
+       var provider ResourceProvider
+       return &EvalSequence{
+               Nodes: []EvalNode{
+                       &EvalGetProvider{
+                               Name:   resourceProvider(info.Type, n.Provider),
+                               Output: &provider,
+                       },
+                       &EvalRefresh{
+                               Provider: &provider,
+                               State:    &state,
+                               Info:     info,
+                               Output:   &state,
+                       },
+                       &EvalImportStateVerify{
+                               Info:  info,
+                               Id:    n.State.ID,
+                               State: &state,
+                       },
+                       &EvalWriteState{
+                               Name:         key.String(),
+                               ResourceType: info.Type,
+                               Provider:     resourceProvider(info.Type, n.Provider),
+                               State:        &state,
+                       },
+               },
+       }
+}
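
The name de-duplication in DynamicExpand above (the nameCounter map) is worth seeing in isolation: repeated addresses from a multi-instance import get a "-N" suffix so they don't collide in state. A sketch with a hypothetical helper mirroring that counter logic:

package main

import "fmt"

// dedupe suffixes repeated addresses with "-N", like nameCounter above.
func dedupe(addrs []string) []string {
	counter := make(map[string]int)
	out := make([]string, len(addrs))
	for i, a := range addrs {
		n, seen := counter[a]
		name := a
		if seen {
			n++
			name = fmt.Sprintf("%s-%d", a, n)
		}
		counter[a] = n
		out[i] = name
	}
	return out
}

func main() {
	fmt.Println(dedupe([]string{"aws_instance.web", "aws_instance.web", "aws_instance.web"}))
	// [aws_instance.web aws_instance.web-1 aws_instance.web-2]
}
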
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_module_variable.go
new file mode 100644 (file)
index 0000000..467950b
--- /dev/null
@@ -0,0 +1,120 @@
+package terraform
+
+import (
+       "log"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// ModuleVariableTransformer is a GraphTransformer that adds all the variables
+// in the configuration to the graph.
+//
+// This only adds variables that are referenced by other things in the graph.
+// If a module variable is not referenced, it won't be added to the graph.
+type ModuleVariableTransformer struct {
+       Module *module.Tree
+
+       DisablePrune bool // True if pruning unreferenced should be disabled
+}
+
+func (t *ModuleVariableTransformer) Transform(g *Graph) error {
+       return t.transform(g, nil, t.Module)
+}
+
+func (t *ModuleVariableTransformer) transform(g *Graph, parent, m *module.Tree) error {
+       // If no config, no variables
+       if m == nil {
+               return nil
+       }
+
+       // Transform all the children. This must be done BEFORE we transform
+       // this module itself (below), since child variables can reference parent variables.
+       for _, c := range m.Children() {
+               if err := t.transform(g, m, c); err != nil {
+                       return err
+               }
+       }
+
+       // If we have a parent, we can determine if a module variable is being
+       // used, so we transform this.
+       if parent != nil {
+               if err := t.transformSingle(g, parent, m); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+func (t *ModuleVariableTransformer) transformSingle(g *Graph, parent, m *module.Tree) error {
+       // If we have no vars, we're done!
+       vars := m.Config().Variables
+       if len(vars) == 0 {
+               log.Printf("[TRACE] Module %#v has no variables, skipping.", m.Path())
+               return nil
+       }
+
+       // Look for usage of this module
+       var mod *config.Module
+       for _, modUse := range parent.Config().Modules {
+               if modUse.Name == m.Name() {
+                       mod = modUse
+                       break
+               }
+       }
+       if mod == nil {
+               log.Printf("[INFO] Module %#v not used, not adding variables", m.Path())
+               return nil
+       }
+
+       // Build the reference map so we can determine if we're referencing things.
+       refMap := NewReferenceMap(g.Vertices())
+
+       // Add all variables here
+       for _, v := range vars {
+               // Determine the value of the variable. If it isn't in the
+               // configuration then it was never set and that's not a problem.
+               var value *config.RawConfig
+               if raw, ok := mod.RawConfig.Raw[v.Name]; ok {
+                       var err error
+                       value, err = config.NewRawConfig(map[string]interface{}{
+                               v.Name: raw,
+                       })
+                       if err != nil {
+                               // This shouldn't happen because it is already in
+                               // a RawConfig above meaning it worked once before.
+                               panic(err)
+                       }
+               }
+
+               // Build the node.
+               //
+               // NOTE: For now this is just an "applyable" variable. As we build
+               // new graph builders for the other operations I suspect we'll
+               // find a way to parameterize this, require new transforms, etc.
+               node := &NodeApplyableModuleVariable{
+                       PathValue: normalizeModulePath(m.Path()),
+                       Config:    v,
+                       Value:     value,
+                       Module:    t.Module,
+               }
+
+               if !t.DisablePrune {
+                       // If the node is not referenced by anything, then we don't need
+                       // to include it since it won't be used.
+                       if matches := refMap.ReferencedBy(node); len(matches) == 0 {
+                               log.Printf(
+                                       "[INFO] Not including %q in graph, nothing depends on it",
+                                       dag.VertexName(node))
+                               continue
+                       }
+               }
+
+               // Add it!
+               g.Add(node)
+       }
+
+       return nil
+}
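
The pruning step above only admits variables that something else references. A toy rendering of that rule, with a plain map standing in for NewReferenceMap/ReferencedBy:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// referencedBy maps a variable to the graph nodes that reference it.
	referencedBy := map[string][]string{
		"var.region": {"aws_instance.web"}, // used by a resource
		"var.unused": {},                   // referenced by nothing
	}

	var nodes []string
	for name, refs := range referencedBy {
		if len(refs) == 0 {
			fmt.Printf("not including %q in graph, nothing depends on it\n", name)
			continue // pruned, like the default (DisablePrune=false) path above
		}
		nodes = append(nodes, name)
	}
	sort.Strings(nodes)
	fmt.Println(nodes) // [var.region]
}
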
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_count.go
new file mode 100644 (file)
index 0000000..b256a25
--- /dev/null
@@ -0,0 +1,110 @@
+package terraform
+
+import (
+       "log"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// OrphanResourceCountTransformer is a GraphTransformer that adds orphans
+// for an expanded count to the graph. The determination of this depends
+// on the count argument given.
+//
+// Orphans are found by comparing the count to what is found in the state.
+// This transform assumes that if an element in the state is within the count
+// bounds given, it is not an orphan.
+type OrphanResourceCountTransformer struct {
+       Concrete ConcreteResourceNodeFunc
+
+       Count int              // Actual count of the resource
+       Addr  *ResourceAddress // Addr of the resource to look for orphans
+       State *State           // Full global state
+}
+
+func (t *OrphanResourceCountTransformer) Transform(g *Graph) error {
+       log.Printf("[TRACE] OrphanResourceCount: Starting...")
+
+       // Grab the module in the state just for this resource address
+       ms := t.State.ModuleByPath(normalizeModulePath(t.Addr.Path))
+       if ms == nil {
+               // If no state, there can't be orphans
+               return nil
+       }
+
+       orphanIndex := -1
+       if t.Count == 1 {
+               orphanIndex = 0
+       }
+
+       // Go through the resources in state and add any orphans to the graph
+       for key := range ms.Resources {
+               // Build the address
+               addr, err := parseResourceAddressInternal(key)
+               if err != nil {
+                       return err
+               }
+               addr.Path = ms.Path[1:]
+
+               // Copy the address for comparison. If we aren't looking at
+               // the same resource, then just ignore it.
+               addrCopy := addr.Copy()
+               addrCopy.Index = -1
+               if !addrCopy.Equals(t.Addr) {
+                       continue
+               }
+
+               log.Printf("[TRACE] OrphanResourceCount: Checking: %s", addr)
+
+               idx := addr.Index
+
+               // If the count is zero or less and the index here is -1 or 0,
+               // then we bump the index past the count so that we treat it
+               // as an orphan.
+               if t.Count <= 0 && idx <= 0 {
+                       idx = t.Count + 1
+               }
+
+               // If we have a count greater than 0 and we're at the zero index,
+               // we do a special case check to see if our state also has a
+               // -1 index value. If so, this is an orphan because our rules are
+               // that if both a -1 and 0 are in the state, the 0 is destroyed.
+               if t.Count > 0 && idx == orphanIndex {
+                       // This is a piece of cleverness (beware), but it's simple:
+                       // if orphanIndex is 0, then check -1, else check 0.
+                       checkIndex := (orphanIndex + 1) * -1
+
+                       key := &ResourceStateKey{
+                               Name:  addr.Name,
+                               Type:  addr.Type,
+                               Mode:  addr.Mode,
+                               Index: checkIndex,
+                       }
+
+                       if _, ok := ms.Resources[key.String()]; ok {
+                               // We have a -1 index, too. Make an arbitrarily high
+                               // index so that we always mark this as an orphan.
+                               log.Printf(
+                                       "[WARN] OrphanResourceCount: %q both -1 and 0 index found, orphaning %d",
+                                       addr, orphanIndex)
+                               idx = t.Count + 1
+                       }
+               }
+
+               // If the index is within the count bounds, it is not an orphan
+               if idx < t.Count {
+                       continue
+               }
+
+               // Build the abstract node and the concrete one
+               abstract := &NodeAbstractResource{Addr: addr}
+               var node dag.Vertex = abstract
+               if f := t.Concrete; f != nil {
+                       node = f(abstract)
+               }
+
+               // Add it to the graph
+               g.Add(node)
+       }
+
+       return nil
+}
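
Setting aside the -1/0 index special cases, the core rule above is a bounds check: any state index at or beyond the configured count is an orphan. A simplified sketch (the -1 vs. 0 collision handling is deliberately omitted):

package main

import "fmt"

// orphans returns the state indexes that fall outside [0, count).
func orphans(stateIndexes []int, count int) []int {
	var out []int
	for _, idx := range stateIndexes {
		if idx < 0 {
			idx = 0 // a no-count resource behaves like index 0
		}
		if idx >= count {
			out = append(out, idx)
		}
	}
	return out
}

func main() {
	// count lowered from 4 to 2: indexes 2 and 3 are now orphans.
	fmt.Println(orphans([]int{0, 1, 2, 3}, 2)) // [2 3]
}
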
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_output.go
new file mode 100644 (file)
index 0000000..49568d5
--- /dev/null
@@ -0,0 +1,64 @@
+package terraform
+
+import (
+       "log"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+)
+
+// OrphanOutputTransformer finds the outputs that aren't present
+// in the given config that are in the state and adds them to the graph
+// for deletion.
+type OrphanOutputTransformer struct {
+       Module *module.Tree // Root module
+       State  *State       // State is the root state
+}
+
+func (t *OrphanOutputTransformer) Transform(g *Graph) error {
+       if t.State == nil {
+               log.Printf("[DEBUG] No state, no orphan outputs")
+               return nil
+       }
+
+       return t.transform(g, t.Module)
+}
+
+func (t *OrphanOutputTransformer) transform(g *Graph, m *module.Tree) error {
+       // Get our configuration, and recurse into children
+       var c *config.Config
+       if m != nil {
+               c = m.Config()
+               for _, child := range m.Children() {
+                       if err := t.transform(g, child); err != nil {
+                               return err
+                       }
+               }
+       }
+
+       // Get the state. If there is no state, then we have no orphans!
+       path := normalizeModulePath(m.Path())
+       state := t.State.ModuleByPath(path)
+       if state == nil {
+               return nil
+       }
+
+       // Make a map of the valid outputs
+       valid := make(map[string]struct{})
+       for _, o := range c.Outputs {
+               valid[o.Name] = struct{}{}
+       }
+
+       // Go through the outputs and find the ones that aren't in our config.
+       for n := range state.Outputs {
+               // If it is in the valid map, then ignore
+               if _, ok := valid[n]; ok {
+                       continue
+               }
+
+               // Orphan!
+               g.Add(&NodeOutputOrphan{OutputName: n, PathValue: path})
+       }
+
+       return nil
+}
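
Orphan detection here (and for resources in the next file) is a set difference: names present in state but absent from configuration get deletion nodes. A minimal sketch with illustrative data:

package main

import "fmt"

func main() {
	configOutputs := map[string]struct{}{"vpc_id": {}} // outputs still configured
	stateOutputs := []string{"vpc_id", "old_dns_name"} // outputs present in state

	for _, name := range stateOutputs {
		if _, ok := configOutputs[name]; ok {
			continue // still in the config: keep it
		}
		fmt.Printf("orphan output %q: add deletion node\n", name)
	}
}
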
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go b/vendor/github.com/hashicorp/terraform/terraform/transform_orphan_resource.go
new file mode 100644 (file)
index 0000000..e42d3c8
--- /dev/null
@@ -0,0 +1,78 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// OrphanResourceTransformer is a GraphTransformer that adds resource
+// orphans to the graph. A resource orphan is a resource that is
+// represented in the state but not in the configuration.
+//
+// This only adds orphans that have no representation at all in the
+// configuration.
+type OrphanResourceTransformer struct {
+       Concrete ConcreteResourceNodeFunc
+
+       // State is the global state. We require the global state to
+       // properly find module orphans at our path.
+       State *State
+
+       // Module is the root module. We'll look up the proper configuration
+       // using the graph path.
+       Module *module.Tree
+}
+
+func (t *OrphanResourceTransformer) Transform(g *Graph) error {
+       if t.State == nil {
+               // If the entire state is nil, there can't be any orphans
+               return nil
+       }
+
+       // Go through the modules and for each module transform in order
+       // to add the orphan.
+       for _, ms := range t.State.Modules {
+               if err := t.transform(g, ms); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+func (t *OrphanResourceTransformer) transform(g *Graph, ms *ModuleState) error {
+       if ms == nil {
+               return nil
+       }
+
+       // Get the configuration for this path. The configuration might be
+       // nil if the module was removed from the configuration. This is okay,
+       // this just means that every resource is an orphan.
+       var c *config.Config
+       if m := t.Module.Child(ms.Path[1:]); m != nil {
+               c = m.Config()
+       }
+
+       // Go through the orphans and add each one to the graph
+       for _, key := range ms.Orphans(c) {
+               // Build the abstract resource
+               addr, err := parseResourceAddressInternal(key)
+               if err != nil {
+                       return err
+               }
+               addr.Path = ms.Path[1:]
+
+               // Build the abstract node and the concrete one
+               abstract := &NodeAbstractResource{Addr: addr}
+               var node dag.Vertex = abstract
+               if f := t.Concrete; f != nil {
+                       node = f(abstract)
+               }
+
+               // Add it to the graph
+               g.Add(node)
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_output.go b/vendor/github.com/hashicorp/terraform/terraform/transform_output.go
new file mode 100644 (file)
index 0000000..b260f4c
--- /dev/null
@@ -0,0 +1,59 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config/module"
+)
+
+// OutputTransformer is a GraphTransformer that adds all the outputs
+// in the configuration to the graph.
+//
+// This is done for the apply graph builder even if dependent nodes
+// aren't changing since there is no downside: the state will be available
+// even if the dependent items aren't changing.
+type OutputTransformer struct {
+       Module *module.Tree
+}
+
+func (t *OutputTransformer) Transform(g *Graph) error {
+       return t.transform(g, t.Module)
+}
+
+func (t *OutputTransformer) transform(g *Graph, m *module.Tree) error {
+       // If no config, no outputs
+       if m == nil {
+               return nil
+       }
+
+       // Transform all the children. We must do this first because
+       // we can reference module outputs and they must show up in the
+       // reference map.
+       for _, c := range m.Children() {
+               if err := t.transform(g, c); err != nil {
+                       return err
+               }
+       }
+
+       // If we have no outputs, we're done!
+       os := m.Config().Outputs
+       if len(os) == 0 {
+               return nil
+       }
+
+       // Add all outputs here
+       for _, o := range os {
+               // Build the node.
+               //
+               // NOTE: For now this is just an "applyable" output. As we build
+               // new graph builders for the other operations I suspect we'll
+               // find a way to parameterize this, require new transforms, etc.
+               node := &NodeApplyableOutput{
+                       PathValue: normalizeModulePath(m.Path()),
+                       Config:    o,
+               }
+
+               // Add it!
+               g.Add(node)
+       }
+
+       return nil
+}
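
Both this transformer and ModuleVariableTransformer recurse into children before handling the current module, so child nodes exist when parents reference them. A sketch of that child-first walk over a toy module tree:

package main

import "fmt"

// tree is a toy stand-in for module.Tree.
type tree struct {
	name     string
	children []*tree
}

// walk visits children before the node itself, mirroring the transform above.
func walk(t *tree, visit func(string)) {
	if t == nil {
		return
	}
	for _, c := range t.children {
		walk(c, visit)
	}
	visit(t.name)
}

func main() {
	root := &tree{
		name:     "root",
		children: []*tree{{name: "network"}, {name: "compute"}},
	}
	walk(root, func(name string) { fmt.Println("add outputs for module", name) })
	// Output order: network, compute, root
}
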
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go
new file mode 100644 (file)
index 0000000..b9695d5
--- /dev/null
@@ -0,0 +1,380 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "strings"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeProvider is an interface that nodes that can be a provider
+// must implement. The ProviderName returned is the name of the provider
+// they satisfy.
+type GraphNodeProvider interface {
+       ProviderName() string
+}
+
+// GraphNodeCloseProvider is an interface that nodes that can be a close
+// provider must implement. The CloseProviderName returned is the name of
+// the provider they satisfy.
+type GraphNodeCloseProvider interface {
+       CloseProviderName() string
+}
+
+// GraphNodeProviderConsumer is an interface that nodes that require
+// a provider must implement. ProvidedBy must return the name of the provider
+// to use.
+type GraphNodeProviderConsumer interface {
+       ProvidedBy() []string
+}
+
+// ProviderTransformer is a GraphTransformer that maps resources to
+// providers within the graph. This will error if there are any resources
+// that don't map to a proper provider.
+type ProviderTransformer struct{}
+
+func (t *ProviderTransformer) Transform(g *Graph) error {
+       // Go through the other nodes and match them to providers they need
+       var err error
+       m := providerVertexMap(g)
+       for _, v := range g.Vertices() {
+               if pv, ok := v.(GraphNodeProviderConsumer); ok {
+                       for _, p := range pv.ProvidedBy() {
+                               target := m[providerMapKey(p, pv)]
+                               if target == nil {
+                                       log.Printf("[TRACE] ProviderTransformer: provider %q missing; map: %#v", providerMapKey(p, pv), m)
+                                       err = multierror.Append(err, fmt.Errorf(
+                                               "%s: provider %s couldn't be found",
+                                               dag.VertexName(v), p))
+                                       continue
+                               }
+
+                               g.Connect(dag.BasicEdge(v, target))
+                       }
+               }
+       }
+
+       return err
+}
+
+// CloseProviderTransformer is a GraphTransformer that adds nodes to the
+// graph that will close open provider connections that aren't needed anymore.
+// A provider connection is no longer needed once all resources depending
+// on it have been evaluated.
+type CloseProviderTransformer struct{}
+
+func (t *CloseProviderTransformer) Transform(g *Graph) error {
+       pm := providerVertexMap(g)
+       cpm := closeProviderVertexMap(g)
+       var err error
+       for _, v := range g.Vertices() {
+               if pv, ok := v.(GraphNodeProviderConsumer); ok {
+                       for _, p := range pv.ProvidedBy() {
+                               key := p
+                               source := cpm[key]
+
+                               if source == nil {
+                                       // Create a new graphNodeCloseProvider and add it to the graph
+                                       source = &graphNodeCloseProvider{ProviderNameValue: p}
+                                       g.Add(source)
+
+                                       // Close node needs to depend on provider
+                                       provider, ok := pm[key]
+                                       if !ok {
+                                               err = multierror.Append(err, fmt.Errorf(
+                                                       "%s: provider %s couldn't be found for closing",
+                                                       dag.VertexName(v), p))
+                                               continue
+                                       }
+                                       g.Connect(dag.BasicEdge(source, provider))
+
+                                       // Make sure we also add the new graphNodeCloseProvider to the map
+                                       // so we don't create and add any duplicate graphNodeCloseProviders.
+                                       cpm[key] = source
+                               }
+
+                               // Close node depends on all nodes provided by the provider
+                               g.Connect(dag.BasicEdge(source, v))
+                       }
+               }
+       }
+
+       return err
+}
+
+// MissingProviderTransformer is a GraphTransformer that adds nodes
+// for missing providers into the graph. Specifically, it creates provider
+// configuration nodes for all the providers that we support. These are
+// pruned later during an optimization pass.
+type MissingProviderTransformer struct {
+       // Providers is the list of providers we support.
+       Providers []string
+
+       // AllowAny will not check that a provider is supported before adding
+       // it to the graph.
+       AllowAny bool
+
+       // Concrete, if set, overrides how the providers are made.
+       Concrete ConcreteProviderNodeFunc
+}
+
+func (t *MissingProviderTransformer) Transform(g *Graph) error {
+       // Initialize factory
+       if t.Concrete == nil {
+               t.Concrete = func(a *NodeAbstractProvider) dag.Vertex {
+                       return a
+               }
+       }
+
+       // Create a set of our supported providers
+       supported := make(map[string]struct{}, len(t.Providers))
+       for _, v := range t.Providers {
+               supported[v] = struct{}{}
+       }
+
+       // Get the map of providers we already have in our graph
+       m := providerVertexMap(g)
+
+       // Go through all the provider consumers and make sure we add
+       // that provider if it is missing. We use an index-based loop here
+       // instead of "range" because we append to check as we go.
+       check := g.Vertices()
+       for i := 0; i < len(check); i++ {
+               v := check[i]
+
+               pv, ok := v.(GraphNodeProviderConsumer)
+               if !ok {
+                       continue
+               }
+
+               // If this node has a subpath, then we use that as a prefix
+               // into our map to check for an existing provider.
+               var path []string
+               if sp, ok := pv.(GraphNodeSubPath); ok {
+                       raw := normalizeModulePath(sp.Path())
+                       if len(raw) > len(rootModulePath) {
+                               path = raw
+                       }
+               }
+
+               for _, p := range pv.ProvidedBy() {
+                       key := providerMapKey(p, pv)
+                       if _, ok := m[key]; ok {
+                               // This provider already exists as a configure node
+                               continue
+                       }
+
+                       // If the provider has an alias in it, we just want the type
+                       ptype := p
+                       if idx := strings.IndexRune(p, '.'); idx != -1 {
+                               ptype = p[:idx]
+                       }
+
+                       if !t.AllowAny {
+                               if _, ok := supported[ptype]; !ok {
+                                       // If we don't support the provider type, skip it.
+                                       // Validation later will catch this as an error.
+                                       continue
+                               }
+                       }
+
+                       // Add the missing provider node to the graph
+                       v := t.Concrete(&NodeAbstractProvider{
+                               NameValue: p,
+                               PathValue: path,
+                       }).(dag.Vertex)
+                       if len(path) > 0 {
+                               // We'll need the parent provider as well, so let's
+                               // add a dummy node to check to make sure that we add
+                               // that parent provider.
+                               check = append(check, &graphNodeProviderConsumerDummy{
+                                       ProviderValue: p,
+                                       PathValue:     path[:len(path)-1],
+                               })
+                       }
+
+                       m[key] = g.Add(v)
+               }
+       }
+
+       return nil
+}
+
+// ParentProviderTransformer connects provider nodes to their parents.
+//
+// This works by finding nodes that are both GraphNodeProviders and
+// GraphNodeSubPath. It then connects the providers to their parent
+// path.
+type ParentProviderTransformer struct{}
+
+func (t *ParentProviderTransformer) Transform(g *Graph) error {
+       // Make a mapping of key to dag.Vertex, where each key is "path.name"
+       m := make(map[string]dag.Vertex)
+
+       // Also create a map that maps a provider to its parent
+       parentMap := make(map[dag.Vertex]string)
+       for _, raw := range g.Vertices() {
+               // If it is the flat version, then make it the non-flat version.
+               // We eventually want to get rid of the flat version entirely so
+               // this is a stop-gap while it still exists.
+               var v dag.Vertex = raw
+
+               // Only care about providers
+               pn, ok := v.(GraphNodeProvider)
+               if !ok || pn.ProviderName() == "" {
+                       continue
+               }
+
+               // Also require a subpath; if there is no subpath then we
+               // just ignore it entirely. The expectation of this transform is
+               // that it is used with a graph builder that is already flattened.
+               var path []string
+               if pn, ok := raw.(GraphNodeSubPath); ok {
+                       path = pn.Path()
+               }
+               path = normalizeModulePath(path)
+
+               // Build the key with path.name i.e. "child.subchild.aws"
+               key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
+               m[key] = raw
+
+               // Determine the parent if we're non-root. Index 0 is always
+               // "root" since we normalize above, so a length greater than one
+               // means we have a parent.
+               if len(path) > 1 {
+                       path = path[:len(path)-1]
+                       key := fmt.Sprintf("%s.%s", strings.Join(path, "."), pn.ProviderName())
+                       parentMap[raw] = key
+               }
+       }
+
+       // Connect!
+       for v, key := range parentMap {
+               if parent, ok := m[key]; ok {
+                       g.Connect(dag.BasicEdge(v, parent))
+               }
+       }
+
+       return nil
+}
+
+// PruneProviderTransformer is a GraphTransformer that prunes all the
+// providers that aren't needed from the graph. A provider is unneeded if
+// no resource or module is using that provider.
+type PruneProviderTransformer struct{}
+
+func (t *PruneProviderTransformer) Transform(g *Graph) error {
+       for _, v := range g.Vertices() {
+               // We only care about the providers
+               if pn, ok := v.(GraphNodeProvider); !ok || pn.ProviderName() == "" {
+                       continue
+               }
+               // Does anything depend on this? If not, then prune it.
+               if s := g.UpEdges(v); s.Len() == 0 {
+                       if nv, ok := v.(dag.NamedVertex); ok {
+                               log.Printf("[DEBUG] Pruning provider with no dependencies: %s", nv.Name())
+                       }
+                       g.Remove(v)
+               }
+       }
+
+       return nil
+}
+
+// providerMapKey is a helper that gives us the key to use for the
+// maps returned by things such as providerVertexMap.
+func providerMapKey(k string, v dag.Vertex) string {
+       pathPrefix := ""
+       if sp, ok := v.(GraphNodeSubPath); ok {
+               raw := normalizeModulePath(sp.Path())
+               if len(raw) > len(rootModulePath) {
+                       pathPrefix = modulePrefixStr(raw) + "."
+               }
+       }
+
+       return pathPrefix + k
+}
+
+func providerVertexMap(g *Graph) map[string]dag.Vertex {
+       m := make(map[string]dag.Vertex)
+       for _, v := range g.Vertices() {
+               if pv, ok := v.(GraphNodeProvider); ok {
+                       key := providerMapKey(pv.ProviderName(), v)
+                       m[key] = v
+               }
+       }
+
+       return m
+}
+
+func closeProviderVertexMap(g *Graph) map[string]dag.Vertex {
+       m := make(map[string]dag.Vertex)
+       for _, v := range g.Vertices() {
+               if pv, ok := v.(GraphNodeCloseProvider); ok {
+                       m[pv.CloseProviderName()] = v
+               }
+       }
+
+       return m
+}
+
+type graphNodeCloseProvider struct {
+       ProviderNameValue string
+}
+
+func (n *graphNodeCloseProvider) Name() string {
+       return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeCloseProvider) EvalTree() EvalNode {
+       return CloseProviderEvalTree(n.ProviderNameValue)
+}
+
+// GraphNodeDependable impl.
+func (n *graphNodeCloseProvider) DependableName() []string {
+       return []string{n.Name()}
+}
+
+func (n *graphNodeCloseProvider) CloseProviderName() string {
+       return n.ProviderNameValue
+}
+
+// GraphNodeDotter impl.
+func (n *graphNodeCloseProvider) DotNode(name string, opts *dag.DotOpts) *dag.DotNode {
+       if !opts.Verbose {
+               return nil
+       }
+       return &dag.DotNode{
+               Name: name,
+               Attrs: map[string]string{
+                       "label": n.Name(),
+                       "shape": "diamond",
+               },
+       }
+}
+
+// RemovableIfNotTargeted
+func (n *graphNodeCloseProvider) RemoveIfNotTargeted() bool {
+       // We need to add this so that this node will be removed if
+       // it isn't targeted or a dependency of a target.
+       return true
+}
+
+// graphNodeProviderConsumerDummy is a struct that never enters the real
+// graph (though it could to no ill effect). It implements
+// GraphNodeProviderConsumer and GraphNodeSubPath as a way to force
+// certain transformations.
+type graphNodeProviderConsumerDummy struct {
+       ProviderValue string
+       PathValue     []string
+}
+
+func (n *graphNodeProviderConsumerDummy) Path() []string {
+       return n.PathValue
+}
+
+func (n *graphNodeProviderConsumerDummy) ProvidedBy() []string {
+       return []string{n.ProviderValue}
+}
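+
+// exampleProviderSteps is an editor's illustrative sketch, not part of the
+// upstream source: the usual ordering of the provider transformers inside a
+// graph builder. The provider list and function name are hypothetical.
+func exampleProviderSteps(g *Graph) error {
+       steps := []GraphTransformer{
+               &MissingProviderTransformer{Providers: []string{"aws"}},
+               &ProviderTransformer{},
+               &CloseProviderTransformer{},
+               &PruneProviderTransformer{},
+       }
+       for _, step := range steps {
+               if err := step.Transform(g); err != nil {
+                       return err
+               }
+       }
+       return nil
+}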
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider_disable.go
new file mode 100644 (file)
index 0000000..d9919f3
--- /dev/null
@@ -0,0 +1,50 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// DisableProviderTransformer "disables" any providers that are not actually
+// used by anything. This avoids the provider being initialized and configured.
+// This both saves resources and avoids errors, since configuring a provider
+// may imply initialization, which may in turn require auth.
+type DisableProviderTransformer struct{}
+
+func (t *DisableProviderTransformer) Transform(g *Graph) error {
+       for _, v := range g.Vertices() {
+               // We only care about providers
+               pn, ok := v.(GraphNodeProvider)
+               if !ok || pn.ProviderName() == "" {
+                       continue
+               }
+
+               // If we have dependencies, then don't disable
+               if g.UpEdges(v).Len() > 0 {
+                       continue
+               }
+
+               // Get the path
+               var path []string
+               if pn, ok := v.(GraphNodeSubPath); ok {
+                       path = pn.Path()
+               }
+
+               // Disable the provider by replacing it with a "disabled" provider
+               disabled := &NodeDisabledProvider{
+                       NodeAbstractProvider: &NodeAbstractProvider{
+                               NameValue: pn.ProviderName(),
+                               PathValue: path,
+                       },
+               }
+
+               if !g.Replace(v, disabled) {
+                       panic(fmt.Sprintf(
+                               "vertex disappeared from under us: %s",
+                               dag.VertexName(v)))
+               }
+       }
+
+       return nil
+}
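+
+// exampleDisableUnused is an editor's illustrative sketch, not part of the
+// upstream source: disabling should run after providers and their consumers
+// are connected, so that UpEdges reflects actual usage.
+func exampleDisableUnused(g *Graph) error {
+       return (&DisableProviderTransformer{}).Transform(g)
+}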
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provisioner.go
new file mode 100644 (file)
index 0000000..f49d824
--- /dev/null
@@ -0,0 +1,206 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/go-multierror"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeProvisioner is an interface that nodes that can be a provisioner
+// must implement. The ProvisionerName returned is the name of the provisioner
+// they satisfy.
+type GraphNodeProvisioner interface {
+       ProvisionerName() string
+}
+
+// GraphNodeCloseProvisioner is an interface that nodes that can be a close
+// provisioner must implement. The CloseProvisionerName returned is the name
+// of the provisioner they satisfy.
+type GraphNodeCloseProvisioner interface {
+       CloseProvisionerName() string
+}
+
+// GraphNodeProvisionerConsumer is an interface that nodes that require
+// a provisioner must implement. ProvisionedBy must return the name of the
+// provisioner to use.
+type GraphNodeProvisionerConsumer interface {
+       ProvisionedBy() []string
+}
+
+// ProvisionerTransformer is a GraphTransformer that maps resources to
+// provisioners within the graph. This will error if there are any resources
+// that don't map to a proper provisioner.
+type ProvisionerTransformer struct{}
+
+func (t *ProvisionerTransformer) Transform(g *Graph) error {
+       // Go through the other nodes and match them to provisioners they need
+       var err error
+       m := provisionerVertexMap(g)
+       for _, v := range g.Vertices() {
+               if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
+                       for _, p := range pv.ProvisionedBy() {
+                               key := provisionerMapKey(p, pv)
+                               if m[key] == nil {
+                                       err = multierror.Append(err, fmt.Errorf(
+                                               "%s: provisioner %s couldn't be found",
+                                               dag.VertexName(v), p))
+                                       continue
+                               }
+
+                               g.Connect(dag.BasicEdge(v, m[key]))
+                       }
+               }
+       }
+
+       return err
+}
+
+// MissingProvisionerTransformer is a GraphTransformer that adds nodes
+// for missing provisioners into the graph.
+type MissingProvisionerTransformer struct {
+       // Provisioners is the list of provisioners we support.
+       Provisioners []string
+}
+
+func (t *MissingProvisionerTransformer) Transform(g *Graph) error {
+       // Create a set of our supported provisioners
+       supported := make(map[string]struct{}, len(t.Provisioners))
+       for _, v := range t.Provisioners {
+               supported[v] = struct{}{}
+       }
+
+       // Get the map of provisioners we already have in our graph
+       m := provisionerVertexMap(g)
+
+       // Go through all the provisioner consumers and make sure we add
+       // that provisioner if it is missing.
+       for _, v := range g.Vertices() {
+               pv, ok := v.(GraphNodeProvisionerConsumer)
+               if !ok {
+                       continue
+               }
+
+               // If this node has a subpath, then we use that as a prefix
+               // into our map to check for an existing provisioner.
+               var path []string
+               if sp, ok := pv.(GraphNodeSubPath); ok {
+                       raw := normalizeModulePath(sp.Path())
+                       if len(raw) > len(rootModulePath) {
+                               path = raw
+                       }
+               }
+
+               for _, p := range pv.ProvisionedBy() {
+                       // Build the key for storing in the map
+                       key := provisionerMapKey(p, pv)
+
+                       if _, ok := m[key]; ok {
+                               // This provisioner already exists as a configure node
+                               continue
+                       }
+
+                       if _, ok := supported[p]; !ok {
+                               // If we don't support the provisioner type, skip it.
+                               // Validation later will catch this as an error.
+                               continue
+                       }
+
+                       // Build the vertex
+                       var newV dag.Vertex = &NodeProvisioner{
+                               NameValue: p,
+                               PathValue: path,
+                       }
+
+                       // Add the missing provisioner node to the graph
+                       m[key] = g.Add(newV)
+               }
+       }
+
+       return nil
+}
+
+// CloseProvisionerTransformer is a GraphTransformer that adds nodes to the
+// graph that will close open provisioner connections that aren't needed
+// anymore. A provisioner connection is no longer needed once all resources
+// depending on it have been evaluated.
+type CloseProvisionerTransformer struct{}
+
+func (t *CloseProvisionerTransformer) Transform(g *Graph) error {
+       m := closeProvisionerVertexMap(g)
+       for _, v := range g.Vertices() {
+               if pv, ok := v.(GraphNodeProvisionerConsumer); ok {
+                       for _, p := range pv.ProvisionedBy() {
+                               source := m[p]
+
+                               if source == nil {
+                                       // Create a new graphNodeCloseProvisioner and add it to the graph
+                                       source = &graphNodeCloseProvisioner{ProvisionerNameValue: p}
+                                       g.Add(source)
+
+                                       // Make sure we also add the new graphNodeCloseProvisioner to the map
+                                       // so we don't create and add any duplicate graphNodeCloseProvisioners.
+                                       m[p] = source
+                               }
+
+                               g.Connect(dag.BasicEdge(source, v))
+                       }
+               }
+       }
+
+       return nil
+}
+
+// provisionerMapKey is a helper that gives us the key to use for the
+// maps returned by things such as provisionerVertexMap.
+func provisionerMapKey(k string, v dag.Vertex) string {
+       pathPrefix := ""
+       if sp, ok := v.(GraphNodeSubPath); ok {
+               raw := normalizeModulePath(sp.Path())
+               if len(raw) > len(rootModulePath) {
+                       pathPrefix = modulePrefixStr(raw) + "."
+               }
+       }
+
+       return pathPrefix + k
+}
+
+func provisionerVertexMap(g *Graph) map[string]dag.Vertex {
+       m := make(map[string]dag.Vertex)
+       for _, v := range g.Vertices() {
+               if pv, ok := v.(GraphNodeProvisioner); ok {
+                       key := provisionerMapKey(pv.ProvisionerName(), v)
+                       m[key] = v
+               }
+       }
+
+       return m
+}
+
+func closeProvisionerVertexMap(g *Graph) map[string]dag.Vertex {
+       m := make(map[string]dag.Vertex)
+       for _, v := range g.Vertices() {
+               if pv, ok := v.(GraphNodeCloseProvisioner); ok {
+                       m[pv.CloseProvisionerName()] = v
+               }
+       }
+
+       return m
+}
+
+type graphNodeCloseProvisioner struct {
+       ProvisionerNameValue string
+}
+
+func (n *graphNodeCloseProvisioner) Name() string {
+       return fmt.Sprintf("provisioner.%s (close)", n.ProvisionerNameValue)
+}
+
+// GraphNodeEvalable impl.
+func (n *graphNodeCloseProvisioner) EvalTree() EvalNode {
+       return &EvalCloseProvisioner{Name: n.ProvisionerNameValue}
+}
+
+func (n *graphNodeCloseProvisioner) CloseProvisionerName() string {
+       return n.ProvisionerNameValue
+}
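+
+// exampleProvisionerSteps is an editor's illustrative sketch, not part of
+// the upstream source: the usual ordering of the provisioner transformers.
+// The provisioner list and function name are hypothetical.
+func exampleProvisionerSteps(g *Graph) error {
+       steps := []GraphTransformer{
+               &MissingProvisionerTransformer{Provisioners: []string{"remote-exec"}},
+               &ProvisionerTransformer{},
+               &CloseProvisionerTransformer{},
+       }
+       for _, step := range steps {
+               if err := step.Transform(g); err != nil {
+                       return err
+               }
+       }
+       return nil
+}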
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go b/vendor/github.com/hashicorp/terraform/terraform/transform_reference.go
new file mode 100644 (file)
index 0000000..c545235
--- /dev/null
@@ -0,0 +1,321 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+       "strings"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeReferenceable must be implemented by any node that represents
+// a Terraform thing that can be referenced (resource, module, etc.).
+//
+// Even if the thing has no name, this should return an empty list. By
+// implementing this and returning a non-nil result, you say that this CAN
+// be referenced and other methods of referencing may still be possible (such
+// as by path!)
+type GraphNodeReferenceable interface {
+       // ReferenceableName is the name by which this can be referenced.
+       // This can be either just the type, or include the field. Example:
+       // "aws_instance.bar" or "aws_instance.bar.id".
+       ReferenceableName() []string
+}
+
+// GraphNodeReferencer must be implemented by nodes that reference other
+// Terraform items and therefore depend on them.
+type GraphNodeReferencer interface {
+       // References are the list of things that this node references. This
+       // can include fields or just the type, just like GraphNodeReferenceable
+       // above.
+       References() []string
+}
+
+// GraphNodeReferenceGlobal is an interface that can optionally be
+// implemented. If ReferenceGlobal returns true, then References()
+// and ReferenceableName() must be _fully qualified_ with "module.foo.bar"
+// etc.
+//
+// This allows a node to reference and be referenced by a specific name
+// that may cross module boundaries. This can be very dangerous so use
+// this wisely.
+//
+// The primary use case for this is module boundaries (variables coming in).
+type GraphNodeReferenceGlobal interface {
+       // Set to true to signal that references and name are fully
+       // qualified. See the above docs for more information.
+       ReferenceGlobal() bool
+}
+
+// ReferenceTransformer is a GraphTransformer that connects all the
+// nodes that reference each other in order to form the proper ordering.
+type ReferenceTransformer struct{}
+
+func (t *ReferenceTransformer) Transform(g *Graph) error {
+       // Build a reference map so we can efficiently look up the references
+       vs := g.Vertices()
+       m := NewReferenceMap(vs)
+
+       // Find the things that reference things and connect them
+       for _, v := range vs {
+               parents, _ := m.References(v)
+               parentsDbg := make([]string, len(parents))
+               for i, v := range parents {
+                       parentsDbg[i] = dag.VertexName(v)
+               }
+               log.Printf(
+                       "[DEBUG] ReferenceTransformer: %q references: %v",
+                       dag.VertexName(v), parentsDbg)
+
+               for _, parent := range parents {
+                       g.Connect(dag.BasicEdge(v, parent))
+               }
+       }
+
+       return nil
+}
+
+// ReferenceMap is a structure that can be used to efficiently check
+// for references on a graph.
+type ReferenceMap struct {
+       // references maps a referenceable name to the list of vertices that
+       // implement that name. referencedBy is the inverse: it maps a
+       // referenced name to the vertices that reference it. Both are built
+       // on initialization.
+       references   map[string][]dag.Vertex
+       referencedBy map[string][]dag.Vertex
+}
+
+// References returns the list of vertices that this vertex
+// references along with any missing references.
+func (m *ReferenceMap) References(v dag.Vertex) ([]dag.Vertex, []string) {
+       rn, ok := v.(GraphNodeReferencer)
+       if !ok {
+               return nil, nil
+       }
+
+       var matches []dag.Vertex
+       var missing []string
+       prefix := m.prefix(v)
+       for _, ns := range rn.References() {
+               found := false
+               for _, n := range strings.Split(ns, "/") {
+                       n = prefix + n
+                       parents, ok := m.references[n]
+                       if !ok {
+                               continue
+                       }
+
+                       // Mark that we found a match
+                       found = true
+
+                       // Make sure this isn't a self reference, which isn't included
+                       selfRef := false
+                       for _, p := range parents {
+                               if p == v {
+                                       selfRef = true
+                                       break
+                               }
+                       }
+                       if selfRef {
+                               continue
+                       }
+
+                       matches = append(matches, parents...)
+                       break
+               }
+
+               if !found {
+                       missing = append(missing, ns)
+               }
+       }
+
+       return matches, missing
+}
+
+// ReferencedBy returns the list of vertices that reference the
+// vertex passed in.
+func (m *ReferenceMap) ReferencedBy(v dag.Vertex) []dag.Vertex {
+       rn, ok := v.(GraphNodeReferenceable)
+       if !ok {
+               return nil
+       }
+
+       var matches []dag.Vertex
+       prefix := m.prefix(v)
+       for _, n := range rn.ReferenceableName() {
+               n = prefix + n
+               children, ok := m.referencedBy[n]
+               if !ok {
+                       continue
+               }
+
+               // Make sure this isn't a self reference, which isn't included
+               selfRef := false
+               for _, p := range children {
+                       if p == v {
+                               selfRef = true
+                               break
+                       }
+               }
+               if selfRef {
+                       continue
+               }
+
+               matches = append(matches, children...)
+       }
+
+       return matches
+}
+
+func (m *ReferenceMap) prefix(v dag.Vertex) string {
+       // If the node is stating it is already fully qualified then
+       // we don't have to create the prefix!
+       if gn, ok := v.(GraphNodeReferenceGlobal); ok && gn.ReferenceGlobal() {
+               return ""
+       }
+
+       // Create the prefix based on the path
+       var prefix string
+       if pn, ok := v.(GraphNodeSubPath); ok {
+               if path := normalizeModulePath(pn.Path()); len(path) > 1 {
+                       prefix = modulePrefixStr(path) + "."
+               }
+       }
+
+       return prefix
+}
+
+// NewReferenceMap is used to create a new reference map for the
+// given set of vertices.
+func NewReferenceMap(vs []dag.Vertex) *ReferenceMap {
+       var m ReferenceMap
+
+       // Build the lookup table
+       refMap := make(map[string][]dag.Vertex)
+       for _, v := range vs {
+               // We're only looking for referenceable nodes
+               rn, ok := v.(GraphNodeReferenceable)
+               if !ok {
+                       continue
+               }
+
+               // Go through and cache them
+               prefix := m.prefix(v)
+               for _, n := range rn.ReferenceableName() {
+                       n = prefix + n
+                       refMap[n] = append(refMap[n], v)
+               }
+
+               // If there is a path, it is always referenceable by that. For
+               // example, if this is a referenceable thing at path []string{"foo"},
+               // then it can be referenced at "module.foo"
+               if pn, ok := v.(GraphNodeSubPath); ok {
+                       for _, p := range ReferenceModulePath(pn.Path()) {
+                               refMap[p] = append(refMap[p], v)
+                       }
+               }
+       }
+
+       // Build the lookup table for referenced by
+       refByMap := make(map[string][]dag.Vertex)
+       for _, v := range vs {
+               // We're only looking for nodes that reference things
+               rn, ok := v.(GraphNodeReferencer)
+               if !ok {
+                       continue
+               }
+
+               // Go through and cache them
+               prefix := m.prefix(v)
+               for _, n := range rn.References() {
+                       n = prefix + n
+                       refByMap[n] = append(refByMap[n], v)
+               }
+       }
+
+       m.references = refMap
+       m.referencedBy = refByMap
+       return &m
+}
+
+// ReferenceModulePath returns the reference names for a module path. The
+// path "foo" would return "module.foo". If this is a deeply nested module,
+// every parent is included as well. For example: ["foo", "bar"] would
+// return both "module.foo" and "module.foo.module.bar".
+func ReferenceModulePath(p []string) []string {
+       p = normalizeModulePath(p)
+       if len(p) == 1 {
+               // Root, no name
+               return nil
+       }
+
+       result := make([]string, 0, len(p)-1)
+       for i := len(p); i > 1; i-- {
+               result = append(result, modulePrefixStr(p[:i]))
+       }
+
+       return result
+}
+
+// ReferencesFromConfig returns the references that a configuration has
+// based on the interpolated variables in that configuration.
+func ReferencesFromConfig(c *config.RawConfig) []string {
+       var result []string
+       for _, v := range c.Variables {
+               if r := ReferenceFromInterpolatedVar(v); len(r) > 0 {
+                       result = append(result, r...)
+               }
+       }
+
+       return result
+}
+
+// ReferenceFromInterpolatedVar returns the references from this variable,
+// or an empty list if there are none.
+func ReferenceFromInterpolatedVar(v config.InterpolatedVariable) []string {
+       switch v := v.(type) {
+       case *config.ModuleVariable:
+               return []string{fmt.Sprintf("module.%s.output.%s", v.Name, v.Field)}
+       case *config.ResourceVariable:
+               id := v.ResourceId()
+
+               // If we have a multi-reference (splat), then we depend on ALL
+               // resources with this type/name.
+               if v.Multi && v.Index == -1 {
+                       return []string{fmt.Sprintf("%s.*", id)}
+               }
+
+               // Otherwise, we depend on a specific index.
+               idx := v.Index
+               if !v.Multi || v.Index == -1 {
+                       idx = 0
+               }
+
+               // Depend on the index, as well as "N" which represents the
+               // un-expanded set of resources.
+               return []string{fmt.Sprintf("%s.%d/%s.N", id, idx, id)}
+       case *config.UserVariable:
+               return []string{fmt.Sprintf("var.%s", v.Name)}
+       default:
+               return nil
+       }
+}
+
+func modulePrefixStr(p []string) string {
+       parts := make([]string, 0, len(p)*2)
+       for _, p := range p[1:] {
+               parts = append(parts, "module", p)
+       }
+
+       return strings.Join(parts, ".")
+}
+
+func modulePrefixList(result []string, prefix string) []string {
+       if prefix != "" {
+               for i, v := range result {
+                       result[i] = fmt.Sprintf("%s.%s", prefix, v)
+               }
+       }
+
+       return result
+}
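+
+// exampleModuleReferences is an editor's illustrative sketch, not part of
+// the upstream source: the reference names generated for a nested module
+// path. For {"root", "foo", "bar"} this should yield "module.foo.module.bar"
+// and "module.foo".
+func exampleModuleReferences() []string {
+       return ReferenceModulePath([]string{"root", "foo", "bar"})
+}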
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go b/vendor/github.com/hashicorp/terraform/terraform/transform_resource_count.go
new file mode 100644 (file)
index 0000000..cda35cb
--- /dev/null
@@ -0,0 +1,51 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// ResourceCountTransformer is a GraphTransformer that expands the count
+// out for a specific resource.
+//
+// This assumes that the count is already interpolated.
+type ResourceCountTransformer struct {
+       Concrete ConcreteResourceNodeFunc
+
+       Count int
+       Addr  *ResourceAddress
+}
+
+func (t *ResourceCountTransformer) Transform(g *Graph) error {
+       // Don't allow the count to be negative
+       if t.Count < 0 {
+               return fmt.Errorf("negative count: %d", t.Count)
+       }
+
+       // For each count, build and add the node
+       for i := 0; i < t.Count; i++ {
+               // Set the index. If our count is 1 we special case it so that
+               // we handle the "resource.0" and "resource" boundary properly.
+               index := i
+               if t.Count == 1 {
+                       index = -1
+               }
+
+               // Build the resource address
+               addr := t.Addr.Copy()
+               addr.Index = index
+
+               // Build the abstract node and the concrete one
+               abstract := &NodeAbstractResource{Addr: addr}
+               var node dag.Vertex = abstract
+               if f := t.Concrete; f != nil {
+                       node = f(abstract)
+               }
+
+               // Add it to the graph
+               g.Add(node)
+       }
+
+       return nil
+}
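+
+// exampleExpandCount is an editor's illustrative sketch, not part of the
+// upstream source: expanding a counted resource into one node per index.
+// With Count == 3, nodes for indices 0, 1, and 2 are added; with Count == 1
+// a single node with index -1 is added instead.
+func exampleExpandCount(g *Graph, addr *ResourceAddress) error {
+       t := &ResourceCountTransformer{Count: 3, Addr: addr}
+       return t.Transform(g)
+}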
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_root.go b/vendor/github.com/hashicorp/terraform/terraform/transform_root.go
new file mode 100644 (file)
index 0000000..aee053d
--- /dev/null
@@ -0,0 +1,38 @@
+package terraform
+
+import "github.com/hashicorp/terraform/dag"
+
+const rootNodeName = "root"
+
+// RootTransformer is a GraphTransformer that adds a root to the graph.
+type RootTransformer struct{}
+
+func (t *RootTransformer) Transform(g *Graph) error {
+       // If we already have a good root, we're done
+       if _, err := g.Root(); err == nil {
+               return nil
+       }
+
+       // Add a root
+       var root graphNodeRoot
+       g.Add(root)
+
+       // Connect the root to all the edges that need it
+       for _, v := range g.Vertices() {
+               if v == root {
+                       continue
+               }
+
+               if g.UpEdges(v).Len() == 0 {
+                       g.Connect(dag.BasicEdge(root, v))
+               }
+       }
+
+       return nil
+}
+
+type graphNodeRoot struct{}
+
+func (n graphNodeRoot) Name() string {
+       return rootNodeName
+}
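+
+// exampleEnsureRoot is an editor's illustrative sketch, not part of the
+// upstream source: RootTransformer normally runs last, so that every
+// otherwise-unreferenced vertex hangs off the single root node.
+func exampleEnsureRoot(g *Graph) error {
+       return (&RootTransformer{}).Transform(g)
+}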
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_state.go b/vendor/github.com/hashicorp/terraform/terraform/transform_state.go
new file mode 100644 (file)
index 0000000..471cd74
--- /dev/null
@@ -0,0 +1,65 @@
+package terraform
+
+import (
+       "fmt"
+       "log"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// StateTransformer is a GraphTransformer that adds the elements of
+// the state to the graph.
+//
+// This transform is used for example by the DestroyPlanGraphBuilder to ensure
+// that only resources that are in the state are represented in the graph.
+type StateTransformer struct {
+       Concrete ConcreteResourceNodeFunc
+
+       State *State
+}
+
+func (t *StateTransformer) Transform(g *Graph) error {
+       // If the state is nil or empty (nil is empty) then do nothing
+       if t.State.Empty() {
+               return nil
+       }
+
+       // Go through all the modules in the state.
+       log.Printf("[TRACE] StateTransformer: starting")
+       var nodes []dag.Vertex
+       for _, ms := range t.State.Modules {
+               log.Printf("[TRACE] StateTransformer: Module: %v", ms.Path)
+
+               // Go through all the resources in this module.
+               for name, rs := range ms.Resources {
+                       log.Printf("[TRACE] StateTransformer: Resource %q: %#v", name, rs)
+
+                       // Add the resource to the graph
+                       addr, err := parseResourceAddressInternal(name)
+                       if err != nil {
+                               panic(fmt.Sprintf(
+                                       "Error parsing internal name, this is a bug: %q", name))
+                       }
+
+                       // Very important: add the module path for this resource to
+                       // the address. Remove "root" from it.
+                       addr.Path = ms.Path[1:]
+
+                       // Add the resource to the graph
+                       abstract := &NodeAbstractResource{Addr: addr}
+                       var node dag.Vertex = abstract
+                       if f := t.Concrete; f != nil {
+                               node = f(abstract)
+                       }
+
+                       nodes = append(nodes, node)
+               }
+       }
+
+       // Add all the nodes to the graph
+       for _, n := range nodes {
+               g.Add(n)
+       }
+
+       return nil
+}
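+
+// exampleStateNodes is an editor's illustrative sketch, not part of the
+// upstream source: seeding a graph purely from state, as a destroy-style
+// graph builder would.
+func exampleStateNodes(g *Graph, s *State) error {
+       return (&StateTransformer{State: s}).Transform(g)
+}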
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go b/vendor/github.com/hashicorp/terraform/terraform/transform_targets.go
new file mode 100644 (file)
index 0000000..225ac4b
--- /dev/null
@@ -0,0 +1,144 @@
+package terraform
+
+import (
+       "log"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// GraphNodeTargetable is an interface for graph nodes to implement when they
+// need to be told about incoming targets. This is useful for nodes that need
+// to respect targets as they dynamically expand. Note that the list passed
+// in contains every target provided; each implementing graph node must
+// filter it down to the targets it considers relevant.
+type GraphNodeTargetable interface {
+       SetTargets([]ResourceAddress)
+}
+
+// TargetsTransformer is a GraphTransformer that, when the user specifies a
+// list of resources to target, limits the graph to only those resources and
+// their dependencies.
+type TargetsTransformer struct {
+       // List of targeted resource names specified by the user
+       Targets []string
+
+       // List of parsed targets, provided by callers like ResourceCountTransform
+       // that already have the targets parsed
+       ParsedTargets []ResourceAddress
+
+       // Set to true when we're in a `terraform destroy` or a
+       // `terraform plan -destroy`
+       Destroy bool
+}
+
+func (t *TargetsTransformer) Transform(g *Graph) error {
+       if len(t.Targets) > 0 && len(t.ParsedTargets) == 0 {
+               addrs, err := t.parseTargetAddresses()
+               if err != nil {
+                       return err
+               }
+
+               t.ParsedTargets = addrs
+       }
+
+       if len(t.ParsedTargets) > 0 {
+               targetedNodes, err := t.selectTargetedNodes(g, t.ParsedTargets)
+               if err != nil {
+                       return err
+               }
+
+               for _, v := range g.Vertices() {
+                       removable := false
+                       if _, ok := v.(GraphNodeResource); ok {
+                               removable = true
+                       }
+                       if vr, ok := v.(RemovableIfNotTargeted); ok {
+                               removable = vr.RemoveIfNotTargeted()
+                       }
+                       if removable && !targetedNodes.Include(v) {
+                               log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v))
+                               g.Remove(v)
+                       }
+               }
+       }
+
+       return nil
+}
+
+func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) {
+       addrs := make([]ResourceAddress, len(t.Targets))
+       for i, target := range t.Targets {
+               ta, err := ParseResourceAddress(target)
+               if err != nil {
+                       return nil, err
+               }
+               addrs[i] = *ta
+       }
+
+       return addrs, nil
+}
+
+// Returns the list of targeted nodes. A targeted node is either addressed
+// directly, or is an Ancestor of a targeted node. Destroy mode keeps
+// Descendents instead of Ancestors.
+func (t *TargetsTransformer) selectTargetedNodes(
+       g *Graph, addrs []ResourceAddress) (*dag.Set, error) {
+       targetedNodes := new(dag.Set)
+       for _, v := range g.Vertices() {
+               if t.nodeIsTarget(v, addrs) {
+                       targetedNodes.Add(v)
+
+                       // We inform nodes that ask about the list of targets - helps for nodes
+                       // that need to dynamically expand. Note that this only occurs for nodes
+                       // that are already directly targeted.
+                       if tn, ok := v.(GraphNodeTargetable); ok {
+                               tn.SetTargets(addrs)
+                       }
+
+                       var deps *dag.Set
+                       var err error
+                       if t.Destroy {
+                               deps, err = g.Descendents(v)
+                       } else {
+                               deps, err = g.Ancestors(v)
+                       }
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       for _, d := range deps.List() {
+                               targetedNodes.Add(d)
+                       }
+               }
+       }
+
+       return targetedNodes, nil
+}
+
+func (t *TargetsTransformer) nodeIsTarget(
+       v dag.Vertex, addrs []ResourceAddress) bool {
+       r, ok := v.(GraphNodeResource)
+       if !ok {
+               return false
+       }
+
+       addr := r.ResourceAddr()
+       for _, targetAddr := range addrs {
+               if targetAddr.Equals(addr) {
+                       return true
+               }
+       }
+
+       return false
+}
+
+// RemovableIfNotTargeted is a special interface for graph nodes that
+// aren't directly addressable, but need to be removed from the graph when they
+// are not targeted. (Nodes that are not directly targeted end up in the set of
+// targeted nodes because something that _is_ targeted depends on them.) The
+// initial use case for this interface is GraphNodeConfigVariable, which was
+// having trouble interpolating for module variables in targeted scenarios that
+// filtered out the resource node being referenced.
+type RemovableIfNotTargeted interface {
+       RemoveIfNotTargeted() bool
+}
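+
+// exampleTargeting is an editor's illustrative sketch, not part of the
+// upstream source: restricting a graph to a single resource and its
+// dependencies. The resource address is hypothetical.
+func exampleTargeting(g *Graph) error {
+       t := &TargetsTransformer{Targets: []string{"aws_instance.example"}}
+       return t.Transform(g)
+}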
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go b/vendor/github.com/hashicorp/terraform/terraform/transform_transitive_reduction.go
new file mode 100644 (file)
index 0000000..2184278
--- /dev/null
@@ -0,0 +1,20 @@
+package terraform
+
+// TransitiveReductionTransformer is a GraphTransformer that finds the
+// transitive reduction of the graph. For a definition of transitive
+// reduction, see Wikipedia.
+type TransitiveReductionTransformer struct{}
+
+func (t *TransitiveReductionTransformer) Transform(g *Graph) error {
+       // If the graph isn't valid, skip the transitive reduction.
+       // We don't error here because Terraform itself handles graph
+       // validation in a better way, or we assume it does.
+       if err := g.Validate(); err != nil {
+               return nil
+       }
+
+       // Do it
+       g.TransitiveReduction()
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go b/vendor/github.com/hashicorp/terraform/terraform/transform_variable.go
new file mode 100644 (file)
index 0000000..b31e2c7
--- /dev/null
@@ -0,0 +1,40 @@
+package terraform
+
+import (
+       "github.com/hashicorp/terraform/config/module"
+)
+
+// RootVariableTransformer is a GraphTransformer that adds all the root
+// variables to the graph.
+//
+// Root variables are currently no-ops but they must be added to the
+// graph since downstream things that depend on them must be able to
+// reach them.
+type RootVariableTransformer struct {
+       Module *module.Tree
+}
+
+func (t *RootVariableTransformer) Transform(g *Graph) error {
+       // If no config, no variables
+       if t.Module == nil {
+               return nil
+       }
+
+       // If we have no vars, we're done!
+       vars := t.Module.Config().Variables
+       if len(vars) == 0 {
+               return nil
+       }
+
+       // Add all variables here
+       for _, v := range vars {
+               node := &NodeRootVariable{
+                       Config: v,
+               }
+
+               // Add it!
+               g.Add(node)
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go b/vendor/github.com/hashicorp/terraform/terraform/transform_vertex.go
new file mode 100644 (file)
index 0000000..6b1293f
--- /dev/null
@@ -0,0 +1,44 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/terraform/dag"
+)
+
+// VertexTransformer is a GraphTransformer that transforms vertices
+// using the GraphVertexTransformers. The Transforms are run in sequential
+// order. If a transform replaces a vertex then the next transform will see
+// the new vertex.
+type VertexTransformer struct {
+       Transforms []GraphVertexTransformer
+}
+
+func (t *VertexTransformer) Transform(g *Graph) error {
+       for _, v := range g.Vertices() {
+               for _, vt := range t.Transforms {
+                       newV, err := vt.Transform(v)
+                       if err != nil {
+                               return err
+                       }
+
+                       // If the vertex didn't change, then don't do anything more
+                       if newV == v {
+                               continue
+                       }
+
+                       // Vertex changed, replace it within the graph
+                       if ok := g.Replace(v, newV); !ok {
+                               // This should never happen, big problem
+                               return fmt.Errorf(
+                                       "Failed to replace %s with %s!\n\nSource: %#v\n\nTarget: %#v",
+                                       dag.VertexName(v), dag.VertexName(newV), v, newV)
+                       }
+
+                       // Replace v so that future transforms use the proper vertex
+                       v = newV
+               }
+       }
+
+       return nil
+}
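+
+// identityVertexTransform is an editor's illustrative sketch, not part of
+// the upstream source: a GraphVertexTransformer that leaves every vertex
+// unchanged, matching the Transform(vertex) (vertex, error) shape used by
+// VertexTransformer above.
+type identityVertexTransform struct{}
+
+func (identityVertexTransform) Transform(v dag.Vertex) (dag.Vertex, error) {
+       return v, nil
+}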
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input.go
new file mode 100644 (file)
index 0000000..7c87459
--- /dev/null
@@ -0,0 +1,26 @@
+package terraform
+
+// UIInput is the interface that must be implemented to ask for input
+// from the user. Implementations should forward the request to wherever
+// the user actually provides input and return the entered value.
+type UIInput interface {
+       Input(*InputOpts) (string, error)
+}
+
+// InputOpts are options for asking for input.
+type InputOpts struct {
+       // Id is a unique ID for the question being asked that might be
+       // used for logging or to look up a prior answered question.
+       Id string
+
+       // Query is a human-friendly question for inputting this value.
+       Query string
+
+       // Description is a description of what this option is. Be aware
+       // that this will probably be rendered in a terminal, so split
+       // lines as necessary.
+       Description string
+
+       // Default will be the value returned if no data is entered.
+       Default string
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_mock.go
new file mode 100644 (file)
index 0000000..e3a07ef
--- /dev/null
@@ -0,0 +1,23 @@
+package terraform
+
+// MockUIInput is an implementation of UIInput that can be used for tests.
+type MockUIInput struct {
+       InputCalled       bool
+       InputOpts         *InputOpts
+       InputReturnMap    map[string]string
+       InputReturnString string
+       InputReturnError  error
+       InputFn           func(*InputOpts) (string, error)
+}
+
+func (i *MockUIInput) Input(opts *InputOpts) (string, error) {
+       i.InputCalled = true
+       i.InputOpts = opts
+       if i.InputFn != nil {
+               return i.InputFn(opts)
+       }
+       if i.InputReturnMap != nil {
+               return i.InputReturnMap[opts.Id], i.InputReturnError
+       }
+       return i.InputReturnString, i.InputReturnError
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go b/vendor/github.com/hashicorp/terraform/terraform/ui_input_prefix.go
new file mode 100644 (file)
index 0000000..2207d1d
--- /dev/null
@@ -0,0 +1,19 @@
+package terraform
+
+import (
+       "fmt"
+)
+
+// PrefixUIInput is an implementation of UIInput that prefixes the ID
+// with a string, allowing queries to be namespaced.
+type PrefixUIInput struct {
+       IdPrefix    string
+       QueryPrefix string
+       UIInput     UIInput
+}
+
+func (i *PrefixUIInput) Input(opts *InputOpts) (string, error) {
+       opts.Id = fmt.Sprintf("%s.%s", i.IdPrefix, opts.Id)
+       opts.Query = fmt.Sprintf("%s%s", i.QueryPrefix, opts.Query)
+       return i.UIInput.Input(opts)
+}
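+
+// examplePrefixedInput is an editor's illustrative sketch, not part of the
+// upstream source: namespacing an input query under a provider ID. The IDs
+// shown are hypothetical.
+func examplePrefixedInput(base UIInput) (string, error) {
+       in := &PrefixUIInput{IdPrefix: "provider.aws", QueryPrefix: "aws: ", UIInput: base}
+       return in.Input(&InputOpts{Id: "access_key", Query: "Enter an access key"})
+}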
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output.go
new file mode 100644 (file)
index 0000000..84427c6
--- /dev/null
@@ -0,0 +1,7 @@
+package terraform
+
+// UIOutput is the interface that must be implemented to output
+// data to the end user.
+type UIOutput interface {
+       Output(string)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_callback.go
new file mode 100644 (file)
index 0000000..135a91c
--- /dev/null
@@ -0,0 +1,9 @@
+package terraform
+
+// CallbackUIOutput is an implementation of UIOutput that forwards each
+// output message to the given callback function.
+type CallbackUIOutput struct {
+       OutputFn func(string)
+}
+
+func (o *CallbackUIOutput) Output(v string) {
+       o.OutputFn(v)
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_mock.go
new file mode 100644 (file)
index 0000000..7852bc4
--- /dev/null
@@ -0,0 +1,16 @@
+package terraform
+
+// MockUIOutput is an implementation of UIOutput that can be used for tests.
+type MockUIOutput struct {
+       OutputCalled  bool
+       OutputMessage string
+       OutputFn      func(string)
+}
+
+func (o *MockUIOutput) Output(v string) {
+       o.OutputCalled = true
+       o.OutputMessage = v
+       if o.OutputFn != nil {
+               o.OutputFn(v)
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go b/vendor/github.com/hashicorp/terraform/terraform/ui_output_provisioner.go
new file mode 100644 (file)
index 0000000..878a031
--- /dev/null
@@ -0,0 +1,15 @@
+package terraform
+
+// ProvisionerUIOutput is an implementation of UIOutput that calls a hook
+// for the output so that the hooks can handle it.
+type ProvisionerUIOutput struct {
+       Info  *InstanceInfo
+       Type  string
+       Hooks []Hook
+}
+
+func (o *ProvisionerUIOutput) Output(msg string) {
+       for _, h := range o.Hooks {
+               h.ProvisionOutput(o.Info, o.Type, msg)
+       }
+}
diff --git a/vendor/github.com/hashicorp/terraform/terraform/util.go b/vendor/github.com/hashicorp/terraform/terraform/util.go
new file mode 100644 (file)
index 0000000..f41f0d7
--- /dev/null
@@ -0,0 +1,93 @@
+package terraform
+
+import (
+       "sort"
+       "strings"
+)
+
+// Semaphore is a wrapper around a channel to provide
+// utility methods to clarify that we are treating the
+// channel as a semaphore
+type Semaphore chan struct{}
+
+// NewSemaphore creates a semaphore that allows up
+// to a given limit of simultaneous acquisitions
+func NewSemaphore(n int) Semaphore {
+       if n == 0 {
+               panic("semaphore with limit 0")
+       }
+       ch := make(chan struct{}, n)
+       return Semaphore(ch)
+}
+
+// Acquire is used to acquire an available slot.
+// Blocks until available.
+func (s Semaphore) Acquire() {
+       s <- struct{}{}
+}
+
+// TryAcquire is used to do a non-blocking acquire.
+// Returns a bool indicating success
+func (s Semaphore) TryAcquire() bool {
+       select {
+       case s <- struct{}{}:
+               return true
+       default:
+               return false
+       }
+}
+
+// Release is used to return a slot. Acquire must
+// be called as a pre-condition.
+func (s Semaphore) Release() {
+       select {
+       case <-s:
+       default:
+               panic("release without an acquire")
+       }
+}
+
+// resourceProvider returns the provider name for the given type.
+func resourceProvider(t, alias string) string {
+       if alias != "" {
+               return alias
+       }
+
+       idx := strings.IndexRune(t, '_')
+       if idx == -1 {
+               // If no underscores, the resource name is assumed to be
+               // also the provider name, e.g. if the provider exposes
+               // only a single resource of each type.
+               return t
+       }
+
+       return t[:idx]
+}
+
+// strSliceContains checks if a given string is contained in a slice
+// When anybody asks why Go needs generics, here you go.
+func strSliceContains(haystack []string, needle string) bool {
+       for _, s := range haystack {
+               if s == needle {
+                       return true
+               }
+       }
+       return false
+}
+
+// uniqueStrings deduplicates a slice of strings.
+func uniqueStrings(s []string) []string {
+       if len(s) < 2 {
+               return s
+       }
+
+       sort.Strings(s)
+       result := make([]string, 1, len(s))
+       result[0] = s[0]
+       for i := 1; i < len(s); i++ {
+               if s[i] != result[len(result)-1] {
+                       result = append(result, s[i])
+               }
+       }
+       return result
+}
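
The Semaphore above is nothing more than a buffered channel whose capacity is the concurrency limit. A standalone sketch of the acquire/release pattern, with the type reproduced from util.go and a limit of 2 chosen for illustration:

```go
package main

import (
	"fmt"
	"sync"
)

// Reproduced from util.go: a buffered channel used as a semaphore.
type Semaphore chan struct{}

func NewSemaphore(n int) Semaphore { return make(Semaphore, n) }
func (s Semaphore) Acquire()       { s <- struct{}{} }
func (s Semaphore) Release()       { <-s }

func main() {
	sem := NewSemaphore(2) // at most two goroutines hold a slot at once
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			sem.Acquire()
			defer sem.Release()
			fmt.Println("worker", i, "holds a slot")
		}(i)
	}
	wg.Wait()
}
```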
diff --git a/vendor/github.com/hashicorp/terraform/terraform/variables.go b/vendor/github.com/hashicorp/terraform/terraform/variables.go
new file mode 100644 (file)
index 0000000..300f2ad
--- /dev/null
@@ -0,0 +1,166 @@
+package terraform
+
+import (
+       "fmt"
+       "os"
+       "strings"
+
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+       "github.com/hashicorp/terraform/helper/hilmapstructure"
+)
+
+// Variables returns the fully loaded set of variables to use with
+// ContextOpts and NewContext, loading any additional variables from
+// the environment or any other sources.
+//
+// The given module tree doesn't need to be loaded.
+func Variables(
+       m *module.Tree,
+       override map[string]interface{}) (map[string]interface{}, error) {
+       result := make(map[string]interface{})
+
+       // Variables are loaded in the following sequence. Each additional step
+       // will override conflicting variable keys from prior steps:
+       //
+       //   * Take default values from config
+       //   * Take values from TF_VAR_x env vars
+       //   * Take values specified in the "override" param which is usually
+       //     from -var, -var-file, etc.
+       //
+
+       // First load from the config
+       for _, v := range m.Config().Variables {
+               // If the var has no default, ignore
+               if v.Default == nil {
+                       continue
+               }
+
+               // If the type isn't a string, we use it as-is since it is a rich type
+               if v.Type() != config.VariableTypeString {
+                       result[v.Name] = v.Default
+                       continue
+               }
+
+               // v.Default has already been parsed as HCL but it may be an int type
+               switch typedDefault := v.Default.(type) {
+               case string:
+                       if typedDefault == "" {
+                               continue
+                       }
+                       result[v.Name] = typedDefault
+               case int, int64:
+                       result[v.Name] = fmt.Sprintf("%d", typedDefault)
+               case float32, float64:
+                       result[v.Name] = fmt.Sprintf("%f", typedDefault)
+               case bool:
+                       result[v.Name] = fmt.Sprintf("%t", typedDefault)
+               default:
+                       panic(fmt.Sprintf(
+                               "Unknown default var type: %T\n\n"+
+                                       "THIS IS A BUG. Please report it.",
+                               v.Default))
+               }
+       }
+
+       // Load from env vars
+       for _, v := range os.Environ() {
+               if !strings.HasPrefix(v, VarEnvPrefix) {
+                       continue
+               }
+
+               // Strip off the prefix and get the value after the first "="
+               idx := strings.Index(v, "=")
+               k := v[len(VarEnvPrefix):idx]
+               v = v[idx+1:]
+
+               // Override the configuration-default values. Note that *not* finding the variable
+               // in configuration is OK, as we don't want to preclude people from having multiple
+               // sets of TF_VAR_whatever in their environment even if it is a little weird.
+               for _, schema := range m.Config().Variables {
+                       if schema.Name != k {
+                               continue
+                       }
+
+                       varType := schema.Type()
+                       varVal, err := parseVariableAsHCL(k, v, varType)
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       switch varType {
+                       case config.VariableTypeMap:
+                               if err := varSetMap(result, k, varVal); err != nil {
+                                       return nil, err
+                               }
+                       default:
+                               result[k] = varVal
+                       }
+               }
+       }
+
+       // Load from overrides
+       for k, v := range override {
+               for _, schema := range m.Config().Variables {
+                       if schema.Name != k {
+                               continue
+                       }
+
+                       switch schema.Type() {
+                       case config.VariableTypeList:
+                               result[k] = v
+                       case config.VariableTypeMap:
+                               if err := varSetMap(result, k, v); err != nil {
+                                       return nil, err
+                               }
+                       case config.VariableTypeString:
+                               // Convert to a string and set. We don't catch any errors
+                               // here because the validation step later should catch
+                               // any type errors.
+                               var strVal string
+                               if err := hilmapstructure.WeakDecode(v, &strVal); err == nil {
+                                       result[k] = strVal
+                               } else {
+                                       result[k] = v
+                               }
+                       default:
+                               panic(fmt.Sprintf(
+                                       "Unhandled var type: %T\n\n"+
+                                               "THIS IS A BUG. Please report it.",
+                                       schema.Type()))
+                       }
+               }
+       }
+
+       return result, nil
+}
+
+// varSetMap sets or merges the map in "v" with the key "k" in the
+// "current" set of variables. This is just a private function to remove
+// duplicate logic in Variables
+func varSetMap(current map[string]interface{}, k string, v interface{}) error {
+       existing, ok := current[k]
+       if !ok {
+               current[k] = v
+               return nil
+       }
+
+       existingMap, ok := existing.(map[string]interface{})
+       if !ok {
+               panic(fmt.Sprintf("%q is not a map, this is a bug in Terraform.", k))
+       }
+
+       switch typedV := v.(type) {
+       case []map[string]interface{}:
+               for newKey, newVal := range typedV[0] {
+                       existingMap[newKey] = newVal
+               }
+       case map[string]interface{}:
+               for newKey, newVal := range typedV {
+                       existingMap[newKey] = newVal
+               }
+       default:
+               return fmt.Errorf("variable %q should be type map, got %s", k, hclTypeName(v))
+       }
+       return nil
+}
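
The environment step in Variables strips the TF_VAR_ prefix and splits each entry at the first "=". A minimal sketch of just that parsing; the value of the package-level VarEnvPrefix constant is assumed here to be "TF_VAR_":

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// Assumed value of the VarEnvPrefix constant defined elsewhere
// in the terraform package.
const VarEnvPrefix = "TF_VAR_"

func main() {
	os.Setenv("TF_VAR_region", "us-east-1")

	for _, v := range os.Environ() {
		if !strings.HasPrefix(v, VarEnvPrefix) {
			continue
		}
		// Same split as in Variables: key before the first "=",
		// value after it.
		idx := strings.Index(v, "=")
		k := v[len(VarEnvPrefix):idx]
		fmt.Printf("%s => %q\n", k, v[idx+1:]) // region => "us-east-1"
	}
}
```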
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version.go b/vendor/github.com/hashicorp/terraform/terraform/version.go
new file mode 100644 (file)
index 0000000..93fb429
--- /dev/null
@@ -0,0 +1,31 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/go-version"
+)
+
+// The main version number that is being run at the moment.
+const Version = "0.9.5"
+
+// A pre-release marker for the version. If this is "" (empty string)
+// then it means that it is a final release. Otherwise, this is a pre-release
+// such as "dev" (in development), "beta", "rc1", etc.
+const VersionPrerelease = ""
+
+// SemVersion is an instance of version.Version. This has the secondary
+// benefit of verifying during tests and init time that our version is a
+// proper semantic version, which should always be the case.
+var SemVersion = version.Must(version.NewVersion(Version))
+
+// VersionHeader is the header name used to send the current terraform version
+// in http requests.
+const VersionHeader = "Terraform-Version"
+
+func VersionString() string {
+       if VersionPrerelease != "" {
+               return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
+       }
+       return Version
+}
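
VersionString only appends the pre-release marker when it is non-empty. A tiny standalone sketch of that logic (function name illustrative):

```go
package main

import "fmt"

// versionString mirrors the logic of VersionString above.
func versionString(version, prerelease string) string {
	if prerelease != "" {
		return fmt.Sprintf("%s-%s", version, prerelease)
	}
	return version
}

func main() {
	fmt.Println(versionString("0.9.5", ""))    // 0.9.5
	fmt.Println(versionString("0.9.5", "dev")) // 0.9.5-dev
}
```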
diff --git a/vendor/github.com/hashicorp/terraform/terraform/version_required.go b/vendor/github.com/hashicorp/terraform/terraform/version_required.go
new file mode 100644 (file)
index 0000000..3cbbf56
--- /dev/null
@@ -0,0 +1,69 @@
+package terraform
+
+import (
+       "fmt"
+
+       "github.com/hashicorp/go-version"
+       "github.com/hashicorp/terraform/config"
+       "github.com/hashicorp/terraform/config/module"
+)
+
+// checkRequiredVersion verifies that any version requirements specified by
+// the configuration are met.
+//
+// This checks the root module as well as any additional version requirements
+// from child modules.
+//
+// This is tested in context_test.go.
+func checkRequiredVersion(m *module.Tree) error {
+       // Check any children
+       for _, c := range m.Children() {
+               if err := checkRequiredVersion(c); err != nil {
+                       return err
+               }
+       }
+
+       var tf *config.Terraform
+       if c := m.Config(); c != nil {
+               tf = c.Terraform
+       }
+
+       // If there is no Terraform config or the required version isn't set,
+       // we move on.
+       if tf == nil || tf.RequiredVersion == "" {
+               return nil
+       }
+
+       // Path for errors
+       module := "root"
+       if path := normalizeModulePath(m.Path()); len(path) > 1 {
+               module = modulePrefixStr(path)
+       }
+
+       // Check this version requirement of this module
+       cs, err := version.NewConstraint(tf.RequiredVersion)
+       if err != nil {
+               return fmt.Errorf(
+                       "%s: terraform.required_version %q syntax error: %s",
+                       module,
+                       tf.RequiredVersion, err)
+       }
+
+       if !cs.Check(SemVersion) {
+               return fmt.Errorf(
+                       "The currently running version of Terraform doesn't meet the\n"+
+                               "version requirements explicitly specified by the configuration.\n"+
+                               "Please use the required version or update the configuration.\n"+
+                               "Note that version requirements are usually set for a reason, so\n"+
+                               "we recommend verifying with whoever set the version requirements\n"+
+                               "prior to making any manual changes.\n\n"+
+                               "  Module: %s\n"+
+                               "  Required version: %s\n"+
+                               "  Current version: %s",
+                       module,
+                       tf.RequiredVersion,
+                       SemVersion)
+       }
+
+       return nil
+}
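
checkRequiredVersion delegates the actual comparison to the vendored go-version library. A minimal sketch of the same constraint check in isolation; the constraint string is illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	// Built the same way as SemVersion in version.go.
	current := version.Must(version.NewVersion("0.9.5"))

	// An illustrative terraform.required_version constraint.
	cs, err := version.NewConstraint(">= 0.9, < 0.10")
	if err != nil {
		panic(err) // syntax error in the constraint
	}
	fmt.Println(cs.Check(current)) // true
}
```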
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go
new file mode 100644 (file)
index 0000000..cbd78dd
--- /dev/null
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=walkOperation graph_walk_operation.go"; DO NOT EDIT.
+
+package terraform
+
+import "fmt"
+
+const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"
+
+var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}
+
+func (i walkOperation) String() string {
+       if i >= walkOperation(len(_walkOperation_index)-1) {
+               return fmt.Sprintf("walkOperation(%d)", i)
+       }
+       return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
+}
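
The generated stringer code packs all enum names into one string and slices it with an offset table: name i spans index[i]:index[i+1]. A minimal sketch of the same technique using two of the names:

```go
package main

import "fmt"

// Same layout as the generated code: concatenated names plus an
// offset table bracketing each name.
const names = "walkInvalidwalkInput"

var index = [...]uint8{0, 11, 20}

func main() {
	for i := 0; i+1 < len(index); i++ {
		fmt.Println(names[index[i]:index[i+1]])
	}
	// Output:
	// walkInvalid
	// walkInput
}
```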
diff --git a/vendor/github.com/hashicorp/yamux/LICENSE b/vendor/github.com/hashicorp/yamux/LICENSE
new file mode 100644 (file)
index 0000000..f0e5c79
--- /dev/null
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+      means any patent claim(s), including without limitation, method,
+      process, and apparatus claims, in any patent Licensable by such
+      Contributor that would be infringed, but for the grant of the License,
+      by the making, using, selling, offering for sale, having made, import,
+      or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+      means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, "You" includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, "control" means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or
+        as part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its
+        Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution
+     become effective for each Contribution on the date the Contributor first
+     distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under
+     this License. No additional rights or licenses will be implied from the
+     distribution or licensing of Covered Software under this License.
+     Notwithstanding Section 2.1(b) above, no patent license is granted by a
+     Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party's
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of
+        its Contributions.
+
+     This License does not grant any rights in the trademarks, service marks,
+     or logos of any Contributor (except as may be necessary to comply with
+     the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this
+     License (see Section 10.2) or under the terms of a Secondary License (if
+     permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its
+     Contributions are its original creation(s) or it has sufficient rights to
+     grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under
+     applicable copyright doctrines of fair use, fair dealing, or other
+     equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under
+     the terms of this License. You must inform recipients that the Source
+     Code Form of the Covered Software is governed by the terms of this
+     License, and how they can obtain a copy of this License. You may not
+     attempt to alter or restrict the recipients' rights in the Source Code
+     Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this
+        License, or sublicense it under different terms, provided that the
+        license for the Executable Form does not attempt to limit or alter the
+        recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for
+     the Covered Software. If the Larger Work is a combination of Covered
+     Software with a work governed by one or more Secondary Licenses, and the
+     Covered Software is not Incompatible With Secondary Licenses, this
+     License permits You to additionally distribute such Covered Software
+     under the terms of such Secondary License(s), so that the recipient of
+     the Larger Work may, at their option, further distribute the Covered
+     Software under the terms of either this License or such Secondary
+     License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices
+     (including copyright notices, patent notices, disclaimers of warranty, or
+     limitations of liability) contained within the Source Code Form of the
+     Covered Software, except that You may alter any license notices to the
+     extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on
+     behalf of any Contributor. You must make it absolutely clear that any
+     such warranty, support, indemnity, or liability obligation is offered by
+     You alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute,
+   judicial order, or regulation then You must: (a) comply with the terms of
+   this License to the maximum extent possible; and (b) describe the
+   limitations and the code they affect. Such description must be placed in a
+   text file included with all distributions of the Covered Software under
+   this License. Except to the extent prohibited by statute or regulation,
+   such description must be sufficiently detailed for a recipient of ordinary
+   skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing
+     basis, if such Contributor fails to notify You of the non-compliance by
+     some reasonable means prior to 60 days after You have come back into
+     compliance. Moreover, Your grants from a particular Contributor are
+     reinstated on an ongoing basis if such Contributor notifies You of the
+     non-compliance by some reasonable means, this is the first time You have
+     received notice of non-compliance with this License from such
+     Contributor, and You become compliant prior to 30 days after Your receipt
+     of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions,
+     counter-claims, and cross-claims) alleging that a Contributor Version
+     directly or indirectly infringes any patent, then the rights granted to
+     You by any and all Contributors for the Covered Software under Section
+     2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis,
+   without warranty of any kind, either expressed, implied, or statutory,
+   including, without limitation, warranties that the Covered Software is free
+   of defects, merchantable, fit for a particular purpose or non-infringing.
+   The entire risk as to the quality and performance of the Covered Software
+   is with You. Should any Covered Software prove defective in any respect,
+   You (not any Contributor) assume the cost of any necessary servicing,
+   repair, or correction. This disclaimer of warranty constitutes an essential
+   part of this License. No use of  any Covered Software is authorized under
+   this License except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from
+   such party's negligence to the extent applicable law prohibits such
+   limitation. Some jurisdictions do not allow the exclusion or limitation of
+   incidental or consequential damages, so this exclusion and limitation may
+   not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts
+   of a jurisdiction where the defendant maintains its principal place of
+   business and such litigation shall be governed by laws of that
+   jurisdiction, without reference to its conflict-of-law provisions. Nothing
+   in this Section shall prevent a party's ability to bring cross-claims or
+   counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses If You choose to distribute Source Code Form that is
+      Incompatible With Secondary Licenses under the terms of this version of
+      the License, the notice described in Exhibit B of this License must be
+      attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/yamux/README.md b/vendor/github.com/hashicorp/yamux/README.md
new file mode 100644 (file)
index 0000000..d4db7fc
--- /dev/null
@@ -0,0 +1,86 @@
+# Yamux
+
+Yamux (Yet another Multiplexer) is a multiplexing library for Golang.
+It relies on an underlying connection to provide reliability
+and ordering, such as TCP or Unix domain sockets, and provides
+stream-oriented multiplexing. It is inspired by SPDY but is not
+interoperable with it.
+
+Yamux features include:
+
+* Bi-directional streams
+  * Streams can be opened by either client or server
+  * Useful for NAT traversal
+  * Server-side push support
+* Flow control
+  * Avoid starvation
+  * Back-pressure to prevent overwhelming a receiver
+* Keep Alives
+  * Enables persistent connections over a load balancer
+* Efficient
+  * Enables thousands of logical streams with low overhead
+
+## Documentation
+
+For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux).
+
+## Specification
+
+The full specification for Yamux is provided in the `spec.md` file.
+It can be used as a guide to implementors of interoperable libraries.
+
+## Usage
+
+Using Yamux is remarkably simple:
+
+```go
+
+func client() {
+    // Get a TCP connection
+    conn, err := net.Dial(...)
+    if err != nil {
+        panic(err)
+    }
+
+    // Setup client side of yamux
+    session, err := yamux.Client(conn, nil)
+    if err != nil {
+        panic(err)
+    }
+
+    // Open a new stream
+    stream, err := session.Open()
+    if err != nil {
+        panic(err)
+    }
+
+    // Stream implements net.Conn
+    stream.Write([]byte("ping"))
+}
+
+func server() {
+    // Accept a TCP connection
+    conn, err := listener.Accept()
+    if err != nil {
+        panic(err)
+    }
+
+    // Setup server side of yamux
+    session, err := yamux.Server(conn, nil)
+    if err != nil {
+        panic(err)
+    }
+
+    // Accept a stream
+    stream, err := session.Accept()
+    if err != nil {
+        panic(err)
+    }
+
+    // Listen for a message
+    buf := make([]byte, 4)
+    stream.Read(buf)
+}
+
+```
+
diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go
new file mode 100644 (file)
index 0000000..be6ebca
--- /dev/null
@@ -0,0 +1,60 @@
+package yamux
+
+import (
+       "fmt"
+       "net"
+)
+
+// hasAddr is used to get the address from the underlying connection
+type hasAddr interface {
+       LocalAddr() net.Addr
+       RemoteAddr() net.Addr
+}
+
+// yamuxAddr is used when we cannot get the underlying address
+type yamuxAddr struct {
+       Addr string
+}
+
+func (*yamuxAddr) Network() string {
+       return "yamux"
+}
+
+func (y *yamuxAddr) String() string {
+       return fmt.Sprintf("yamux:%s", y.Addr)
+}
+
+// Addr is used to get the address of the listener.
+func (s *Session) Addr() net.Addr {
+       return s.LocalAddr()
+}
+
+// LocalAddr is used to get the local address of the
+// underlying connection.
+func (s *Session) LocalAddr() net.Addr {
+       addr, ok := s.conn.(hasAddr)
+       if !ok {
+               return &yamuxAddr{"local"}
+       }
+       return addr.LocalAddr()
+}
+
+// RemoteAddr is used to get the address of remote end
+// of the underlying connection
+func (s *Session) RemoteAddr() net.Addr {
+       addr, ok := s.conn.(hasAddr)
+       if !ok {
+               return &yamuxAddr{"remote"}
+       }
+       return addr.RemoteAddr()
+}
+
+// LocalAddr returns the local address
+func (s *Stream) LocalAddr() net.Addr {
+       return s.session.LocalAddr()
+}
+
+// RemoteAddr returns the remote address
+func (s *Stream) RemoteAddr() net.Addr {
+       return s.session.RemoteAddr()
+}
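
The assertion against hasAddr is the usual optional-interface pattern: use the capability when the underlying connection provides it, otherwise fall back to a synthetic address. A standalone sketch, with an empty struct standing in for a connection that has no addresses:

```go
package main

import (
	"fmt"
	"net"
)

type hasAddr interface {
	LocalAddr() net.Addr
	RemoteAddr() net.Addr
}

// localAddrOf mirrors Session.LocalAddr: ask the wrapped value for
// an address only if it can provide one.
func localAddrOf(conn interface{}) string {
	if a, ok := conn.(hasAddr); ok {
		return a.LocalAddr().String()
	}
	return "yamux:local" // the yamuxAddr fallback
}

func main() {
	c1, c2 := net.Pipe() // net.Conn implements both address methods
	defer c1.Close()
	defer c2.Close()
	fmt.Println(localAddrOf(c1))         // "pipe"
	fmt.Println(localAddrOf(struct{}{})) // "yamux:local"
}
```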
diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go
new file mode 100644 (file)
index 0000000..4f52938
--- /dev/null
@@ -0,0 +1,157 @@
+package yamux
+
+import (
+       "encoding/binary"
+       "fmt"
+)
+
+var (
+       // ErrInvalidVersion means we received a frame with an
+       // invalid version
+       ErrInvalidVersion = fmt.Errorf("invalid protocol version")
+
+       // ErrInvalidMsgType means we received a frame with an
+       // invalid message type
+       ErrInvalidMsgType = fmt.Errorf("invalid msg type")
+
+       // ErrSessionShutdown is used if there is a shutdown during
+       // an operation
+       ErrSessionShutdown = fmt.Errorf("session shutdown")
+
+       // ErrStreamsExhausted is returned if we have no more
+       // stream ids to issue
+       ErrStreamsExhausted = fmt.Errorf("streams exhausted")
+
+       // ErrDuplicateStream is used if a duplicate stream is
+       // opened inbound
+       ErrDuplicateStream = fmt.Errorf("duplicate stream initiated")
+
+       // ErrRecvWindowExceeded indicates the receive window was exceeded
+       ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded")
+
+       // ErrTimeout is used when we reach an IO deadline
+       ErrTimeout = fmt.Errorf("i/o deadline reached")
+
+       // ErrStreamClosed is returned when using a closed stream
+       ErrStreamClosed = fmt.Errorf("stream closed")
+
+       // ErrUnexpectedFlag is set when we get an unexpected flag
+       ErrUnexpectedFlag = fmt.Errorf("unexpected flag")
+
+       // ErrRemoteGoAway is used when we get a go away from the other side
+       ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections")
+
+       // ErrConnectionReset is sent if a stream is reset. This can happen
+       // if the backlog is exceeded, or if there was a remote GoAway.
+       ErrConnectionReset = fmt.Errorf("connection reset")
+
+       // ErrConnectionWriteTimeout indicates that we hit the "safety valve"
+       // timeout writing to the underlying stream connection.
+       ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout")
+
+       // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close
+       ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout")
+)
+
+const (
+       // protoVersion is the only version we support
+       protoVersion uint8 = 0
+)
+
+const (
+       // Data is used for data frames. They are followed
+       // by length bytes worth of payload.
+       typeData uint8 = iota
+
+       // WindowUpdate is used to change the window of
+       // a given stream. The length indicates the delta
+       // update to the window.
+       typeWindowUpdate
+
+       // Ping is sent as a keep-alive or to measure
+       // the RTT. The StreamID and Length value are echoed
+       // back in the response.
+       typePing
+
+       // GoAway is sent to terminate a session. The StreamID
+       // should be 0 and the length is an error code.
+       typeGoAway
+)
+
+const (
+       // SYN is sent to signal a new stream. May
+       // be sent with a data payload
+       flagSYN uint16 = 1 << iota
+
+       // ACK is sent to acknowledge a new stream. May
+       // be sent with a data payload
+       flagACK
+
+       // FIN is sent to half-close the given stream.
+       // May be sent with a data payload.
+       flagFIN
+
+       // RST is used to hard close a given stream.
+       flagRST
+)
+
+const (
+       // initialStreamWindow is the initial stream window size
+       initialStreamWindow uint32 = 256 * 1024
+)
+
+const (
+       // goAwayNormal is sent on a normal termination
+       goAwayNormal uint32 = iota
+
+       // goAwayProtoErr sent on a protocol error
+       goAwayProtoErr
+
+       // goAwayInternalErr sent on an internal error
+       goAwayInternalErr
+)
+
+const (
+       sizeOfVersion  = 1
+       sizeOfType     = 1
+       sizeOfFlags    = 2
+       sizeOfStreamID = 4
+       sizeOfLength   = 4
+       headerSize     = sizeOfVersion + sizeOfType + sizeOfFlags +
+               sizeOfStreamID + sizeOfLength
+)
+
+type header []byte
+
+func (h header) Version() uint8 {
+       return h[0]
+}
+
+func (h header) MsgType() uint8 {
+       return h[1]
+}
+
+func (h header) Flags() uint16 {
+       return binary.BigEndian.Uint16(h[2:4])
+}
+
+func (h header) StreamID() uint32 {
+       return binary.BigEndian.Uint32(h[4:8])
+}
+
+func (h header) Length() uint32 {
+       return binary.BigEndian.Uint32(h[8:12])
+}
+
+func (h header) String() string {
+       return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d",
+               h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length())
+}
+
+func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) {
+       h[0] = protoVersion
+       h[1] = msgType
+       binary.BigEndian.PutUint16(h[2:4], flags)
+       binary.BigEndian.PutUint32(h[4:8], streamID)
+       binary.BigEndian.PutUint32(h[8:12], length)
+}
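
The helpers above fix a 12-byte big-endian wire header: version (1 byte), type (1), flags (2), stream ID (4), length (4). A round-trip sketch using only the standard library; the field values are illustrative, and for pings the ID travels in the Length field:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	h := make([]byte, 12) // headerSize

	// Equivalent to encode(typePing, flagSYN, 0, 42).
	h[0] = 0                                // protoVersion
	h[1] = 2                                // typePing
	binary.BigEndian.PutUint16(h[2:4], 1)   // flagSYN
	binary.BigEndian.PutUint32(h[4:8], 0)   // pings use stream 0
	binary.BigEndian.PutUint32(h[8:12], 42) // ping ID rides in Length

	fmt.Printf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d\n",
		h[0], h[1],
		binary.BigEndian.Uint16(h[2:4]),
		binary.BigEndian.Uint32(h[4:8]),
		binary.BigEndian.Uint32(h[8:12]))
	// Vsn:0 Type:2 Flags:1 StreamID:0 Length:42
}
```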
diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go
new file mode 100644 (file)
index 0000000..7abc7c7
--- /dev/null
@@ -0,0 +1,87 @@
+package yamux
+
+import (
+       "fmt"
+       "io"
+       "os"
+       "time"
+)
+
+// Config is used to tune the Yamux session
+type Config struct {
+       // AcceptBacklog is used to limit how many streams may be
+       // waiting an accept.
+       AcceptBacklog int
+
+       // EnableKeepAlive is used to send periodic keep-alive
+       // messages using a ping.
+       EnableKeepAlive bool
+
+       // KeepAliveInterval is how often to perform the keep alive
+       KeepAliveInterval time.Duration
+
+       // ConnectionWriteTimeout is meant to be a "safety valve" timeout after
+       // which we will suspect a problem with the underlying connection and
+       // close it. This is only applied to writes, where there's generally
+       // an expectation that things will move along quickly.
+       ConnectionWriteTimeout time.Duration
+
+       // MaxStreamWindowSize is used to control the maximum
+       // window size that we allow for a stream.
+       MaxStreamWindowSize uint32
+
+       // LogOutput is used to control the log destination
+       LogOutput io.Writer
+}
+
+// DefaultConfig is used to return a default configuration
+func DefaultConfig() *Config {
+       return &Config{
+               AcceptBacklog:          256,
+               EnableKeepAlive:        true,
+               KeepAliveInterval:      30 * time.Second,
+               ConnectionWriteTimeout: 10 * time.Second,
+               MaxStreamWindowSize:    initialStreamWindow,
+               LogOutput:              os.Stderr,
+       }
+}
+
+// VerifyConfig is used to verify the sanity of configuration
+func VerifyConfig(config *Config) error {
+       if config.AcceptBacklog <= 0 {
+               return fmt.Errorf("backlog must be positive")
+       }
+       if config.KeepAliveInterval == 0 {
+               return fmt.Errorf("keep-alive interval must be positive")
+       }
+       if config.MaxStreamWindowSize < initialStreamWindow {
+               return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow)
+       }
+       return nil
+}
+
+// Server is used to initialize a new server-side connection.
+// There must be at most one server-side connection. If a nil config is
+// provided, the configuration from DefaultConfig will be used.
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+       if config == nil {
+               config = DefaultConfig()
+       }
+       if err := VerifyConfig(config); err != nil {
+               return nil, err
+       }
+       return newSession(config, conn, false), nil
+}
+
+// Client is used to initialize a new client-side connection.
+// There must be at most one client-side connection.
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) {
+       if config == nil {
+               config = DefaultConfig()
+       }
+
+       if err := VerifyConfig(config); err != nil {
+               return nil, err
+       }
+       return newSession(config, conn, true), nil
+}
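
A sketch of tuning the defaults before opening a session; the in-memory net.Pipe pair stands in for a real TCP connection, and the field values are illustrative, not recommendations:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net"
	"time"

	"github.com/hashicorp/yamux"
)

func main() {
	c1, c2 := net.Pipe() // stand-in for a real connection

	cfg := yamux.DefaultConfig()
	cfg.KeepAliveInterval = 15 * time.Second // illustrative override
	cfg.LogOutput = ioutil.Discard

	client, err := yamux.Client(c1, cfg) // VerifyConfig runs inside
	if err != nil {
		panic(err)
	}
	server, err := yamux.Server(c2, cfg)
	if err != nil {
		panic(err)
	}
	defer client.Close()
	defer server.Close()

	fmt.Println(client.NumStreams(), server.NumStreams()) // 0 0
}
```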
diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go
new file mode 100644 (file)
index 0000000..e179818
--- /dev/null
@@ -0,0 +1,623 @@
+package yamux
+
+import (
+       "bufio"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "log"
+       "math"
+       "net"
+       "strings"
+       "sync"
+       "sync/atomic"
+       "time"
+)
+
+// Session is used to wrap a reliable ordered connection and to
+// multiplex it into multiple streams.
+type Session struct {
+       // remoteGoAway indicates the remote side does
+       // not want further connections. Must be first for alignment.
+       remoteGoAway int32
+
+       // localGoAway indicates that we should stop
+       // accepting further connections. Must be first for alignment.
+       localGoAway int32
+
+       // nextStreamID is the next stream we should
+       // send. This depends if we are a client/server.
+       nextStreamID uint32
+
+       // config holds our configuration
+       config *Config
+
+       // logger is used for our logs
+       logger *log.Logger
+
+       // conn is the underlying connection
+       conn io.ReadWriteCloser
+
+       // bufRead is a buffered reader
+       bufRead *bufio.Reader
+
+       // pings is used to track inflight pings
+       pings    map[uint32]chan struct{}
+       pingID   uint32
+       pingLock sync.Mutex
+
+       // streams maps a stream id to a stream, and inflight has an entry
+       // for any outgoing stream that has not yet been established. Both are
+       // protected by streamLock.
+       streams    map[uint32]*Stream
+       inflight   map[uint32]struct{}
+       streamLock sync.Mutex
+
+       // synCh acts like a semaphore. It is sized to the AcceptBacklog which
+       // is assumed to be symmetric between the client and server. This allows
+       // the client to avoid exceeding the backlog and instead blocks the open.
+       synCh chan struct{}
+
+       // acceptCh is used to pass ready streams to the client
+       acceptCh chan *Stream
+
+       // sendCh is used to mark a stream as ready to send,
+       // or to send a header out directly.
+       sendCh chan sendReady
+
+       // recvDoneCh is closed when recv() exits to avoid a race
+       // between stream registration and stream shutdown
+       recvDoneCh chan struct{}
+
+       // shutdown is used to safely close a session
+       shutdown     bool
+       shutdownErr  error
+       shutdownCh   chan struct{}
+       shutdownLock sync.Mutex
+}
+
+// sendReady is used to either mark a stream as ready
+// or to directly send a header
+type sendReady struct {
+       Hdr  []byte
+       Body io.Reader
+       Err  chan error
+}
+
+// newSession is used to construct a new session
+func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session {
+       s := &Session{
+               config:     config,
+               logger:     log.New(config.LogOutput, "", log.LstdFlags),
+               conn:       conn,
+               bufRead:    bufio.NewReader(conn),
+               pings:      make(map[uint32]chan struct{}),
+               streams:    make(map[uint32]*Stream),
+               inflight:   make(map[uint32]struct{}),
+               synCh:      make(chan struct{}, config.AcceptBacklog),
+               acceptCh:   make(chan *Stream, config.AcceptBacklog),
+               sendCh:     make(chan sendReady, 64),
+               recvDoneCh: make(chan struct{}),
+               shutdownCh: make(chan struct{}),
+       }
+       if client {
+               s.nextStreamID = 1
+       } else {
+               s.nextStreamID = 2
+       }
+       go s.recv()
+       go s.send()
+       if config.EnableKeepAlive {
+               go s.keepalive()
+       }
+       return s
+}
+
+// IsClosed does a safe check to see if we have shutdown
+func (s *Session) IsClosed() bool {
+       select {
+       case <-s.shutdownCh:
+               return true
+       default:
+               return false
+       }
+}
+
+// NumStreams returns the number of currently open streams
+func (s *Session) NumStreams() int {
+       s.streamLock.Lock()
+       num := len(s.streams)
+       s.streamLock.Unlock()
+       return num
+}
+
+// Open is used to create a new stream as a net.Conn
+func (s *Session) Open() (net.Conn, error) {
+       conn, err := s.OpenStream()
+       if err != nil {
+               return nil, err
+       }
+       return conn, nil
+}
+
+// OpenStream is used to create a new stream
+func (s *Session) OpenStream() (*Stream, error) {
+       if s.IsClosed() {
+               return nil, ErrSessionShutdown
+       }
+       if atomic.LoadInt32(&s.remoteGoAway) == 1 {
+               return nil, ErrRemoteGoAway
+       }
+
+       // Block if we have too many inflight SYNs
+       select {
+       case s.synCh <- struct{}{}:
+       case <-s.shutdownCh:
+               return nil, ErrSessionShutdown
+       }
+
+GET_ID:
+       // Get an ID, and check for stream exhaustion
+       id := atomic.LoadUint32(&s.nextStreamID)
+       if id >= math.MaxUint32-1 {
+               return nil, ErrStreamsExhausted
+       }
+       if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) {
+               goto GET_ID
+       }
+
+       // Register the stream
+       stream := newStream(s, id, streamInit)
+       s.streamLock.Lock()
+       s.streams[id] = stream
+       s.inflight[id] = struct{}{}
+       s.streamLock.Unlock()
+
+       // Send the window update to create
+       if err := stream.sendWindowUpdate(); err != nil {
+               select {
+               case <-s.synCh:
+               default:
+                       s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore")
+               }
+               return nil, err
+       }
+       return stream, nil
+}
+
+// Accept is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) Accept() (net.Conn, error) {
+       conn, err := s.AcceptStream()
+       if err != nil {
+               return nil, err
+       }
+       return conn, err
+}
+
+// AcceptStream is used to block until the next available stream
+// is ready to be accepted.
+func (s *Session) AcceptStream() (*Stream, error) {
+       select {
+       case stream := <-s.acceptCh:
+               if err := stream.sendWindowUpdate(); err != nil {
+                       return nil, err
+               }
+               return stream, nil
+       case <-s.shutdownCh:
+               return nil, s.shutdownErr
+       }
+}
+
+// Close is used to close the session and all streams.
+// Attempts to send a GoAway before closing the connection.
+func (s *Session) Close() error {
+       s.shutdownLock.Lock()
+       defer s.shutdownLock.Unlock()
+
+       if s.shutdown {
+               return nil
+       }
+       s.shutdown = true
+       if s.shutdownErr == nil {
+               s.shutdownErr = ErrSessionShutdown
+       }
+       close(s.shutdownCh)
+       s.conn.Close()
+       <-s.recvDoneCh
+
+       s.streamLock.Lock()
+       defer s.streamLock.Unlock()
+       for _, stream := range s.streams {
+               stream.forceClose()
+       }
+       return nil
+}
+
+// exitErr is used to handle an error that is causing the
+// session to terminate.
+func (s *Session) exitErr(err error) {
+       s.shutdownLock.Lock()
+       if s.shutdownErr == nil {
+               s.shutdownErr = err
+       }
+       s.shutdownLock.Unlock()
+       s.Close()
+}
+
+// GoAway can be used to prevent accepting further
+// connections. It does not close the underlying conn.
+func (s *Session) GoAway() error {
+       return s.waitForSend(s.goAway(goAwayNormal), nil)
+}
+
+// goAway is used to send a goAway message
+func (s *Session) goAway(reason uint32) header {
+       atomic.SwapInt32(&s.localGoAway, 1)
+       hdr := header(make([]byte, headerSize))
+       hdr.encode(typeGoAway, 0, 0, reason)
+       return hdr
+}
+
+// Ping is used to measure the RTT response time
+func (s *Session) Ping() (time.Duration, error) {
+       // Get a channel for the ping
+       ch := make(chan struct{})
+
+       // Get a new ping id, mark as pending
+       s.pingLock.Lock()
+       id := s.pingID
+       s.pingID++
+       s.pings[id] = ch
+       s.pingLock.Unlock()
+
+       // Send the ping request
+       hdr := header(make([]byte, headerSize))
+       hdr.encode(typePing, flagSYN, 0, id)
+       if err := s.waitForSend(hdr, nil); err != nil {
+               return 0, err
+       }
+
+       // Wait for a response
+       start := time.Now()
+       select {
+       case <-ch:
+       case <-time.After(s.config.ConnectionWriteTimeout):
+               s.pingLock.Lock()
+               delete(s.pings, id) // Ignore it if a response comes later.
+               s.pingLock.Unlock()
+               return 0, ErrTimeout
+       case <-s.shutdownCh:
+               return 0, ErrSessionShutdown
+       }
+
+       // Compute the RTT
+       return time.Now().Sub(start), nil
+}
+
+// keepalive is a long running goroutine that periodically does
+// a ping to keep the connection alive.
+func (s *Session) keepalive() {
+       for {
+               select {
+               case <-time.After(s.config.KeepAliveInterval):
+                       _, err := s.Ping()
+                       if err != nil {
+                               s.logger.Printf("[ERR] yamux: keepalive failed: %v", err)
+                               s.exitErr(ErrKeepAliveTimeout)
+                               return
+                       }
+               case <-s.shutdownCh:
+                       return
+               }
+       }
+}
+
+// waitForSend waits to send a header, checking for a potential shutdown
+func (s *Session) waitForSend(hdr header, body io.Reader) error {
+       errCh := make(chan error, 1)
+       return s.waitForSendErr(hdr, body, errCh)
+}
+
+// waitForSendErr waits to send a header with optional data, checking for a
+// potential shutdown. Since there's the expectation that sends can happen
+// in a timely manner, we enforce the connection write timeout here.
+func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error {
+       timer := time.NewTimer(s.config.ConnectionWriteTimeout)
+       defer timer.Stop()
+
+       ready := sendReady{Hdr: hdr, Body: body, Err: errCh}
+       select {
+       case s.sendCh <- ready:
+       case <-s.shutdownCh:
+               return ErrSessionShutdown
+       case <-timer.C:
+               return ErrConnectionWriteTimeout
+       }
+
+       select {
+       case err := <-errCh:
+               return err
+       case <-s.shutdownCh:
+               return ErrSessionShutdown
+       case <-timer.C:
+               return ErrConnectionWriteTimeout
+       }
+}
+
+// sendNoWait does a send without waiting. Since there's the expectation that
+// the send happens right here, we enforce the connection write timeout if we
+// can't queue the header to be sent.
+func (s *Session) sendNoWait(hdr header) error {
+       timer := time.NewTimer(s.config.ConnectionWriteTimeout)
+       defer timer.Stop()
+
+       select {
+       case s.sendCh <- sendReady{Hdr: hdr}:
+               return nil
+       case <-s.shutdownCh:
+               return ErrSessionShutdown
+       case <-timer.C:
+               return ErrConnectionWriteTimeout
+       }
+}
+
+// send is a long running goroutine that sends data
+func (s *Session) send() {
+       for {
+               select {
+               case ready := <-s.sendCh:
+                       // Send a header if ready
+                       if ready.Hdr != nil {
+                               sent := 0
+                               for sent < len(ready.Hdr) {
+                                       n, err := s.conn.Write(ready.Hdr[sent:])
+                                       if err != nil {
+                                               s.logger.Printf("[ERR] yamux: Failed to write header: %v", err)
+                                               asyncSendErr(ready.Err, err)
+                                               s.exitErr(err)
+                                               return
+                                       }
+                                       sent += n
+                               }
+                       }
+
+                       // Send data from a body if given
+                       if ready.Body != nil {
+                               _, err := io.Copy(s.conn, ready.Body)
+                               if err != nil {
+                                       s.logger.Printf("[ERR] yamux: Failed to write body: %v", err)
+                                       asyncSendErr(ready.Err, err)
+                                       s.exitErr(err)
+                                       return
+                               }
+                       }
+
+                       // No error, successful send
+                       asyncSendErr(ready.Err, nil)
+               case <-s.shutdownCh:
+                       return
+               }
+       }
+}
+
+// recv is a long running goroutine that accepts new data
+func (s *Session) recv() {
+       if err := s.recvLoop(); err != nil {
+               s.exitErr(err)
+       }
+}
+
+// recvLoop continues to receive data until a fatal error is encountered
+func (s *Session) recvLoop() error {
+       defer close(s.recvDoneCh)
+       hdr := header(make([]byte, headerSize))
+       var handler func(header) error
+       for {
+               // Read the header
+               if _, err := io.ReadFull(s.bufRead, hdr); err != nil {
+                       if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") {
+                               s.logger.Printf("[ERR] yamux: Failed to read header: %v", err)
+                       }
+                       return err
+               }
+
+               // Verify the version
+               if hdr.Version() != protoVersion {
+                       s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version())
+                       return ErrInvalidVersion
+               }
+
+               // Switch on the type
+               switch hdr.MsgType() {
+               case typeData:
+                       handler = s.handleStreamMessage
+               case typeWindowUpdate:
+                       handler = s.handleStreamMessage
+               case typeGoAway:
+                       handler = s.handleGoAway
+               case typePing:
+                       handler = s.handlePing
+               default:
+                       return ErrInvalidMsgType
+               }
+
+               // Invoke the handler
+               if err := handler(hdr); err != nil {
+                       return err
+               }
+       }
+}
+
+// handleStreamMessage handles either a data or window update frame
+func (s *Session) handleStreamMessage(hdr header) error {
+       // Check for a new stream creation
+       id := hdr.StreamID()
+       flags := hdr.Flags()
+       if flags&flagSYN == flagSYN {
+               if err := s.incomingStream(id); err != nil {
+                       return err
+               }
+       }
+
+       // Get the stream
+       s.streamLock.Lock()
+       stream := s.streams[id]
+       s.streamLock.Unlock()
+
+       // If we do not have a stream, likely we sent a RST
+       if stream == nil {
+               // Drain any data on the wire
+               if hdr.MsgType() == typeData && hdr.Length() > 0 {
+                       s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id)
+                       if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil {
+                               s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err)
+                               return nil
+                       }
+               } else {
+                       s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr)
+               }
+               return nil
+       }
+
+       // Check if this is a window update
+       if hdr.MsgType() == typeWindowUpdate {
+               if err := stream.incrSendWindow(hdr, flags); err != nil {
+                       if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+                               s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+                       }
+                       return err
+               }
+               return nil
+       }
+
+       // Read the new data
+       if err := stream.readData(hdr, flags, s.bufRead); err != nil {
+               if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+                       s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+               }
+               return err
+       }
+       return nil
+}
+
+// handlePing is invoked for a typePing frame
+func (s *Session) handlePing(hdr header) error {
+       flags := hdr.Flags()
+       pingID := hdr.Length()
+
+       // Check if this is a query; if so, respond from a separate goroutine
+       // so the receive loop is not blocked waiting on the write.
+       if flags&flagSYN == flagSYN {
+               go func() {
+                       hdr := header(make([]byte, headerSize))
+                       hdr.encode(typePing, flagACK, 0, pingID)
+                       if err := s.sendNoWait(hdr); err != nil {
+                               s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err)
+                       }
+               }()
+               return nil
+       }
+
+       // Handle a response
+       s.pingLock.Lock()
+       ch := s.pings[pingID]
+       if ch != nil {
+               delete(s.pings, pingID)
+               close(ch)
+       }
+       s.pingLock.Unlock()
+       return nil
+}
+
+// handleGoAway is invoked for a typeGoAway frame
+func (s *Session) handleGoAway(hdr header) error {
+       code := hdr.Length()
+       switch code {
+       case goAwayNormal:
+               atomic.SwapInt32(&s.remoteGoAway, 1)
+       case goAwayProtoErr:
+               s.logger.Printf("[ERR] yamux: received protocol error go away")
+               return fmt.Errorf("yamux protocol error")
+       case goAwayInternalErr:
+               s.logger.Printf("[ERR] yamux: received internal error go away")
+               return fmt.Errorf("remote yamux internal error")
+       default:
+               s.logger.Printf("[ERR] yamux: received unexpected go away")
+               return fmt.Errorf("unexpected go away received")
+       }
+       return nil
+}
+
+// incomingStream is used to create a new incoming stream
+func (s *Session) incomingStream(id uint32) error {
+       // Reject immediately if we are doing a go away
+       if atomic.LoadInt32(&s.localGoAway) == 1 {
+               hdr := header(make([]byte, headerSize))
+               hdr.encode(typeWindowUpdate, flagRST, id, 0)
+               return s.sendNoWait(hdr)
+       }
+
+       // Allocate a new stream
+       stream := newStream(s, id, streamSYNReceived)
+
+       s.streamLock.Lock()
+       defer s.streamLock.Unlock()
+
+       // Check if stream already exists
+       if _, ok := s.streams[id]; ok {
+               s.logger.Printf("[ERR] yamux: duplicate stream declared")
+               if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil {
+                       s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr)
+               }
+               return ErrDuplicateStream
+       }
+
+       // Register the stream
+       s.streams[id] = stream
+
+       // Check if we've exceeded the backlog
+       select {
+       case s.acceptCh <- stream:
+               return nil
+       default:
+               // Backlog exceeded! RST the stream
+               s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset")
+               delete(s.streams, id)
+               stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0)
+               return s.sendNoWait(stream.sendHdr)
+       }
+}
+
+// closeStream is used to close a stream once both sides have
+// issued a close. If there was an in-flight SYN and the stream
+// was not yet established, then this will give the credit back.
+func (s *Session) closeStream(id uint32) {
+       s.streamLock.Lock()
+       if _, ok := s.inflight[id]; ok {
+               select {
+               case <-s.synCh:
+               default:
+                       s.logger.Printf("[ERR] yamux: SYN tracking out of sync")
+               }
+       }
+       delete(s.streams, id)
+       s.streamLock.Unlock()
+}
+
+// establishStream is used to mark a stream that was in the
+// SYN Sent state as established.
+func (s *Session) establishStream(id uint32) {
+       s.streamLock.Lock()
+       if _, ok := s.inflight[id]; ok {
+               delete(s.inflight, id)
+       } else {
+               s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)")
+       }
+       select {
+       case <-s.synCh:
+       default:
+               s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)")
+       }
+       s.streamLock.Unlock()
+}
diff --git a/vendor/github.com/hashicorp/yamux/spec.md b/vendor/github.com/hashicorp/yamux/spec.md
new file mode 100644 (file)
index 0000000..183d797
--- /dev/null
@@ -0,0 +1,140 @@
+# Specification
+
+This document details the internal specification of Yamux. It serves
+both as a guide for implementing Yamux and as a reference for building
+alternative interoperable libraries.
+
+# Framing
+
+Yamux uses a streaming connection underneath, but imposes a message
+framing so that it can be shared between many logical streams. Each
+frame contains a header like:
+
+* Version (8 bits)
+* Type (8 bits)
+* Flags (16 bits)
+* StreamID (32 bits)
+* Length (32 bits)
+
+This means that each header has a 12 byte overhead.
+All fields are encoded in network order (big endian).
+Each field is described below, and a short encoding sketch follows the
+field descriptions.
+
+## Version Field
+
+The version field is used for future backward compatibility. At the
+current time, the field is always set to 0, to indicate the initial
+version.
+
+## Type Field
+
+The type field identifies the frame's message type. The following
+message types are supported:
+
+* 0x0 Data - Used to transmit data. May transmit zero length payloads
+  depending on the flags.
+
+* 0x1 Window Update - Used to update the sender's receive window size.
+  This is used to implement per-stream flow control.
+
+* 0x2 Ping - Used to measure RTT. It can also be used for heartbeats
+  and keep-alives over TCP.
+
+* 0x3 Go Away - Used to close a session.
+
+## Flag Field
+
+The flags field is used to provide additional information related
+to the message type. The following flags are supported:
+
+* 0x1 SYN - Signals the start of a new stream. May be sent with a data or
+  window update message. Also sent with a ping to indicate outbound.
+
+* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data
+  or window update message. Also sent with a ping to indicate response.
+
+* 0x4 FIN - Performs a half-close of a stream. May be sent with a data
+  message or window update.
+
+* 0x8 RST - Reset a stream immediately. May be sent with a data or
+  window update message.
+
+## StreamID Field
+
+The StreamID field is used to identify the logical stream the frame
+is addressing. The client side should use odd IDs, and the server side
+even IDs. This prevents any collisions. Additionally, the 0 ID is
+reserved to represent the session.
+
+Both Ping and Go Away messages should always use the 0 StreamID.
+
+## Length Field
+
+The meaning of the length field depends on the message type:
+
+* Data - provides the length of bytes following the header
+* Window update - provides a delta update to the window size
+* Ping - Contains an opaque value, echoed back
+* Go Away - Contains an error code
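+
+To make the layout concrete, here is a minimal, self-contained sketch of
+encoding a header with Go's `encoding/binary` (an illustration with
+hypothetical helper names, not the vendored implementation):
+
+```go
+package main
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+const headerSize = 12
+
+// encodeHeader packs the five header fields into the 12-byte
+// big-endian wire format described above.
+func encodeHeader(version, msgType uint8, flags uint16, streamID, length uint32) []byte {
+	h := make([]byte, headerSize)
+	h[0] = version
+	h[1] = msgType
+	binary.BigEndian.PutUint16(h[2:4], flags)
+	binary.BigEndian.PutUint32(h[4:8], streamID)
+	binary.BigEndian.PutUint32(h[8:12], length)
+	return h
+}
+
+func main() {
+	// A Data frame (type 0x0) with the SYN flag (0x1) opening stream 1
+	// and announcing a 512-byte payload.
+	fmt.Printf("% x\n", encodeHeader(0, 0x0, 0x1, 1, 512))
+}
+```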
+
+# Message Flow
+
+There is no explicit connection setup, as Yamux relies on an underlying
+transport to be provided. However, there is a distinction between client
+and server side of the connection.
+
+## Opening a stream
+
+To open a stream, an initial data or window update frame is sent
+with a new StreamID. The SYN flag should be set to signal a new stream.
+
+The receiver must then reply with either a data or window update frame
+with the StreamID along with the ACK flag to accept the stream or with
+the RST flag to reject the stream.
+
+Because we are relying on the reliable stream underneath, a connection
+can begin sending data once the SYN flag is sent. The corresponding
+ACK does not need to be received. This is particularly well suited
+for an RPC system where a client wants to open a stream and immediately
+fire a request without waiting for the RTT of the ACK.
+
+This does introduce the possibility of a connection being rejected
+after data has been sent already. This is a slight semantic difference
+from TCP, where the connection cannot be refused after it is opened.
+Clients should be prepared to handle this by checking for an error
+that indicates a RST was received.
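+
+In terms of this library's API, the handshake is hidden behind the
+`Open` and `Accept` calls. A minimal sketch over an in-memory pipe
+(error handling elided for brevity):
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net"
+
+	"github.com/hashicorp/yamux"
+)
+
+func main() {
+	clientConn, serverConn := net.Pipe()
+	done := make(chan struct{})
+
+	go func() {
+		defer close(done)
+		// Server side: Accept blocks until a SYN arrives.
+		session, _ := yamux.Server(serverConn, nil)
+		stream, _ := session.Accept()
+		buf := make([]byte, 4)
+		io.ReadFull(stream, buf)
+		fmt.Printf("server received: %s\n", buf)
+	}()
+
+	// Client side: Open sends the SYN; data may follow immediately,
+	// without waiting a round trip for the ACK.
+	session, _ := yamux.Client(clientConn, nil)
+	stream, _ := session.Open()
+	stream.Write([]byte("ping"))
+	<-done
+	stream.Close()
+}
+```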
+
+## Closing a stream
+
+To close a stream, either side sends a data or window update frame
+along with the FIN flag. This does a half-close indicating the sender
+will send no further data.
+
+Once both sides have sent a FIN, the stream is fully closed.
+
+Alternatively, if an error occurs, the RST flag can be used to
+hard close a stream immediately.
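+
+An illustrative frame sequence for a graceful close of stream 1
+(fields shown as type / flags / StreamID / length):
+
+```
+A -> B   Window Update  FIN  1  0    A half-closes; A will send no more data
+B -> A   Window Update  FIN  1  0    B half-closes; stream is fully closed
+```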
+
+## Flow Control
+
+Yamux initially starts each stream with a 256KB window size.
+There is no window size for the session.
+
+To prevent the streams from stalling, window update frames should be
+sent regularly. Yamux can be configured to provide a larger limit for
+window sizes. Both sides assume the initial 256KB window, but can
+immediately send a window update as part of the SYN/ACK indicating a
+larger window.
+
+Both sides should track the number of bytes sent in Data frames
+only, as only those bytes count against the window size.
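+
+As a worked example of this bookkeeping (a sketch with hypothetical
+names, mirroring but not identical to the receive-window logic in
+`stream.go`):
+
+```go
+package main
+
+import "fmt"
+
+const maxWindow = 256 * 1024 // initial/configured stream window size
+
+// windowDelta reports the update to send after consuming data. The
+// receiver re-opens the window only once at least half of it has been
+// consumed, avoiding a flood of tiny updates.
+func windowDelta(recvWindow uint32) (delta uint32, send bool) {
+	delta = maxWindow - recvWindow
+	return delta, delta >= maxWindow/2
+}
+
+func main() {
+	window := uint32(maxWindow)
+
+	// Receive a 100KB data frame: the window shrinks, no update yet.
+	window -= 100 * 1024
+	if d, ok := windowDelta(window); !ok {
+		fmt.Printf("after 100KB: window=%d, hold update (delta=%d)\n", window, d)
+	}
+
+	// Receive another 100KB: over half consumed, so send the delta.
+	window -= 100 * 1024
+	if d, ok := windowDelta(window); ok {
+		fmt.Printf("after 200KB: send window update, delta=%d\n", d)
+		window += d // window restored to maxWindow
+	}
+}
+```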
+
+## Session termination
+
+When a session is being terminated, the Go Away message should
+be sent. The Length should be set to one of the following to
+provide an error code:
+
+* 0x0 Normal termination
+* 0x1 Protocol error
+* 0x2 Internal error
diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go
new file mode 100644 (file)
index 0000000..d216e28
--- /dev/null
@@ -0,0 +1,457 @@
+package yamux
+
+import (
+       "bytes"
+       "io"
+       "sync"
+       "sync/atomic"
+       "time"
+)
+
+type streamState int
+
+const (
+       streamInit streamState = iota
+       streamSYNSent
+       streamSYNReceived
+       streamEstablished
+       streamLocalClose
+       streamRemoteClose
+       streamClosed
+       streamReset
+)
+
+// Stream is used to represent a logical stream
+// within a session.
+type Stream struct {
+       recvWindow uint32
+       sendWindow uint32
+
+       id      uint32
+       session *Session
+
+       state     streamState
+       stateLock sync.Mutex
+
+       recvBuf  *bytes.Buffer
+       recvLock sync.Mutex
+
+       controlHdr     header
+       controlErr     chan error
+       controlHdrLock sync.Mutex
+
+       sendHdr  header
+       sendErr  chan error
+       sendLock sync.Mutex
+
+       recvNotifyCh chan struct{}
+       sendNotifyCh chan struct{}
+
+       readDeadline  time.Time
+       writeDeadline time.Time
+}
+
+// newStream is used to construct a new stream within
+// a given session for an ID
+func newStream(session *Session, id uint32, state streamState) *Stream {
+       s := &Stream{
+               id:           id,
+               session:      session,
+               state:        state,
+               controlHdr:   header(make([]byte, headerSize)),
+               controlErr:   make(chan error, 1),
+               sendHdr:      header(make([]byte, headerSize)),
+               sendErr:      make(chan error, 1),
+               recvWindow:   initialStreamWindow,
+               sendWindow:   initialStreamWindow,
+               recvNotifyCh: make(chan struct{}, 1),
+               sendNotifyCh: make(chan struct{}, 1),
+       }
+       return s
+}
+
+// Session returns the associated stream session
+func (s *Stream) Session() *Session {
+       return s.session
+}
+
+// StreamID returns the ID of this stream
+func (s *Stream) StreamID() uint32 {
+       return s.id
+}
+
+// Read is used to read from the stream
+func (s *Stream) Read(b []byte) (n int, err error) {
+       defer asyncNotify(s.recvNotifyCh)
+START:
+       s.stateLock.Lock()
+       switch s.state {
+       case streamLocalClose:
+               fallthrough
+       case streamRemoteClose:
+               fallthrough
+       case streamClosed:
+               s.recvLock.Lock()
+               if s.recvBuf == nil || s.recvBuf.Len() == 0 {
+                       s.recvLock.Unlock()
+                       s.stateLock.Unlock()
+                       return 0, io.EOF
+               }
+               s.recvLock.Unlock()
+       case streamReset:
+               s.stateLock.Unlock()
+               return 0, ErrConnectionReset
+       }
+       s.stateLock.Unlock()
+
+       // If there is no data available, block
+       s.recvLock.Lock()
+       if s.recvBuf == nil || s.recvBuf.Len() == 0 {
+               s.recvLock.Unlock()
+               goto WAIT
+       }
+
+       // Read any bytes
+       n, _ = s.recvBuf.Read(b)
+       s.recvLock.Unlock()
+
+       // Send a window update potentially
+       err = s.sendWindowUpdate()
+       return n, err
+
+WAIT:
+       var timeout <-chan time.Time
+       var timer *time.Timer
+       if !s.readDeadline.IsZero() {
+               delay := s.readDeadline.Sub(time.Now())
+               timer = time.NewTimer(delay)
+               timeout = timer.C
+       }
+       select {
+       case <-s.recvNotifyCh:
+               if timer != nil {
+                       timer.Stop()
+               }
+               goto START
+       case <-timeout:
+               return 0, ErrTimeout
+       }
+}
+
+// Write is used to write to the stream
+func (s *Stream) Write(b []byte) (n int, err error) {
+       s.sendLock.Lock()
+       defer s.sendLock.Unlock()
+       total := 0
+       for total < len(b) {
+               n, err := s.write(b[total:])
+               total += n
+               if err != nil {
+                       return total, err
+               }
+       }
+       return total, nil
+}
+
+// write is used to write to the stream, may return on
+// a short write.
+func (s *Stream) write(b []byte) (n int, err error) {
+       var flags uint16
+       var max uint32
+       var body io.Reader
+START:
+       s.stateLock.Lock()
+       switch s.state {
+       case streamLocalClose:
+               fallthrough
+       case streamClosed:
+               s.stateLock.Unlock()
+               return 0, ErrStreamClosed
+       case streamReset:
+               s.stateLock.Unlock()
+               return 0, ErrConnectionReset
+       }
+       s.stateLock.Unlock()
+
+       // If there is no data available, block
+       window := atomic.LoadUint32(&s.sendWindow)
+       if window == 0 {
+               goto WAIT
+       }
+
+       // Determine the flags if any
+       flags = s.sendFlags()
+
+       // Send up to our send window
+       max = min(window, uint32(len(b)))
+       body = bytes.NewReader(b[:max])
+
+       // Send the header
+       s.sendHdr.encode(typeData, flags, s.id, max)
+       if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil {
+               return 0, err
+       }
+
+       // Reduce our send window
+       atomic.AddUint32(&s.sendWindow, ^uint32(max-1))
+
+       // Unlock
+       return int(max), err
+
+WAIT:
+       var timeout <-chan time.Time
+       if !s.writeDeadline.IsZero() {
+               delay := s.writeDeadline.Sub(time.Now())
+               timeout = time.After(delay)
+       }
+       select {
+       case <-s.sendNotifyCh:
+               goto START
+       case <-timeout:
+               return 0, ErrTimeout
+       }
+       return 0, nil
+}
+
+// sendFlags determines any flags that are appropriate
+// based on the current stream state
+func (s *Stream) sendFlags() uint16 {
+       s.stateLock.Lock()
+       defer s.stateLock.Unlock()
+       var flags uint16
+       switch s.state {
+       case streamInit:
+               flags |= flagSYN
+               s.state = streamSYNSent
+       case streamSYNReceived:
+               flags |= flagACK
+               s.state = streamEstablished
+       }
+       return flags
+}
+
+// sendWindowUpdate potentially sends a window update enabling
+// further writes to take place. Must be invoked with the lock.
+func (s *Stream) sendWindowUpdate() error {
+       s.controlHdrLock.Lock()
+       defer s.controlHdrLock.Unlock()
+
+       // Determine the delta update
+       max := s.session.config.MaxStreamWindowSize
+       delta := max - atomic.LoadUint32(&s.recvWindow)
+
+       // Determine the flags if any
+       flags := s.sendFlags()
+
+       // Check if we can omit the update
+       if delta < (max/2) && flags == 0 {
+               return nil
+       }
+
+       // Update our window
+       atomic.AddUint32(&s.recvWindow, delta)
+
+       // Send the header
+       s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta)
+       if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+               return err
+       }
+       return nil
+}
+
+// sendClose is used to send a FIN
+func (s *Stream) sendClose() error {
+       s.controlHdrLock.Lock()
+       defer s.controlHdrLock.Unlock()
+
+       flags := s.sendFlags()
+       flags |= flagFIN
+       s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0)
+       if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil {
+               return err
+       }
+       return nil
+}
+
+// Close is used to close the stream
+func (s *Stream) Close() error {
+       closeStream := false
+       s.stateLock.Lock()
+       switch s.state {
+       // Opened means we need to signal a close
+       case streamSYNSent:
+               fallthrough
+       case streamSYNReceived:
+               fallthrough
+       case streamEstablished:
+               s.state = streamLocalClose
+               goto SEND_CLOSE
+
+       case streamLocalClose:
+       case streamRemoteClose:
+               s.state = streamClosed
+               closeStream = true
+               goto SEND_CLOSE
+
+       case streamClosed:
+       case streamReset:
+       default:
+               panic("unhandled state")
+       }
+       s.stateLock.Unlock()
+       return nil
+SEND_CLOSE:
+       s.stateLock.Unlock()
+       s.sendClose()
+       s.notifyWaiting()
+       if closeStream {
+               s.session.closeStream(s.id)
+       }
+       return nil
+}
+
+// forceClose is used for when the session is exiting
+func (s *Stream) forceClose() {
+       s.stateLock.Lock()
+       s.state = streamClosed
+       s.stateLock.Unlock()
+       s.notifyWaiting()
+}
+
+// processFlags is used to update the state of the stream
+// based on set flags, if any. Lock must be held
+func (s *Stream) processFlags(flags uint16) error {
+       // Close the stream without holding the state lock
+       closeStream := false
+       defer func() {
+               if closeStream {
+                       s.session.closeStream(s.id)
+               }
+       }()
+
+       s.stateLock.Lock()
+       defer s.stateLock.Unlock()
+       if flags&flagACK == flagACK {
+               if s.state == streamSYNSent {
+                       s.state = streamEstablished
+               }
+               s.session.establishStream(s.id)
+       }
+       if flags&flagFIN == flagFIN {
+               switch s.state {
+               case streamSYNSent:
+                       fallthrough
+               case streamSYNReceived:
+                       fallthrough
+               case streamEstablished:
+                       s.state = streamRemoteClose
+                       s.notifyWaiting()
+               case streamLocalClose:
+                       s.state = streamClosed
+                       closeStream = true
+                       s.notifyWaiting()
+               default:
+                       s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state)
+                       return ErrUnexpectedFlag
+               }
+       }
+       if flags&flagRST == flagRST {
+               s.state = streamReset
+               closeStream = true
+               s.notifyWaiting()
+       }
+       return nil
+}
+
+// notifyWaiting notifies all the waiting channels
+func (s *Stream) notifyWaiting() {
+       asyncNotify(s.recvNotifyCh)
+       asyncNotify(s.sendNotifyCh)
+}
+
+// incrSendWindow updates the size of our send window
+func (s *Stream) incrSendWindow(hdr header, flags uint16) error {
+       if err := s.processFlags(flags); err != nil {
+               return err
+       }
+
+       // Increase window, unblock a sender
+       atomic.AddUint32(&s.sendWindow, hdr.Length())
+       asyncNotify(s.sendNotifyCh)
+       return nil
+}
+
+// readData is used to handle a data frame
+func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error {
+       if err := s.processFlags(flags); err != nil {
+               return err
+       }
+
+       // Check that our recv window is not exceeded
+       length := hdr.Length()
+       if length == 0 {
+               return nil
+       }
+       if remain := atomic.LoadUint32(&s.recvWindow); length > remain {
+               s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length)
+               return ErrRecvWindowExceeded
+       }
+
+       // Wrap in a limited reader
+       conn = &io.LimitedReader{R: conn, N: int64(length)}
+
+       // Copy into buffer
+       s.recvLock.Lock()
+       if s.recvBuf == nil {
+               // Allocate the receive buffer just-in-time to fit the full data frame.
+               // This way we can read in the whole packet without further allocations.
+               s.recvBuf = bytes.NewBuffer(make([]byte, 0, length))
+       }
+       if _, err := io.Copy(s.recvBuf, conn); err != nil {
+               s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err)
+               s.recvLock.Unlock()
+               return err
+       }
+
+       // Decrement the receive window
+       atomic.AddUint32(&s.recvWindow, ^uint32(length-1))
+       s.recvLock.Unlock()
+
+       // Unblock any readers
+       asyncNotify(s.recvNotifyCh)
+       return nil
+}
+
+// SetDeadline sets the read and write deadlines
+func (s *Stream) SetDeadline(t time.Time) error {
+       if err := s.SetReadDeadline(t); err != nil {
+               return err
+       }
+       if err := s.SetWriteDeadline(t); err != nil {
+               return err
+       }
+       return nil
+}
+
+// SetReadDeadline sets the deadline for future Read calls.
+func (s *Stream) SetReadDeadline(t time.Time) error {
+       s.readDeadline = t
+       return nil
+}
+
+// SetWriteDeadline sets the deadline for future Write calls
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+       s.writeDeadline = t
+       return nil
+}
+
+// Shrink is used to compact the amount of buffers utilized.
+// This is useful when using Yamux in a connection pool to reduce
+// the idle memory utilization.
+func (s *Stream) Shrink() {
+       s.recvLock.Lock()
+       if s.recvBuf != nil && s.recvBuf.Len() == 0 {
+               s.recvBuf = nil
+       }
+       s.recvLock.Unlock()
+}
diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go
new file mode 100644 (file)
index 0000000..5fe45af
--- /dev/null
@@ -0,0 +1,28 @@
+package yamux
+
+// asyncSendErr is used to try an async send of an error
+func asyncSendErr(ch chan error, err error) {
+       if ch == nil {
+               return
+       }
+       select {
+       case ch <- err:
+       default:
+       }
+}
+
+// asyncNotify is used to signal a waiting goroutine
+func asyncNotify(ch chan struct{}) {
+       select {
+       case ch <- struct{}{}:
+       default:
+       }
+}
+
+// min computes the minimum of two values
+func min(a, b uint32) uint32 {
+       if a < b {
+               return a
+       }
+       return b
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644 (file)
index 0000000..b03310a
--- /dev/null
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644 (file)
index 0000000..a828d28
--- /dev/null
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+       @echo "Please use \`make <target>' where <target> is one of"
+       @echo "  test                    to run all the tests"
+       @echo "  build                   to build the library and jp executable"
+       @echo "  generate                to run codegen"
+
+
+generate:
+       go generate ./...
+
+build:
+       rm -f $(CMD)
+       go build ./...
+       rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+       mv cmd/$(CMD)/$(CMD) .
+
+test:
+       go test -v ./...
+
+check:
+       go vet ./...
+       @echo "golint ./..."
+       @lint=`golint ./...`; \
+       lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+       echo "$$lint"; \
+       if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+       go test -coverprofile="/tmp/jpcov"  && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+       go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+       go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+       go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+       go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644 (file)
index 0000000..187ef67
--- /dev/null
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644 (file)
index 0000000..9cfa988
--- /dev/null
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+       ast  ASTNode
+       intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+       parser := NewParser()
+       ast, err := parser.Parse(expression)
+       if err != nil {
+               return nil, err
+       }
+       jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+       return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+       jmespath, err := Compile(expression)
+       if err != nil {
+               panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+       }
+       return jmespath
+}
+
+// Search evaluates the compiled JMESPath expression against the input data
+// and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+       return jp.intr.Execute(jp.ast, data)
+}
+
+// Search compiles the given JMESPath expression and evaluates it against
+// the input data in one call, returning the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+       intr := newInterpreter()
+       parser := NewParser()
+       ast, err := parser.Parse(expression)
+       if err != nil {
+               return nil, err
+       }
+       return intr.Execute(ast, data)
+}
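+
+// Illustrative usage (an editor's sketch, not part of the upstream
+// file); assumes the input was unmarshaled from JSON elsewhere:
+//
+//	var data interface{}
+//	_ = json.Unmarshal([]byte(`{"foo": {"bar": "baz"}}`), &data)
+//	result, _ := jmespath.Search("foo.bar", data)
+//	// result == "baz"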
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644 (file)
index 0000000..1cd2d23
--- /dev/null
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+       if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+               return fmt.Sprintf("astNodeType(%d)", i)
+       }
+       return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100644 (file)
index 0000000..9b7cd89
--- /dev/null
@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+       "encoding/json"
+       "errors"
+       "fmt"
+       "math"
+       "reflect"
+       "sort"
+       "strconv"
+       "strings"
+       "unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+       jpUnknown     jpType = "unknown"
+       jpNumber      jpType = "number"
+       jpString      jpType = "string"
+       jpArray       jpType = "array"
+       jpObject      jpType = "object"
+       jpArrayNumber jpType = "array[number]"
+       jpArrayString jpType = "array[string]"
+       jpExpref      jpType = "expref"
+       jpAny         jpType = "any"
+)
+
+type functionEntry struct {
+       name      string
+       arguments []argSpec
+       handler   jpFunction
+       hasExpRef bool
+}
+
+type argSpec struct {
+       types    []jpType
+       variadic bool
+}
+
+type byExprString struct {
+       intr     *treeInterpreter
+       node     ASTNode
+       items    []interface{}
+       hasError bool
+}
+
+func (a *byExprString) Len() int {
+       return len(a.items)
+}
+func (a *byExprString) Swap(i, j int) {
+       a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprString) Less(i, j int) bool {
+       first, err := a.intr.Execute(a.node, a.items[i])
+       if err != nil {
+               a.hasError = true
+               // Return a dummy value.
+               return true
+       }
+       ith, ok := first.(string)
+       if !ok {
+               a.hasError = true
+               return true
+       }
+       second, err := a.intr.Execute(a.node, a.items[j])
+       if err != nil {
+               a.hasError = true
+               // Return a dummy value.
+               return true
+       }
+       jth, ok := second.(string)
+       if !ok {
+               a.hasError = true
+               return true
+       }
+       return ith < jth
+}
+
+type byExprFloat struct {
+       intr     *treeInterpreter
+       node     ASTNode
+       items    []interface{}
+       hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+       return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+       a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+       first, err := a.intr.Execute(a.node, a.items[i])
+       if err != nil {
+               a.hasError = true
+               // Return a dummy value.
+               return true
+       }
+       ith, ok := first.(float64)
+       if !ok {
+               a.hasError = true
+               return true
+       }
+       second, err := a.intr.Execute(a.node, a.items[j])
+       if err != nil {
+               a.hasError = true
+               // Return a dummy value.
+               return true
+       }
+       jth, ok := second.(float64)
+       if !ok {
+               a.hasError = true
+               return true
+       }
+       return ith < jth
+}
+
+type functionCaller struct {
+       functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+       caller := &functionCaller{}
+       caller.functionTable = map[string]functionEntry{
+               "length": {
+                       name: "length",
+                       arguments: []argSpec{
+                               {types: []jpType{jpString, jpArray, jpObject}},
+                       },
+                       handler: jpfLength,
+               },
+               "starts_with": {
+                       name: "starts_with",
+                       arguments: []argSpec{
+                               {types: []jpType{jpString}},
+                               {types: []jpType{jpString}},
+                       },
+                       handler: jpfStartsWith,
+               },
+               "abs": {
+                       name: "abs",
+                       arguments: []argSpec{
+                               {types: []jpType{jpNumber}},
+                       },
+                       handler: jpfAbs,
+               },
+               "avg": {
+                       name: "avg",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArrayNumber}},
+                       },
+                       handler: jpfAvg,
+               },
+               "ceil": {
+                       name: "ceil",
+                       arguments: []argSpec{
+                               {types: []jpType{jpNumber}},
+                       },
+                       handler: jpfCeil,
+               },
+               "contains": {
+                       name: "contains",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArray, jpString}},
+                               {types: []jpType{jpAny}},
+                       },
+                       handler: jpfContains,
+               },
+               "ends_with": {
+                       name: "ends_with",
+                       arguments: []argSpec{
+                               {types: []jpType{jpString}},
+                               {types: []jpType{jpString}},
+                       },
+                       handler: jpfEndsWith,
+               },
+               "floor": {
+                       name: "floor",
+                       arguments: []argSpec{
+                               {types: []jpType{jpNumber}},
+                       },
+                       handler: jpfFloor,
+               },
+               "map": {
+                       name: "amp",
+                       arguments: []argSpec{
+                               {types: []jpType{jpExpref}},
+                               {types: []jpType{jpArray}},
+                       },
+                       handler:   jpfMap,
+                       hasExpRef: true,
+               },
+               "max": {
+                       name: "max",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArrayNumber, jpArrayString}},
+                       },
+                       handler: jpfMax,
+               },
+               "merge": {
+                       name: "merge",
+                       arguments: []argSpec{
+                               {types: []jpType{jpObject}, variadic: true},
+                       },
+                       handler: jpfMerge,
+               },
+               "max_by": {
+                       name: "max_by",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArray}},
+                               {types: []jpType{jpExpref}},
+                       },
+                       handler:   jpfMaxBy,
+                       hasExpRef: true,
+               },
+               "sum": {
+                       name: "sum",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArrayNumber}},
+                       },
+                       handler: jpfSum,
+               },
+               "min": {
+                       name: "min",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArrayNumber, jpArrayString}},
+                       },
+                       handler: jpfMin,
+               },
+               "min_by": {
+                       name: "min_by",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArray}},
+                               {types: []jpType{jpExpref}},
+                       },
+                       handler:   jpfMinBy,
+                       hasExpRef: true,
+               },
+               "type": {
+                       name: "type",
+                       arguments: []argSpec{
+                               {types: []jpType{jpAny}},
+                       },
+                       handler: jpfType,
+               },
+               "keys": {
+                       name: "keys",
+                       arguments: []argSpec{
+                               {types: []jpType{jpObject}},
+                       },
+                       handler: jpfKeys,
+               },
+               "values": {
+                       name: "values",
+                       arguments: []argSpec{
+                               {types: []jpType{jpObject}},
+                       },
+                       handler: jpfValues,
+               },
+               "sort": {
+                       name: "sort",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArrayString, jpArrayNumber}},
+                       },
+                       handler: jpfSort,
+               },
+               "sort_by": {
+                       name: "sort_by",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArray}},
+                               {types: []jpType{jpExpref}},
+                       },
+                       handler:   jpfSortBy,
+                       hasExpRef: true,
+               },
+               "join": {
+                       name: "join",
+                       arguments: []argSpec{
+                               {types: []jpType{jpString}},
+                               {types: []jpType{jpArrayString}},
+                       },
+                       handler: jpfJoin,
+               },
+               "reverse": {
+                       name: "reverse",
+                       arguments: []argSpec{
+                               {types: []jpType{jpArray, jpString}},
+                       },
+                       handler: jpfReverse,
+               },
+               "to_array": {
+                       name: "to_array",
+                       arguments: []argSpec{
+                               {types: []jpType{jpAny}},
+                       },
+                       handler: jpfToArray,
+               },
+               "to_string": {
+                       name: "to_string",
+                       arguments: []argSpec{
+                               {types: []jpType{jpAny}},
+                       },
+                       handler: jpfToString,
+               },
+               "to_number": {
+                       name: "to_number",
+                       arguments: []argSpec{
+                               {types: []jpType{jpAny}},
+                       },
+                       handler: jpfToNumber,
+               },
+               "not_null": {
+                       name: "not_null",
+                       arguments: []argSpec{
+                               {types: []jpType{jpAny}, variadic: true},
+                       },
+                       handler: jpfNotNull,
+               },
+       }
+       return caller
+}
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+       if len(e.arguments) == 0 {
+               return arguments, nil
+       }
+       if !e.arguments[len(e.arguments)-1].variadic {
+               if len(e.arguments) != len(arguments) {
+                       return nil, errors.New("incorrect number of args")
+               }
+               for i, spec := range e.arguments {
+                       userArg := arguments[i]
+                       err := spec.typeCheck(userArg)
+                       if err != nil {
+                               return nil, err
+                       }
+               }
+               return arguments, nil
+       }
+       if len(arguments) < len(e.arguments) {
+               return nil, errors.New("Invalid arity.")
+       }
+       return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+       for _, t := range a.types {
+               switch t {
+               case jpNumber:
+                       if _, ok := arg.(float64); ok {
+                               return nil
+                       }
+               case jpString:
+                       if _, ok := arg.(string); ok {
+                               return nil
+                       }
+               case jpArray:
+                       if isSliceType(arg) {
+                               return nil
+                       }
+               case jpObject:
+                       if _, ok := arg.(map[string]interface{}); ok {
+                               return nil
+                       }
+               case jpArrayNumber:
+                       if _, ok := toArrayNum(arg); ok {
+                               return nil
+                       }
+               case jpArrayString:
+                       if _, ok := toArrayStr(arg); ok {
+                               return nil
+                       }
+               case jpAny:
+                       return nil
+               case jpExpref:
+                       if _, ok := arg.(expRef); ok {
+                               return nil
+                       }
+               }
+       }
+       return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+       entry, ok := f.functionTable[name]
+       if !ok {
+               return nil, errors.New("unknown function: " + name)
+       }
+       resolvedArgs, err := entry.resolveArgs(arguments)
+       if err != nil {
+               return nil, err
+       }
+       if entry.hasExpRef {
+               var extra []interface{}
+               extra = append(extra, intr)
+               resolvedArgs = append(extra, resolvedArgs...)
+       }
+       return entry.handler(resolvedArgs)
+}
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+       num := arguments[0].(float64)
+       return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+       arg := arguments[0]
+       if c, ok := arg.(string); ok {
+               return float64(utf8.RuneCountInString(c)), nil
+       } else if isSliceType(arg) {
+               v := reflect.ValueOf(arg)
+               return float64(v.Len()), nil
+       } else if c, ok := arg.(map[string]interface{}); ok {
+               return float64(len(c)), nil
+       }
+       return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+       search := arguments[0].(string)
+       prefix := arguments[1].(string)
+       return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+       // We've already type checked the value so we can safely use
+       // type assertions.
+       args := arguments[0].([]interface{})
+       length := float64(len(args))
+       numerator := 0.0
+       for _, n := range args {
+               numerator += n.(float64)
+       }
+       return numerator / length, nil
+}
+func jpfCeil(arguments []interface{}) (interface{}, error) {
+       val := arguments[0].(float64)
+       return math.Ceil(val), nil
+}
+func jpfContains(arguments []interface{}) (interface{}, error) {
+       search := arguments[0]
+       el := arguments[1]
+       if searchStr, ok := search.(string); ok {
+               if elStr, ok := el.(string); ok {
+                       return strings.Contains(searchStr, elStr), nil
+               }
+               return false, nil
+       }
+       // Otherwise this is a generic contains for []interface{}
+       general := search.([]interface{})
+       for _, item := range general {
+               if item == el {
+                       return true, nil
+               }
+       }
+       return false, nil
+}
+func jpfEndsWith(arguments []interface{}) (interface{}, error) {
+       search := arguments[0].(string)
+       suffix := arguments[1].(string)
+       return strings.HasSuffix(search, suffix), nil
+}
+func jpfFloor(arguments []interface{}) (interface{}, error) {
+       val := arguments[0].(float64)
+       return math.Floor(val), nil
+}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+       intr := arguments[0].(*treeInterpreter)
+       exp := arguments[1].(expRef)
+       node := exp.ref
+       arr := arguments[2].([]interface{})
+       mapped := make([]interface{}, 0, len(arr))
+       for _, value := range arr {
+               current, err := intr.Execute(node, value)
+               if err != nil {
+                       return nil, err
+               }
+               mapped = append(mapped, current)
+       }
+       return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+       if items, ok := toArrayNum(arguments[0]); ok {
+               if len(items) == 0 {
+                       return nil, nil
+               }
+               if len(items) == 1 {
+                       return items[0], nil
+               }
+               best := items[0]
+               for _, item := range items[1:] {
+                       if item > best {
+                               best = item
+                       }
+               }
+               return best, nil
+       }
+       // Otherwise we're dealing with a max() of strings.
+       items, _ := toArrayStr(arguments[0])
+       if len(items) == 0 {
+               return nil, nil
+       }
+       if len(items) == 1 {
+               return items[0], nil
+       }
+       best := items[0]
+       for _, item := range items[1:] {
+               if item > best {
+                       best = item
+               }
+       }
+       return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+       final := make(map[string]interface{})
+       for _, m := range arguments {
+               mapped := m.(map[string]interface{})
+               for key, value := range mapped {
+                       final[key] = value
+               }
+       }
+       return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+       intr := arguments[0].(*treeInterpreter)
+       arr := arguments[1].([]interface{})
+       exp := arguments[2].(expRef)
+       node := exp.ref
+       if len(arr) == 0 {
+               return nil, nil
+       } else if len(arr) == 1 {
+               return arr[0], nil
+       }
+       start, err := intr.Execute(node, arr[0])
+       if err != nil {
+               return nil, err
+       }
+       switch t := start.(type) {
+       case float64:
+               bestVal := t
+               bestItem := arr[0]
+               for _, item := range arr[1:] {
+                       result, err := intr.Execute(node, item)
+                       if err != nil {
+                               return nil, err
+                       }
+                       current, ok := result.(float64)
+                       if !ok {
+                               return nil, errors.New("invalid type, must be number")
+                       }
+                       if current > bestVal {
+                               bestVal = current
+                               bestItem = item
+                       }
+               }
+               return bestItem, nil
+       case string:
+               bestVal := t
+               bestItem := arr[0]
+               for _, item := range arr[1:] {
+                       result, err := intr.Execute(node, item)
+                       if err != nil {
+                               return nil, err
+                       }
+                       current, ok := result.(string)
+                       if !ok {
+                               return nil, errors.New("invalid type, must be string")
+                       }
+                       if current > bestVal {
+                               bestVal = current
+                               bestItem = item
+                       }
+               }
+               return bestItem, nil
+       default:
+               return nil, errors.New("invalid type, must be number of string")
+       }
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+       items, _ := toArrayNum(arguments[0])
+       sum := 0.0
+       for _, item := range items {
+               sum += item
+       }
+       return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+       if items, ok := toArrayNum(arguments[0]); ok {
+               if len(items) == 0 {
+                       return nil, nil
+               }
+               if len(items) == 1 {
+                       return items[0], nil
+               }
+               best := items[0]
+               for _, item := range items[1:] {
+                       if item < best {
+                               best = item
+                       }
+               }
+               return best, nil
+       }
+       items, _ := toArrayStr(arguments[0])
+       if len(items) == 0 {
+               return nil, nil
+       }
+       if len(items) == 1 {
+               return items[0], nil
+       }
+       best := items[0]
+       for _, item := range items[1:] {
+               if item < best {
+                       best = item
+               }
+       }
+       return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+       intr := arguments[0].(*treeInterpreter)
+       arr := arguments[1].([]interface{})
+       exp := arguments[2].(expRef)
+       node := exp.ref
+       if len(arr) == 0 {
+               return nil, nil
+       } else if len(arr) == 1 {
+               return arr[0], nil
+       }
+       start, err := intr.Execute(node, arr[0])
+       if err != nil {
+               return nil, err
+       }
+       if t, ok := start.(float64); ok {
+               bestVal := t
+               bestItem := arr[0]
+               for _, item := range arr[1:] {
+                       result, err := intr.Execute(node, item)
+                       if err != nil {
+                               return nil, err
+                       }
+                       current, ok := result.(float64)
+                       if !ok {
+                               return nil, errors.New("invalid type, must be number")
+                       }
+                       if current < bestVal {
+                               bestVal = current
+                               bestItem = item
+                       }
+               }
+               return bestItem, nil
+       } else if t, ok := start.(string); ok {
+               bestVal := t
+               bestItem := arr[0]
+               for _, item := range arr[1:] {
+                       result, err := intr.Execute(node, item)
+                       if err != nil {
+                               return nil, err
+                       }
+                       current, ok := result.(string)
+                       if !ok {
+                               return nil, errors.New("invalid type, must be string")
+                       }
+                       if current < bestVal {
+                               bestVal = current
+                               bestItem = item
+                       }
+               }
+               return bestItem, nil
+       } else {
+               return nil, errors.New("invalid type, must be number of string")
+       }
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+       arg := arguments[0]
+       if _, ok := arg.(float64); ok {
+               return "number", nil
+       }
+       if _, ok := arg.(string); ok {
+               return "string", nil
+       }
+       if _, ok := arg.([]interface{}); ok {
+               return "array", nil
+       }
+       if _, ok := arg.(map[string]interface{}); ok {
+               return "object", nil
+       }
+       if arg == nil {
+               return "null", nil
+       }
+       if arg == true || arg == false {
+               return "boolean", nil
+       }
+       return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+       arg := arguments[0].(map[string]interface{})
+       collected := make([]interface{}, 0, len(arg))
+       for key := range arg {
+               collected = append(collected, key)
+       }
+       return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+       arg := arguments[0].(map[string]interface{})
+       collected := make([]interface{}, 0, len(arg))
+       for _, value := range arg {
+               collected = append(collected, value)
+       }
+       return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+       if items, ok := toArrayNum(arguments[0]); ok {
+               d := sort.Float64Slice(items)
+               sort.Stable(d)
+               final := make([]interface{}, len(d))
+               for i, val := range d {
+                       final[i] = val
+               }
+               return final, nil
+       }
+       // Otherwise we're dealing with sort()'ing strings.
+       items, _ := toArrayStr(arguments[0])
+       d := sort.StringSlice(items)
+       sort.Stable(d)
+       final := make([]interface{}, len(d))
+       for i, val := range d {
+               final[i] = val
+       }
+       return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+       intr := arguments[0].(*treeInterpreter)
+       arr := arguments[1].([]interface{})
+       exp := arguments[2].(expRef)
+       node := exp.ref
+       if len(arr) == 0 {
+               return arr, nil
+       } else if len(arr) == 1 {
+               return arr, nil
+       }
+       start, err := intr.Execute(node, arr[0])
+       if err != nil {
+               return nil, err
+       }
+       if _, ok := start.(float64); ok {
+               sortable := &byExprFloat{intr, node, arr, false}
+               sort.Stable(sortable)
+               if sortable.hasError {
+                       return nil, errors.New("error in sort_by comparison")
+               }
+               return arr, nil
+       } else if _, ok := start.(string); ok {
+               sortable := &byExprString{intr, node, arr, false}
+               sort.Stable(sortable)
+               if sortable.hasError {
+                       return nil, errors.New("error in sort_by comparison")
+               }
+               return arr, nil
+       } else {
+               return nil, errors.New("invalid type, must be number of string")
+       }
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+       sep := arguments[0].(string)
+       // We can't just do arguments[1].([]string), we have to
+       // manually convert each item to a string.
+       arrayStr := []string{}
+       for _, item := range arguments[1].([]interface{}) {
+               arrayStr = append(arrayStr, item.(string))
+       }
+       return strings.Join(arrayStr, sep), nil
+}
+func jpfReverse(arguments []interface{}) (interface{}, error) {
+       if s, ok := arguments[0].(string); ok {
+               r := []rune(s)
+               for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
+                       r[i], r[j] = r[j], r[i]
+               }
+               return string(r), nil
+       }
+       items := arguments[0].([]interface{})
+       length := len(items)
+       reversed := make([]interface{}, length)
+       for i, item := range items {
+               reversed[length-(i+1)] = item
+       }
+       return reversed, nil
+}
+func jpfToArray(arguments []interface{}) (interface{}, error) {
+       if _, ok := arguments[0].([]interface{}); ok {
+               return arguments[0], nil
+       }
+       return arguments[:1:1], nil
+}
+func jpfToString(arguments []interface{}) (interface{}, error) {
+       if v, ok := arguments[0].(string); ok {
+               return v, nil
+       }
+       result, err := json.Marshal(arguments[0])
+       if err != nil {
+               return nil, err
+       }
+       return string(result), nil
+}
+func jpfToNumber(arguments []interface{}) (interface{}, error) {
+       arg := arguments[0]
+       if v, ok := arg.(float64); ok {
+               return v, nil
+       }
+       if v, ok := arg.(string); ok {
+               conv, err := strconv.ParseFloat(v, 64)
+               if err != nil {
+                       return nil, nil
+               }
+               return conv, nil
+       }
+       if _, ok := arg.([]interface{}); ok {
+               return nil, nil
+       }
+       if _, ok := arg.(map[string]interface{}); ok {
+               return nil, nil
+       }
+       if arg == nil {
+               return nil, nil
+       }
+       if _, ok := arg.(bool); ok {
+               return nil, nil
+       }
+       return nil, errors.New("unknown type")
+}
+func jpfNotNull(arguments []interface{}) (interface{}, error) {
+       for _, arg := range arguments {
+               if arg != nil {
+                       return arg, nil
+               }
+       }
+       return nil, nil
+}
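+
+// For example (illustrative): not_null(`null`, `2`, `3`) evaluates to 2,
+// because jpfNotNull returns the first non-nil resolved argument.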
diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100644 (file)
index 0000000..13c7460
--- /dev/null
@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+       "errors"
+       "reflect"
+       "unicode"
+       "unicode/utf8"
+)
+
+/* This is a tree based interpreter.  It walks the AST and directly
+   interprets the AST to search through a JSON document.
+*/
+
+type treeInterpreter struct {
+       fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+       interpreter := treeInterpreter{}
+       interpreter.fCall = newFunctionCaller()
+       return &interpreter
+}
+
+type expRef struct {
+       ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
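+//
+// A minimal usage sketch (illustrative; names are from this package):
+//
+//     parser := NewParser()
+//     ast, _ := parser.Parse("foo.bar")
+//     intr := newInterpreter()
+//     result, err := intr.Execute(ast, map[string]interface{}{
+//             "foo": map[string]interface{}{"bar": "baz"},
+//     }) // result == "baz", err == nil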
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+       switch node.nodeType {
+       case ASTComparator:
+               left, err := intr.Execute(node.children[0], value)
+               if err != nil {
+                       return nil, err
+               }
+               right, err := intr.Execute(node.children[1], value)
+               if err != nil {
+                       return nil, err
+               }
+               switch node.value {
+               case tEQ:
+                       return objsEqual(left, right), nil
+               case tNE:
+                       return !objsEqual(left, right), nil
+               }
+               leftNum, ok := left.(float64)
+               if !ok {
+                       return nil, nil
+               }
+               rightNum, ok := right.(float64)
+               if !ok {
+                       return nil, nil
+               }
+               switch node.value {
+               case tGT:
+                       return leftNum > rightNum, nil
+               case tGTE:
+                       return leftNum >= rightNum, nil
+               case tLT:
+                       return leftNum < rightNum, nil
+               case tLTE:
+                       return leftNum <= rightNum, nil
+               }
+       case ASTExpRef:
+               return expRef{ref: node.children[0]}, nil
+       case ASTFunctionExpression:
+               resolvedArgs := []interface{}{}
+               for _, arg := range node.children {
+                       current, err := intr.Execute(arg, value)
+                       if err != nil {
+                               return nil, err
+                       }
+                       resolvedArgs = append(resolvedArgs, current)
+               }
+               return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+       case ASTField:
+               if m, ok := value.(map[string]interface{}); ok {
+                       key := node.value.(string)
+                       return m[key], nil
+               }
+               return intr.fieldFromStruct(node.value.(string), value)
+       case ASTFilterProjection:
+               left, err := intr.Execute(node.children[0], value)
+               if err != nil {
+                       return nil, nil
+               }
+               sliceType, ok := left.([]interface{})
+               if !ok {
+                       if isSliceType(left) {
+                               return intr.filterProjectionWithReflection(node, left)
+                       }
+                       return nil, nil
+               }
+               compareNode := node.children[2]
+               collected := []interface{}{}
+               for _, element := range sliceType {
+                       result, err := intr.Execute(compareNode, element)
+                       if err != nil {
+                               return nil, err
+                       }
+                       if !isFalse(result) {
+                               current, err := intr.Execute(node.children[1], element)
+                               if err != nil {
+                                       return nil, err
+                               }
+                               if current != nil {
+                                       collected = append(collected, current)
+                               }
+                       }
+               }
+               return collected, nil
+       case ASTFlatten:
+               left, err := intr.Execute(node.children[0], value)
+               if err != nil {
+                       return nil, nil
+               }
+               sliceType, ok := left.([]interface{})
+               if !ok {
+                       // If we can't type convert to []interface{}, there's
+                       // a chance this could still work via reflection if we're
+                       // dealing with user provided types.
+                       if isSliceType(left) {
+                               return intr.flattenWithReflection(left)
+                       }
+                       return nil, nil
+               }
+               flattened := []interface{}{}
+               for _, element := range sliceType {
+                       if elementSlice, ok := element.([]interface{}); ok {
+                               flattened = append(flattened, elementSlice...)
+                       } else if isSliceType(element) {
+                               reflectFlat := []interface{}{}
+                               v := reflect.ValueOf(element)
+                               for i := 0; i < v.Len(); i++ {
+                                       reflectFlat = append(reflectFlat, v.Index(i).Interface())
+                               }
+                               flattened = append(flattened, reflectFlat...)
+                       } else {
+                               flattened = append(flattened, element)
+                       }
+               }
+               return flattened, nil
+       case ASTIdentity, ASTCurrentNode:
+               return value, nil
+       case ASTIndex:
+               if sliceType, ok := value.([]interface{}); ok {
+                       index := node.value.(int)
+                       if index < 0 {
+                               index += len(sliceType)
+                       }
+                       if index < len(sliceType) && index >= 0 {
+                               return sliceType[index], nil
+                       }
+                       return nil, nil
+               }
+               // Otherwise try via reflection.
+               rv := reflect.ValueOf(value)
+               if rv.Kind() == reflect.Slice {
+                       index := node.value.(int)
+                       if index < 0 {
+                               index += rv.Len()
+                       }
+                       if index < rv.Len() && index >= 0 {
+                               v := rv.Index(index)
+                               return v.Interface(), nil
+                       }
+               }
+               return nil, nil
+       case ASTKeyValPair:
+               return intr.Execute(node.children[0], value)
+       case ASTLiteral:
+               return node.value, nil
+       case ASTMultiSelectHash:
+               if value == nil {
+                       return nil, nil
+               }
+               collected := make(map[string]interface{})
+               for _, child := range node.children {
+                       current, err := intr.Execute(child, value)
+                       if err != nil {
+                               return nil, err
+                       }
+                       key := child.value.(string)
+                       collected[key] = current
+               }
+               return collected, nil
+       case ASTMultiSelectList:
+               if value == nil {
+                       return nil, nil
+               }
+               collected := []interface{}{}
+               for _, child := range node.children {
+                       current, err := intr.Execute(child, value)
+                       if err != nil {
+                               return nil, err
+                       }
+                       collected = append(collected, current)
+               }
+               return collected, nil
+       case ASTOrExpression:
+               matched, err := intr.Execute(node.children[0], value)
+               if err != nil {
+                       return nil, err
+               }
+               if isFalse(matched) {
+                       matched, err = intr.Execute(node.children[1], value)
+                       if err != nil {
+                               return nil, err
+                       }
+               }
+               return matched, nil
+       case ASTAndExpression:
+               matched, err := intr.Execute(node.children[0], value)
+               if err != nil {
+                       return nil, err
+               }
+               if isFalse(matched) {
+                       return matched, nil
+               }
+               return intr.Execute(node.children[1], value)
+       case ASTNotExpression:
+               matched, err := intr.Execute(node.children[0], value)
+               if err != nil {
+                       return nil, err
+               }
+               if isFalse(matched) {
+                       return true, nil
+               }
+               return false, nil
+       case ASTPipe:
+               result := value
+               var err error
+               for _, child := range node.children {
+                       result, err = intr.Execute(child, result)
+                       if err != nil {
+                               return nil, err
+                       }
+               }
+               return result, nil
+       case ASTProjection:
+               left, err := intr.Execute(node.children[0], value)
+               if err != nil {
+                       return nil, err
+               }
+               sliceType, ok := left.([]interface{})
+               if !ok {
+                       if isSliceType(left) {
+                               return intr.projectWithReflection(node, left)
+                       }
+                       return nil, nil
+               }
+               collected := []interface{}{}
+               var current interface{}
+               for _, element := range sliceType {
+                       current, err = intr.Execute(node.children[1], element)
+                       if err != nil {
+                               return nil, err
+                       }
+                       if current != nil {
+                               collected = append(collected, current)
+                       }
+               }
+               return collected, nil
+       case ASTSubexpression, ASTIndexExpression:
+               left, err := intr.Execute(node.children[0], value)
+               if err != nil {
+                       return nil, err
+               }
+               return intr.Execute(node.children[1], left)
+       case ASTSlice:
+               sliceType, ok := value.([]interface{})
+               if !ok {
+                       if isSliceType(value) {
+                               return intr.sliceWithReflection(node, value)
+                       }
+                       return nil, nil
+               }
+               parts := node.value.([]*int)
+               sliceParams := make([]sliceParam, 3)
+               for i, part := range parts {
+                       if part != nil {
+                               sliceParams[i].Specified = true
+                               sliceParams[i].N = *part
+                       }
+               }
+               return slice(sliceType, sliceParams)
+       case ASTValueProjection:
+               left, err := intr.Execute(node.children[0], value)
+               if err != nil {
+                       return nil, nil
+               }
+               mapType, ok := left.(map[string]interface{})
+               if !ok {
+                       return nil, nil
+               }
+               values := make([]interface{}, len(mapType))
+               for _, value := range mapType {
+                       values = append(values, value)
+               }
+               collected := []interface{}{}
+               for _, element := range values {
+                       current, err := intr.Execute(node.children[1], element)
+                       if err != nil {
+                               return nil, err
+                       }
+                       if current != nil {
+                               collected = append(collected, current)
+                       }
+               }
+               return collected, nil
+       }
+       return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+       rv := reflect.ValueOf(value)
+       first, n := utf8.DecodeRuneInString(key)
+       fieldName := string(unicode.ToUpper(first)) + key[n:]
+       if rv.Kind() == reflect.Struct {
+               v := rv.FieldByName(fieldName)
+               if !v.IsValid() {
+                       return nil, nil
+               }
+               return v.Interface(), nil
+       } else if rv.Kind() == reflect.Ptr {
+               // Handle multiple levels of indirection?
+               if rv.IsNil() {
+                       return nil, nil
+               }
+               rv = rv.Elem()
+               v := rv.FieldByName(fieldName)
+               if !v.IsValid() {
+                       return nil, nil
+               }
+               return v.Interface(), nil
+       }
+       return nil, nil
+}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+       v := reflect.ValueOf(value)
+       flattened := []interface{}{}
+       for i := 0; i < v.Len(); i++ {
+               element := v.Index(i).Interface()
+               if reflect.TypeOf(element).Kind() == reflect.Slice {
+                       // Then insert the contents of the element
+                       // slice into the flattened slice,
+                       // i.e flattened = append(flattened, mySlice...)
+                       elementV := reflect.ValueOf(element)
+                       for j := 0; j < elementV.Len(); j++ {
+                               flattened = append(
+                                       flattened, elementV.Index(j).Interface())
+                       }
+               } else {
+                       flattened = append(flattened, element)
+               }
+       }
+       return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+       v := reflect.ValueOf(value)
+       parts := node.value.([]*int)
+       sliceParams := make([]sliceParam, 3)
+       for i, part := range parts {
+               if part != nil {
+                       sliceParams[i].Specified = true
+                       sliceParams[i].N = *part
+               }
+       }
+       final := []interface{}{}
+       for i := 0; i < v.Len(); i++ {
+               element := v.Index(i).Interface()
+               final = append(final, element)
+       }
+       return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+       compareNode := node.children[2]
+       collected := []interface{}{}
+       v := reflect.ValueOf(value)
+       for i := 0; i < v.Len(); i++ {
+               element := v.Index(i).Interface()
+               result, err := intr.Execute(compareNode, element)
+               if err != nil {
+                       return nil, err
+               }
+               if !isFalse(result) {
+                       current, err := intr.Execute(node.children[1], element)
+                       if err != nil {
+                               return nil, err
+                       }
+                       if current != nil {
+                               collected = append(collected, current)
+                       }
+               }
+       }
+       return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+       collected := []interface{}{}
+       v := reflect.ValueOf(value)
+       for i := 0; i < v.Len(); i++ {
+               element := v.Index(i).Interface()
+               result, err := intr.Execute(node.children[1], element)
+               if err != nil {
+                       return nil, err
+               }
+               if result != nil {
+                       collected = append(collected, result)
+               }
+       }
+       return collected, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644 (file)
index 0000000..817900c
--- /dev/null
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+       "bytes"
+       "encoding/json"
+       "fmt"
+       "strconv"
+       "strings"
+       "unicode/utf8"
+)
+
+type token struct {
+       tokenType tokType
+       value     string
+       position  int
+       length    int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+       expression string       // The expression provided by the user.
+       currentPos int          // The current position in the string.
+       lastWidth  int          // The width of the current rune.
+       buf        bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+       msg        string // Error message displayed to user
+       Expression string // Expression that generated a SyntaxError
+       Offset     int    // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+       // In the future, it would be good to underline the specific
+       // location where the error occurred.
+       return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+       return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
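+
+// For example (illustrative), a SyntaxError with Offset 4 on the expression
+// "foo..bar" highlights as:
+//
+//     foo..bar
+//         ^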
+
+//go:generate stringer -type=tokType
+const (
+       tUnknown tokType = iota
+       tStar
+       tDot
+       tFilter
+       tFlatten
+       tLparen
+       tRparen
+       tLbracket
+       tRbracket
+       tLbrace
+       tRbrace
+       tOr
+       tPipe
+       tNumber
+       tUnquotedIdentifier
+       tQuotedIdentifier
+       tComma
+       tColon
+       tLT
+       tLTE
+       tGT
+       tGTE
+       tEQ
+       tNE
+       tJSONLiteral
+       tStringLiteral
+       tCurrent
+       tExpref
+       tAnd
+       tNot
+       tEOF
+)
+
+var basicTokens = map[rune]tokType{
+       '.': tDot,
+       '*': tStar,
+       ',': tComma,
+       ':': tColon,
+       '{': tLbrace,
+       '}': tRbrace,
+       ']': tRbracket, // tLbracket not included because it could be "[]"
+       '(': tLparen,
+       ')': tRparen,
+       '@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
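+
+// As a worked example (illustrative): for r == 'a' (code point 97), the
+// start check computes 1<<(97-64) == 1<<33, which is a set bit in
+// identifierStartBits, so 'a' may begin an unquoted identifier.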
+
+var whiteSpace = map[rune]bool{
+       ' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+       return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+               t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+       lexer := Lexer{}
+       return &lexer
+}
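+
+// A minimal tokenizing sketch (illustrative):
+//
+//     lexer := NewLexer()
+//     tokens, err := lexer.tokenize("foo.bar[0]")
+//     // tokens: tUnquotedIdentifier, tDot, tUnquotedIdentifier,
+//     //         tLbracket, tNumber, tRbracket, tEOF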
+
+func (lexer *Lexer) next() rune {
+       if lexer.currentPos >= len(lexer.expression) {
+               lexer.lastWidth = 0
+               return eof
+       }
+       r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+       lexer.lastWidth = w
+       lexer.currentPos += w
+       return r
+}
+
+func (lexer *Lexer) back() {
+       lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+       t := lexer.next()
+       lexer.back()
+       return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+       var tokens []token
+       lexer.expression = expression
+       lexer.currentPos = 0
+       lexer.lastWidth = 0
+loop:
+       for {
+               r := lexer.next()
+               if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+                       t := lexer.consumeUnquotedIdentifier()
+                       tokens = append(tokens, t)
+               } else if val, ok := basicTokens[r]; ok {
+                       // Basic single char token.
+                       t := token{
+                               tokenType: val,
+                               value:     string(r),
+                               position:  lexer.currentPos - lexer.lastWidth,
+                               length:    1,
+                       }
+                       tokens = append(tokens, t)
+               } else if r == '-' || (r >= '0' && r <= '9') {
+                       t := lexer.consumeNumber()
+                       tokens = append(tokens, t)
+               } else if r == '[' {
+                       t := lexer.consumeLBracket()
+                       tokens = append(tokens, t)
+               } else if r == '"' {
+                       t, err := lexer.consumeQuotedIdentifier()
+                       if err != nil {
+                               return tokens, err
+                       }
+                       tokens = append(tokens, t)
+               } else if r == '\'' {
+                       t, err := lexer.consumeRawStringLiteral()
+                       if err != nil {
+                               return tokens, err
+                       }
+                       tokens = append(tokens, t)
+               } else if r == '`' {
+                       t, err := lexer.consumeLiteral()
+                       if err != nil {
+                               return tokens, err
+                       }
+                       tokens = append(tokens, t)
+               } else if r == '|' {
+                       t := lexer.matchOrElse(r, '|', tOr, tPipe)
+                       tokens = append(tokens, t)
+               } else if r == '<' {
+                       t := lexer.matchOrElse(r, '=', tLTE, tLT)
+                       tokens = append(tokens, t)
+               } else if r == '>' {
+                       t := lexer.matchOrElse(r, '=', tGTE, tGT)
+                       tokens = append(tokens, t)
+               } else if r == '!' {
+                       t := lexer.matchOrElse(r, '=', tNE, tNot)
+                       tokens = append(tokens, t)
+               } else if r == '=' {
+                       t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
+                       tokens = append(tokens, t)
+               } else if r == '&' {
+                       t := lexer.matchOrElse(r, '&', tAnd, tExpref)
+                       tokens = append(tokens, t)
+               } else if r == eof {
+                       break loop
+               } else if _, ok := whiteSpace[r]; ok {
+                       // Ignore whitespace
+               } else {
+                       return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
+               }
+       }
+       tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
+       return tokens, nil
+}
+
+// Consume characters until the ending rune "end" is reached.
+// If the end of the expression is reached before seeing the
+// terminating rune "end", then an error is returned.
+// If no error occurs then the matching substring is returned.
+// The returned string will not include the ending rune.
+func (lexer *Lexer) consumeUntil(end rune) (string, error) {
+       start := lexer.currentPos
+       current := lexer.next()
+       for current != end && current != eof {
+               if current == '\\' && lexer.peek() != eof {
+                       lexer.next()
+               }
+               current = lexer.next()
+       }
+       if lexer.lastWidth == 0 {
+               // Then we hit an EOF so we never reached the closing
+               // delimiter.
+               return "", SyntaxError{
+                       msg:        "Unclosed delimiter: " + string(end),
+                       Expression: lexer.expression,
+                       Offset:     len(lexer.expression),
+               }
+       }
+       return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
+}
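+
+// For example (illustrative): with the lexer positioned just past the
+// opening backtick of "`[1, 2]`.foo", consumeUntil('`') returns "[1, 2]".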
+
+func (lexer *Lexer) consumeLiteral() (token, error) {
+       start := lexer.currentPos
+       value, err := lexer.consumeUntil('`')
+       if err != nil {
+               return token{}, err
+       }
+       value = strings.Replace(value, "\\`", "`", -1)
+       return token{
+               tokenType: tJSONLiteral,
+               value:     value,
+               position:  start,
+               length:    len(value),
+       }, nil
+}
+
+func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
+       start := lexer.currentPos
+       currentIndex := start
+       current := lexer.next()
+       for current != '\'' && lexer.peek() != eof {
+               if current == '\\' && lexer.peek() == '\'' {
+                       chunk := lexer.expression[currentIndex : lexer.currentPos-1]
+                       lexer.buf.WriteString(chunk)
+                       lexer.buf.WriteString("'")
+                       lexer.next()
+                       currentIndex = lexer.currentPos
+               }
+               current = lexer.next()
+       }
+       if lexer.lastWidth == 0 {
+               // Then we hit an EOF so we never reached the closing
+               // delimiter.
+               return token{}, SyntaxError{
+                       msg:        "Unclosed delimiter: '",
+                       Expression: lexer.expression,
+                       Offset:     len(lexer.expression),
+               }
+       }
+       if currentIndex < lexer.currentPos {
+               lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+       }
+       value := lexer.buf.String()
+       // Reset the buffer so it can be reused.
+       lexer.buf.Reset()
+       return token{
+               tokenType: tStringLiteral,
+               value:     value,
+               position:  start,
+               length:    len(value),
+       }, nil
+}
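+
+// For example (illustrative): the raw string literal 'it\'s' lexes to the
+// value "it's"; the escaped quote is collapsed via the internal buffer.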
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+       return SyntaxError{
+               msg:        msg,
+               Expression: lexer.expression,
+               Offset:     lexer.currentPos - 1,
+       }
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+       start := lexer.currentPos - lexer.lastWidth
+       nextRune := lexer.next()
+       var t token
+       if nextRune == second {
+               t = token{
+                       tokenType: matchedType,
+                       value:     string(first) + string(second),
+                       position:  start,
+                       length:    2,
+               }
+       } else {
+               lexer.back()
+               t = token{
+                       tokenType: singleCharType,
+                       value:     string(first),
+                       position:  start,
+                       length:    1,
+               }
+       }
+       return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+       // There are three options here:
+       // 1. A filter expression "[?"
+       // 2. A flatten operator "[]"
+       // 3. A bare lbracket "["
+       start := lexer.currentPos - lexer.lastWidth
+       nextRune := lexer.next()
+       var t token
+       if nextRune == '?' {
+               t = token{
+                       tokenType: tFilter,
+                       value:     "[?",
+                       position:  start,
+                       length:    2,
+               }
+       } else if nextRune == ']' {
+               t = token{
+                       tokenType: tFlatten,
+                       value:     "[]",
+                       position:  start,
+                       length:    2,
+               }
+       } else {
+               t = token{
+                       tokenType: tLbracket,
+                       value:     "[",
+                       position:  start,
+                       length:    1,
+               }
+               lexer.back()
+       }
+       return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+       start := lexer.currentPos
+       value, err := lexer.consumeUntil('"')
+       if err != nil {
+               return token{}, err
+       }
+       var decoded string
+       asJSON := []byte("\"" + value + "\"")
+       if err := json.Unmarshal(asJSON, &decoded); err != nil {
+               return token{}, err
+       }
+       return token{
+               tokenType: tQuotedIdentifier,
+               value:     decoded,
+               position:  start - 1,
+               length:    len(decoded),
+       }, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+       // Consume runes until we reach the end of an unquoted
+       // identifier.
+       start := lexer.currentPos - lexer.lastWidth
+       for {
+               r := lexer.next()
+               if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+                       lexer.back()
+                       break
+               }
+       }
+       value := lexer.expression[start:lexer.currentPos]
+       return token{
+               tokenType: tUnquotedIdentifier,
+               value:     value,
+               position:  start,
+               length:    lexer.currentPos - start,
+       }
+}
+
+func (lexer *Lexer) consumeNumber() token {
+       // Consume runes until we reach something that's not a number.
+       start := lexer.currentPos - lexer.lastWidth
+       for {
+               r := lexer.next()
+               if r < '0' || r > '9' {
+                       lexer.back()
+                       break
+               }
+       }
+       value := lexer.expression[start:lexer.currentPos]
+       return token{
+               tokenType: tNumber,
+               value:     value,
+               position:  start,
+               length:    lexer.currentPos - start,
+       }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go
new file mode 100644 (file)
index 0000000..1240a17
--- /dev/null
@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+       "encoding/json"
+       "fmt"
+       "strconv"
+       "strings"
+)
+
+type astNodeType int
+
+//go:generate stringer -type astNodeType
+const (
+       ASTEmpty astNodeType = iota
+       ASTComparator
+       ASTCurrentNode
+       ASTExpRef
+       ASTFunctionExpression
+       ASTField
+       ASTFilterProjection
+       ASTFlatten
+       ASTIdentity
+       ASTIndex
+       ASTIndexExpression
+       ASTKeyValPair
+       ASTLiteral
+       ASTMultiSelectHash
+       ASTMultiSelectList
+       ASTOrExpression
+       ASTAndExpression
+       ASTNotExpression
+       ASTPipe
+       ASTProjection
+       ASTSubexpression
+       ASTSlice
+       ASTValueProjection
+)
+
+// ASTNode represents the abstract syntax tree of a JMESPath expression.
+type ASTNode struct {
+       nodeType astNodeType
+       value    interface{}
+       children []ASTNode
+}
+
+func (node ASTNode) String() string {
+       return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging.  You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+       spaces := strings.Repeat(" ", indent)
+       output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+       nextIndent := indent + 2
+       if node.value != nil {
+               if converted, ok := node.value.(fmt.Stringer); ok {
+                       // Account for things like comparator nodes
+                       // that are enums with a String() method.
+                       output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+               } else {
+                       output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+               }
+       }
+       lastIndex := len(node.children)
+       if lastIndex > 0 {
+               output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+               childIndent := nextIndent + 2
+               for _, elem := range node.children {
+                       output += elem.PrettyPrint(childIndent)
+               }
+       }
+       output += fmt.Sprintf("%s}\n", spaces)
+       return output
+}
+
+var bindingPowers = map[tokType]int{
+       tEOF:                0,
+       tUnquotedIdentifier: 0,
+       tQuotedIdentifier:   0,
+       tRbracket:           0,
+       tRparen:             0,
+       tComma:              0,
+       tRbrace:             0,
+       tNumber:             0,
+       tCurrent:            0,
+       tExpref:             0,
+       tColon:              0,
+       tPipe:               1,
+       tOr:                 2,
+       tAnd:                3,
+       tEQ:                 5,
+       tLT:                 5,
+       tLTE:                5,
+       tGT:                 5,
+       tGTE:                5,
+       tNE:                 5,
+       tFlatten:            9,
+       tStar:               20,
+       tFilter:             21,
+       tDot:                40,
+       tNot:                45,
+       tLbrace:             50,
+       tLbracket:           55,
+       tLparen:             60,
+}
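+
+// For instance (illustrative), because tPipe (1) binds more loosely than
+// tOr (2), the expression "a || b | c" parses as "(a || b) | c".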
+
+// Parser holds state about the current expression being parsed.
+type Parser struct {
+       expression string
+       tokens     []token
+       index      int
+}
+
+// NewParser creates a new JMESPath parser.
+func NewParser() *Parser {
+       p := Parser{}
+       return &p
+}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+       lexer := NewLexer()
+       p.expression = expression
+       p.index = 0
+       tokens, err := lexer.tokenize(expression)
+       if err != nil {
+               return ASTNode{}, err
+       }
+       p.tokens = tokens
+       parsed, err := p.parseExpression(0)
+       if err != nil {
+               return ASTNode{}, err
+       }
+       if p.current() != tEOF {
+               return ASTNode{}, p.syntaxError(fmt.Sprintf(
+                       "Unexpected token at the end of the expression: %s", p.current()))
+       }
+       return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+       var err error
+       leftToken := p.lookaheadToken(0)
+       p.advance()
+       leftNode, err := p.nud(leftToken)
+       if err != nil {
+               return ASTNode{}, err
+       }
+       currentToken := p.current()
+       for bindingPower < bindingPowers[currentToken] {
+               p.advance()
+               leftNode, err = p.led(currentToken, leftNode)
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               currentToken = p.current()
+       }
+       return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+       if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+               return p.parseSliceExpression()
+       }
+       indexStr := p.lookaheadToken(0).value
+       parsedInt, err := strconv.Atoi(indexStr)
+       if err != nil {
+               return ASTNode{}, err
+       }
+       indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+       p.advance()
+       if err := p.match(tRbracket); err != nil {
+               return ASTNode{}, err
+       }
+       return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+       parts := []*int{nil, nil, nil}
+       index := 0
+       current := p.current()
+       for current != tRbracket && index < 3 {
+               if current == tColon {
+                       index++
+                       p.advance()
+               } else if current == tNumber {
+                       parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+                       if err != nil {
+                               return ASTNode{}, err
+                       }
+                       parts[index] = &parsedInt
+                       p.advance()
+               } else {
+                       return ASTNode{}, p.syntaxError(
+                               "Expected tColon or tNumber, received: " + p.current().String())
+               }
+               current = p.current()
+       }
+       if err := p.match(tRbracket); err != nil {
+               return ASTNode{}, err
+       }
+       return ASTNode{
+               nodeType: ASTSlice,
+               value:    parts,
+       }, nil
+}
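+
+// For example (illustrative), "[1:10:2]" produces parts [1, 10, 2], while
+// "[::2]" leaves start and stop nil so that defaults are derived later from
+// the length of the value being sliced.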
+
+func (p *Parser) match(tokenType tokType) error {
+       if p.current() == tokenType {
+               p.advance()
+               return nil
+       }
+       return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+       switch tokenType {
+       case tDot:
+               if p.current() != tStar {
+                       right, err := p.parseDotRHS(bindingPowers[tDot])
+                       return ASTNode{
+                               nodeType: ASTSubexpression,
+                               children: []ASTNode{node, right},
+                       }, err
+               }
+               p.advance()
+               right, err := p.parseProjectionRHS(bindingPowers[tDot])
+               return ASTNode{
+                       nodeType: ASTValueProjection,
+                       children: []ASTNode{node, right},
+               }, err
+       case tPipe:
+               right, err := p.parseExpression(bindingPowers[tPipe])
+               return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+       case tOr:
+               right, err := p.parseExpression(bindingPowers[tOr])
+               return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+       case tAnd:
+               right, err := p.parseExpression(bindingPowers[tAnd])
+               return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+       case tLparen:
+               name := node.value
+               var args []ASTNode
+               for p.current() != tRparen {
+                       expression, err := p.parseExpression(0)
+                       if err != nil {
+                               return ASTNode{}, err
+                       }
+                       if p.current() == tComma {
+                               if err := p.match(tComma); err != nil {
+                                       return ASTNode{}, err
+                               }
+                       }
+                       args = append(args, expression)
+               }
+               if err := p.match(tRparen); err != nil {
+                       return ASTNode{}, err
+               }
+               return ASTNode{
+                       nodeType: ASTFunctionExpression,
+                       value:    name,
+                       children: args,
+               }, nil
+       case tFilter:
+               return p.parseFilter(node)
+       case tFlatten:
+               left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+               right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+               return ASTNode{
+                       nodeType: ASTProjection,
+                       children: []ASTNode{left, right},
+               }, err
+       case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+               right, err := p.parseExpression(bindingPowers[tokenType])
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               return ASTNode{
+                       nodeType: ASTComparator,
+                       value:    tokenType,
+                       children: []ASTNode{node, right},
+               }, nil
+       case tLbracket:
+               tokenType := p.current()
+               var right ASTNode
+               var err error
+               if tokenType == tNumber || tokenType == tColon {
+                       right, err = p.parseIndexExpression()
+                       if err != nil {
+                               return ASTNode{}, err
+                       }
+                       return p.projectIfSlice(node, right)
+               }
+               // Otherwise this is a projection.
+               if err := p.match(tStar); err != nil {
+                       return ASTNode{}, err
+               }
+               if err := p.match(tRbracket); err != nil {
+                       return ASTNode{}, err
+               }
+               right, err = p.parseProjectionRHS(bindingPowers[tStar])
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               return ASTNode{
+                       nodeType: ASTProjection,
+                       children: []ASTNode{node, right},
+               }, nil
+       }
+       return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+       switch token.tokenType {
+       case tJSONLiteral:
+               var parsed interface{}
+               err := json.Unmarshal([]byte(token.value), &parsed)
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+       case tStringLiteral:
+               return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+       case tUnquotedIdentifier:
+               return ASTNode{
+                       nodeType: ASTField,
+                       value:    token.value,
+               }, nil
+       case tQuotedIdentifier:
+               node := ASTNode{nodeType: ASTField, value: token.value}
+               if p.current() == tLparen {
+                       return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+               }
+               return node, nil
+       case tStar:
+               left := ASTNode{nodeType: ASTIdentity}
+               var right ASTNode
+               var err error
+               if p.current() == tRbracket {
+                       right = ASTNode{nodeType: ASTIdentity}
+               } else {
+                       right, err = p.parseProjectionRHS(bindingPowers[tStar])
+               }
+               return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+       case tFilter:
+               return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+       case tLbrace:
+               return p.parseMultiSelectHash()
+       case tFlatten:
+               left := ASTNode{
+                       nodeType: ASTFlatten,
+                       children: []ASTNode{{nodeType: ASTIdentity}},
+               }
+               right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+       case tLbracket:
+               tokenType := p.current()
+               if tokenType == tNumber || tokenType == tColon {
+                       right, err := p.parseIndexExpression()
+                       if err != nil {
+                               return ASTNode{}, err
+                       }
+                       return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+               } else if tokenType == tStar && p.lookahead(1) == tRbracket {
+                       p.advance()
+                       p.advance()
+                       right, err := p.parseProjectionRHS(bindingPowers[tStar])
+                       if err != nil {
+                               return ASTNode{}, err
+                       }
+                       return ASTNode{
+                               nodeType: ASTProjection,
+                               children: []ASTNode{{nodeType: ASTIdentity}, right},
+                       }, nil
+               } else {
+                       return p.parseMultiSelectList()
+               }
+       case tCurrent:
+               return ASTNode{nodeType: ASTCurrentNode}, nil
+       case tExpref:
+               expression, err := p.parseExpression(bindingPowers[tExpref])
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+       case tNot:
+               expression, err := p.parseExpression(bindingPowers[tNot])
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+       case tLparen:
+               expression, err := p.parseExpression(0)
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               if err := p.match(tRparen); err != nil {
+                       return ASTNode{}, err
+               }
+               return expression, nil
+       case tEOF:
+               return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+       }
+
+       return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+       var expressions []ASTNode
+       for {
+               expression, err := p.parseExpression(0)
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               expressions = append(expressions, expression)
+               if p.current() == tRbracket {
+                       break
+               }
+               err = p.match(tComma)
+               if err != nil {
+                       return ASTNode{}, err
+               }
+       }
+       err := p.match(tRbracket)
+       if err != nil {
+               return ASTNode{}, err
+       }
+       return ASTNode{
+               nodeType: ASTMultiSelectList,
+               children: expressions,
+       }, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+       var children []ASTNode
+       for {
+               keyToken := p.lookaheadToken(0)
+               if err := p.match(tUnquotedIdentifier); err != nil {
+                       if err := p.match(tQuotedIdentifier); err != nil {
+                               return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+                       }
+               }
+               keyName := keyToken.value
+               err := p.match(tColon)
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               value, err := p.parseExpression(0)
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               node := ASTNode{
+                       nodeType: ASTKeyValPair,
+                       value:    keyName,
+                       children: []ASTNode{value},
+               }
+               children = append(children, node)
+               if p.current() == tComma {
+                       err := p.match(tComma)
+                       if err != nil {
+                               return ASTNode{}, err
+                       }
+               } else if p.current() == tRbrace {
+                       err := p.match(tRbrace)
+                       if err != nil {
+                               return ASTNode{}, err
+                       }
+                       break
+               }
+       }
+       return ASTNode{
+               nodeType: ASTMultiSelectHash,
+               children: children,
+       }, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+       indexExpr := ASTNode{
+               nodeType: ASTIndexExpression,
+               children: []ASTNode{left, right},
+       }
+       if right.nodeType == ASTSlice {
+               right, err := p.parseProjectionRHS(bindingPowers[tStar])
+               return ASTNode{
+                       nodeType: ASTProjection,
+                       children: []ASTNode{indexExpr, right},
+               }, err
+       }
+       return indexExpr, nil
+}
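+
+// For example (illustrative), "foo[0]" stays a plain index expression, while
+// "foo[1:3]" is wrapped in an ASTProjection so the remaining expression is
+// applied to every element of the slice.
+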
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+       var right, condition ASTNode
+       var err error
+       condition, err = p.parseExpression(0)
+       if err != nil {
+               return ASTNode{}, err
+       }
+       if err := p.match(tRbracket); err != nil {
+               return ASTNode{}, err
+       }
+       if p.current() == tFlatten {
+               right = ASTNode{nodeType: ASTIdentity}
+       } else {
+               right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+               if err != nil {
+                       return ASTNode{}, err
+               }
+       }
+
+       return ASTNode{
+               nodeType: ASTFilterProjection,
+               children: []ASTNode{node, right, condition},
+       }, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+       lookahead := p.current()
+       if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+               return p.parseExpression(bindingPower)
+       } else if lookahead == tLbracket {
+               if err := p.match(tLbracket); err != nil {
+                       return ASTNode{}, err
+               }
+               return p.parseMultiSelectList()
+       } else if lookahead == tLbrace {
+               if err := p.match(tLbrace); err != nil {
+                       return ASTNode{}, err
+               }
+               return p.parseMultiSelectHash()
+       }
+       return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+       current := p.current()
+       if bindingPowers[current] < 10 {
+               return ASTNode{nodeType: ASTIdentity}, nil
+       } else if current == tLbracket {
+               return p.parseExpression(bindingPower)
+       } else if current == tFilter {
+               return p.parseExpression(bindingPower)
+       } else if current == tDot {
+               err := p.match(tDot)
+               if err != nil {
+                       return ASTNode{}, err
+               }
+               return p.parseDotRHS(bindingPower)
+       } else {
+               return ASTNode{}, p.syntaxError("Unexpected token for projection RHS: " + current.String())
+       }
+}
+
+func (p *Parser) lookahead(number int) tokType {
+       return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+       return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+       return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+       p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+       for _, elem := range elements {
+               if elem == token {
+                       return true
+               }
+       }
+       return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+       return SyntaxError{
+               msg:        msg,
+               Expression: p.expression,
+               Offset:     p.lookaheadToken(0).position,
+       }
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+       return SyntaxError{
+               msg:        msg,
+               Expression: p.expression,
+               Offset:     t.position,
+       }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644 (file)
index 0000000..dae79cb
--- /dev/null
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+       if i < 0 || i >= tokType(len(_tokType_index)-1) {
+               return fmt.Sprintf("tokType(%d)", i)
+       }
+       return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644 (file)
index 0000000..ddc1b7d
--- /dev/null
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+       "errors"
+       "reflect"
+)
+
+// isFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
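+// For example (illustrative): isFalse("") and isFalse([]interface{}{}) are
+// true, while isFalse(map[string]interface{}{"a": 1}) is false.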
+func isFalse(value interface{}) bool {
+       switch v := value.(type) {
+       case bool:
+               return !v
+       case []interface{}:
+               return len(v) == 0
+       case map[string]interface{}:
+               return len(v) == 0
+       case string:
+               return len(v) == 0
+       case nil:
+               return true
+       }
+       // Try the reflection cases before returning false.
+       rv := reflect.ValueOf(value)
+       switch rv.Kind() {
+       case reflect.Struct:
+               // A struct type will never be false, even if
+               // all of its values are the zero type.
+               return false
+       case reflect.Slice, reflect.Map:
+               return rv.Len() == 0
+       case reflect.Ptr:
+               if rv.IsNil() {
+                       return true
+               }
+               // If it's a pointer type, dereference the pointer and
+               // evaluate the pointed-to value with isFalse.
+               element := rv.Elem()
+               return isFalse(element.Interface())
+       }
+       return false
+}
+
+// objsEqual is a generic object equality check.
+// It will take two arbitrary objects and recursively determine
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+       return reflect.DeepEqual(left, right)
+}
+
+// sliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// Python slices.
+type sliceParam struct {
+       N         int
+       Specified bool
+}
+
+// slice implements the [start:stop:step] style slicing used in JMESPath.
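+//
+// For illustration (not part of the original docs), with
+// data = []interface{}{0, 1, 2, 3, 4}:
+//
+//     slice(data, []sliceParam{{1, true}, {4, true}, {}})   // => [1 2 3]
+//     slice(data, []sliceParam{{}, {}, {-1, true}})         // => [4 3 2 1 0]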
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+       computed, err := computeSliceParams(len(slice), parts)
+       if err != nil {
+               return nil, err
+       }
+       start, stop, step := computed[0], computed[1], computed[2]
+       result := []interface{}{}
+       if step > 0 {
+               for i := start; i < stop; i += step {
+                       result = append(result, slice[i])
+               }
+       } else {
+               for i := start; i > stop; i += step {
+                       result = append(result, slice[i])
+               }
+       }
+       return result, nil
+}
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+       var start, stop, step int
+       if !parts[2].Specified {
+               step = 1
+       } else if parts[2].N == 0 {
+               return nil, errors.New("Invalid slice, step cannot be 0")
+       } else {
+               step = parts[2].N
+       }
+       stepValueNegative := step < 0
+
+       if !parts[0].Specified {
+               if stepValueNegative {
+                       start = length - 1
+               } else {
+                       start = 0
+               }
+       } else {
+               start = capSlice(length, parts[0].N, step)
+       }
+
+       if !parts[1].Specified {
+               if stepValueNegative {
+                       stop = -1
+               } else {
+                       stop = length
+               }
+       } else {
+               stop = capSlice(length, parts[1].N, step)
+       }
+       return []int{start, stop, step}, nil
+}
+
+func capSlice(length int, actual int, step int) int {
+       if actual < 0 {
+               actual += length
+               if actual < 0 {
+                       if step < 0 {
+                               actual = -1
+                       } else {
+                               actual = 0
+                       }
+               }
+       } else if actual >= length {
+               if step < 0 {
+                       actual = length - 1
+               } else {
+                       actual = length
+               }
+       }
+       return actual
+}
+
+// toArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+       // Is there a better way to do this with reflect?
+       if d, ok := data.([]interface{}); ok {
+               result := make([]float64, len(d))
+               for i, el := range d {
+                       item, ok := el.(float64)
+                       if !ok {
+                               return nil, false
+                       }
+                       result[i] = item
+               }
+               return result, true
+       }
+       return nil, false
+}
+
+// toArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.  If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+       // Is there a better way to do this with reflect?
+       if d, ok := data.([]interface{}); ok {
+               result := make([]string, len(d))
+               for i, el := range d {
+                       item, ok := el.(string)
+                       if !ok {
+                               return nil, false
+                       }
+                       result[i] = item
+               }
+               return result, true
+       }
+       return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+       if v == nil {
+               return false
+       }
+       return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE
new file mode 100644 (file)
index 0000000..2298515
--- /dev/null
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md
new file mode 100644 (file)
index 0000000..bcb8c8d
--- /dev/null
@@ -0,0 +1,21 @@
+# copystructure
+
+copystructure is a Go library for deep copying values.
+
+This allows you to copy Go values that may contain reference values
+such as maps, slices, or pointers, and copy their data as well instead
+of just their references.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/copystructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure).
+
+The `Copy` function has examples associated with it there.
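+
+A minimal sketch of typical usage (illustrative, not from the upstream docs):
+
+```go
+original := map[string]interface{}{"tags": []string{"a", "b"}}
+
+dup, err := copystructure.Copy(original)
+if err != nil {
+    panic(err)
+}
+
+// dup is a deep copy: mutating it leaves original untouched.
+dup.(map[string]interface{})["tags"].([]string)[0] = "z"
+```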
diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go
new file mode 100644 (file)
index 0000000..db6a6aa
--- /dev/null
@@ -0,0 +1,15 @@
+package copystructure
+
+import (
+       "reflect"
+       "time"
+)
+
+func init() {
+       Copiers[reflect.TypeOf(time.Time{})] = timeCopier
+}
+
+func timeCopier(v interface{}) (interface{}, error) {
+       // Just... copy it.
+       return v.(time.Time), nil
+}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
new file mode 100644 (file)
index 0000000..0e725ea
--- /dev/null
@@ -0,0 +1,477 @@
+package copystructure
+
+import (
+       "errors"
+       "reflect"
+       "sync"
+
+       "github.com/mitchellh/reflectwalk"
+)
+
+// Copy returns a deep copy of v.
+func Copy(v interface{}) (interface{}, error) {
+       return Config{}.Copy(v)
+}
+
+// CopierFunc is a function that knows how to deep copy a specific type.
+// Register these globally with the Copiers variable.
+type CopierFunc func(interface{}) (interface{}, error)
+
+// Copiers is a map of types that behave specially when they are copied.
+// If a type is found in this map while deep copying, this function
+// will be called to copy it instead of attempting to copy all fields.
+//
+// The key is the reflect.Type of the value, obtained via reflect.TypeOf.
+//
+// It is unsafe to write to this map after copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
+
+// Must is a helper that wraps a call to a function returning
+// (interface{}, error) and panics if the error is non-nil. It is intended
+// for use in variable initializations and should only be used when a copy
+// error should be a crashing case.
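+//
+// For example (illustrative):
+//
+//     var defaultsCopy = Must(Copy(defaults)).(map[string]interface{})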
+func Must(v interface{}, err error) interface{} {
+       if err != nil {
+               panic("copy error: " + err.Error())
+       }
+
+       return v
+}
+
+var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
+
+type Config struct {
+       // Lock any types that are a sync.Locker and are not a mutex while copying.
+       // If there is an RLocker method, use that to get the sync.Locker.
+       Lock bool
+
+       // Copiers is a map of types associated with a CopierFunc. Use the global
+       // Copiers map if this is nil.
+       Copiers map[reflect.Type]CopierFunc
+}
+
+func (c Config) Copy(v interface{}) (interface{}, error) {
+       if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
+               return nil, errPointerRequired
+       }
+
+       w := new(walker)
+       if c.Lock {
+               w.useLocks = true
+       }
+
+       if c.Copiers == nil {
+               c.Copiers = Copiers
+       }
+
+       err := reflectwalk.Walk(v, w)
+       if err != nil {
+               return nil, err
+       }
+
+       // Get the result. If the result is nil, then we want to turn it
+       // into a typed nil if we can.
+       result := w.Result
+       if result == nil {
+               val := reflect.ValueOf(v)
+               result = reflect.Indirect(reflect.New(val.Type())).Interface()
+       }
+
+       return result, nil
+}
+
+// ifaceKey returns the key used to index interface types we've seen. It
+// stores the number of pointers in the upper 32 bits and the depth in the
+// lower 32 bits. This is easy to calculate, easy to match against our
+// current depth, and avoids initializing and cleaning up nested maps or slices.
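+// For example, ifaceKey(2, 3) yields 0x0000000200000003.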
+func ifaceKey(pointers, depth int) uint64 {
+       return uint64(pointers)<<32 | uint64(depth)
+}
+
+type walker struct {
+       Result interface{}
+
+       depth       int
+       ignoreDepth int
+       vals        []reflect.Value
+       cs          []reflect.Value
+
+       // This stores the number of pointers we've walked over, indexed by depth.
+       ps []int
+
+       // If an interface is indirected by a pointer, we need to know the type of
+       // interface to create when creating the new value.  Store the interface
+       // types here, indexed by both the walk depth and the number of pointers
+       // already seen at that depth. Use ifaceKey to calculate the proper uint64
+       // value.
+       ifaceTypes map[uint64]reflect.Type
+
+       // any locks we've taken, indexed by depth
+       locks []sync.Locker
+       // take locks while walking the structure
+       useLocks bool
+}
+
+func (w *walker) Enter(l reflectwalk.Location) error {
+       w.depth++
+
+       // ensure we have enough elements to index via w.depth
+       for w.depth >= len(w.locks) {
+               w.locks = append(w.locks, nil)
+       }
+
+       for len(w.ps) < w.depth+1 {
+               w.ps = append(w.ps, 0)
+       }
+
+       return nil
+}
+
+func (w *walker) Exit(l reflectwalk.Location) error {
+       locker := w.locks[w.depth]
+       w.locks[w.depth] = nil
+       if locker != nil {
+               defer locker.Unlock()
+       }
+
+       // clear out pointers and interfaces as we exit the stack
+       w.ps[w.depth] = 0
+
+       for k := range w.ifaceTypes {
+               mask := uint64(^uint32(0))
+               if k&mask == uint64(w.depth) {
+                       delete(w.ifaceTypes, k)
+               }
+       }
+
+       w.depth--
+       if w.ignoreDepth > w.depth {
+               w.ignoreDepth = 0
+       }
+
+       if w.ignoring() {
+               return nil
+       }
+
+       switch l {
+       case reflectwalk.Map:
+               fallthrough
+       case reflectwalk.Slice:
+               // Pop map off our container
+               w.cs = w.cs[:len(w.cs)-1]
+       case reflectwalk.MapValue:
+               // Pop off the key and value
+               mv := w.valPop()
+               mk := w.valPop()
+               m := w.cs[len(w.cs)-1]
+
+               // If mv is the zero value, SetMapIndex deletes the key from the map,
+               // or in this case never adds it. We need to create a properly typed
+               // zero value so that this key can be set.
+               if !mv.IsValid() {
+                       mv = reflect.Zero(m.Type().Elem())
+               }
+               m.SetMapIndex(mk, mv)
+       case reflectwalk.SliceElem:
+               // Pop off the value and the index and set it on the slice
+               v := w.valPop()
+               i := w.valPop().Interface().(int)
+               if v.IsValid() {
+                       s := w.cs[len(w.cs)-1]
+                       se := s.Index(i)
+                       if se.CanSet() {
+                               se.Set(v)
+                       }
+               }
+       case reflectwalk.Struct:
+               w.replacePointerMaybe()
+
+               // Remove the struct from the container stack
+               w.cs = w.cs[:len(w.cs)-1]
+       case reflectwalk.StructField:
+               // Pop off the value and the field
+               v := w.valPop()
+               f := w.valPop().Interface().(reflect.StructField)
+               if v.IsValid() {
+                       s := w.cs[len(w.cs)-1]
+                       sf := reflect.Indirect(s).FieldByName(f.Name)
+
+                       if sf.CanSet() {
+                               sf.Set(v)
+                       }
+               }
+       case reflectwalk.WalkLoc:
+               // Clear out the slices for GC
+               w.cs = nil
+               w.vals = nil
+       }
+
+       return nil
+}
+
+func (w *walker) Map(m reflect.Value) error {
+       if w.ignoring() {
+               return nil
+       }
+       w.lock(m)
+
+       // Create the map. If the map itself is nil, then just make a nil map
+       var newMap reflect.Value
+       if m.IsNil() {
+               newMap = reflect.Indirect(reflect.New(m.Type()))
+       } else {
+               newMap = reflect.MakeMap(m.Type())
+       }
+
+       w.cs = append(w.cs, newMap)
+       w.valPush(newMap)
+       return nil
+}
+
+func (w *walker) MapElem(m, k, v reflect.Value) error {
+       return nil
+}
+
+func (w *walker) PointerEnter(v bool) error {
+       if v {
+               w.ps[w.depth]++
+       }
+       return nil
+}
+
+func (w *walker) PointerExit(v bool) error {
+       if v {
+               w.ps[w.depth]--
+       }
+       return nil
+}
+
+func (w *walker) Interface(v reflect.Value) error {
+       if !v.IsValid() {
+               return nil
+       }
+       if w.ifaceTypes == nil {
+               w.ifaceTypes = make(map[uint64]reflect.Type)
+       }
+
+       w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type()
+       return nil
+}
+
+func (w *walker) Primitive(v reflect.Value) error {
+       if w.ignoring() {
+               return nil
+       }
+       w.lock(v)
+
+       // IsValid verifies that v is not the zero Value, and CanInterface
+       // verifies that we're allowed to read it (i.e. not an unexported field).
+       var newV reflect.Value
+       if v.IsValid() && v.CanInterface() {
+               newV = reflect.New(v.Type())
+               newV.Elem().Set(v)
+       }
+
+       w.valPush(newV)
+       w.replacePointerMaybe()
+       return nil
+}
+
+func (w *walker) Slice(s reflect.Value) error {
+       if w.ignoring() {
+               return nil
+       }
+       w.lock(s)
+
+       var newS reflect.Value
+       if s.IsNil() {
+               newS = reflect.Indirect(reflect.New(s.Type()))
+       } else {
+               newS = reflect.MakeSlice(s.Type(), s.Len(), s.Cap())
+       }
+
+       w.cs = append(w.cs, newS)
+       w.valPush(newS)
+       return nil
+}
+
+func (w *walker) SliceElem(i int, elem reflect.Value) error {
+       if w.ignoring() {
+               return nil
+       }
+
+       // We don't write the slice here because elem might still be
+       // arbitrarily complex. Just record the index and continue on.
+       w.valPush(reflect.ValueOf(i))
+
+       return nil
+}
+
+func (w *walker) Struct(s reflect.Value) error {
+       if w.ignoring() {
+               return nil
+       }
+       w.lock(s)
+
+       var v reflect.Value
+       if c, ok := Copiers[s.Type()]; ok {
+               // We have a Copier for this struct, so we use that copier to
+               // get the copy, and we ignore anything deeper than this.
+               w.ignoreDepth = w.depth
+
+               dup, err := c(s.Interface())
+               if err != nil {
+                       return err
+               }
+
+               v = reflect.ValueOf(dup)
+       } else {
+               // No copier, we copy ourselves and allow reflectwalk to guide
+               // us deeper into the structure for copying.
+               v = reflect.New(s.Type())
+       }
+
+       // Push the value onto the value stack for setting the struct field,
+       // and add the struct itself to the containers stack in case we walk
+       // deeper so that its own fields can be modified.
+       w.valPush(v)
+       w.cs = append(w.cs, v)
+
+       return nil
+}
+
+func (w *walker) StructField(f reflect.StructField, v reflect.Value) error {
+       if w.ignoring() {
+               return nil
+       }
+
+       // If PkgPath is non-empty, this is a private (unexported) field.
+       // We can't set unexported fields, since the Go runtime doesn't allow it.
+       if f.PkgPath != "" {
+               return reflectwalk.SkipEntry
+       }
+
+       // Push the field onto the stack, we'll handle it when we exit
+       // the struct field in Exit...
+       w.valPush(reflect.ValueOf(f))
+       return nil
+}
+
+// ignore causes the walker to ignore any more values until we exit this one
+func (w *walker) ignore() {
+       w.ignoreDepth = w.depth
+}
+
+func (w *walker) ignoring() bool {
+       return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
+}
+
+func (w *walker) pointerPeek() bool {
+       return w.ps[w.depth] > 0
+}
+
+func (w *walker) valPop() reflect.Value {
+       result := w.vals[len(w.vals)-1]
+       w.vals = w.vals[:len(w.vals)-1]
+
+       // If we're out of values, that means we popped everything off. In
+       // this case, we reset the result so the next pushed value becomes
+       // the result.
+       if len(w.vals) == 0 {
+               w.Result = nil
+       }
+
+       return result
+}
+
+func (w *walker) valPush(v reflect.Value) {
+       w.vals = append(w.vals, v)
+
+       // If we haven't set the result yet, then this is the result since
+       // it is the first (outermost) value we're seeing.
+       if w.Result == nil && v.IsValid() {
+               w.Result = v.Interface()
+       }
+}
+
+func (w *walker) replacePointerMaybe() {
+       // Determine the last pointer value. If it is NOT a pointer, then
+       // we need to push that onto the stack.
+       if !w.pointerPeek() {
+               w.valPush(reflect.Indirect(w.valPop()))
+               return
+       }
+
+       v := w.valPop()
+       for i := 1; i < w.ps[w.depth]; i++ {
+               if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
+                       iface := reflect.New(iType).Elem()
+                       iface.Set(v)
+                       v = iface
+               }
+
+               p := reflect.New(v.Type())
+               p.Elem().Set(v)
+               v = p
+       }
+
+       w.valPush(v)
+}
+
+// if this value is a Locker, lock it and add it to the locks slice
+func (w *walker) lock(v reflect.Value) {
+       if !w.useLocks {
+               return
+       }
+
+       if !v.IsValid() || !v.CanInterface() {
+               return
+       }
+
+       type rlocker interface {
+               RLocker() sync.Locker
+       }
+
+       var locker sync.Locker
+
+       // We can't call Interface() on a value directly, since that requires
+       // a copy. This is OK, since the pointer to a value which is a sync.Locker
+       // is also a sync.Locker.
+       if v.Kind() == reflect.Ptr {
+               switch l := v.Interface().(type) {
+               case rlocker:
+                       // don't lock a mutex directly
+                       if _, ok := l.(*sync.RWMutex); !ok {
+                               locker = l.RLocker()
+                       }
+               case sync.Locker:
+                       locker = l
+               }
+       } else if v.CanAddr() {
+               switch l := v.Addr().Interface().(type) {
+               case rlocker:
+                       // don't lock a mutex directly
+                       if _, ok := l.(*sync.RWMutex); !ok {
+                               locker = l.RLocker()
+                       }
+               case sync.Locker:
+                       locker = l
+               }
+       }
+
+       // still no callable locker
+       if locker == nil {
+               return
+       }
+
+       // don't lock a mutex directly
+       switch locker.(type) {
+       case *sync.Mutex, *sync.RWMutex:
+               return
+       }
+
+       locker.Lock()
+       w.locks[w.depth] = locker
+}
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
new file mode 100644 (file)
index 0000000..f9c841a
--- /dev/null
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644 (file)
index 0000000..d70706d
--- /dev/null
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple: call `homedir.Dir()` to get the home directory
+for a user, and `homedir.Expand()` to expand the `~` in a path to the home
+directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems. This means that any Go code that uses that package
+cannot cross-compile. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
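+
+A minimal sketch (illustrative):
+
+```go
+home, err := homedir.Dir()
+if err != nil {
+    log.Fatal(err)
+}
+
+path, err := homedir.Expand("~/.config/app")
+if err != nil {
+    log.Fatal(err)
+}
+// path is now, e.g., home + "/.config/app"
+```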
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644 (file)
index 0000000..47e1f9e
--- /dev/null
@@ -0,0 +1,137 @@
+package homedir
+
+import (
+       "bytes"
+       "errors"
+       "os"
+       "os/exec"
+       "path/filepath"
+       "runtime"
+       "strconv"
+       "strings"
+       "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+       if !DisableCache {
+               cacheLock.RLock()
+               cached := homedirCache
+               cacheLock.RUnlock()
+               if cached != "" {
+                       return cached, nil
+               }
+       }
+
+       cacheLock.Lock()
+       defer cacheLock.Unlock()
+
+       var result string
+       var err error
+       if runtime.GOOS == "windows" {
+               result, err = dirWindows()
+       } else {
+               // Unix-like system, so just assume Unix
+               result, err = dirUnix()
+       }
+
+       if err != nil {
+               return "", err
+       }
+       homedirCache = result
+       return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
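+//
+// For example (illustrative): Expand("~/foo") might return "/home/user/foo".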
+func Expand(path string) (string, error) {
+       if len(path) == 0 {
+               return path, nil
+       }
+
+       if path[0] != '~' {
+               return path, nil
+       }
+
+       if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+               return "", errors.New("cannot expand user-specific home dir")
+       }
+
+       dir, err := Dir()
+       if err != nil {
+               return "", err
+       }
+
+       return filepath.Join(dir, path[1:]), nil
+}
+
+func dirUnix() (string, error) {
+       // First prefer the HOME environmental variable
+       if home := os.Getenv("HOME"); home != "" {
+               return home, nil
+       }
+
+       // If that fails, try getent
+       var stdout bytes.Buffer
+       cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+       cmd.Stdout = &stdout
+       if err := cmd.Run(); err != nil {
+               // If the error is ErrNotFound, we ignore it. Otherwise, return it.
+               if err != exec.ErrNotFound {
+                       return "", err
+               }
+       } else {
+               if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+                       // username:password:uid:gid:gecos:home:shell
+                       passwdParts := strings.SplitN(passwd, ":", 7)
+                       if len(passwdParts) > 5 {
+                               return passwdParts[5], nil
+                       }
+               }
+       }
+
+       // If all else fails, try the shell
+       stdout.Reset()
+       cmd = exec.Command("sh", "-c", "cd && pwd")
+       cmd.Stdout = &stdout
+       if err := cmd.Run(); err != nil {
+               return "", err
+       }
+
+       result := strings.TrimSpace(stdout.String())
+       if result == "" {
+               return "", errors.New("blank output when reading home directory")
+       }
+
+       return result, nil
+}
+
+func dirWindows() (string, error) {
+       // First prefer the HOME environmental variable
+       if home := os.Getenv("HOME"); home != "" {
+               return home, nil
+       }
+
+       drive := os.Getenv("HOMEDRIVE")
+       path := os.Getenv("HOMEPATH")
+       home := drive + path
+       if drive == "" || path == "" {
+               home = os.Getenv("USERPROFILE")
+       }
+       if home == "" {
+               return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank")
+       }
+
+       return home, nil
+}
diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/LICENSE
new file mode 100644 (file)
index 0000000..a3866a2
--- /dev/null
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/README.md
new file mode 100644 (file)
index 0000000..7d0de5b
--- /dev/null
@@ -0,0 +1,61 @@
+# hashstructure
+
+hashstructure is a Go library for creating a unique hash value
+for arbitrary values in Go.
+
+This can be used to key complex values in a hash (for use in a map, set,
+etc.). Common use cases include comparing two values without sending the
+data across the network, de-duplicating locally cached values, and so on.
+
+## Features
+
+  * Hash any arbitrary Go value, including complex types.
+
+  * Tag a struct field to ignore it and not affect the hash value.
+
+  * Tag a slice type struct field to treat it as a set where ordering
+    doesn't affect the hash code but the field itself is still taken into
+    account to create the hash value.
+
+  * Optionally specify a custom hash function to optimize for speed, collision
+    avoidance for your data set, etc.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/hashstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
+
+A quick code example is shown below:
+
+       type ComplexStruct struct {
+               Name     string
+               Age      uint
+               Metadata map[string]interface{}
+       }
+
+       v := ComplexStruct{
+               Name: "mitchellh",
+               Age:  64,
+               Metadata: map[string]interface{}{
+                       "car":      true,
+                       "location": "California",
+                       "siblings": []string{"Bob", "John"},
+               },
+       }
+
+       hash, err := hashstructure.Hash(v, nil)
+       if err != nil {
+               panic(err)
+       }
+
+       fmt.Printf("%d", hash)
+       // Output:
+       // 2307517237273902113
diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/hashstructure.go
new file mode 100644 (file)
index 0000000..6f586fa
--- /dev/null
@@ -0,0 +1,323 @@
+package hashstructure
+
+import (
+       "encoding/binary"
+       "fmt"
+       "hash"
+       "hash/fnv"
+       "reflect"
+)
+
+// HashOptions are options that are available for hashing.
+type HashOptions struct {
+       // Hasher is the hash function to use. If this isn't set, it will
+       // default to FNV.
+       Hasher hash.Hash64
+
+       // TagName is the struct tag to look at when hashing the structure.
+       // By default this is "hash".
+       TagName string
+}
+
+// Hash returns the hash value of an arbitrary value.
+//
+// If opts is nil, then default options will be used. See HashOptions
+// for the default values.
+//
+// Notes on the value:
+//
+//   * Unexported fields on structs are ignored and do not affect the
+//     hash value.
+//
+//   * Adding an exported field to a struct with the zero value will change
+//     the hash value.
+//
+// For structs, the hashing can be controlled using tags. For example:
+//
+//    struct {
+//        Name string
+//        UUID string `hash:"ignore"`
+//    }
+//
+// The available tag values are:
+//
+//   * "ignore" - The field will be ignored and not affect the hash code.
+//
+//   * "set" - The field will be treated as a set, where ordering doesn't
+//             affect the hash code. This only works for slices.
+//
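+// For example (illustrative), a slice field tagged as a set hashes the same
+// regardless of element order:
+//
+//    type Endpoints struct {
+//        Hosts []string `hash:"set"`
+//    }
+//
+//    // Hash(Endpoints{Hosts: []string{"a", "b"}}, nil) equals
+//    // Hash(Endpoints{Hosts: []string{"b", "a"}}, nil)
+//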
+func Hash(v interface{}, opts *HashOptions) (uint64, error) {
+       // Create default options
+       if opts == nil {
+               opts = &HashOptions{}
+       }
+       if opts.Hasher == nil {
+               opts.Hasher = fnv.New64()
+       }
+       if opts.TagName == "" {
+               opts.TagName = "hash"
+       }
+
+       // Reset the hash
+       opts.Hasher.Reset()
+
+       // Create our walker and walk the structure
+       w := &walker{
+               h:   opts.Hasher,
+               tag: opts.TagName,
+       }
+       return w.visit(reflect.ValueOf(v), nil)
+}
+
+type walker struct {
+       h   hash.Hash64
+       tag string
+}
+
+type visitOpts struct {
+       // Flags are a bitmask of flags to affect behavior of this visit
+       Flags visitFlag
+
+       // Information about the struct containing this field
+       Struct      interface{}
+       StructField string
+}
+
+func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
+       // Loop since these can be wrapped in multiple layers of pointers
+       // and interfaces.
+       for {
+               // If we have an interface, dereference it. We have to do this up
+               // here because it might be a nil in there and the check below must
+               // catch that.
+               if v.Kind() == reflect.Interface {
+                       v = v.Elem()
+                       continue
+               }
+
+               if v.Kind() == reflect.Ptr {
+                       v = reflect.Indirect(v)
+                       continue
+               }
+
+               break
+       }
+
+       // If it is nil, treat it like a zero.
+       if !v.IsValid() {
+               var tmp int8
+               v = reflect.ValueOf(tmp)
+       }
+
+       // Binary writing can't use raw (unsized) ints, so we convert to
+       // a sized int, choosing the largest...
+       switch v.Kind() {
+       case reflect.Int:
+               v = reflect.ValueOf(int64(v.Int()))
+       case reflect.Uint:
+               v = reflect.ValueOf(uint64(v.Uint()))
+       case reflect.Bool:
+               var tmp int8
+               if v.Bool() {
+                       tmp = 1
+               }
+               v = reflect.ValueOf(tmp)
+       }
+
+       k := v.Kind()
+
+       // We can shortcut numeric values by directly binary writing them
+       if k >= reflect.Int && k <= reflect.Complex64 {
+               // A direct hash calculation
+               w.h.Reset()
+               err := binary.Write(w.h, binary.LittleEndian, v.Interface())
+               return w.h.Sum64(), err
+       }
+
+       switch k {
+       case reflect.Array:
+               var h uint64
+               l := v.Len()
+               for i := 0; i < l; i++ {
+                       current, err := w.visit(v.Index(i), nil)
+                       if err != nil {
+                               return 0, err
+                       }
+
+                       h = hashUpdateOrdered(w.h, h, current)
+               }
+
+               return h, nil
+
+       case reflect.Map:
+               var includeMap IncludableMap
+               if opts != nil && opts.Struct != nil {
+                       if v, ok := opts.Struct.(IncludableMap); ok {
+                               includeMap = v
+                       }
+               }
+
+               // Build the hash for the map. We do this by XOR-ing all the key
+               // and value hashes. This makes it deterministic despite ordering.
+               var h uint64
+               for _, k := range v.MapKeys() {
+                       v := v.MapIndex(k)
+                       if includeMap != nil {
+                               incl, err := includeMap.HashIncludeMap(
+                                       opts.StructField, k.Interface(), v.Interface())
+                               if err != nil {
+                                       return 0, err
+                               }
+                               if !incl {
+                                       continue
+                               }
+                       }
+
+                       kh, err := w.visit(k, nil)
+                       if err != nil {
+                               return 0, err
+                       }
+                       vh, err := w.visit(v, nil)
+                       if err != nil {
+                               return 0, err
+                       }
+
+                       fieldHash := hashUpdateOrdered(w.h, kh, vh)
+                       h = hashUpdateUnordered(h, fieldHash)
+               }
+
+               return h, nil
+
+       case reflect.Struct:
+               var include Includable
+               parent := v.Interface()
+               if impl, ok := parent.(Includable); ok {
+                       include = impl
+               }
+
+               t := v.Type()
+               h, err := w.visit(reflect.ValueOf(t.Name()), nil)
+               if err != nil {
+                       return 0, err
+               }
+
+               l := v.NumField()
+               for i := 0; i < l; i++ {
+                       if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
+                               var f visitFlag
+                               fieldType := t.Field(i)
+                               if fieldType.PkgPath != "" {
+                                       // Unexported
+                                       continue
+                               }
+
+                               tag := fieldType.Tag.Get(w.tag)
+                               if tag == "ignore" {
+                                       // Ignore this field
+                                       continue
+                               }
+
+                               // Check if we implement includable and check it
+                               if include != nil {
+                                       incl, err := include.HashInclude(fieldType.Name, v)
+                                       if err != nil {
+                                               return 0, err
+                                       }
+                                       if !incl {
+                                               continue
+                                       }
+                               }
+
+                               switch tag {
+                               case "set":
+                                       f |= visitFlagSet
+                               }
+
+                               kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
+                               if err != nil {
+                                       return 0, err
+                               }
+
+                               vh, err := w.visit(v, &visitOpts{
+                                       Flags:       f,
+                                       Struct:      parent,
+                                       StructField: fieldType.Name,
+                               })
+                               if err != nil {
+                                       return 0, err
+                               }
+
+                               fieldHash := hashUpdateOrdered(w.h, kh, vh)
+                               h = hashUpdateUnordered(h, fieldHash)
+                       }
+               }
+
+               return h, nil
+
+       case reflect.Slice:
+               // We have two behaviors here. If it isn't a set, then we just
+               // visit all the elements. If it is a set, then we do a deterministic
+               // hash code.
+               var h uint64
+               var set bool
+               if opts != nil {
+                       set = (opts.Flags & visitFlagSet) != 0
+               }
+               l := v.Len()
+               for i := 0; i < l; i++ {
+                       current, err := w.visit(v.Index(i), nil)
+                       if err != nil {
+                               return 0, err
+                       }
+
+                       if set {
+                               h = hashUpdateUnordered(h, current)
+                       } else {
+                               h = hashUpdateOrdered(w.h, h, current)
+                       }
+               }
+
+               return h, nil
+
+       case reflect.String:
+               // Directly hash
+               w.h.Reset()
+               _, err := w.h.Write([]byte(v.String()))
+               return w.h.Sum64(), err
+
+       default:
+               return 0, fmt.Errorf("unknown kind to hash: %s", k)
+       }
+
+       return 0, nil
+}
+
+func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
+       // For ordered updates, use a real hash function
+       h.Reset()
+
+       // We just panic if the binary writes fail, because writing a
+       // fixed-size integer to a hash should never fail.
+       e1 := binary.Write(h, binary.LittleEndian, a)
+       e2 := binary.Write(h, binary.LittleEndian, b)
+       if e1 != nil {
+               panic(e1)
+       }
+       if e2 != nil {
+               panic(e2)
+       }
+
+       return h.Sum64()
+}
+
+func hashUpdateUnordered(a, b uint64) uint64 {
+       return a ^ b
+}
+
+// visitFlag is used as a bitmask for affecting visit behavior
+type visitFlag uint
+
+const (
+       visitFlagInvalid visitFlag = iota
+       visitFlagSet               = iota << 1
+)
diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/include.go
new file mode 100644 (file)
index 0000000..b6289c0
--- /dev/null
@@ -0,0 +1,15 @@
+package hashstructure
+
+// Includable is an interface that can optionally be implemented by
+// a struct. It will be called for each field in the struct to check whether
+// it should be included in the hash.
+type Includable interface {
+       HashInclude(field string, v interface{}) (bool, error)
+}
+
+// IncludableMap is an interface that can optionally be implemented by
+// a struct. It will be called when a map-type field is found to ask the
+// struct if the map item should be included in the hash.
+type IncludableMap interface {
+       HashIncludeMap(field string, k, v interface{}) (bool, error)
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
new file mode 100644 (file)
index 0000000..f9c841a
--- /dev/null
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
new file mode 100644 (file)
index 0000000..659d688
--- /dev/null
@@ -0,0 +1,46 @@
+# mapstructure
+
+mapstructure is a Go library for decoding generic map values to structures
+and vice versa, while providing helpful error handling.
+
+This library is most useful when decoding values from some data stream (JSON,
+Gob, etc.) where you don't _quite_ know the structure of the underlying data
+until you read a part of it. You can therefore read a `map[string]interface{}`
+and use this library to decode it into the proper underlying native Go
+structure.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/mapstructure
+```
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
+
+The `Decode` function has examples associated with it there.
+
+## But Why?!
+
+Go offers fantastic standard libraries for decoding formats such as JSON.
+The standard method is to have a struct pre-created, and populate that struct
+from the bytes of the encoded format. This is great, but the problem is if
+you have configuration or an encoding that changes slightly depending on
+specific fields. For example, consider this JSON:
+
+```json
+{
+  "type": "person",
+  "name": "Mitchell"
+}
+```
+
+Perhaps we can't populate a specific structure without first reading
+the "type" field from the JSON. We could always do two passes over the
+decoding of the JSON (reading the "type" first, and the rest later).
+However, it is much simpler to just decode this into a `map[string]interface{}`
+structure, read the "type" key, then use something like this library
+to decode it into the proper structure.
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
new file mode 100644 (file)
index 0000000..115ae67
--- /dev/null
@@ -0,0 +1,154 @@
+package mapstructure
+
+import (
+       "errors"
+       "reflect"
+       "strconv"
+       "strings"
+       "time"
+)
+
+// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
+// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
+func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
+       // Create variables here so we can reference them with the reflect pkg
+       var f1 DecodeHookFuncType
+       var f2 DecodeHookFuncKind
+
+       // Fill in the variables into this interface and the rest is done
+       // automatically using the reflect package.
+       potential := []interface{}{f1, f2}
+
+       v := reflect.ValueOf(h)
+       vt := v.Type()
+       for _, raw := range potential {
+               pt := reflect.ValueOf(raw).Type()
+               if vt.ConvertibleTo(pt) {
+                       return v.Convert(pt).Interface()
+               }
+       }
+
+       return nil
+}
+
+// DecodeHookExec executes the given decode hook. This should be used
+// instead of calling the hook directly, since it degrades gracefully to the
+// older, backwards-compatible DecodeHookFunc that took reflect.Kind instead
+// of reflect.Type.
+func DecodeHookExec(
+       raw DecodeHookFunc,
+       from reflect.Type, to reflect.Type,
+       data interface{}) (interface{}, error) {
+       // Build our arguments that reflect expects
+       argVals := make([]reflect.Value, 3)
+       argVals[0] = reflect.ValueOf(from)
+       argVals[1] = reflect.ValueOf(to)
+       argVals[2] = reflect.ValueOf(data)
+
+       switch f := typedDecodeHook(raw).(type) {
+       case DecodeHookFuncType:
+               return f(from, to, data)
+       case DecodeHookFuncKind:
+               return f(from.Kind(), to.Kind(), data)
+       default:
+               return nil, errors.New("invalid decode hook signature")
+       }
+}
+
+// ComposeDecodeHookFunc creates a single DecodeHookFunc that
+// automatically composes multiple DecodeHookFuncs.
+//
+// The composed funcs are called in order, with the result of the
+// previous transformation.
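+//
+// For example (illustrative):
+//
+//     hook := ComposeDecodeHookFunc(
+//         StringToSliceHookFunc(","),
+//         StringToTimeDurationHookFunc(),
+//     )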
+func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
+       return func(
+               f reflect.Type,
+               t reflect.Type,
+               data interface{}) (interface{}, error) {
+               var err error
+               for _, f1 := range fs {
+                       data, err = DecodeHookExec(f1, f, t, data)
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       // Update the "from" type to match the newly transformed data.
+                       f = nil
+                       if val := reflect.ValueOf(data); val.IsValid() {
+                               f = val.Type()
+                       }
+               }
+
+               return data, nil
+       }
+}
+
+// StringToSliceHookFunc returns a DecodeHookFunc that converts
+// string to []string by splitting on the given sep.
+func StringToSliceHookFunc(sep string) DecodeHookFunc {
+       return func(
+               f reflect.Kind,
+               t reflect.Kind,
+               data interface{}) (interface{}, error) {
+               if f != reflect.String || t != reflect.Slice {
+                       return data, nil
+               }
+
+               raw := data.(string)
+               if raw == "" {
+                       return []string{}, nil
+               }
+
+               return strings.Split(raw, sep), nil
+       }
+}
+
+// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
+// strings to time.Duration.
+func StringToTimeDurationHookFunc() DecodeHookFunc {
+       return func(
+               f reflect.Type,
+               t reflect.Type,
+               data interface{}) (interface{}, error) {
+               if f.Kind() != reflect.String {
+                       return data, nil
+               }
+               if t != reflect.TypeOf(time.Duration(5)) {
+                       return data, nil
+               }
+
+               // Convert it by parsing
+               return time.ParseDuration(data.(string))
+       }
+}
+
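+// WeaklyTypedHook is a DecodeHookFunc that converts bool, numeric, and
+// []uint8 source values to strings when the target kind is reflect.String,
+// mirroring the WeaklyTypedInput conversions in DecoderConfig.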
+func WeaklyTypedHook(
+       f reflect.Kind,
+       t reflect.Kind,
+       data interface{}) (interface{}, error) {
+       dataVal := reflect.ValueOf(data)
+       switch t {
+       case reflect.String:
+               switch f {
+               case reflect.Bool:
+                       if dataVal.Bool() {
+                               return "1", nil
+                       } else {
+                               return "0", nil
+                       }
+               case reflect.Float32:
+                       return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
+               case reflect.Int:
+                       return strconv.FormatInt(dataVal.Int(), 10), nil
+               case reflect.Slice:
+                       dataType := dataVal.Type()
+                       elemKind := dataType.Elem().Kind()
+                       if elemKind == reflect.Uint8 {
+                               return string(dataVal.Interface().([]uint8)), nil
+                       }
+               case reflect.Uint:
+                       return strconv.FormatUint(dataVal.Uint(), 10), nil
+               }
+       }
+
+       return data, nil
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
new file mode 100644 (file)
index 0000000..47a99e5
--- /dev/null
@@ -0,0 +1,50 @@
+package mapstructure
+
+import (
+       "errors"
+       "fmt"
+       "sort"
+       "strings"
+)
+
+// Error implements the error interface and can represent multiple
+// errors that occur in the course of a single decode.
+type Error struct {
+       Errors []string
+}
+
+func (e *Error) Error() string {
+       points := make([]string, len(e.Errors))
+       for i, err := range e.Errors {
+               points[i] = fmt.Sprintf("* %s", err)
+       }
+
+       sort.Strings(points)
+       return fmt.Sprintf(
+               "%d error(s) decoding:\n\n%s",
+               len(e.Errors), strings.Join(points, "\n"))
+}
+
+// WrappedErrors implements the errwrap.Wrapper interface to make this
+// return value more useful with the errwrap and go-multierror libraries.
+func (e *Error) WrappedErrors() []error {
+       if e == nil {
+               return nil
+       }
+
+       result := make([]error, len(e.Errors))
+       for i, e := range e.Errors {
+               result[i] = errors.New(e)
+       }
+
+       return result
+}
+
+func appendErrors(errors []string, err error) []string {
+       switch e := err.(type) {
+       case *Error:
+               return append(errors, e.Errors...)
+       default:
+               return append(errors, e.Error())
+       }
+}
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
new file mode 100644 (file)
index 0000000..6dee0ef
--- /dev/null
@@ -0,0 +1,823 @@
+// The mapstructure package exposes functionality to convert an
+// arbitrary map[string]interface{} into a native Go structure.
+//
+// The Go structure can be arbitrarily complex, containing slices,
+// other structs, etc. and the decoder will properly decode nested
+// maps and so on into the proper structures in the native Go struct.
+// See the examples to see what the decoder is capable of.
+package mapstructure
+
+import (
+       "encoding/json"
+       "errors"
+       "fmt"
+       "reflect"
+       "sort"
+       "strconv"
+       "strings"
+)
+
+// DecodeHookFunc is the callback function that can be used for
+// data transformations. See "DecodeHook" in the DecoderConfig
+// struct.
+//
+// The type should be DecodeHookFuncType or DecodeHookFuncKind.
+// Either is accepted. Types are a superset of Kinds (Types can return
+// Kinds) and are generally a richer thing to use, but Kinds are simpler
+// if you only need those.
+//
+// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
+// we started with Kinds and then realized Types were the better solution,
+// but have a promise to not break backwards compat so we now support
+// both.
+type DecodeHookFunc interface{}
+
+type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
+
+// DecoderConfig is the configuration that is used to create a new decoder
+// and allows customization of various aspects of decoding.
+type DecoderConfig struct {
+       // DecodeHook, if set, will be called before any decoding and any
+       // type conversion (if WeaklyTypedInput is on). This lets you modify
+       // the values before they're set down onto the resulting struct.
+       //
+       // If an error is returned, the entire decode will fail with that
+       // error.
+       DecodeHook DecodeHookFunc
+
+       // If ErrorUnused is true, then it is an error for there to exist
+       // keys in the original map that were unused in the decoding process
+       // (extra keys).
+       ErrorUnused bool
+
+       // ZeroFields, if set to true, will zero fields before writing them.
+       // For example, a map will be emptied before decoded values are put in
+       // it. If this is false, a map will be merged.
+       ZeroFields bool
+
+       // If WeaklyTypedInput is true, the decoder will make the following
+       // "weak" conversions:
+       //
+       //   - bools to string (true = "1", false = "0")
+       //   - numbers to string (base 10)
+       //   - bools to int/uint (true = 1, false = 0)
+       //   - strings to int/uint (base implied by prefix)
+       //   - int to bool (true if value != 0)
+       //   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
+       //     FALSE, false, False. Anything else is an error)
+       //   - empty array = empty map and vice versa
+       //   - negative numbers to overflowed uint values (base 10)
+       //   - slice of maps to a merged map
+       //   - single values are converted to slices if required. Each
+       //     element is weakly decoded. For example: "4" can become []int{4}
+       //     if the target type is an int slice.
+       //
+       WeaklyTypedInput bool
+
+       // Metadata is the struct that will contain extra metadata about
+       // the decoding. If this is nil, then no metadata will be tracked.
+       Metadata *Metadata
+
+       // Result is a pointer to the struct that will contain the decoded
+       // value.
+       Result interface{}
+
+       // The tag name that mapstructure reads for field names. This
+       // defaults to "mapstructure".
+       TagName string
+}
+
+// A Decoder takes a raw interface value and turns it into structured
+// data, keeping track of rich error information along the way in case
+// anything goes wrong. Unlike the basic top-level Decode method, you can
+// more finely control how the Decoder behaves using the DecoderConfig
+// structure. The top-level Decode method is just a convenience that sets
+// up the most basic Decoder.
+type Decoder struct {
+       config *DecoderConfig
+}
+
+// Metadata contains information about decoding a structure that
+// is tedious or difficult to get otherwise.
+type Metadata struct {
+       // Keys are the keys of the structure which were successfully decoded
+       Keys []string
+
+       // Unused is a slice of keys that were found in the raw value but
+       // weren't decoded since there was no matching field in the result interface
+       Unused []string
+}
+
+// Decode takes a map and uses reflection to convert it into the
+// given Go native structure. rawVal must be a pointer to a struct.
+func Decode(m interface{}, rawVal interface{}) error {
+       config := &DecoderConfig{
+               Metadata: nil,
+               Result:   rawVal,
+       }
+
+       decoder, err := NewDecoder(config)
+       if err != nil {
+               return err
+       }
+
+       return decoder.Decode(m)
+}
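+
+// exampleDecode is an illustrative sketch (an example only, not part of
+// the mapstructure API) showing the typical Decode call shape: field
+// names match map keys case-insensitively.
+func exampleDecode() {
+       type Person struct {
+               Name string
+               Age  int
+       }
+
+       input := map[string]interface{}{
+               "name": "Mitchell",
+               "age":  91,
+       }
+
+       var result Person
+       if err := Decode(input, &result); err != nil {
+               panic(err)
+       }
+       fmt.Printf("%#v\n", result) // mapstructure.Person{Name:"Mitchell", Age:91}
+}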
+
+// WeakDecode is the same as Decode but is shorthand to enable
+// WeaklyTypedInput. See DecoderConfig for more info.
+func WeakDecode(input, output interface{}) error {
+       config := &DecoderConfig{
+               Metadata:         nil,
+               Result:           output,
+               WeaklyTypedInput: true,
+       }
+
+       decoder, err := NewDecoder(config)
+       if err != nil {
+               return err
+       }
+
+       return decoder.Decode(input)
+}
+
+// NewDecoder returns a new decoder for the given configuration. Once
+// a decoder has been returned, the same configuration must not be used
+// again.
+func NewDecoder(config *DecoderConfig) (*Decoder, error) {
+       val := reflect.ValueOf(config.Result)
+       if val.Kind() != reflect.Ptr {
+               return nil, errors.New("result must be a pointer")
+       }
+
+       val = val.Elem()
+       if !val.CanAddr() {
+               return nil, errors.New("result must be addressable (a pointer)")
+       }
+
+       if config.Metadata != nil {
+               if config.Metadata.Keys == nil {
+                       config.Metadata.Keys = make([]string, 0)
+               }
+
+               if config.Metadata.Unused == nil {
+                       config.Metadata.Unused = make([]string, 0)
+               }
+       }
+
+       if config.TagName == "" {
+               config.TagName = "mapstructure"
+       }
+
+       result := &Decoder{
+               config: config,
+       }
+
+       return result, nil
+}
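+
+// exampleWeakDecode is an illustrative sketch (an example only, not part
+// of the mapstructure API): it combines WeaklyTypedInput with Metadata
+// tracking through an explicit DecoderConfig.
+func exampleWeakDecode() {
+       var result struct {
+               Port    int
+               Verbose bool
+       }
+
+       md := &Metadata{}
+       decoder, err := NewDecoder(&DecoderConfig{
+               WeaklyTypedInput: true, // "8080" -> 8080, "true" -> true
+               Metadata:         md,
+               Result:           &result,
+       })
+       if err != nil {
+               panic(err)
+       }
+
+       input := map[string]interface{}{"port": "8080", "verbose": "true", "extra": 1}
+       if err := decoder.Decode(input); err != nil {
+               panic(err)
+       }
+       // md.Keys now lists the decoded fields; md.Unused lists "extra".
+}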
+
+// Decode decodes the given raw interface to the target pointer specified
+// by the configuration.
+func (d *Decoder) Decode(raw interface{}) error {
+       return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
+}
+
+// Decodes an unknown data type into a specific reflection value.
+func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
+       if data == nil {
+               // If the data is nil, then we don't set anything.
+               return nil
+       }
+
+       dataVal := reflect.ValueOf(data)
+       if !dataVal.IsValid() {
+               // If the data value is invalid, then we just set the value
+               // to be the zero value.
+               val.Set(reflect.Zero(val.Type()))
+               return nil
+       }
+
+       if d.config.DecodeHook != nil {
+               // We have a DecodeHook, so let's pre-process the data.
+               var err error
+               data, err = DecodeHookExec(
+                       d.config.DecodeHook,
+                       dataVal.Type(), val.Type(), data)
+               if err != nil {
+                       return fmt.Errorf("error decoding '%s': %s", name, err)
+               }
+       }
+
+       var err error
+       dataKind := getKind(val)
+       switch dataKind {
+       case reflect.Bool:
+               err = d.decodeBool(name, data, val)
+       case reflect.Interface:
+               err = d.decodeBasic(name, data, val)
+       case reflect.String:
+               err = d.decodeString(name, data, val)
+       case reflect.Int:
+               err = d.decodeInt(name, data, val)
+       case reflect.Uint:
+               err = d.decodeUint(name, data, val)
+       case reflect.Float32:
+               err = d.decodeFloat(name, data, val)
+       case reflect.Struct:
+               err = d.decodeStruct(name, data, val)
+       case reflect.Map:
+               err = d.decodeMap(name, data, val)
+       case reflect.Ptr:
+               err = d.decodePtr(name, data, val)
+       case reflect.Slice:
+               err = d.decodeSlice(name, data, val)
+       case reflect.Func:
+               err = d.decodeFunc(name, data, val)
+       default:
+               // If we reached this point then we weren't able to decode it
+               return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
+       }
+
+       // If we reached here, then we successfully decoded SOMETHING, so
+       // mark the key as used if we're tracking metadata.
+       if d.config.Metadata != nil && name != "" {
+               d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
+       }
+
+       return err
+}
+
+// decodeBasic decodes a basic type (bool, int, string, etc.) and sets
+// val to the value of "data", which must be assignable to val's type.
+func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
+       dataVal := reflect.ValueOf(data)
+       if !dataVal.IsValid() {
+               dataVal = reflect.Zero(val.Type())
+       }
+
+       dataValType := dataVal.Type()
+       if !dataValType.AssignableTo(val.Type()) {
+               return fmt.Errorf(
+                       "'%s' expected type '%s', got '%s'",
+                       name, val.Type(), dataValType)
+       }
+
+       val.Set(dataVal)
+       return nil
+}
+
+func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
+       dataVal := reflect.ValueOf(data)
+       dataKind := getKind(dataVal)
+
+       converted := true
+       switch {
+       case dataKind == reflect.String:
+               val.SetString(dataVal.String())
+       case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+               if dataVal.Bool() {
+                       val.SetString("1")
+               } else {
+                       val.SetString("0")
+               }
+       case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+               val.SetString(strconv.FormatInt(dataVal.Int(), 10))
+       case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+               val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
+       case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+               val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
+       case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
+               dataType := dataVal.Type()
+               elemKind := dataType.Elem().Kind()
+               switch {
+               case elemKind == reflect.Uint8:
+                       val.SetString(string(dataVal.Interface().([]uint8)))
+               default:
+                       converted = false
+               }
+       default:
+               converted = false
+       }
+
+       if !converted {
+               return fmt.Errorf(
+                       "'%s' expected type '%s', got unconvertible type '%s'",
+                       name, val.Type(), dataVal.Type())
+       }
+
+       return nil
+}
+
+func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
+       dataVal := reflect.ValueOf(data)
+       dataKind := getKind(dataVal)
+       dataType := dataVal.Type()
+
+       switch {
+       case dataKind == reflect.Int:
+               val.SetInt(dataVal.Int())
+       case dataKind == reflect.Uint:
+               val.SetInt(int64(dataVal.Uint()))
+       case dataKind == reflect.Float32:
+               val.SetInt(int64(dataVal.Float()))
+       case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+               if dataVal.Bool() {
+                       val.SetInt(1)
+               } else {
+                       val.SetInt(0)
+               }
+       case dataKind == reflect.String && d.config.WeaklyTypedInput:
+               i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
+               if err == nil {
+                       val.SetInt(i)
+               } else {
+                       return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
+               }
+       case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+               jn := data.(json.Number)
+               i, err := jn.Int64()
+               if err != nil {
+                       return fmt.Errorf(
+                               "error decoding json.Number into %s: %s", name, err)
+               }
+               val.SetInt(i)
+       default:
+               return fmt.Errorf(
+                       "'%s' expected type '%s', got unconvertible type '%s'",
+                       name, val.Type(), dataVal.Type())
+       }
+
+       return nil
+}
+
+func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
+       dataVal := reflect.ValueOf(data)
+       dataKind := getKind(dataVal)
+
+       switch {
+       case dataKind == reflect.Int:
+               i := dataVal.Int()
+               if i < 0 && !d.config.WeaklyTypedInput {
+                       return fmt.Errorf("cannot parse '%s', %d overflows uint",
+                               name, i)
+               }
+               val.SetUint(uint64(i))
+       case dataKind == reflect.Uint:
+               val.SetUint(dataVal.Uint())
+       case dataKind == reflect.Float32:
+               f := dataVal.Float()
+               if f < 0 && !d.config.WeaklyTypedInput {
+                       return fmt.Errorf("cannot parse '%s', %f overflows uint",
+                               name, f)
+               }
+               val.SetUint(uint64(f))
+       case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+               if dataVal.Bool() {
+                       val.SetUint(1)
+               } else {
+                       val.SetUint(0)
+               }
+       case dataKind == reflect.String && d.config.WeaklyTypedInput:
+               i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
+               if err == nil {
+                       val.SetUint(i)
+               } else {
+                       return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
+               }
+       default:
+               return fmt.Errorf(
+                       "'%s' expected type '%s', got unconvertible type '%s'",
+                       name, val.Type(), dataVal.Type())
+       }
+
+       return nil
+}
+
+func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
+       dataVal := reflect.ValueOf(data)
+       dataKind := getKind(dataVal)
+
+       switch {
+       case dataKind == reflect.Bool:
+               val.SetBool(dataVal.Bool())
+       case dataKind == reflect.Int && d.config.WeaklyTypedInput:
+               val.SetBool(dataVal.Int() != 0)
+       case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
+               val.SetBool(dataVal.Uint() != 0)
+       case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
+               val.SetBool(dataVal.Float() != 0)
+       case dataKind == reflect.String && d.config.WeaklyTypedInput:
+               b, err := strconv.ParseBool(dataVal.String())
+               if err == nil {
+                       val.SetBool(b)
+               } else if dataVal.String() == "" {
+                       val.SetBool(false)
+               } else {
+                       return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
+               }
+       default:
+               return fmt.Errorf(
+                       "'%s' expected type '%s', got unconvertible type '%s'",
+                       name, val.Type(), dataVal.Type())
+       }
+
+       return nil
+}
+
+func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
+       dataVal := reflect.ValueOf(data)
+       dataKind := getKind(dataVal)
+       dataType := dataVal.Type()
+
+       switch {
+       case dataKind == reflect.Int:
+               val.SetFloat(float64(dataVal.Int()))
+       case dataKind == reflect.Uint:
+               val.SetFloat(float64(dataVal.Uint()))
+       case dataKind == reflect.Float32:
+               val.SetFloat(float64(dataVal.Float()))
+       case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
+               if dataVal.Bool() {
+                       val.SetFloat(1)
+               } else {
+                       val.SetFloat(0)
+               }
+       case dataKind == reflect.String && d.config.WeaklyTypedInput:
+               f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
+               if err == nil {
+                       val.SetFloat(f)
+               } else {
+                       return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
+               }
+       case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
+               jn := data.(json.Number)
+               i, err := jn.Float64()
+               if err != nil {
+                       return fmt.Errorf(
+                               "error decoding json.Number into %s: %s", name, err)
+               }
+               val.SetFloat(i)
+       default:
+               return fmt.Errorf(
+                       "'%s' expected type '%s', got unconvertible type '%s'",
+                       name, val.Type(), dataVal.Type())
+       }
+
+       return nil
+}
+
+func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
+       valType := val.Type()
+       valKeyType := valType.Key()
+       valElemType := valType.Elem()
+
+       // By default we overwrite keys in the current map
+       valMap := val
+
+       // If the map is nil or we're purposely zeroing fields, make a new map
+       if valMap.IsNil() || d.config.ZeroFields {
+               // Make a new map to hold our result
+               mapType := reflect.MapOf(valKeyType, valElemType)
+               valMap = reflect.MakeMap(mapType)
+       }
+
+       // Check input type
+       dataVal := reflect.Indirect(reflect.ValueOf(data))
+       if dataVal.Kind() != reflect.Map {
+               // In weak mode, we accept a slice of maps as an input...
+               if d.config.WeaklyTypedInput {
+                       switch dataVal.Kind() {
+                       case reflect.Array, reflect.Slice:
+                               // Special case for BC reasons (covered by tests)
+                               if dataVal.Len() == 0 {
+                                       val.Set(valMap)
+                                       return nil
+                               }
+
+                               for i := 0; i < dataVal.Len(); i++ {
+                                       err := d.decode(
+                                               fmt.Sprintf("%s[%d]", name, i),
+                                               dataVal.Index(i).Interface(), val)
+                                       if err != nil {
+                                               return err
+                                       }
+                               }
+
+                               return nil
+                       }
+               }
+
+               return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
+       }
+
+       // Accumulate errors
+       errors := make([]string, 0)
+
+       for _, k := range dataVal.MapKeys() {
+               fieldName := fmt.Sprintf("%s[%s]", name, k)
+
+               // First decode the key into the proper type
+               currentKey := reflect.Indirect(reflect.New(valKeyType))
+               if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
+                       errors = appendErrors(errors, err)
+                       continue
+               }
+
+               // Next decode the data into the proper type
+               v := dataVal.MapIndex(k).Interface()
+               currentVal := reflect.Indirect(reflect.New(valElemType))
+               if err := d.decode(fieldName, v, currentVal); err != nil {
+                       errors = appendErrors(errors, err)
+                       continue
+               }
+
+               valMap.SetMapIndex(currentKey, currentVal)
+       }
+
+       // Set the built up map to the value
+       val.Set(valMap)
+
+       // If we had errors, return those
+       if len(errors) > 0 {
+               return &Error{errors}
+       }
+
+       return nil
+}
+
+func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
+       // Create an element of the concrete (non pointer) type and decode
+       // into that. Then set the value of the pointer to this type.
+       valType := val.Type()
+       valElemType := valType.Elem()
+
+       realVal := val
+       if realVal.IsNil() || d.config.ZeroFields {
+               realVal = reflect.New(valElemType)
+       }
+
+       if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
+               return err
+       }
+
+       val.Set(realVal)
+       return nil
+}
+
+func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
+       // Functions cannot be converted between types, so the data must
+       // already have exactly the same function type as the target value.
+       dataVal := reflect.Indirect(reflect.ValueOf(data))
+       if val.Type() != dataVal.Type() {
+               return fmt.Errorf(
+                       "'%s' expected type '%s', got unconvertible type '%s'",
+                       name, val.Type(), dataVal.Type())
+       }
+       val.Set(dataVal)
+       return nil
+}
+
+func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
+       dataVal := reflect.Indirect(reflect.ValueOf(data))
+       dataValKind := dataVal.Kind()
+       valType := val.Type()
+       valElemType := valType.Elem()
+       sliceType := reflect.SliceOf(valElemType)
+
+       valSlice := val
+       if valSlice.IsNil() || d.config.ZeroFields {
+               // Check input type
+               if dataValKind != reflect.Array && dataValKind != reflect.Slice {
+                       if d.config.WeaklyTypedInput {
+                               switch {
+                               // Empty maps turn into empty slices
+                               case dataValKind == reflect.Map:
+                                       if dataVal.Len() == 0 {
+                                               val.Set(reflect.MakeSlice(sliceType, 0, 0))
+                                               return nil
+                                       }
+
+                               // For all other types we try to convert to the slice type
+                               // and "lift" the value into it, i.e. a string becomes a string slice.
+                               default:
+                                       // Just re-try this function with data as a slice.
+                                       return d.decodeSlice(name, []interface{}{data}, val)
+                               }
+                       }
+
+                       return fmt.Errorf(
+                               "'%s': source data must be an array or slice, got %s", name, dataValKind)
+
+               }
+
+               // Make a new slice to hold our result, same size as the original data.
+               valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
+       }
+
+       // Accumulate any errors
+       errors := make([]string, 0)
+
+       for i := 0; i < dataVal.Len(); i++ {
+               currentData := dataVal.Index(i).Interface()
+               for valSlice.Len() <= i {
+                       valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
+               }
+               currentField := valSlice.Index(i)
+
+               fieldName := fmt.Sprintf("%s[%d]", name, i)
+               if err := d.decode(fieldName, currentData, currentField); err != nil {
+                       errors = appendErrors(errors, err)
+               }
+       }
+
+       // Finally, set the value to the slice we built up
+       val.Set(valSlice)
+
+       // If there were errors, we return those
+       if len(errors) > 0 {
+               return &Error{errors}
+       }
+
+       return nil
+}
+
+func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
+       dataVal := reflect.Indirect(reflect.ValueOf(data))
+
+       // If the type of the value to write to and the data match directly,
+       // then we just set it directly instead of recursing into the structure.
+       if dataVal.Type() == val.Type() {
+               val.Set(dataVal)
+               return nil
+       }
+
+       dataValKind := dataVal.Kind()
+       if dataValKind != reflect.Map {
+               return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
+       }
+
+       dataValType := dataVal.Type()
+       if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
+               return fmt.Errorf(
+                       "'%s' needs a map with string keys, has '%s' keys",
+                       name, dataValType.Key().Kind())
+       }
+
+       dataValKeys := make(map[reflect.Value]struct{})
+       dataValKeysUnused := make(map[interface{}]struct{})
+       for _, dataValKey := range dataVal.MapKeys() {
+               dataValKeys[dataValKey] = struct{}{}
+               dataValKeysUnused[dataValKey.Interface()] = struct{}{}
+       }
+
+       errors := make([]string, 0)
+
+       // This slice will keep track of all the structs we'll be decoding.
+       // There can be more than one struct if there are embedded structs
+       // that are squashed.
+       structs := make([]reflect.Value, 1, 5)
+       structs[0] = val
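+
+       // Illustrative sketch: given
+       //
+       //      type Base struct{ ID string }
+       //      type Child struct {
+       //              Base `mapstructure:",squash"`
+       //      }
+       //
+       // the map key "id" decodes directly into Child.Base.ID, because the
+       // embedded struct's fields are lifted into the parent.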
+
+       // Compile the list of all the fields that we're going to be decoding
+       // from all the structs.
+       fields := make(map[*reflect.StructField]reflect.Value)
+       for len(structs) > 0 {
+               structVal := structs[0]
+               structs = structs[1:]
+
+               structType := structVal.Type()
+
+               for i := 0; i < structType.NumField(); i++ {
+                       fieldType := structType.Field(i)
+                       fieldKind := fieldType.Type.Kind()
+
+                       // If "squash" is specified in the tag, we squash the field down.
+                       squash := false
+                       tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
+                       for _, tag := range tagParts[1:] {
+                               if tag == "squash" {
+                                       squash = true
+                                       break
+                               }
+                       }
+
+                       if squash {
+                               if fieldKind != reflect.Struct {
+                                       errors = appendErrors(errors,
+                                               fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
+                               } else {
+                                       structs = append(structs, val.FieldByName(fieldType.Name))
+                               }
+                               continue
+                       }
+
+                       // Normal struct field, store it away
+                       fields[&fieldType] = structVal.Field(i)
+               }
+       }
+
+       for fieldType, field := range fields {
+               fieldName := fieldType.Name
+
+               tagValue := fieldType.Tag.Get(d.config.TagName)
+               tagValue = strings.SplitN(tagValue, ",", 2)[0]
+               if tagValue != "" {
+                       fieldName = tagValue
+               }
+
+               rawMapKey := reflect.ValueOf(fieldName)
+               rawMapVal := dataVal.MapIndex(rawMapKey)
+               if !rawMapVal.IsValid() {
+                       // Do a slower search by iterating over each key and
+                       // doing a case-insensitive comparison.
+                       for dataValKey := range dataValKeys {
+                               mK, ok := dataValKey.Interface().(string)
+                               if !ok {
+                                       // Not a string key
+                                       continue
+                               }
+
+                               if strings.EqualFold(mK, fieldName) {
+                                       rawMapKey = dataValKey
+                                       rawMapVal = dataVal.MapIndex(dataValKey)
+                                       break
+                               }
+                       }
+
+                       if !rawMapVal.IsValid() {
+                               // There was no matching key in the map for the value in
+                               // the struct. Just ignore.
+                               continue
+                       }
+               }
+
+               // Delete the key we're using from the unused map so we stop tracking
+               delete(dataValKeysUnused, rawMapKey.Interface())
+
+               if !field.IsValid() {
+                       // This should never happen
+                       panic("field is not valid")
+               }
+
+               // If we can't set the field, then it is unexported or something,
+               // and we just continue onwards.
+               if !field.CanSet() {
+                       continue
+               }
+
+               // If the name is an empty string, then we're at the root, and we
+               // don't dot-join the fields.
+               if name != "" {
+                       fieldName = fmt.Sprintf("%s.%s", name, fieldName)
+               }
+
+               if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
+                       errors = appendErrors(errors, err)
+               }
+       }
+
+       if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
+               keys := make([]string, 0, len(dataValKeysUnused))
+               for rawKey := range dataValKeysUnused {
+                       keys = append(keys, rawKey.(string))
+               }
+               sort.Strings(keys)
+
+               err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
+               errors = appendErrors(errors, err)
+       }
+
+       if len(errors) > 0 {
+               return &Error{errors}
+       }
+
+       // Add the unused keys to the list of unused keys if we're tracking metadata
+       if d.config.Metadata != nil {
+               for rawKey := range dataValKeysUnused {
+                       key := rawKey.(string)
+                       if name != "" {
+                               key = fmt.Sprintf("%s.%s", name, key)
+                       }
+
+                       d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
+               }
+       }
+
+       return nil
+}
+
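+// getKind collapses sized kinds into their base kind so the decode switch
+// above needs only one case per numeric family: for example, reflect.Int32
+// is reported as reflect.Int, and reflect.Float64 as reflect.Float32.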
+func getKind(val reflect.Value) reflect.Kind {
+       kind := val.Kind()
+
+       switch {
+       case kind >= reflect.Int && kind <= reflect.Int64:
+               return reflect.Int
+       case kind >= reflect.Uint && kind <= reflect.Uint64:
+               return reflect.Uint
+       case kind >= reflect.Float32 && kind <= reflect.Float64:
+               return reflect.Float32
+       default:
+               return kind
+       }
+}
diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE
new file mode 100644 (file)
index 0000000..f9c841a
--- /dev/null
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md
new file mode 100644 (file)
index 0000000..ac82cd2
--- /dev/null
@@ -0,0 +1,6 @@
+# reflectwalk
+
+reflectwalk is a Go library for "walking" a value in Go using reflection,
+in the same way a directory tree can be "walked" on the filesystem. Walking
+a complex structure can allow you to do manipulations on unknown structures
+such as those decoded from JSON.
diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go
new file mode 100644 (file)
index 0000000..7c59d76
--- /dev/null
@@ -0,0 +1,17 @@
+package reflectwalk
+
+//go:generate stringer -type=Location location.go
+
+type Location uint
+
+const (
+       None Location = iota
+       Map
+       MapKey
+       MapValue
+       Slice
+       SliceElem
+       Struct
+       StructField
+       WalkLoc
+)
diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go
new file mode 100644 (file)
index 0000000..d3cfe85
--- /dev/null
@@ -0,0 +1,16 @@
+// generated by stringer -type=Location location.go; DO NOT EDIT
+
+package reflectwalk
+
+import "fmt"
+
+const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemStructStructFieldWalkLoc"
+
+var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 41, 52, 59}
+
+func (i Location) String() string {
+       if i+1 >= Location(len(_Location_index)) {
+               return fmt.Sprintf("Location(%d)", i)
+       }
+       return _Location_name[_Location_index[i]:_Location_index[i+1]]
+}
diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
new file mode 100644 (file)
index 0000000..ec0a623
--- /dev/null
@@ -0,0 +1,339 @@
+// reflectwalk is a package that allows you to "walk" complex structures
+// similar to how you may "walk" a filesystem: visiting every element one
+// by one and calling callback functions allowing you to handle and manipulate
+// those elements.
+package reflectwalk
+
+import (
+       "errors"
+       "reflect"
+)
+
+// PrimitiveWalker implementations are able to handle primitive values
+// within complex structures. Primitive values are numbers, strings,
+// booleans, funcs, chans.
+//
+// These primitive values are often members of more complex
+// structures (slices, maps, etc.) that are walkable by other interfaces.
+type PrimitiveWalker interface {
+       Primitive(reflect.Value) error
+}
+
+// InterfaceWalker implementations are able to handle interface values as they
+// are encountered during the walk.
+type InterfaceWalker interface {
+       Interface(reflect.Value) error
+}
+
+// MapWalker implementations are able to handle individual elements
+// found within a map structure.
+type MapWalker interface {
+       Map(m reflect.Value) error
+       MapElem(m, k, v reflect.Value) error
+}
+
+// SliceWalker implementations are able to handle slice elements found
+// within complex structures.
+type SliceWalker interface {
+       Slice(reflect.Value) error
+       SliceElem(int, reflect.Value) error
+}
+
+// StructWalker is an interface whose methods are called for
+// structs when a Walk is done.
+type StructWalker interface {
+       Struct(reflect.Value) error
+       StructField(reflect.StructField, reflect.Value) error
+}
+
+// EnterExitWalker implementations are notified before and after
+// they walk deeper into complex structures (into struct fields,
+// into slice elements, etc.)
+type EnterExitWalker interface {
+       Enter(Location) error
+       Exit(Location) error
+}
+
+// PointerWalker implementations are notified whether the value they're
+// walking is a pointer or not. PointerEnter and PointerExit are called
+// for _every_ value, whether it is a pointer or not.
+type PointerWalker interface {
+       PointerEnter(bool) error
+       PointerExit(bool) error
+}
+
+// SkipEntry can be returned from walk functions to skip walking
+// the value of this field. This is only valid in the following functions:
+//
+//   - StructField: skips walking the struct value
+//
+var SkipEntry = errors.New("skip this entry")
+
+// Walk takes an arbitrary value and an interface and traverses the
+// value, calling callbacks on the interface if they are supported.
+// The interface should implement one or more of the walker interfaces
+// in this package, such as PrimitiveWalker, StructWalker, etc.
+func Walk(data, walker interface{}) (err error) {
+       v := reflect.ValueOf(data)
+       ew, ok := walker.(EnterExitWalker)
+       if ok {
+               err = ew.Enter(WalkLoc)
+       }
+
+       if err == nil {
+               err = walk(v, walker)
+       }
+
+       if ok && err == nil {
+               err = ew.Exit(WalkLoc)
+       }
+
+       return
+}
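+
+// primitiveCollector is an illustrative sketch (an example only, not part
+// of the reflectwalk API): a walker that records every primitive value it
+// visits. Only the walker interfaces a type implements are invoked, so
+// implementing PrimitiveWalker alone is enough.
+type primitiveCollector struct {
+       values []reflect.Value
+}
+
+func (c *primitiveCollector) Primitive(v reflect.Value) error {
+       c.values = append(c.values, v)
+       return nil
+}
+
+func exampleWalk() error {
+       data := map[string]interface{}{"host": "localhost", "port": 8080}
+       c := &primitiveCollector{}
+       // After Walk returns, c.values holds the map keys and leaf values.
+       return Walk(data, c)
+}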
+
+func walk(v reflect.Value, w interface{}) (err error) {
+       // Determine if we're receiving a pointer and if so notify the walker.
+       // The logic here is convoluted but very important (tests will fail if
+       // almost any part is changed). I will try to explain here.
+       //
+       // First, we check if the value is an interface, if so, we really need
+       // to check the interface's VALUE to see whether it is a pointer.
+       //
+       // Check whether the value is then a pointer. If so, then set pointer
+       // to true to notify the user.
+       //
+       // If we still have a pointer or an interface after the indirections, then
+       // we unwrap another level
+       //
+       // At this time, we also set "v" to be the dereferenced value. This is
+       // because once we've unwrapped the pointer we want to use that value.
+       pointer := false
+       pointerV := v
+
+       for {
+               if pointerV.Kind() == reflect.Interface {
+                       if iw, ok := w.(InterfaceWalker); ok {
+                               if err = iw.Interface(pointerV); err != nil {
+                                       return
+                               }
+                       }
+
+                       pointerV = pointerV.Elem()
+               }
+
+               if pointerV.Kind() == reflect.Ptr {
+                       pointer = true
+                       v = reflect.Indirect(pointerV)
+               }
+               if pw, ok := w.(PointerWalker); ok {
+                       if err = pw.PointerEnter(pointer); err != nil {
+                               return
+                       }
+
+                       defer func(pointer bool) {
+                               if err != nil {
+                                       return
+                               }
+
+                               err = pw.PointerExit(pointer)
+                       }(pointer)
+               }
+
+               if pointer {
+                       pointerV = v
+               }
+               pointer = false
+
+               // If we still have a pointer or interface we have to indirect another level.
+               switch pointerV.Kind() {
+               case reflect.Ptr, reflect.Interface:
+                       continue
+               }
+               break
+       }
+
+       // We preserve the original value here because if it is an interface
+       // type, we want to pass that directly into the walkPrimitive, so that
+       // we can set it.
+       originalV := v
+       if v.Kind() == reflect.Interface {
+               v = v.Elem()
+       }
+
+       k := v.Kind()
+       if k >= reflect.Int && k <= reflect.Complex128 {
+               k = reflect.Int
+       }
+
+       switch k {
+       // Primitives
+       case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid:
+               err = walkPrimitive(originalV, w)
+               return
+       case reflect.Map:
+               err = walkMap(v, w)
+               return
+       case reflect.Slice:
+               err = walkSlice(v, w)
+               return
+       case reflect.Struct:
+               err = walkStruct(v, w)
+               return
+       default:
+               panic("unsupported type: " + k.String())
+       }
+}
+
+func walkMap(v reflect.Value, w interface{}) error {
+       ew, ewok := w.(EnterExitWalker)
+       if ewok {
+               ew.Enter(Map)
+       }
+
+       if mw, ok := w.(MapWalker); ok {
+               if err := mw.Map(v); err != nil {
+                       return err
+               }
+       }
+
+       for _, k := range v.MapKeys() {
+               kv := v.MapIndex(k)
+
+               if mw, ok := w.(MapWalker); ok {
+                       if err := mw.MapElem(v, k, kv); err != nil {
+                               return err
+                       }
+               }
+
+               ew, ok := w.(EnterExitWalker)
+               if ok {
+                       ew.Enter(MapKey)
+               }
+
+               if err := walk(k, w); err != nil {
+                       return err
+               }
+
+               if ok {
+                       ew.Exit(MapKey)
+                       ew.Enter(MapValue)
+               }
+
+               if err := walk(kv, w); err != nil {
+                       return err
+               }
+
+               if ok {
+                       ew.Exit(MapValue)
+               }
+       }
+
+       if ewok {
+               ew.Exit(Map)
+       }
+
+       return nil
+}
+
+func walkPrimitive(v reflect.Value, w interface{}) error {
+       if pw, ok := w.(PrimitiveWalker); ok {
+               return pw.Primitive(v)
+       }
+
+       return nil
+}
+
+func walkSlice(v reflect.Value, w interface{}) (err error) {
+       ew, ok := w.(EnterExitWalker)
+       if ok {
+               ew.Enter(Slice)
+       }
+
+       if sw, ok := w.(SliceWalker); ok {
+               if err := sw.Slice(v); err != nil {
+                       return err
+               }
+       }
+
+       for i := 0; i < v.Len(); i++ {
+               elem := v.Index(i)
+
+               if sw, ok := w.(SliceWalker); ok {
+                       if err := sw.SliceElem(i, elem); err != nil {
+                               return err
+                       }
+               }
+
+               ew, ok := w.(EnterExitWalker)
+               if ok {
+                       ew.Enter(SliceElem)
+               }
+
+               if err := walk(elem, w); err != nil {
+                       return err
+               }
+
+               if ok {
+                       ew.Exit(SliceElem)
+               }
+       }
+
+       ew, ok = w.(EnterExitWalker)
+       if ok {
+               ew.Exit(Slice)
+       }
+
+       return nil
+}
+
+func walkStruct(v reflect.Value, w interface{}) (err error) {
+       ew, ewok := w.(EnterExitWalker)
+       if ewok {
+               ew.Enter(Struct)
+       }
+
+       if sw, ok := w.(StructWalker); ok {
+               if err = sw.Struct(v); err != nil {
+                       return
+               }
+       }
+
+       vt := v.Type()
+       for i := 0; i < vt.NumField(); i++ {
+               sf := vt.Field(i)
+               f := v.FieldByIndex([]int{i})
+
+               if sw, ok := w.(StructWalker); ok {
+                       err = sw.StructField(sf, f)
+
+                       // SkipEntry just pretends this field doesn't even exist
+                       if err == SkipEntry {
+                               continue
+                       }
+
+                       if err != nil {
+                               return
+                       }
+               }
+
+               ew, ok := w.(EnterExitWalker)
+               if ok {
+                       ew.Enter(StructField)
+               }
+
+               err = walk(f, w)
+               if err != nil {
+                       return
+               }
+
+               if ok {
+                       ew.Exit(StructField)
+               }
+       }
+
+       if ewok {
+               ew.Exit(Struct)
+       }
+
+       return nil
+}
diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE
new file mode 100644 (file)
index 0000000..488357b
--- /dev/null
@@ -0,0 +1,20 @@
+Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md
new file mode 100644 (file)
index 0000000..b6aad1c
--- /dev/null
@@ -0,0 +1,65 @@
+# UUID package for Go language
+
+[![Build Status](https://travis-ci.org/satori/go.uuid.png?branch=master)](https://travis-ci.org/satori/go.uuid)
+[![Coverage Status](https://coveralls.io/repos/github/satori/go.uuid/badge.svg?branch=master)](https://coveralls.io/github/satori/go.uuid)
+[![GoDoc](http://godoc.org/github.com/satori/go.uuid?status.png)](http://godoc.org/github.com/satori/go.uuid)
+
+This package provides a pure Go implementation of the Universally Unique Identifier (UUID). Both the creation and the parsing of UUIDs are supported.
+
+It ships with 100% test coverage and benchmarks out of the box.
+
+Supported versions:
+* Version 1, based on timestamp and MAC address (RFC 4122)
+* Version 2, based on timestamp, MAC address and POSIX UID/GID (DCE 1.1)
+* Version 3, based on MD5 hashing (RFC 4122)
+* Version 4, based on random numbers (RFC 4122)
+* Version 5, based on SHA-1 hashing (RFC 4122)
+
+## Installation
+
+Use the `go` command:
+
+       $ go get github.com/satori/go.uuid
+
+## Requirements
+
+The UUID package requires Go >= 1.2.
+
+## Example
+
+```go
+package main
+
+import (
+       "fmt"
+       "github.com/satori/go.uuid"
+)
+
+func main() {
+       // Creating UUID Version 4
+       u1 := uuid.NewV4()
+       fmt.Printf("UUIDv4: %s\n", u1)
+
+       // Parsing UUID from string input
+       u2, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+       if err != nil {
+               fmt.Printf("Something went wrong: %s", err)
+       }
+       fmt.Printf("Successfully parsed: %s", u2)
+}
+```
+
+## Documentation
+
+[Documentation](http://godoc.org/github.com/satori/go.uuid) is hosted at the GoDoc project.
+
+## Links
+* [RFC 4122](http://tools.ietf.org/html/rfc4122)
+* [DCE 1.1: Authentication and Security Services](http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01)
+
+## Copyright
+
+Copyright (C) 2013-2016 by Maxim Bublis <b@codemonkey.ru>.
+
+The UUID package is released under the MIT License.
+See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details.
diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go
new file mode 100644 (file)
index 0000000..295f3fc
--- /dev/null
@@ -0,0 +1,481 @@
+// Copyright (C) 2013-2015 by Maxim Bublis <b@codemonkey.ru>
+//
+// Permission is hereby granted, free of charge, to any person obtaining
+// a copy of this software and associated documentation files (the
+// "Software"), to deal in the Software without restriction, including
+// without limitation the rights to use, copy, modify, merge, publish,
+// distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to
+// the following conditions:
+//
+// The above copyright notice and this permission notice shall be
+// included in all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+// Package uuid provides an implementation of the Universally Unique Identifier (UUID).
+// Supported versions are 1, 3, 4 and 5 (as specified in RFC 4122) and
+// version 2 (as specified in DCE 1.1).
+package uuid
+
+import (
+       "bytes"
+       "crypto/md5"
+       "crypto/rand"
+       "crypto/sha1"
+       "database/sql/driver"
+       "encoding/binary"
+       "encoding/hex"
+       "fmt"
+       "hash"
+       "net"
+       "os"
+       "sync"
+       "time"
+)
+
+// UUID layout variants.
+const (
+       VariantNCS = iota
+       VariantRFC4122
+       VariantMicrosoft
+       VariantFuture
+)
+
+// UUID DCE domains.
+const (
+       DomainPerson = iota
+       DomainGroup
+       DomainOrg
+)
+
+// Difference in 100-nanosecond intervals between
+// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
+const epochStart = 122192928000000000
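+// (141427 days between the epochs * 86400 seconds/day * 10^7
+// 100-nanosecond intervals/second = 122192928000000000.)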
+
+// Used in string method conversion
+const dash byte = '-'
+
+// UUID v1/v2 storage.
+var (
+       storageMutex  sync.Mutex
+       storageOnce   sync.Once
+       epochFunc     = unixTimeFunc
+       clockSequence uint16
+       lastTime      uint64
+       hardwareAddr  [6]byte
+       posixUID      = uint32(os.Getuid())
+       posixGID      = uint32(os.Getgid())
+)
+
+// String parse helpers.
+var (
+       urnPrefix  = []byte("urn:uuid:")
+       byteGroups = []int{8, 4, 4, 4, 12}
+)
+
+func initClockSequence() {
+       buf := make([]byte, 2)
+       safeRandom(buf)
+       clockSequence = binary.BigEndian.Uint16(buf)
+}
+
+func initHardwareAddr() {
+       interfaces, err := net.Interfaces()
+       if err == nil {
+               for _, iface := range interfaces {
+                       if len(iface.HardwareAddr) >= 6 {
+                               copy(hardwareAddr[:], iface.HardwareAddr)
+                               return
+                       }
+               }
+       }
+
+       // Initialize hardwareAddr randomly in case no real
+       // network interface is present
+       safeRandom(hardwareAddr[:])
+
+       // Set multicast bit as recommended in RFC 4122
+       hardwareAddr[0] |= 0x01
+}
+
+func initStorage() {
+       initClockSequence()
+       initHardwareAddr()
+}
+
+func safeRandom(dest []byte) {
+       if _, err := rand.Read(dest); err != nil {
+               panic(err)
+       }
+}
+
+// Returns the difference in 100-nanosecond intervals between the
+// UUID epoch (October 15, 1582) and the current time.
+// This is the default epoch calculation function.
+func unixTimeFunc() uint64 {
+       return epochStart + uint64(time.Now().UnixNano()/100)
+}
+
+// UUID representation compliant with specification
+// described in RFC 4122.
+type UUID [16]byte
+
+// NullUUID can be used with the standard sql package to represent a
+// UUID value that can be NULL in the database
+type NullUUID struct {
+       UUID  UUID
+       Valid bool
+}
+
+// The nil UUID is a special form of UUID that is specified to have all
+// 128 bits set to zero.
+var Nil = UUID{}
+
+// Predefined namespace UUIDs.
+var (
+       NamespaceDNS, _  = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+       NamespaceURL, _  = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
+       NamespaceOID, _  = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
+       NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
+)
+
+// And returns result of binary AND of two UUIDs.
+func And(u1 UUID, u2 UUID) UUID {
+       u := UUID{}
+       for i := 0; i < 16; i++ {
+               u[i] = u1[i] & u2[i]
+       }
+       return u
+}
+
+// Or returns result of binary OR of two UUIDs.
+func Or(u1 UUID, u2 UUID) UUID {
+       u := UUID{}
+       for i := 0; i < 16; i++ {
+               u[i] = u1[i] | u2[i]
+       }
+       return u
+}
+
+// Equal returns true if u1 and u2 are equal, otherwise it returns false.
+func Equal(u1 UUID, u2 UUID) bool {
+       return bytes.Equal(u1[:], u2[:])
+}
+
+// Version returns the algorithm version used to generate the UUID.
+func (u UUID) Version() uint {
+       return uint(u[6] >> 4)
+}
+
+// Variant returns the UUID layout variant.
+func (u UUID) Variant() uint {
+       switch {
+       case (u[8] & 0x80) == 0x00:
+               return VariantNCS
+       case (u[8]&0xc0)|0x80 == 0x80:
+               return VariantRFC4122
+       case (u[8]&0xe0)|0xc0 == 0xc0:
+               return VariantMicrosoft
+       }
+       return VariantFuture
+}
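+
+// For example, a freshly generated v4 UUID reports Version() == 4 and
+// Variant() == VariantRFC4122, since NewV4 stamps those bits via
+// SetVersion and SetVariant.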
+
+// Bytes returns the byte slice representation of the UUID.
+func (u UUID) Bytes() []byte {
+       return u[:]
+}
+
+// String returns the canonical string representation of the UUID:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+func (u UUID) String() string {
+       buf := make([]byte, 36)
+
+       hex.Encode(buf[0:8], u[0:4])
+       buf[8] = dash
+       hex.Encode(buf[9:13], u[4:6])
+       buf[13] = dash
+       hex.Encode(buf[14:18], u[6:8])
+       buf[18] = dash
+       hex.Encode(buf[19:23], u[8:10])
+       buf[23] = dash
+       hex.Encode(buf[24:], u[10:])
+
+       return string(buf)
+}
+
+// SetVersion sets version bits.
+func (u *UUID) SetVersion(v byte) {
+       u[6] = (u[6] & 0x0f) | (v << 4)
+}
+
+// SetVariant sets variant bits as described in RFC 4122.
+func (u *UUID) SetVariant() {
+       u[8] = (u[8] & 0xbf) | 0x80
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+// The encoding is the same as returned by String.
+func (u UUID) MarshalText() (text []byte, err error) {
+       text = []byte(u.String())
+       return
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+// The following formats are supported:
+// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
+func (u *UUID) UnmarshalText(text []byte) (err error) {
+       if len(text) < 32 {
+               err = fmt.Errorf("uuid: UUID string too short: %s", text)
+               return
+       }
+
+       t := text[:]
+       braced := false
+
+       if bytes.Equal(t[:9], urnPrefix) {
+               t = t[9:]
+       } else if t[0] == '{' {
+               braced = true
+               t = t[1:]
+       }
+
+       b := u[:]
+
+       for i, byteGroup := range byteGroups {
+               if i > 0 {
+                       if t[0] != '-' {
+                               err = fmt.Errorf("uuid: invalid string format")
+                               return
+                       }
+                       t = t[1:]
+               }
+
+               if len(t) < byteGroup {
+                       err = fmt.Errorf("uuid: UUID string too short: %s", text)
+                       return
+               }
+
+               if i == 4 && len(t) > byteGroup &&
+                       ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) {
+                       err = fmt.Errorf("uuid: UUID string too long: %s", text)
+                       return
+               }
+
+               _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup])
+               if err != nil {
+                       return
+               }
+
+               t = t[byteGroup:]
+               b = b[byteGroup/2:]
+       }
+
+       return
+}
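+
+// exampleUnmarshalText is an illustrative sketch (an example only, not
+// part of this package's API): all three accepted forms parse to the
+// same UUID.
+func exampleUnmarshalText() {
+       var u UUID
+       for _, s := range []string{
+               "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+               "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
+               "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
+       } {
+               if err := u.UnmarshalText([]byte(s)); err != nil {
+                       panic(err)
+               }
+       }
+}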
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (u UUID) MarshalBinary() (data []byte, err error) {
+       data = u.Bytes()
+       return
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+// It will return an error if the slice isn't 16 bytes long.
+func (u *UUID) UnmarshalBinary(data []byte) (err error) {
+       if len(data) != 16 {
+               err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
+               return
+       }
+       copy(u[:], data)
+
+       return
+}
+
+// Value implements the driver.Valuer interface.
+func (u UUID) Value() (driver.Value, error) {
+       return u.String(), nil
+}
+
+// Scan implements the sql.Scanner interface.
+// A 16-byte slice is handled by UnmarshalBinary, while
+// a longer byte slice or a string is handled by UnmarshalText.
+func (u *UUID) Scan(src interface{}) error {
+       switch src := src.(type) {
+       case []byte:
+               if len(src) == 16 {
+                       return u.UnmarshalBinary(src)
+               }
+               return u.UnmarshalText(src)
+
+       case string:
+               return u.UnmarshalText([]byte(src))
+       }
+
+       return fmt.Errorf("uuid: cannot convert %T to UUID", src)
+}
+
+// Value implements the driver.Valuer interface.
+func (u NullUUID) Value() (driver.Value, error) {
+       if !u.Valid {
+               return nil, nil
+       }
+       // Delegate to UUID Value function
+       return u.UUID.Value()
+}
+
+// Scan implements the sql.Scanner interface.
+func (u *NullUUID) Scan(src interface{}) error {
+       if src == nil {
+               u.UUID, u.Valid = Nil, false
+               return nil
+       }
+
+       // Delegate to UUID Scan function
+       u.Valid = true
+       return u.UUID.Scan(src)
+}
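+
+// Editor's note: a minimal usage sketch for the Value/Scan pair above,
+// assuming a *sql.DB named db and a table "users" with a nullable UUID
+// column "external_id" (both hypothetical):
+//
+//     var id NullUUID
+//     err := db.QueryRow("SELECT external_id FROM users WHERE id = $1", 1).Scan(&id)
+//     if err == nil && id.Valid {
+//             fmt.Println(id.UUID) // prints the canonical 36-character form
+//     }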
+
+// FromBytes returns a UUID converted from a raw byte slice input.
+// It will return an error if the slice isn't 16 bytes long.
+func FromBytes(input []byte) (u UUID, err error) {
+       err = u.UnmarshalBinary(input)
+       return
+}
+
+// FromBytesOrNil returns a UUID converted from a raw byte slice input.
+// Same behavior as FromBytes, but returns a Nil UUID on error.
+func FromBytesOrNil(input []byte) UUID {
+       uuid, err := FromBytes(input)
+       if err != nil {
+               return Nil
+       }
+       return uuid
+}
+
+// FromString returns a UUID parsed from a string input.
+// Input is expected in a form accepted by UnmarshalText.
+func FromString(input string) (u UUID, err error) {
+       err = u.UnmarshalText([]byte(input))
+       return
+}
+
+// FromStringOrNil returns a UUID parsed from a string input.
+// Same behavior as FromString, but returns a Nil UUID on error.
+func FromStringOrNil(input string) UUID {
+       uuid, err := FromString(input)
+       if err != nil {
+               return Nil
+       }
+       return uuid
+}
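+
+// Editor's note: the helpers above make the three textual forms accepted by
+// UnmarshalText interchangeable; a quick sketch:
+//
+//     u1, _ := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+//     u2 := FromStringOrNil("{6ba7b810-9dad-11d1-80b4-00c04fd430c8}")
+//     u3 := FromStringOrNil("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+//     // u1 == u2 && u2 == u3; malformed input leaves u2/u3 equal to Nil.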
+
+// getStorage returns the UUID v1/v2 generation state:
+// the epoch timestamp, clock sequence, and hardware address.
+func getStorage() (uint64, uint16, []byte) {
+       storageOnce.Do(initStorage)
+
+       storageMutex.Lock()
+       defer storageMutex.Unlock()
+
+       timeNow := epochFunc()
+       // The clock moved backwards since the last UUID generation,
+       // so the clock sequence must be incremented.
+       if timeNow <= lastTime {
+               clockSequence++
+       }
+       lastTime = timeNow
+
+       return timeNow, clockSequence, hardwareAddr[:]
+}
+
+// NewV1 returns a UUID based on the current timestamp and MAC address.
+func NewV1() UUID {
+       u := UUID{}
+
+       timeNow, clockSeq, hardwareAddr := getStorage()
+
+       binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
+       binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+       binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+       binary.BigEndian.PutUint16(u[8:], clockSeq)
+
+       copy(u[10:], hardwareAddr)
+
+       u.SetVersion(1)
+       u.SetVariant()
+
+       return u
+}
+
+// NewV2 returns a DCE Security UUID based on the POSIX UID/GID.
+func NewV2(domain byte) UUID {
+       u := UUID{}
+
+       timeNow, clockSeq, hardwareAddr := getStorage()
+
+       switch domain {
+       case DomainPerson:
+               binary.BigEndian.PutUint32(u[0:], posixUID)
+       case DomainGroup:
+               binary.BigEndian.PutUint32(u[0:], posixGID)
+       }
+
+       binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
+       binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
+       binary.BigEndian.PutUint16(u[8:], clockSeq)
+       u[9] = domain
+
+       copy(u[10:], hardwareAddr)
+
+       u.SetVersion(2)
+       u.SetVariant()
+
+       return u
+}
+
+// NewV3 returns a UUID based on the MD5 hash of a namespace UUID and name.
+func NewV3(ns UUID, name string) UUID {
+       u := newFromHash(md5.New(), ns, name)
+       u.SetVersion(3)
+       u.SetVariant()
+
+       return u
+}
+
+// NewV4 returns a randomly generated UUID.
+func NewV4() UUID {
+       u := UUID{}
+       safeRandom(u[:])
+       u.SetVersion(4)
+       u.SetVariant()
+
+       return u
+}
+
+// NewV5 returns a UUID based on the SHA-1 hash of a namespace UUID and name.
+func NewV5(ns UUID, name string) UUID {
+       u := newFromHash(sha1.New(), ns, name)
+       u.SetVersion(5)
+       u.SetVariant()
+
+       return u
+}
+
+// newFromHash returns a UUID based on the hash of a namespace UUID and a name.
+func newFromHash(h hash.Hash, ns UUID, name string) UUID {
+       u := UUID{}
+       h.Write(ns[:])
+       h.Write([]byte(name))
+       copy(u[:], h.Sum(nil))
+
+       return u
+}
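
Editor's note on the file above: NewV1 through NewV5 are the package's public
constructors. A self-contained sketch of their use, assuming this is the
vendored satori/go.uuid package and using its exported NamespaceDNS value:

        package main

        import (
                "fmt"

                uuid "github.com/satori/go.uuid"
        )

        func main() {
                // V4 is random; two calls disagree with overwhelming probability.
                fmt.Println(uuid.NewV4())

                // V3 (MD5) and V5 (SHA-1) are pure functions of (namespace, name),
                // so hashing the same inputs always reproduces the same UUID.
                a := uuid.NewV5(uuid.NamespaceDNS, "example.com")
                b := uuid.NewV5(uuid.NamespaceDNS, "example.com")
                fmt.Println(a == b) // true
        }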
diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE
new file mode 100644 (file)
index 0000000..6a66aea
--- /dev/null
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS
new file mode 100644 (file)
index 0000000..7330990
--- /dev/null
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h
new file mode 100644 (file)
index 0000000..80ad222
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+#define REDMASK51     0x0007FFFFFFFFFFFF
diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s
new file mode 100644 (file)
index 0000000..0ad5398
--- /dev/null
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// These constants cannot be encoded in non-MOVQ immediates.
+// We access them directly from memory instead.
+
+DATA ·_121666_213(SB)/8, $996687872
+GLOBL ·_121666_213(SB), 8, $8
+
+DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
+GLOBL ·_2P0(SB), 8, $8
+
+DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
+GLOBL ·_2P1234(SB), 8, $8
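
Editor's note on the constants above: they decode cleanly against the
radix-2^51 representation the amd64 code uses. _121666_213 = 996687872 =
121666 * 2^13, the curve constant pre-shifted by 13 bits (13 = 64 - 51, the
shift used throughout the reduction, as the name suggests); _2P0 =
0xFFFFFFFFFFFDA = 2*(2^51 - 19) and _2P1234 = 0xFFFFFFFFFFFFE = 2*(2^51 - 1)
are the limbs of 2p for p = 2^255 - 19, added before subtractions so that
intermediate limbs never go negative; and REDMASK51 in const_amd64.h is
2^51 - 1, the 51-bit limb mask.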
diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
new file mode 100644 (file)
index 0000000..45484d1
--- /dev/null
@@ -0,0 +1,88 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// func cswap(inout *[5]uint64, v uint64)
+TEXT ·cswap(SB),7,$0
+       MOVQ inout+0(FP),DI
+       MOVQ v+8(FP),SI
+
+       CMPQ SI,$1
+       MOVQ 0(DI),SI
+       MOVQ 80(DI),DX
+       MOVQ 8(DI),CX
+       MOVQ 88(DI),R8
+       MOVQ SI,R9
+       CMOVQEQ DX,SI
+       CMOVQEQ R9,DX
+       MOVQ CX,R9
+       CMOVQEQ R8,CX
+       CMOVQEQ R9,R8
+       MOVQ SI,0(DI)
+       MOVQ DX,80(DI)
+       MOVQ CX,8(DI)
+       MOVQ R8,88(DI)
+       MOVQ 16(DI),SI
+       MOVQ 96(DI),DX
+       MOVQ 24(DI),CX
+       MOVQ 104(DI),R8
+       MOVQ SI,R9
+       CMOVQEQ DX,SI
+       CMOVQEQ R9,DX
+       MOVQ CX,R9
+       CMOVQEQ R8,CX
+       CMOVQEQ R9,R8
+       MOVQ SI,16(DI)
+       MOVQ DX,96(DI)
+       MOVQ CX,24(DI)
+       MOVQ R8,104(DI)
+       MOVQ 32(DI),SI
+       MOVQ 112(DI),DX
+       MOVQ 40(DI),CX
+       MOVQ 120(DI),R8
+       MOVQ SI,R9
+       CMOVQEQ DX,SI
+       CMOVQEQ R9,DX
+       MOVQ CX,R9
+       CMOVQEQ R8,CX
+       CMOVQEQ R9,R8
+       MOVQ SI,32(DI)
+       MOVQ DX,112(DI)
+       MOVQ CX,40(DI)
+       MOVQ R8,120(DI)
+       MOVQ 48(DI),SI
+       MOVQ 128(DI),DX
+       MOVQ 56(DI),CX
+       MOVQ 136(DI),R8
+       MOVQ SI,R9
+       CMOVQEQ DX,SI
+       CMOVQEQ R9,DX
+       MOVQ CX,R9
+       CMOVQEQ R8,CX
+       CMOVQEQ R9,R8
+       MOVQ SI,48(DI)
+       MOVQ DX,128(DI)
+       MOVQ CX,56(DI)
+       MOVQ R8,136(DI)
+       MOVQ 64(DI),SI
+       MOVQ 144(DI),DX
+       MOVQ 72(DI),CX
+       MOVQ 152(DI),R8
+       MOVQ SI,R9
+       CMOVQEQ DX,SI
+       CMOVQEQ R9,DX
+       MOVQ CX,R9
+       CMOVQEQ R8,CX
+       CMOVQEQ R9,R8
+       MOVQ SI,64(DI)
+       MOVQ DX,144(DI)
+       MOVQ CX,72(DI)
+       MOVQ R8,152(DI)
+       MOVQ DI,AX
+       MOVQ SI,DX
+       RET
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
new file mode 100644 (file)
index 0000000..6918c47
--- /dev/null
@@ -0,0 +1,841 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// We have an implementation in amd64 assembly so this code is only run on
+// non-amd64 platforms. The amd64 assembly does not support gccgo.
+// +build !amd64 gccgo appengine
+
+package curve25519
+
+// This code is a port of the public domain, "ref10" implementation of
+// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
+
+// fieldElement represents an element of the field GF(2^255 - 19). An element
+// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
+// context.
+type fieldElement [10]int32
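+
+// (Editor's note: limb i has weight 2^ceil(25.5*i), so limbs alternate between
+// 26 and 25 bits; that is why the exponents above run 0, 26, 51, 77, 102, ...,
+// 230.)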
+
+func feZero(fe *fieldElement) {
+       for i := range fe {
+               fe[i] = 0
+       }
+}
+
+func feOne(fe *fieldElement) {
+       feZero(fe)
+       fe[0] = 1
+}
+
+func feAdd(dst, a, b *fieldElement) {
+       for i := range dst {
+               dst[i] = a[i] + b[i]
+       }
+}
+
+func feSub(dst, a, b *fieldElement) {
+       for i := range dst {
+               dst[i] = a[i] - b[i]
+       }
+}
+
+func feCopy(dst, src *fieldElement) {
+       for i := range dst {
+               dst[i] = src[i]
+       }
+}
+
+// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
+//
+// Preconditions: b in {0,1}.
+func feCSwap(f, g *fieldElement, b int32) {
+       var x fieldElement
+       b = -b
+       for i := range x {
+               x[i] = b & (f[i] ^ g[i])
+       }
+
+       for i := range f {
+               f[i] ^= x[i]
+       }
+       for i := range g {
+               g[i] ^= x[i]
+       }
+}
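+
+// Editor's note: a worked instance of the mask trick above. For b == 1,
+// b = -b produces the all-ones word (two's complement), so x[i] = f[i]^g[i]
+// and the two XOR loops swap f and g; for b == 0 the mask is zero, x[i] == 0,
+// and the loops are no-ops. The same instructions execute either way, so the
+// swap leaks nothing about the secret bit through timing.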
+
+// load3 reads a 24-bit, little-endian value from in.
+func load3(in []byte) int64 {
+       var r int64
+       r = int64(in[0])
+       r |= int64(in[1]) << 8
+       r |= int64(in[2]) << 16
+       return r
+}
+
+// load4 reads a 32-bit, little-endian value from in.
+func load4(in []byte) int64 {
+       var r int64
+       r = int64(in[0])
+       r |= int64(in[1]) << 8
+       r |= int64(in[2]) << 16
+       r |= int64(in[3]) << 24
+       return r
+}
+
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+       h0 := load4(src[:])
+       h1 := load3(src[4:]) << 6
+       h2 := load3(src[7:]) << 5
+       h3 := load3(src[10:]) << 3
+       h4 := load3(src[13:]) << 2
+       h5 := load4(src[16:])
+       h6 := load3(src[20:]) << 7
+       h7 := load3(src[23:]) << 5
+       h8 := load3(src[26:]) << 4
+       h9 := load3(src[29:]) << 2
+
+       var carry [10]int64
+       carry[9] = (h9 + 1<<24) >> 25
+       h0 += carry[9] * 19
+       h9 -= carry[9] << 25
+       carry[1] = (h1 + 1<<24) >> 25
+       h2 += carry[1]
+       h1 -= carry[1] << 25
+       carry[3] = (h3 + 1<<24) >> 25
+       h4 += carry[3]
+       h3 -= carry[3] << 25
+       carry[5] = (h5 + 1<<24) >> 25
+       h6 += carry[5]
+       h5 -= carry[5] << 25
+       carry[7] = (h7 + 1<<24) >> 25
+       h8 += carry[7]
+       h7 -= carry[7] << 25
+
+       carry[0] = (h0 + 1<<25) >> 26
+       h1 += carry[0]
+       h0 -= carry[0] << 26
+       carry[2] = (h2 + 1<<25) >> 26
+       h3 += carry[2]
+       h2 -= carry[2] << 26
+       carry[4] = (h4 + 1<<25) >> 26
+       h5 += carry[4]
+       h4 -= carry[4] << 26
+       carry[6] = (h6 + 1<<25) >> 26
+       h7 += carry[6]
+       h6 -= carry[6] << 26
+       carry[8] = (h8 + 1<<25) >> 26
+       h9 += carry[8]
+       h8 -= carry[8] << 26
+
+       dst[0] = int32(h0)
+       dst[1] = int32(h1)
+       dst[2] = int32(h2)
+       dst[3] = int32(h3)
+       dst[4] = int32(h4)
+       dst[5] = int32(h5)
+       dst[6] = int32(h6)
+       dst[7] = int32(h7)
+       dst[8] = int32(h8)
+       dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+       var carry [10]int32
+
+       q := (19*h[9] + (1 << 24)) >> 25
+       q = (h[0] + q) >> 26
+       q = (h[1] + q) >> 25
+       q = (h[2] + q) >> 26
+       q = (h[3] + q) >> 25
+       q = (h[4] + q) >> 26
+       q = (h[5] + q) >> 25
+       q = (h[6] + q) >> 26
+       q = (h[7] + q) >> 25
+       q = (h[8] + q) >> 26
+       q = (h[9] + q) >> 25
+
+       // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+       h[0] += 19 * q
+       // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+       carry[0] = h[0] >> 26
+       h[1] += carry[0]
+       h[0] -= carry[0] << 26
+       carry[1] = h[1] >> 25
+       h[2] += carry[1]
+       h[1] -= carry[1] << 25
+       carry[2] = h[2] >> 26
+       h[3] += carry[2]
+       h[2] -= carry[2] << 26
+       carry[3] = h[3] >> 25
+       h[4] += carry[3]
+       h[3] -= carry[3] << 25
+       carry[4] = h[4] >> 26
+       h[5] += carry[4]
+       h[4] -= carry[4] << 26
+       carry[5] = h[5] >> 25
+       h[6] += carry[5]
+       h[5] -= carry[5] << 25
+       carry[6] = h[6] >> 26
+       h[7] += carry[6]
+       h[6] -= carry[6] << 26
+       carry[7] = h[7] >> 25
+       h[8] += carry[7]
+       h[7] -= carry[7] << 25
+       carry[8] = h[8] >> 26
+       h[9] += carry[8]
+       h[8] -= carry[8] << 26
+       carry[9] = h[9] >> 25
+       h[9] -= carry[9] << 25
+       // h10 = carry9
+
+       // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+       // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+       // evidently 2^255 h10-2^255 q = 0.
+       // Goal: Output h[0]+...+2^230 h[9].
+
+       s[0] = byte(h[0] >> 0)
+       s[1] = byte(h[0] >> 8)
+       s[2] = byte(h[0] >> 16)
+       s[3] = byte((h[0] >> 24) | (h[1] << 2))
+       s[4] = byte(h[1] >> 6)
+       s[5] = byte(h[1] >> 14)
+       s[6] = byte((h[1] >> 22) | (h[2] << 3))
+       s[7] = byte(h[2] >> 5)
+       s[8] = byte(h[2] >> 13)
+       s[9] = byte((h[2] >> 21) | (h[3] << 5))
+       s[10] = byte(h[3] >> 3)
+       s[11] = byte(h[3] >> 11)
+       s[12] = byte((h[3] >> 19) | (h[4] << 6))
+       s[13] = byte(h[4] >> 2)
+       s[14] = byte(h[4] >> 10)
+       s[15] = byte(h[4] >> 18)
+       s[16] = byte(h[5] >> 0)
+       s[17] = byte(h[5] >> 8)
+       s[18] = byte(h[5] >> 16)
+       s[19] = byte((h[5] >> 24) | (h[6] << 1))
+       s[20] = byte(h[6] >> 7)
+       s[21] = byte(h[6] >> 15)
+       s[22] = byte((h[6] >> 23) | (h[7] << 3))
+       s[23] = byte(h[7] >> 5)
+       s[24] = byte(h[7] >> 13)
+       s[25] = byte((h[7] >> 21) | (h[8] << 4))
+       s[26] = byte(h[8] >> 4)
+       s[27] = byte(h[8] >> 12)
+       s[28] = byte((h[8] >> 20) | (h[9] << 6))
+       s[29] = byte(h[9] >> 2)
+       s[30] = byte(h[9] >> 10)
+       s[31] = byte(h[9] >> 18)
+}
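+
+// Editor's note, the proof above in brief: q is floor(h/p); adding 19q to
+// h[0] and discarding the final carry out of h[9] (weight 2^255) subtracts
+// p*q, leaving the canonical representative in [0, p-1], which the last
+// block packs into 32 little-endian bytes.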
+
+// feMul calculates h = f * g
+// Can overlap h with f or g.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//    |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Notes on implementation strategy:
+//
+// Using schoolbook multiplication.
+// Karatsuba would save a little in some cost models.
+//
+// Most multiplications by 2 and 19 are 32-bit precomputations;
+// cheaper than 64-bit postcomputations.
+//
+// There is one remaining multiplication by 19 in the carry chain;
+// one *19 precomputation can be merged into this,
+// but the resulting data flow is considerably less clean.
+//
+// There are 12 carries below.
+// 10 of them are 2-way parallelizable and vectorizable.
+// Can get away with 11 carries, but then data flow is much deeper.
+//
+// With tighter constraints on the inputs, the carries could be squeezed into int32.
+func feMul(h, f, g *fieldElement) {
+       f0 := f[0]
+       f1 := f[1]
+       f2 := f[2]
+       f3 := f[3]
+       f4 := f[4]
+       f5 := f[5]
+       f6 := f[6]
+       f7 := f[7]
+       f8 := f[8]
+       f9 := f[9]
+       g0 := g[0]
+       g1 := g[1]
+       g2 := g[2]
+       g3 := g[3]
+       g4 := g[4]
+       g5 := g[5]
+       g6 := g[6]
+       g7 := g[7]
+       g8 := g[8]
+       g9 := g[9]
+       g1_19 := 19 * g1 // 1.4*2^29
+       g2_19 := 19 * g2 // 1.4*2^30; still ok
+       g3_19 := 19 * g3
+       g4_19 := 19 * g4
+       g5_19 := 19 * g5
+       g6_19 := 19 * g6
+       g7_19 := 19 * g7
+       g8_19 := 19 * g8
+       g9_19 := 19 * g9
+       f1_2 := 2 * f1
+       f3_2 := 2 * f3
+       f5_2 := 2 * f5
+       f7_2 := 2 * f7
+       f9_2 := 2 * f9
+       f0g0 := int64(f0) * int64(g0)
+       f0g1 := int64(f0) * int64(g1)
+       f0g2 := int64(f0) * int64(g2)
+       f0g3 := int64(f0) * int64(g3)
+       f0g4 := int64(f0) * int64(g4)
+       f0g5 := int64(f0) * int64(g5)
+       f0g6 := int64(f0) * int64(g6)
+       f0g7 := int64(f0) * int64(g7)
+       f0g8 := int64(f0) * int64(g8)
+       f0g9 := int64(f0) * int64(g9)
+       f1g0 := int64(f1) * int64(g0)
+       f1g1_2 := int64(f1_2) * int64(g1)
+       f1g2 := int64(f1) * int64(g2)
+       f1g3_2 := int64(f1_2) * int64(g3)
+       f1g4 := int64(f1) * int64(g4)
+       f1g5_2 := int64(f1_2) * int64(g5)
+       f1g6 := int64(f1) * int64(g6)
+       f1g7_2 := int64(f1_2) * int64(g7)
+       f1g8 := int64(f1) * int64(g8)
+       f1g9_38 := int64(f1_2) * int64(g9_19)
+       f2g0 := int64(f2) * int64(g0)
+       f2g1 := int64(f2) * int64(g1)
+       f2g2 := int64(f2) * int64(g2)
+       f2g3 := int64(f2) * int64(g3)
+       f2g4 := int64(f2) * int64(g4)
+       f2g5 := int64(f2) * int64(g5)
+       f2g6 := int64(f2) * int64(g6)
+       f2g7 := int64(f2) * int64(g7)
+       f2g8_19 := int64(f2) * int64(g8_19)
+       f2g9_19 := int64(f2) * int64(g9_19)
+       f3g0 := int64(f3) * int64(g0)
+       f3g1_2 := int64(f3_2) * int64(g1)
+       f3g2 := int64(f3) * int64(g2)
+       f3g3_2 := int64(f3_2) * int64(g3)
+       f3g4 := int64(f3) * int64(g4)
+       f3g5_2 := int64(f3_2) * int64(g5)
+       f3g6 := int64(f3) * int64(g6)
+       f3g7_38 := int64(f3_2) * int64(g7_19)
+       f3g8_19 := int64(f3) * int64(g8_19)
+       f3g9_38 := int64(f3_2) * int64(g9_19)
+       f4g0 := int64(f4) * int64(g0)
+       f4g1 := int64(f4) * int64(g1)
+       f4g2 := int64(f4) * int64(g2)
+       f4g3 := int64(f4) * int64(g3)
+       f4g4 := int64(f4) * int64(g4)
+       f4g5 := int64(f4) * int64(g5)
+       f4g6_19 := int64(f4) * int64(g6_19)
+       f4g7_19 := int64(f4) * int64(g7_19)
+       f4g8_19 := int64(f4) * int64(g8_19)
+       f4g9_19 := int64(f4) * int64(g9_19)
+       f5g0 := int64(f5) * int64(g0)
+       f5g1_2 := int64(f5_2) * int64(g1)
+       f5g2 := int64(f5) * int64(g2)
+       f5g3_2 := int64(f5_2) * int64(g3)
+       f5g4 := int64(f5) * int64(g4)
+       f5g5_38 := int64(f5_2) * int64(g5_19)
+       f5g6_19 := int64(f5) * int64(g6_19)
+       f5g7_38 := int64(f5_2) * int64(g7_19)
+       f5g8_19 := int64(f5) * int64(g8_19)
+       f5g9_38 := int64(f5_2) * int64(g9_19)
+       f6g0 := int64(f6) * int64(g0)
+       f6g1 := int64(f6) * int64(g1)
+       f6g2 := int64(f6) * int64(g2)
+       f6g3 := int64(f6) * int64(g3)
+       f6g4_19 := int64(f6) * int64(g4_19)
+       f6g5_19 := int64(f6) * int64(g5_19)
+       f6g6_19 := int64(f6) * int64(g6_19)
+       f6g7_19 := int64(f6) * int64(g7_19)
+       f6g8_19 := int64(f6) * int64(g8_19)
+       f6g9_19 := int64(f6) * int64(g9_19)
+       f7g0 := int64(f7) * int64(g0)
+       f7g1_2 := int64(f7_2) * int64(g1)
+       f7g2 := int64(f7) * int64(g2)
+       f7g3_38 := int64(f7_2) * int64(g3_19)
+       f7g4_19 := int64(f7) * int64(g4_19)
+       f7g5_38 := int64(f7_2) * int64(g5_19)
+       f7g6_19 := int64(f7) * int64(g6_19)
+       f7g7_38 := int64(f7_2) * int64(g7_19)
+       f7g8_19 := int64(f7) * int64(g8_19)
+       f7g9_38 := int64(f7_2) * int64(g9_19)
+       f8g0 := int64(f8) * int64(g0)
+       f8g1 := int64(f8) * int64(g1)
+       f8g2_19 := int64(f8) * int64(g2_19)
+       f8g3_19 := int64(f8) * int64(g3_19)
+       f8g4_19 := int64(f8) * int64(g4_19)
+       f8g5_19 := int64(f8) * int64(g5_19)
+       f8g6_19 := int64(f8) * int64(g6_19)
+       f8g7_19 := int64(f8) * int64(g7_19)
+       f8g8_19 := int64(f8) * int64(g8_19)
+       f8g9_19 := int64(f8) * int64(g9_19)
+       f9g0 := int64(f9) * int64(g0)
+       f9g1_38 := int64(f9_2) * int64(g1_19)
+       f9g2_19 := int64(f9) * int64(g2_19)
+       f9g3_38 := int64(f9_2) * int64(g3_19)
+       f9g4_19 := int64(f9) * int64(g4_19)
+       f9g5_38 := int64(f9_2) * int64(g5_19)
+       f9g6_19 := int64(f9) * int64(g6_19)
+       f9g7_38 := int64(f9_2) * int64(g7_19)
+       f9g8_19 := int64(f9) * int64(g8_19)
+       f9g9_38 := int64(f9_2) * int64(g9_19)
+       h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
+       h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
+       h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
+       h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
+       h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
+       h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
+       h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
+       h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
+       h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
+       h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
+       var carry [10]int64
+
+       // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
+       //   i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
+       // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
+       //   i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
+
+       carry[0] = (h0 + (1 << 25)) >> 26
+       h1 += carry[0]
+       h0 -= carry[0] << 26
+       carry[4] = (h4 + (1 << 25)) >> 26
+       h5 += carry[4]
+       h4 -= carry[4] << 26
+       // |h0| <= 2^25
+       // |h4| <= 2^25
+       // |h1| <= 1.51*2^58
+       // |h5| <= 1.51*2^58
+
+       carry[1] = (h1 + (1 << 24)) >> 25
+       h2 += carry[1]
+       h1 -= carry[1] << 25
+       carry[5] = (h5 + (1 << 24)) >> 25
+       h6 += carry[5]
+       h5 -= carry[5] << 25
+       // |h1| <= 2^24; from now on fits into int32
+       // |h5| <= 2^24; from now on fits into int32
+       // |h2| <= 1.21*2^59
+       // |h6| <= 1.21*2^59
+
+       carry[2] = (h2 + (1 << 25)) >> 26
+       h3 += carry[2]
+       h2 -= carry[2] << 26
+       carry[6] = (h6 + (1 << 25)) >> 26
+       h7 += carry[6]
+       h6 -= carry[6] << 26
+       // |h2| <= 2^25; from now on fits into int32 unchanged
+       // |h6| <= 2^25; from now on fits into int32 unchanged
+       // |h3| <= 1.51*2^58
+       // |h7| <= 1.51*2^58
+
+       carry[3] = (h3 + (1 << 24)) >> 25
+       h4 += carry[3]
+       h3 -= carry[3] << 25
+       carry[7] = (h7 + (1 << 24)) >> 25
+       h8 += carry[7]
+       h7 -= carry[7] << 25
+       // |h3| <= 2^24; from now on fits into int32 unchanged
+       // |h7| <= 2^24; from now on fits into int32 unchanged
+       // |h4| <= 1.52*2^33
+       // |h8| <= 1.52*2^33
+
+       carry[4] = (h4 + (1 << 25)) >> 26
+       h5 += carry[4]
+       h4 -= carry[4] << 26
+       carry[8] = (h8 + (1 << 25)) >> 26
+       h9 += carry[8]
+       h8 -= carry[8] << 26
+       // |h4| <= 2^25; from now on fits into int32 unchanged
+       // |h8| <= 2^25; from now on fits into int32 unchanged
+       // |h5| <= 1.01*2^24
+       // |h9| <= 1.51*2^58
+
+       carry[9] = (h9 + (1 << 24)) >> 25
+       h0 += carry[9] * 19
+       h9 -= carry[9] << 25
+       // |h9| <= 2^24; from now on fits into int32 unchanged
+       // |h0| <= 1.8*2^37
+
+       carry[0] = (h0 + (1 << 25)) >> 26
+       h1 += carry[0]
+       h0 -= carry[0] << 26
+       // |h0| <= 2^25; from now on fits into int32 unchanged
+       // |h1| <= 1.01*2^24
+
+       h[0] = int32(h0)
+       h[1] = int32(h1)
+       h[2] = int32(h2)
+       h[3] = int32(h3)
+       h[4] = int32(h4)
+       h[5] = int32(h5)
+       h[6] = int32(h6)
+       h[7] = int32(h7)
+       h[8] = int32(h8)
+       h[9] = int32(h9)
+}
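+
+// Editor's note on the *19 and *38 factors in feMul: since 2^255 = 19 (mod p),
+// any partial product whose combined limb weight reaches 2^255 wraps around
+// multiplied by 19, and 38 = 2*19 also absorbs the extra factor of 2 that a
+// product of two odd-index limbs needs (their weights sum one bit above the
+// destination limb's weight in this 25.5-bit radix).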
+
+// feSquare calculates h = f*f. Can overlap h with f.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func feSquare(h, f *fieldElement) {
+       f0 := f[0]
+       f1 := f[1]
+       f2 := f[2]
+       f3 := f[3]
+       f4 := f[4]
+       f5 := f[5]
+       f6 := f[6]
+       f7 := f[7]
+       f8 := f[8]
+       f9 := f[9]
+       f0_2 := 2 * f0
+       f1_2 := 2 * f1
+       f2_2 := 2 * f2
+       f3_2 := 2 * f3
+       f4_2 := 2 * f4
+       f5_2 := 2 * f5
+       f6_2 := 2 * f6
+       f7_2 := 2 * f7
+       f5_38 := 38 * f5 // 1.31*2^30
+       f6_19 := 19 * f6 // 1.31*2^30
+       f7_38 := 38 * f7 // 1.31*2^30
+       f8_19 := 19 * f8 // 1.31*2^30
+       f9_38 := 38 * f9 // 1.31*2^30
+       f0f0 := int64(f0) * int64(f0)
+       f0f1_2 := int64(f0_2) * int64(f1)
+       f0f2_2 := int64(f0_2) * int64(f2)
+       f0f3_2 := int64(f0_2) * int64(f3)
+       f0f4_2 := int64(f0_2) * int64(f4)
+       f0f5_2 := int64(f0_2) * int64(f5)
+       f0f6_2 := int64(f0_2) * int64(f6)
+       f0f7_2 := int64(f0_2) * int64(f7)
+       f0f8_2 := int64(f0_2) * int64(f8)
+       f0f9_2 := int64(f0_2) * int64(f9)
+       f1f1_2 := int64(f1_2) * int64(f1)
+       f1f2_2 := int64(f1_2) * int64(f2)
+       f1f3_4 := int64(f1_2) * int64(f3_2)
+       f1f4_2 := int64(f1_2) * int64(f4)
+       f1f5_4 := int64(f1_2) * int64(f5_2)
+       f1f6_2 := int64(f1_2) * int64(f6)
+       f1f7_4 := int64(f1_2) * int64(f7_2)
+       f1f8_2 := int64(f1_2) * int64(f8)
+       f1f9_76 := int64(f1_2) * int64(f9_38)
+       f2f2 := int64(f2) * int64(f2)
+       f2f3_2 := int64(f2_2) * int64(f3)
+       f2f4_2 := int64(f2_2) * int64(f4)
+       f2f5_2 := int64(f2_2) * int64(f5)
+       f2f6_2 := int64(f2_2) * int64(f6)
+       f2f7_2 := int64(f2_2) * int64(f7)
+       f2f8_38 := int64(f2_2) * int64(f8_19)
+       f2f9_38 := int64(f2) * int64(f9_38)
+       f3f3_2 := int64(f3_2) * int64(f3)
+       f3f4_2 := int64(f3_2) * int64(f4)
+       f3f5_4 := int64(f3_2) * int64(f5_2)
+       f3f6_2 := int64(f3_2) * int64(f6)
+       f3f7_76 := int64(f3_2) * int64(f7_38)
+       f3f8_38 := int64(f3_2) * int64(f8_19)
+       f3f9_76 := int64(f3_2) * int64(f9_38)
+       f4f4 := int64(f4) * int64(f4)
+       f4f5_2 := int64(f4_2) * int64(f5)
+       f4f6_38 := int64(f4_2) * int64(f6_19)
+       f4f7_38 := int64(f4) * int64(f7_38)
+       f4f8_38 := int64(f4_2) * int64(f8_19)
+       f4f9_38 := int64(f4) * int64(f9_38)
+       f5f5_38 := int64(f5) * int64(f5_38)
+       f5f6_38 := int64(f5_2) * int64(f6_19)
+       f5f7_76 := int64(f5_2) * int64(f7_38)
+       f5f8_38 := int64(f5_2) * int64(f8_19)
+       f5f9_76 := int64(f5_2) * int64(f9_38)
+       f6f6_19 := int64(f6) * int64(f6_19)
+       f6f7_38 := int64(f6) * int64(f7_38)
+       f6f8_38 := int64(f6_2) * int64(f8_19)
+       f6f9_38 := int64(f6) * int64(f9_38)
+       f7f7_38 := int64(f7) * int64(f7_38)
+       f7f8_38 := int64(f7_2) * int64(f8_19)
+       f7f9_76 := int64(f7_2) * int64(f9_38)
+       f8f8_19 := int64(f8) * int64(f8_19)
+       f8f9_38 := int64(f8) * int64(f9_38)
+       f9f9_38 := int64(f9) * int64(f9_38)
+       h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
+       h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
+       h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
+       h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
+       h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
+       h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
+       h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
+       h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
+       h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
+       h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
+       var carry [10]int64
+
+       carry[0] = (h0 + (1 << 25)) >> 26
+       h1 += carry[0]
+       h0 -= carry[0] << 26
+       carry[4] = (h4 + (1 << 25)) >> 26
+       h5 += carry[4]
+       h4 -= carry[4] << 26
+
+       carry[1] = (h1 + (1 << 24)) >> 25
+       h2 += carry[1]
+       h1 -= carry[1] << 25
+       carry[5] = (h5 + (1 << 24)) >> 25
+       h6 += carry[5]
+       h5 -= carry[5] << 25
+
+       carry[2] = (h2 + (1 << 25)) >> 26
+       h3 += carry[2]
+       h2 -= carry[2] << 26
+       carry[6] = (h6 + (1 << 25)) >> 26
+       h7 += carry[6]
+       h6 -= carry[6] << 26
+
+       carry[3] = (h3 + (1 << 24)) >> 25
+       h4 += carry[3]
+       h3 -= carry[3] << 25
+       carry[7] = (h7 + (1 << 24)) >> 25
+       h8 += carry[7]
+       h7 -= carry[7] << 25
+
+       carry[4] = (h4 + (1 << 25)) >> 26
+       h5 += carry[4]
+       h4 -= carry[4] << 26
+       carry[8] = (h8 + (1 << 25)) >> 26
+       h9 += carry[8]
+       h8 -= carry[8] << 26
+
+       carry[9] = (h9 + (1 << 24)) >> 25
+       h0 += carry[9] * 19
+       h9 -= carry[9] << 25
+
+       carry[0] = (h0 + (1 << 25)) >> 26
+       h1 += carry[0]
+       h0 -= carry[0] << 26
+
+       h[0] = int32(h0)
+       h[1] = int32(h1)
+       h[2] = int32(h2)
+       h[3] = int32(h3)
+       h[4] = int32(h4)
+       h[5] = int32(h5)
+       h[6] = int32(h6)
+       h[7] = int32(h7)
+       h[8] = int32(h8)
+       h[9] = int32(h9)
+}
+
+// feMul121666 calculates h = f * 121666. Can overlap h with f.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func feMul121666(h, f *fieldElement) {
+       h0 := int64(f[0]) * 121666
+       h1 := int64(f[1]) * 121666
+       h2 := int64(f[2]) * 121666
+       h3 := int64(f[3]) * 121666
+       h4 := int64(f[4]) * 121666
+       h5 := int64(f[5]) * 121666
+       h6 := int64(f[6]) * 121666
+       h7 := int64(f[7]) * 121666
+       h8 := int64(f[8]) * 121666
+       h9 := int64(f[9]) * 121666
+       var carry [10]int64
+
+       carry[9] = (h9 + (1 << 24)) >> 25
+       h0 += carry[9] * 19
+       h9 -= carry[9] << 25
+       carry[1] = (h1 + (1 << 24)) >> 25
+       h2 += carry[1]
+       h1 -= carry[1] << 25
+       carry[3] = (h3 + (1 << 24)) >> 25
+       h4 += carry[3]
+       h3 -= carry[3] << 25
+       carry[5] = (h5 + (1 << 24)) >> 25
+       h6 += carry[5]
+       h5 -= carry[5] << 25
+       carry[7] = (h7 + (1 << 24)) >> 25
+       h8 += carry[7]
+       h7 -= carry[7] << 25
+
+       carry[0] = (h0 + (1 << 25)) >> 26
+       h1 += carry[0]
+       h0 -= carry[0] << 26
+       carry[2] = (h2 + (1 << 25)) >> 26
+       h3 += carry[2]
+       h2 -= carry[2] << 26
+       carry[4] = (h4 + (1 << 25)) >> 26
+       h5 += carry[4]
+       h4 -= carry[4] << 26
+       carry[6] = (h6 + (1 << 25)) >> 26
+       h7 += carry[6]
+       h6 -= carry[6] << 26
+       carry[8] = (h8 + (1 << 25)) >> 26
+       h9 += carry[8]
+       h8 -= carry[8] << 26
+
+       h[0] = int32(h0)
+       h[1] = int32(h1)
+       h[2] = int32(h2)
+       h[3] = int32(h3)
+       h[4] = int32(h4)
+       h[5] = int32(h5)
+       h[6] = int32(h6)
+       h[7] = int32(h7)
+       h[8] = int32(h8)
+       h[9] = int32(h9)
+}
+
+// feInvert sets out = z^-1.
+func feInvert(out, z *fieldElement) {
+       var t0, t1, t2, t3 fieldElement
+       var i int
+
+       feSquare(&t0, z)
+       for i = 1; i < 1; i++ {
+               feSquare(&t0, &t0)
+       }
+       feSquare(&t1, &t0)
+       for i = 1; i < 2; i++ {
+               feSquare(&t1, &t1)
+       }
+       feMul(&t1, z, &t1)
+       feMul(&t0, &t0, &t1)
+       feSquare(&t2, &t0)
+       for i = 1; i < 1; i++ {
+               feSquare(&t2, &t2)
+       }
+       feMul(&t1, &t1, &t2)
+       feSquare(&t2, &t1)
+       for i = 1; i < 5; i++ {
+               feSquare(&t2, &t2)
+       }
+       feMul(&t1, &t2, &t1)
+       feSquare(&t2, &t1)
+       for i = 1; i < 10; i++ {
+               feSquare(&t2, &t2)
+       }
+       feMul(&t2, &t2, &t1)
+       feSquare(&t3, &t2)
+       for i = 1; i < 20; i++ {
+               feSquare(&t3, &t3)
+       }
+       feMul(&t2, &t3, &t2)
+       feSquare(&t2, &t2)
+       for i = 1; i < 10; i++ {
+               feSquare(&t2, &t2)
+       }
+       feMul(&t1, &t2, &t1)
+       feSquare(&t2, &t1)
+       for i = 1; i < 50; i++ {
+               feSquare(&t2, &t2)
+       }
+       feMul(&t2, &t2, &t1)
+       feSquare(&t3, &t2)
+       for i = 1; i < 100; i++ {
+               feSquare(&t3, &t3)
+       }
+       feMul(&t2, &t3, &t2)
+       feSquare(&t2, &t2)
+       for i = 1; i < 50; i++ {
+               feSquare(&t2, &t2)
+       }
+       feMul(&t1, &t2, &t1)
+       feSquare(&t1, &t1)
+       for i = 1; i < 5; i++ {
+               feSquare(&t1, &t1)
+       }
+       feMul(out, &t1, &t0)
+}
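+
+// Editor's note: the chain above is a fixed square-and-multiply schedule that
+// computes z^(2^255 - 21) = z^(p-2) for p = 2^255 - 19, which is z^-1 by
+// Fermat's little theorem. The degenerate "for i = 1; i < 1" loops never
+// execute and appear to be kept only to mirror the shape of the ref10
+// original.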
+
+func scalarMult(out, in, base *[32]byte) {
+       var e [32]byte
+
+       copy(e[:], in[:])
+       e[0] &= 248
+       e[31] &= 127
+       e[31] |= 64
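+       // Editor's note: this is the standard X25519 "clamping": clear the
+       // low 3 bits so the scalar is a multiple of the cofactor 8, clear bit
+       // 255, and set bit 254 so the ladder below always walks exactly 255
+       // fixed bit positions.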
+
+       var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
+       feFromBytes(&x1, base)
+       feOne(&x2)
+       feCopy(&x3, &x1)
+       feOne(&z3)
+
+       swap := int32(0)
+       for pos := 254; pos >= 0; pos-- {
+               b := e[pos/8] >> uint(pos&7)
+               b &= 1
+               swap ^= int32(b)
+               feCSwap(&x2, &x3, swap)
+               feCSwap(&z2, &z3, swap)
+               swap = int32(b)
+
+               feSub(&tmp0, &x3, &z3)
+               feSub(&tmp1, &x2, &z2)
+               feAdd(&x2, &x2, &z2)
+               feAdd(&z2, &x3, &z3)
+               feMul(&z3, &tmp0, &x2)
+               feMul(&z2, &z2, &tmp1)
+               feSquare(&tmp0, &tmp1)
+               feSquare(&tmp1, &x2)
+               feAdd(&x3, &z3, &z2)
+               feSub(&z2, &z3, &z2)
+               feMul(&x2, &tmp1, &tmp0)
+               feSub(&tmp1, &tmp1, &tmp0)
+               feSquare(&z2, &z2)
+               feMul121666(&z3, &tmp1)
+               feSquare(&x3, &x3)
+               feAdd(&tmp0, &tmp0, &z3)
+               feMul(&z3, &x1, &z2)
+               feMul(&z2, &tmp1, &tmp0)
+       }
+
+       feCSwap(&x2, &x3, swap)
+       feCSwap(&z2, &z3, swap)
+
+       feInvert(&z2, &z2)
+       feMul(&x2, &x2, &z2)
+       feToBytes(out, &x2)
+}
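
Editor's note on scalarMult above: the pos loop is a Montgomery ladder (one
differential addition and one doubling per scalar bit), with feCSwap standing
in for branching, so the sequence of field operations is independent of the
secret scalar; the closing feInvert/feMul pair converts the projective result
x2/z2 back to an affine x coordinate before feToBytes serializes it.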
diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go
new file mode 100644 (file)
index 0000000..ebeea3c
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package curve25519 provides an implementation of scalar multiplication on
+// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html
+package curve25519 // import "golang.org/x/crypto/curve25519"
+
+// basePoint is the x coordinate of the generator of the curve.
+var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+// ScalarMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points and all values are in little-endian form.
+func ScalarMult(dst, in, base *[32]byte) {
+       scalarMult(dst, in, base)
+}
+
+// ScalarBaseMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points, base is the standard generator and all values
+// are in little-endian form.
+func ScalarBaseMult(dst, in *[32]byte) {
+       ScalarMult(dst, in, &basePoint)
+}
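
Editor's note: doc.go above is the package's entire public surface, so a short
sketch of the intended Diffie-Hellman flow may help. This is an illustration
against the vendored API shown here, not upstream documentation; key material
comes from crypto/rand, error handling is elided, and newer releases of this
package expose a higher-level X25519 function that real code should prefer:

        package main

        import (
                "crypto/rand"
                "fmt"

                "golang.org/x/crypto/curve25519"
        )

        func main() {
                var aPriv, bPriv [32]byte
                rand.Read(aPriv[:]) // errors ignored in this sketch;
                rand.Read(bPriv[:]) // scalarMult clamps the raw bytes itself

                // Each party derives a public key from the generator...
                var aPub, bPub [32]byte
                curve25519.ScalarBaseMult(&aPub, &aPriv)
                curve25519.ScalarBaseMult(&bPub, &bPriv)

                // ...then combines its own private key with the peer's public key.
                var s1, s2 [32]byte
                curve25519.ScalarMult(&s1, &aPriv, &bPub)
                curve25519.ScalarMult(&s2, &bPriv, &aPub)

                fmt.Println(s1 == s2) // true: both sides derive the same secret
        }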
diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
new file mode 100644 (file)
index 0000000..536479b
--- /dev/null
@@ -0,0 +1,73 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func freeze(inout *[5]uint64)
+TEXT ·freeze(SB),7,$0-8
+       MOVQ inout+0(FP), DI
+
+       MOVQ 0(DI),SI
+       MOVQ 8(DI),DX
+       MOVQ 16(DI),CX
+       MOVQ 24(DI),R8
+       MOVQ 32(DI),R9
+       MOVQ $REDMASK51,AX
+       MOVQ AX,R10
+       SUBQ $18,R10
+       MOVQ $3,R11
+REDUCELOOP:
+       MOVQ SI,R12
+       SHRQ $51,R12
+       ANDQ AX,SI
+       ADDQ R12,DX
+       MOVQ DX,R12
+       SHRQ $51,R12
+       ANDQ AX,DX
+       ADDQ R12,CX
+       MOVQ CX,R12
+       SHRQ $51,R12
+       ANDQ AX,CX
+       ADDQ R12,R8
+       MOVQ R8,R12
+       SHRQ $51,R12
+       ANDQ AX,R8
+       ADDQ R12,R9
+       MOVQ R9,R12
+       SHRQ $51,R12
+       ANDQ AX,R9
+       IMUL3Q $19,R12,R12
+       ADDQ R12,SI
+       SUBQ $1,R11
+       JA REDUCELOOP
+       MOVQ $1,R12
+       CMPQ R10,SI
+       CMOVQLT R11,R12
+       CMPQ AX,DX
+       CMOVQNE R11,R12
+       CMPQ AX,CX
+       CMOVQNE R11,R12
+       CMPQ AX,R8
+       CMOVQNE R11,R12
+       CMPQ AX,R9
+       CMOVQNE R11,R12
+       NEGQ R12
+       ANDQ R12,AX
+       ANDQ R12,R10
+       SUBQ R10,SI
+       SUBQ AX,DX
+       SUBQ AX,CX
+       SUBQ AX,R8
+       SUBQ AX,R9
+       MOVQ SI,0(DI)
+       MOVQ DX,8(DI)
+       MOVQ CX,16(DI)
+       MOVQ R8,24(DI)
+       MOVQ R9,32(DI)
+       RET
diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
new file mode 100644 (file)
index 0000000..7074e5c
--- /dev/null
@@ -0,0 +1,1377 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func ladderstep(inout *[5][5]uint64)
+TEXT ·ladderstep(SB),0,$296-8
+       MOVQ inout+0(FP),DI
+
+       MOVQ 40(DI),SI
+       MOVQ 48(DI),DX
+       MOVQ 56(DI),CX
+       MOVQ 64(DI),R8
+       MOVQ 72(DI),R9
+       MOVQ SI,AX
+       MOVQ DX,R10
+       MOVQ CX,R11
+       MOVQ R8,R12
+       MOVQ R9,R13
+       ADDQ ·_2P0(SB),AX
+       ADDQ ·_2P1234(SB),R10
+       ADDQ ·_2P1234(SB),R11
+       ADDQ ·_2P1234(SB),R12
+       ADDQ ·_2P1234(SB),R13
+       ADDQ 80(DI),SI
+       ADDQ 88(DI),DX
+       ADDQ 96(DI),CX
+       ADDQ 104(DI),R8
+       ADDQ 112(DI),R9
+       SUBQ 80(DI),AX
+       SUBQ 88(DI),R10
+       SUBQ 96(DI),R11
+       SUBQ 104(DI),R12
+       SUBQ 112(DI),R13
+       MOVQ SI,0(SP)
+       MOVQ DX,8(SP)
+       MOVQ CX,16(SP)
+       MOVQ R8,24(SP)
+       MOVQ R9,32(SP)
+       MOVQ AX,40(SP)
+       MOVQ R10,48(SP)
+       MOVQ R11,56(SP)
+       MOVQ R12,64(SP)
+       MOVQ R13,72(SP)
+       MOVQ 40(SP),AX
+       MULQ 40(SP)
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 40(SP),AX
+       SHLQ $1,AX
+       MULQ 48(SP)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 40(SP),AX
+       SHLQ $1,AX
+       MULQ 56(SP)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 40(SP),AX
+       SHLQ $1,AX
+       MULQ 64(SP)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 40(SP),AX
+       SHLQ $1,AX
+       MULQ 72(SP)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 48(SP),AX
+       MULQ 48(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 48(SP),AX
+       SHLQ $1,AX
+       MULQ 56(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 48(SP),AX
+       SHLQ $1,AX
+       MULQ 64(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 48(SP),DX
+       IMUL3Q $38,DX,AX
+       MULQ 72(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 56(SP),AX
+       MULQ 56(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 56(SP),DX
+       IMUL3Q $38,DX,AX
+       MULQ 64(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 56(SP),DX
+       IMUL3Q $38,DX,AX
+       MULQ 72(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 64(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 64(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 64(SP),DX
+       IMUL3Q $38,DX,AX
+       MULQ 72(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 72(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 72(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ $REDMASK51,DX
+       SHLQ $13,CX:SI
+       ANDQ DX,SI
+       SHLQ $13,R9:R8
+       ANDQ DX,R8
+       ADDQ CX,R8
+       SHLQ $13,R11:R10
+       ANDQ DX,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ DX,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ DX,R14
+       ADDQ R13,R14
+       IMUL3Q $19,R15,CX
+       ADDQ CX,SI
+       MOVQ SI,CX
+       SHRQ $51,CX
+       ADDQ R8,CX
+       ANDQ DX,SI
+       MOVQ CX,R8
+       SHRQ $51,CX
+       ADDQ R10,CX
+       ANDQ DX,R8
+       MOVQ CX,R9
+       SHRQ $51,CX
+       ADDQ R12,CX
+       ANDQ DX,R9
+       MOVQ CX,AX
+       SHRQ $51,CX
+       ADDQ R14,CX
+       ANDQ DX,AX
+       MOVQ CX,R10
+       SHRQ $51,CX
+       IMUL3Q $19,CX,CX
+       ADDQ CX,SI
+       ANDQ DX,R10
+       MOVQ SI,80(SP)
+       MOVQ R8,88(SP)
+       MOVQ R9,96(SP)
+       MOVQ AX,104(SP)
+       MOVQ R10,112(SP)
+       MOVQ 0(SP),AX
+       MULQ 0(SP)
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 0(SP),AX
+       SHLQ $1,AX
+       MULQ 8(SP)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 0(SP),AX
+       SHLQ $1,AX
+       MULQ 16(SP)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 0(SP),AX
+       SHLQ $1,AX
+       MULQ 24(SP)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 0(SP),AX
+       SHLQ $1,AX
+       MULQ 32(SP)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 8(SP),AX
+       MULQ 8(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 8(SP),AX
+       SHLQ $1,AX
+       MULQ 16(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 8(SP),AX
+       SHLQ $1,AX
+       MULQ 24(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 8(SP),DX
+       IMUL3Q $38,DX,AX
+       MULQ 32(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 16(SP),AX
+       MULQ 16(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 16(SP),DX
+       IMUL3Q $38,DX,AX
+       MULQ 24(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 16(SP),DX
+       IMUL3Q $38,DX,AX
+       MULQ 32(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 24(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 24(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 24(SP),DX
+       IMUL3Q $38,DX,AX
+       MULQ 32(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 32(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 32(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ $REDMASK51,DX
+       SHLQ $13,CX:SI
+       ANDQ DX,SI
+       SHLQ $13,R9:R8
+       ANDQ DX,R8
+       ADDQ CX,R8
+       SHLQ $13,R11:R10
+       ANDQ DX,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ DX,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ DX,R14
+       ADDQ R13,R14
+       IMUL3Q $19,R15,CX
+       ADDQ CX,SI
+       MOVQ SI,CX
+       SHRQ $51,CX
+       ADDQ R8,CX
+       ANDQ DX,SI
+       MOVQ CX,R8
+       SHRQ $51,CX
+       ADDQ R10,CX
+       ANDQ DX,R8
+       MOVQ CX,R9
+       SHRQ $51,CX
+       ADDQ R12,CX
+       ANDQ DX,R9
+       MOVQ CX,AX
+       SHRQ $51,CX
+       ADDQ R14,CX
+       ANDQ DX,AX
+       MOVQ CX,R10
+       SHRQ $51,CX
+       IMUL3Q $19,CX,CX
+       ADDQ CX,SI
+       ANDQ DX,R10
+       MOVQ SI,120(SP)
+       MOVQ R8,128(SP)
+       MOVQ R9,136(SP)
+       MOVQ AX,144(SP)
+       MOVQ R10,152(SP)
+       MOVQ SI,SI
+       MOVQ R8,DX
+       MOVQ R9,CX
+       MOVQ AX,R8
+       MOVQ R10,R9
+       ADDQ ·_2P0(SB),SI
+       ADDQ ·_2P1234(SB),DX
+       ADDQ ·_2P1234(SB),CX
+       ADDQ ·_2P1234(SB),R8
+       ADDQ ·_2P1234(SB),R9
+       SUBQ 80(SP),SI
+       SUBQ 88(SP),DX
+       SUBQ 96(SP),CX
+       SUBQ 104(SP),R8
+       SUBQ 112(SP),R9
+       MOVQ SI,160(SP)
+       MOVQ DX,168(SP)
+       MOVQ CX,176(SP)
+       MOVQ R8,184(SP)
+       MOVQ R9,192(SP)
+       MOVQ 120(DI),SI
+       MOVQ 128(DI),DX
+       MOVQ 136(DI),CX
+       MOVQ 144(DI),R8
+       MOVQ 152(DI),R9
+       MOVQ SI,AX
+       MOVQ DX,R10
+       MOVQ CX,R11
+       MOVQ R8,R12
+       MOVQ R9,R13
+       ADDQ ·_2P0(SB),AX
+       ADDQ ·_2P1234(SB),R10
+       ADDQ ·_2P1234(SB),R11
+       ADDQ ·_2P1234(SB),R12
+       ADDQ ·_2P1234(SB),R13
+       ADDQ 160(DI),SI
+       ADDQ 168(DI),DX
+       ADDQ 176(DI),CX
+       ADDQ 184(DI),R8
+       ADDQ 192(DI),R9
+       SUBQ 160(DI),AX
+       SUBQ 168(DI),R10
+       SUBQ 176(DI),R11
+       SUBQ 184(DI),R12
+       SUBQ 192(DI),R13
+       MOVQ SI,200(SP)
+       MOVQ DX,208(SP)
+       MOVQ CX,216(SP)
+       MOVQ R8,224(SP)
+       MOVQ R9,232(SP)
+       MOVQ AX,240(SP)
+       MOVQ R10,248(SP)
+       MOVQ R11,256(SP)
+       MOVQ R12,264(SP)
+       MOVQ R13,272(SP)
+       MOVQ 224(SP),SI
+       IMUL3Q $19,SI,AX
+       MOVQ AX,280(SP)
+       MULQ 56(SP)
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 232(SP),DX
+       IMUL3Q $19,DX,AX
+       MOVQ AX,288(SP)
+       MULQ 48(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 200(SP),AX
+       MULQ 40(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 200(SP),AX
+       MULQ 48(SP)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 200(SP),AX
+       MULQ 56(SP)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 200(SP),AX
+       MULQ 64(SP)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 200(SP),AX
+       MULQ 72(SP)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 208(SP),AX
+       MULQ 40(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 208(SP),AX
+       MULQ 48(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 208(SP),AX
+       MULQ 56(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 208(SP),AX
+       MULQ 64(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 208(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 72(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 216(SP),AX
+       MULQ 40(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 216(SP),AX
+       MULQ 48(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 216(SP),AX
+       MULQ 56(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 216(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 64(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 216(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 72(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 224(SP),AX
+       MULQ 40(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 224(SP),AX
+       MULQ 48(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 280(SP),AX
+       MULQ 64(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 280(SP),AX
+       MULQ 72(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 232(SP),AX
+       MULQ 40(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 288(SP),AX
+       MULQ 56(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 288(SP),AX
+       MULQ 64(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 288(SP),AX
+       MULQ 72(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ $REDMASK51,DX
+       SHLQ $13,CX:SI
+       ANDQ DX,SI
+       SHLQ $13,R9:R8
+       ANDQ DX,R8
+       ADDQ CX,R8
+       SHLQ $13,R11:R10
+       ANDQ DX,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ DX,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ DX,R14
+       ADDQ R13,R14
+       IMUL3Q $19,R15,CX
+       ADDQ CX,SI
+       MOVQ SI,CX
+       SHRQ $51,CX
+       ADDQ R8,CX
+       MOVQ CX,R8
+       SHRQ $51,CX
+       ANDQ DX,SI
+       ADDQ R10,CX
+       MOVQ CX,R9
+       SHRQ $51,CX
+       ANDQ DX,R8
+       ADDQ R12,CX
+       MOVQ CX,AX
+       SHRQ $51,CX
+       ANDQ DX,R9
+       ADDQ R14,CX
+       MOVQ CX,R10
+       SHRQ $51,CX
+       ANDQ DX,AX
+       IMUL3Q $19,CX,CX
+       ADDQ CX,SI
+       ANDQ DX,R10
+       MOVQ SI,40(SP)
+       MOVQ R8,48(SP)
+       MOVQ R9,56(SP)
+       MOVQ AX,64(SP)
+       MOVQ R10,72(SP)
+       MOVQ 264(SP),SI
+       IMUL3Q $19,SI,AX
+       MOVQ AX,200(SP)
+       MULQ 16(SP)
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 272(SP),DX
+       IMUL3Q $19,DX,AX
+       MOVQ AX,208(SP)
+       MULQ 8(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 240(SP),AX
+       MULQ 0(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 240(SP),AX
+       MULQ 8(SP)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 240(SP),AX
+       MULQ 16(SP)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 240(SP),AX
+       MULQ 24(SP)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 240(SP),AX
+       MULQ 32(SP)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 248(SP),AX
+       MULQ 0(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 248(SP),AX
+       MULQ 8(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 248(SP),AX
+       MULQ 16(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 248(SP),AX
+       MULQ 24(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 248(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 32(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 256(SP),AX
+       MULQ 0(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 256(SP),AX
+       MULQ 8(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 256(SP),AX
+       MULQ 16(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 256(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 24(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 256(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 32(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 264(SP),AX
+       MULQ 0(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 264(SP),AX
+       MULQ 8(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 200(SP),AX
+       MULQ 24(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 200(SP),AX
+       MULQ 32(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 272(SP),AX
+       MULQ 0(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 208(SP),AX
+       MULQ 16(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 208(SP),AX
+       MULQ 24(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 208(SP),AX
+       MULQ 32(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ $REDMASK51,DX
+       SHLQ $13,CX:SI
+       ANDQ DX,SI
+       SHLQ $13,R9:R8
+       ANDQ DX,R8
+       ADDQ CX,R8
+       SHLQ $13,R11:R10
+       ANDQ DX,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ DX,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ DX,R14
+       ADDQ R13,R14
+       IMUL3Q $19,R15,CX
+       ADDQ CX,SI
+       MOVQ SI,CX
+       SHRQ $51,CX
+       ADDQ R8,CX
+       MOVQ CX,R8
+       SHRQ $51,CX
+       ANDQ DX,SI
+       ADDQ R10,CX
+       MOVQ CX,R9
+       SHRQ $51,CX
+       ANDQ DX,R8
+       ADDQ R12,CX
+       MOVQ CX,AX
+       SHRQ $51,CX
+       ANDQ DX,R9
+       ADDQ R14,CX
+       MOVQ CX,R10
+       SHRQ $51,CX
+       ANDQ DX,AX
+       IMUL3Q $19,CX,CX
+       ADDQ CX,SI
+       ANDQ DX,R10
+       MOVQ SI,DX
+       MOVQ R8,CX
+       MOVQ R9,R11
+       MOVQ AX,R12
+       MOVQ R10,R13
+       ADDQ ·_2P0(SB),DX
+       ADDQ ·_2P1234(SB),CX
+       ADDQ ·_2P1234(SB),R11
+       ADDQ ·_2P1234(SB),R12
+       ADDQ ·_2P1234(SB),R13
+       ADDQ 40(SP),SI
+       ADDQ 48(SP),R8
+       ADDQ 56(SP),R9
+       ADDQ 64(SP),AX
+       ADDQ 72(SP),R10
+       SUBQ 40(SP),DX
+       SUBQ 48(SP),CX
+       SUBQ 56(SP),R11
+       SUBQ 64(SP),R12
+       SUBQ 72(SP),R13
+       MOVQ SI,120(DI)
+       MOVQ R8,128(DI)
+       MOVQ R9,136(DI)
+       MOVQ AX,144(DI)
+       MOVQ R10,152(DI)
+       MOVQ DX,160(DI)
+       MOVQ CX,168(DI)
+       MOVQ R11,176(DI)
+       MOVQ R12,184(DI)
+       MOVQ R13,192(DI)
+       MOVQ 120(DI),AX
+       MULQ 120(DI)
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 120(DI),AX
+       SHLQ $1,AX
+       MULQ 128(DI)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 120(DI),AX
+       SHLQ $1,AX
+       MULQ 136(DI)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 120(DI),AX
+       SHLQ $1,AX
+       MULQ 144(DI)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 120(DI),AX
+       SHLQ $1,AX
+       MULQ 152(DI)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 128(DI),AX
+       MULQ 128(DI)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 128(DI),AX
+       SHLQ $1,AX
+       MULQ 136(DI)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 128(DI),AX
+       SHLQ $1,AX
+       MULQ 144(DI)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 128(DI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 152(DI)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 136(DI),AX
+       MULQ 136(DI)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 136(DI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 144(DI)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 136(DI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 152(DI)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 144(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 144(DI)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 144(DI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 152(DI)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 152(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 152(DI)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ $REDMASK51,DX
+       SHLQ $13,CX:SI
+       ANDQ DX,SI
+       SHLQ $13,R9:R8
+       ANDQ DX,R8
+       ADDQ CX,R8
+       SHLQ $13,R11:R10
+       ANDQ DX,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ DX,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ DX,R14
+       ADDQ R13,R14
+       IMUL3Q $19,R15,CX
+       ADDQ CX,SI
+       MOVQ SI,CX
+       SHRQ $51,CX
+       ADDQ R8,CX
+       ANDQ DX,SI
+       MOVQ CX,R8
+       SHRQ $51,CX
+       ADDQ R10,CX
+       ANDQ DX,R8
+       MOVQ CX,R9
+       SHRQ $51,CX
+       ADDQ R12,CX
+       ANDQ DX,R9
+       MOVQ CX,AX
+       SHRQ $51,CX
+       ADDQ R14,CX
+       ANDQ DX,AX
+       MOVQ CX,R10
+       SHRQ $51,CX
+       IMUL3Q $19,CX,CX
+       ADDQ CX,SI
+       ANDQ DX,R10
+       MOVQ SI,120(DI)
+       MOVQ R8,128(DI)
+       MOVQ R9,136(DI)
+       MOVQ AX,144(DI)
+       MOVQ R10,152(DI)
+       MOVQ 160(DI),AX
+       MULQ 160(DI)
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 160(DI),AX
+       SHLQ $1,AX
+       MULQ 168(DI)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 160(DI),AX
+       SHLQ $1,AX
+       MULQ 176(DI)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 160(DI),AX
+       SHLQ $1,AX
+       MULQ 184(DI)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 160(DI),AX
+       SHLQ $1,AX
+       MULQ 192(DI)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 168(DI),AX
+       MULQ 168(DI)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 168(DI),AX
+       SHLQ $1,AX
+       MULQ 176(DI)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 168(DI),AX
+       SHLQ $1,AX
+       MULQ 184(DI)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 168(DI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 192(DI)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 176(DI),AX
+       MULQ 176(DI)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 176(DI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 184(DI)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 176(DI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 192(DI)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 184(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 184(DI)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 184(DI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 192(DI)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 192(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 192(DI)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ $REDMASK51,DX
+       SHLQ $13,CX:SI
+       ANDQ DX,SI
+       SHLQ $13,R9:R8
+       ANDQ DX,R8
+       ADDQ CX,R8
+       SHLQ $13,R11:R10
+       ANDQ DX,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ DX,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ DX,R14
+       ADDQ R13,R14
+       IMUL3Q $19,R15,CX
+       ADDQ CX,SI
+       MOVQ SI,CX
+       SHRQ $51,CX
+       ADDQ R8,CX
+       ANDQ DX,SI
+       MOVQ CX,R8
+       SHRQ $51,CX
+       ADDQ R10,CX
+       ANDQ DX,R8
+       MOVQ CX,R9
+       SHRQ $51,CX
+       ADDQ R12,CX
+       ANDQ DX,R9
+       MOVQ CX,AX
+       SHRQ $51,CX
+       ADDQ R14,CX
+       ANDQ DX,AX
+       MOVQ CX,R10
+       SHRQ $51,CX
+       IMUL3Q $19,CX,CX
+       ADDQ CX,SI
+       ANDQ DX,R10
+       MOVQ SI,160(DI)
+       MOVQ R8,168(DI)
+       MOVQ R9,176(DI)
+       MOVQ AX,184(DI)
+       MOVQ R10,192(DI)
+       MOVQ 184(DI),SI
+       IMUL3Q $19,SI,AX
+       MOVQ AX,0(SP)
+       MULQ 16(DI)
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 192(DI),DX
+       IMUL3Q $19,DX,AX
+       MOVQ AX,8(SP)
+       MULQ 8(DI)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 160(DI),AX
+       MULQ 0(DI)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 160(DI),AX
+       MULQ 8(DI)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 160(DI),AX
+       MULQ 16(DI)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 160(DI),AX
+       MULQ 24(DI)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 160(DI),AX
+       MULQ 32(DI)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 168(DI),AX
+       MULQ 0(DI)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 168(DI),AX
+       MULQ 8(DI)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 168(DI),AX
+       MULQ 16(DI)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 168(DI),AX
+       MULQ 24(DI)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 168(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 32(DI)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 176(DI),AX
+       MULQ 0(DI)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 176(DI),AX
+       MULQ 8(DI)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 176(DI),AX
+       MULQ 16(DI)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 176(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 24(DI)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 176(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 32(DI)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 184(DI),AX
+       MULQ 0(DI)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 184(DI),AX
+       MULQ 8(DI)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 0(SP),AX
+       MULQ 24(DI)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 0(SP),AX
+       MULQ 32(DI)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 192(DI),AX
+       MULQ 0(DI)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 8(SP),AX
+       MULQ 16(DI)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 8(SP),AX
+       MULQ 24(DI)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 8(SP),AX
+       MULQ 32(DI)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ $REDMASK51,DX
+       SHLQ $13,CX:SI
+       ANDQ DX,SI
+       SHLQ $13,R9:R8
+       ANDQ DX,R8
+       ADDQ CX,R8
+       SHLQ $13,R11:R10
+       ANDQ DX,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ DX,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ DX,R14
+       ADDQ R13,R14
+       IMUL3Q $19,R15,CX
+       ADDQ CX,SI
+       MOVQ SI,CX
+       SHRQ $51,CX
+       ADDQ R8,CX
+       MOVQ CX,R8
+       SHRQ $51,CX
+       ANDQ DX,SI
+       ADDQ R10,CX
+       MOVQ CX,R9
+       SHRQ $51,CX
+       ANDQ DX,R8
+       ADDQ R12,CX
+       MOVQ CX,AX
+       SHRQ $51,CX
+       ANDQ DX,R9
+       ADDQ R14,CX
+       MOVQ CX,R10
+       SHRQ $51,CX
+       ANDQ DX,AX
+       IMUL3Q $19,CX,CX
+       ADDQ CX,SI
+       ANDQ DX,R10
+       MOVQ SI,160(DI)
+       MOVQ R8,168(DI)
+       MOVQ R9,176(DI)
+       MOVQ AX,184(DI)
+       MOVQ R10,192(DI)
+       MOVQ 144(SP),SI
+       IMUL3Q $19,SI,AX
+       MOVQ AX,0(SP)
+       MULQ 96(SP)
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 152(SP),DX
+       IMUL3Q $19,DX,AX
+       MOVQ AX,8(SP)
+       MULQ 88(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 120(SP),AX
+       MULQ 80(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 120(SP),AX
+       MULQ 88(SP)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 120(SP),AX
+       MULQ 96(SP)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 120(SP),AX
+       MULQ 104(SP)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 120(SP),AX
+       MULQ 112(SP)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 128(SP),AX
+       MULQ 80(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 128(SP),AX
+       MULQ 88(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 128(SP),AX
+       MULQ 96(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 128(SP),AX
+       MULQ 104(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 128(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 112(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 136(SP),AX
+       MULQ 80(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 136(SP),AX
+       MULQ 88(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 136(SP),AX
+       MULQ 96(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 136(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 104(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 136(SP),DX
+       IMUL3Q $19,DX,AX
+       MULQ 112(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 144(SP),AX
+       MULQ 80(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 144(SP),AX
+       MULQ 88(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 0(SP),AX
+       MULQ 104(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 0(SP),AX
+       MULQ 112(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 152(SP),AX
+       MULQ 80(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 8(SP),AX
+       MULQ 96(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 8(SP),AX
+       MULQ 104(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 8(SP),AX
+       MULQ 112(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ $REDMASK51,DX
+       SHLQ $13,CX:SI
+       ANDQ DX,SI
+       SHLQ $13,R9:R8
+       ANDQ DX,R8
+       ADDQ CX,R8
+       SHLQ $13,R11:R10
+       ANDQ DX,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ DX,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ DX,R14
+       ADDQ R13,R14
+       IMUL3Q $19,R15,CX
+       ADDQ CX,SI
+       MOVQ SI,CX
+       SHRQ $51,CX
+       ADDQ R8,CX
+       MOVQ CX,R8
+       SHRQ $51,CX
+       ANDQ DX,SI
+       ADDQ R10,CX
+       MOVQ CX,R9
+       SHRQ $51,CX
+       ANDQ DX,R8
+       ADDQ R12,CX
+       MOVQ CX,AX
+       SHRQ $51,CX
+       ANDQ DX,R9
+       ADDQ R14,CX
+       MOVQ CX,R10
+       SHRQ $51,CX
+       ANDQ DX,AX
+       IMUL3Q $19,CX,CX
+       ADDQ CX,SI
+       ANDQ DX,R10
+       MOVQ SI,40(DI)
+       MOVQ R8,48(DI)
+       MOVQ R9,56(DI)
+       MOVQ AX,64(DI)
+       MOVQ R10,72(DI)
+       MOVQ 160(SP),AX
+       MULQ ·_121666_213(SB)
+       SHRQ $13,AX
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 168(SP),AX
+       MULQ ·_121666_213(SB)
+       SHRQ $13,AX
+       ADDQ AX,CX
+       MOVQ DX,R8
+       MOVQ 176(SP),AX
+       MULQ ·_121666_213(SB)
+       SHRQ $13,AX
+       ADDQ AX,R8
+       MOVQ DX,R9
+       MOVQ 184(SP),AX
+       MULQ ·_121666_213(SB)
+       SHRQ $13,AX
+       ADDQ AX,R9
+       MOVQ DX,R10
+       MOVQ 192(SP),AX
+       MULQ ·_121666_213(SB)
+       SHRQ $13,AX
+       ADDQ AX,R10
+       IMUL3Q $19,DX,DX
+       ADDQ DX,SI
+       ADDQ 80(SP),SI
+       ADDQ 88(SP),CX
+       ADDQ 96(SP),R8
+       ADDQ 104(SP),R9
+       ADDQ 112(SP),R10
+       MOVQ SI,80(DI)
+       MOVQ CX,88(DI)
+       MOVQ R8,96(DI)
+       MOVQ R9,104(DI)
+       MOVQ R10,112(DI)
+       MOVQ 104(DI),SI
+       IMUL3Q $19,SI,AX
+       MOVQ AX,0(SP)
+       MULQ 176(SP)
+       MOVQ AX,SI
+       MOVQ DX,CX
+       MOVQ 112(DI),DX
+       IMUL3Q $19,DX,AX
+       MOVQ AX,8(SP)
+       MULQ 168(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 80(DI),AX
+       MULQ 160(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 80(DI),AX
+       MULQ 168(SP)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 80(DI),AX
+       MULQ 176(SP)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 80(DI),AX
+       MULQ 184(SP)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 80(DI),AX
+       MULQ 192(SP)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 88(DI),AX
+       MULQ 160(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 88(DI),AX
+       MULQ 168(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 88(DI),AX
+       MULQ 176(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 88(DI),AX
+       MULQ 184(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 88(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 192(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 96(DI),AX
+       MULQ 160(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 96(DI),AX
+       MULQ 168(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 96(DI),AX
+       MULQ 176(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 96(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 184(SP)
+       ADDQ AX,SI
+       ADCQ DX,CX
+       MOVQ 96(DI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 192(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 104(DI),AX
+       MULQ 160(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 104(DI),AX
+       MULQ 168(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 0(SP),AX
+       MULQ 184(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 0(SP),AX
+       MULQ 192(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 112(DI),AX
+       MULQ 160(SP)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 8(SP),AX
+       MULQ 176(SP)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 8(SP),AX
+       MULQ 184(SP)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 8(SP),AX
+       MULQ 192(SP)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ $REDMASK51,DX
+       SHLQ $13,CX:SI
+       ANDQ DX,SI
+       SHLQ $13,R9:R8
+       ANDQ DX,R8
+       ADDQ CX,R8
+       SHLQ $13,R11:R10
+       ANDQ DX,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ DX,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ DX,R14
+       ADDQ R13,R14
+       IMUL3Q $19,R15,CX
+       ADDQ CX,SI
+       MOVQ SI,CX
+       SHRQ $51,CX
+       ADDQ R8,CX
+       MOVQ CX,R8
+       SHRQ $51,CX
+       ANDQ DX,SI
+       ADDQ R10,CX
+       MOVQ CX,R9
+       SHRQ $51,CX
+       ANDQ DX,R8
+       ADDQ R12,CX
+       MOVQ CX,AX
+       SHRQ $51,CX
+       ANDQ DX,R9
+       ADDQ R14,CX
+       MOVQ CX,R10
+       SHRQ $51,CX
+       ANDQ DX,AX
+       IMUL3Q $19,CX,CX
+       ADDQ CX,SI
+       ANDQ DX,R10
+       MOVQ SI,80(DI)
+       MOVQ R8,88(DI)
+       MOVQ R9,96(DI)
+       MOVQ AX,104(DI)
+       MOVQ R10,112(DI)
+       RET
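The pattern repeated throughout the ladderstep assembly above — MULQ into a 128-bit register pair, SHLQ $13 across the pair, ANDQ with $REDMASK51, then an IMUL3Q $19 fold of the top carry — is radix-2^51 reduction for the field p = 2^255 - 19. A minimal Go sketch of that carry step, assuming limbs held in five uint64s; the names `carryReduce` and `mask51` are illustrative, not from the vendored sources:

```go
package main

import "fmt"

// carryReduce runs one pass of the carry chain over five 51-bit
// limbs, folding the carry out of the top limb back into the bottom
// one multiplied by 19, since 2^255 ≡ 19 (mod 2^255 - 19). This
// mirrors the SHRQ $51 / IMUL3Q $19 sequences in the assembly above
// (which runs the chain more than once to tighten the bound).
func carryReduce(r *[5]uint64) {
	const mask51 = uint64(1)<<51 - 1
	for i := 0; i < 4; i++ {
		r[i+1] += r[i] >> 51
		r[i] &= mask51
	}
	r[0] += 19 * (r[4] >> 51)
	r[4] &= mask51
}

func main() {
	x := [5]uint64{1 << 60, 0, 0, 0, 1 << 55} // deliberately unreduced
	carryReduce(&x)
	fmt.Println(x)
}
```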
diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
new file mode 100644 (file)
index 0000000..5822bd5
--- /dev/null
@@ -0,0 +1,240 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package curve25519
+
+// These functions are implemented in the .s files. The names of the functions
+// in the rest of the file are also taken from the SUPERCOP sources to help
+// people following along.
+
+//go:noescape
+
+func cswap(inout *[5]uint64, v uint64)
+
+//go:noescape
+
+func ladderstep(inout *[5][5]uint64)
+
+//go:noescape
+
+func freeze(inout *[5]uint64)
+
+//go:noescape
+
+func mul(dest, a, b *[5]uint64)
+
+//go:noescape
+
+func square(out, in *[5]uint64)
+
+// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
+func mladder(xr, zr *[5]uint64, s *[32]byte) {
+       var work [5][5]uint64
+
+       work[0] = *xr
+       setint(&work[1], 1)
+       setint(&work[2], 0)
+       work[3] = *xr
+       setint(&work[4], 1)
+
+       j := uint(6)
+       var prevbit byte
+
+       for i := 31; i >= 0; i-- {
+               for j < 8 {
+                       bit := ((*s)[i] >> j) & 1
+                       swap := bit ^ prevbit
+                       prevbit = bit
+                       cswap(&work[1], uint64(swap))
+                       ladderstep(&work)
+                       j--
+               }
+               j = 7
+       }
+
+       *xr = work[1]
+       *zr = work[2]
+}
+
+func scalarMult(out, in, base *[32]byte) {
+       var e [32]byte
+       copy(e[:], (*in)[:])
+       e[0] &= 248
+       e[31] &= 127
+       e[31] |= 64
+
+       var t, z [5]uint64
+       unpack(&t, base)
+       mladder(&t, &z, &e)
+       invert(&z, &z)
+       mul(&t, &t, &z)
+       pack(out, &t)
+}
+
+func setint(r *[5]uint64, v uint64) {
+       r[0] = v
+       r[1] = 0
+       r[2] = 0
+       r[3] = 0
+       r[4] = 0
+}
+
+// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
+// order.
+func unpack(r *[5]uint64, x *[32]byte) {
+       r[0] = uint64(x[0]) |
+               uint64(x[1])<<8 |
+               uint64(x[2])<<16 |
+               uint64(x[3])<<24 |
+               uint64(x[4])<<32 |
+               uint64(x[5])<<40 |
+               uint64(x[6]&7)<<48
+
+       r[1] = uint64(x[6])>>3 |
+               uint64(x[7])<<5 |
+               uint64(x[8])<<13 |
+               uint64(x[9])<<21 |
+               uint64(x[10])<<29 |
+               uint64(x[11])<<37 |
+               uint64(x[12]&63)<<45
+
+       r[2] = uint64(x[12])>>6 |
+               uint64(x[13])<<2 |
+               uint64(x[14])<<10 |
+               uint64(x[15])<<18 |
+               uint64(x[16])<<26 |
+               uint64(x[17])<<34 |
+               uint64(x[18])<<42 |
+               uint64(x[19]&1)<<50
+
+       r[3] = uint64(x[19])>>1 |
+               uint64(x[20])<<7 |
+               uint64(x[21])<<15 |
+               uint64(x[22])<<23 |
+               uint64(x[23])<<31 |
+               uint64(x[24])<<39 |
+               uint64(x[25]&15)<<47
+
+       r[4] = uint64(x[25])>>4 |
+               uint64(x[26])<<4 |
+               uint64(x[27])<<12 |
+               uint64(x[28])<<20 |
+               uint64(x[29])<<28 |
+               uint64(x[30])<<36 |
+               uint64(x[31]&127)<<44
+}
+
+// pack sets out = x where out is the usual, little-endian form of the 5,
+// 51-bit limbs in x.
+func pack(out *[32]byte, x *[5]uint64) {
+       t := *x
+       freeze(&t)
+
+       out[0] = byte(t[0])
+       out[1] = byte(t[0] >> 8)
+       out[2] = byte(t[0] >> 16)
+       out[3] = byte(t[0] >> 24)
+       out[4] = byte(t[0] >> 32)
+       out[5] = byte(t[0] >> 40)
+       out[6] = byte(t[0] >> 48)
+
+       out[6] ^= byte(t[1]<<3) & 0xf8
+       out[7] = byte(t[1] >> 5)
+       out[8] = byte(t[1] >> 13)
+       out[9] = byte(t[1] >> 21)
+       out[10] = byte(t[1] >> 29)
+       out[11] = byte(t[1] >> 37)
+       out[12] = byte(t[1] >> 45)
+
+       out[12] ^= byte(t[2]<<6) & 0xc0
+       out[13] = byte(t[2] >> 2)
+       out[14] = byte(t[2] >> 10)
+       out[15] = byte(t[2] >> 18)
+       out[16] = byte(t[2] >> 26)
+       out[17] = byte(t[2] >> 34)
+       out[18] = byte(t[2] >> 42)
+       out[19] = byte(t[2] >> 50)
+
+       out[19] ^= byte(t[3]<<1) & 0xfe
+       out[20] = byte(t[3] >> 7)
+       out[21] = byte(t[3] >> 15)
+       out[22] = byte(t[3] >> 23)
+       out[23] = byte(t[3] >> 31)
+       out[24] = byte(t[3] >> 39)
+       out[25] = byte(t[3] >> 47)
+
+       out[25] ^= byte(t[4]<<4) & 0xf0
+       out[26] = byte(t[4] >> 4)
+       out[27] = byte(t[4] >> 12)
+       out[28] = byte(t[4] >> 20)
+       out[29] = byte(t[4] >> 28)
+       out[30] = byte(t[4] >> 36)
+       out[31] = byte(t[4] >> 44)
+}
+
+// invert calculates r = x^-1 mod p using Fermat's little theorem.
+func invert(r *[5]uint64, x *[5]uint64) {
+       var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
+
+       square(&z2, x)        /* 2 */
+       square(&t, &z2)       /* 4 */
+       square(&t, &t)        /* 8 */
+       mul(&z9, &t, x)       /* 9 */
+       mul(&z11, &z9, &z2)   /* 11 */
+       square(&t, &z11)      /* 22 */
+       mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
+
+       square(&t, &z2_5_0)      /* 2^6 - 2^1 */
+       for i := 1; i < 5; i++ { /* 2^10 - 2^5 */
+               square(&t, &t)
+       }
+       mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
+
+       square(&t, &z2_10_0)      /* 2^11 - 2^1 */
+       for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
+               square(&t, &t)
+       }
+       mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
+
+       square(&t, &z2_20_0)      /* 2^21 - 2^1 */
+       for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
+               square(&t, &t)
+       }
+       mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
+
+       square(&t, &t)            /* 2^41 - 2^1 */
+       for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
+               square(&t, &t)
+       }
+       mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
+
+       square(&t, &z2_50_0)      /* 2^51 - 2^1 */
+       for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
+               square(&t, &t)
+       }
+       mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
+
+       square(&t, &z2_100_0)      /* 2^101 - 2^1 */
+       for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
+               square(&t, &t)
+       }
+       mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
+
+       square(&t, &t)            /* 2^201 - 2^1 */
+       for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
+               square(&t, &t)
+       }
+       mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
+
+       square(&t, &t) /* 2^251 - 2^1 */
+       square(&t, &t) /* 2^252 - 2^2 */
+       square(&t, &t) /* 2^253 - 2^3 */
+
+       square(&t, &t) /* 2^254 - 2^4 */
+
+       square(&t, &t)   /* 2^255 - 2^5 */
+       mul(r, &t, &z11) /* 2^255 - 21 */
+}
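The comments in invert track an addition chain for the exponent 2^255 - 21 = p - 2, so the final mul leaves r = x^(p-2) = x^-1 by Fermat's little theorem. A quick, self-contained check of that identity with math/big — an illustration only, not part of the vendored code:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, the curve25519 field prime.
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))

	// Fermat's little theorem: x^(p-2) ≡ x^-1 (mod p), and
	// p-2 = 2^255 - 21 is exactly the exponent invert's chain builds.
	x := big.NewInt(9) // the curve25519 base-point u-coordinate
	inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)

	check := new(big.Int).Mul(x, inv)
	fmt.Println(check.Mod(check, p)) // 1
}
```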
diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
new file mode 100644 (file)
index 0000000..b162e65
--- /dev/null
@@ -0,0 +1,169 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func mul(dest, a, b *[5]uint64)
+TEXT ·mul(SB),0,$16-24
+       MOVQ dest+0(FP), DI
+       MOVQ a+8(FP), SI
+       MOVQ b+16(FP), DX
+
+       MOVQ DX,CX
+       MOVQ 24(SI),DX
+       IMUL3Q $19,DX,AX
+       MOVQ AX,0(SP)
+       MULQ 16(CX)
+       MOVQ AX,R8
+       MOVQ DX,R9
+       MOVQ 32(SI),DX
+       IMUL3Q $19,DX,AX
+       MOVQ AX,8(SP)
+       MULQ 8(CX)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 0(SI),AX
+       MULQ 0(CX)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 0(SI),AX
+       MULQ 8(CX)
+       MOVQ AX,R10
+       MOVQ DX,R11
+       MOVQ 0(SI),AX
+       MULQ 16(CX)
+       MOVQ AX,R12
+       MOVQ DX,R13
+       MOVQ 0(SI),AX
+       MULQ 24(CX)
+       MOVQ AX,R14
+       MOVQ DX,R15
+       MOVQ 0(SI),AX
+       MULQ 32(CX)
+       MOVQ AX,BX
+       MOVQ DX,BP
+       MOVQ 8(SI),AX
+       MULQ 0(CX)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 8(SI),AX
+       MULQ 8(CX)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 8(SI),AX
+       MULQ 16(CX)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 8(SI),AX
+       MULQ 24(CX)
+       ADDQ AX,BX
+       ADCQ DX,BP
+       MOVQ 8(SI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 32(CX)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 16(SI),AX
+       MULQ 0(CX)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 16(SI),AX
+       MULQ 8(CX)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 16(SI),AX
+       MULQ 16(CX)
+       ADDQ AX,BX
+       ADCQ DX,BP
+       MOVQ 16(SI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 24(CX)
+       ADDQ AX,R8
+       ADCQ DX,R9
+       MOVQ 16(SI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 32(CX)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 24(SI),AX
+       MULQ 0(CX)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ 24(SI),AX
+       MULQ 8(CX)
+       ADDQ AX,BX
+       ADCQ DX,BP
+       MOVQ 0(SP),AX
+       MULQ 24(CX)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 0(SP),AX
+       MULQ 32(CX)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 32(SI),AX
+       MULQ 0(CX)
+       ADDQ AX,BX
+       ADCQ DX,BP
+       MOVQ 8(SP),AX
+       MULQ 16(CX)
+       ADDQ AX,R10
+       ADCQ DX,R11
+       MOVQ 8(SP),AX
+       MULQ 24(CX)
+       ADDQ AX,R12
+       ADCQ DX,R13
+       MOVQ 8(SP),AX
+       MULQ 32(CX)
+       ADDQ AX,R14
+       ADCQ DX,R15
+       MOVQ $REDMASK51,SI
+       SHLQ $13,R9:R8
+       ANDQ SI,R8
+       SHLQ $13,R11:R10
+       ANDQ SI,R10
+       ADDQ R9,R10
+       SHLQ $13,R13:R12
+       ANDQ SI,R12
+       ADDQ R11,R12
+       SHLQ $13,R15:R14
+       ANDQ SI,R14
+       ADDQ R13,R14
+       SHLQ $13,BP:BX
+       ANDQ SI,BX
+       ADDQ R15,BX
+       IMUL3Q $19,BP,DX
+       ADDQ DX,R8
+       MOVQ R8,DX
+       SHRQ $51,DX
+       ADDQ R10,DX
+       MOVQ DX,CX
+       SHRQ $51,DX
+       ANDQ SI,R8
+       ADDQ R12,DX
+       MOVQ DX,R9
+       SHRQ $51,DX
+       ANDQ SI,CX
+       ADDQ R14,DX
+       MOVQ DX,AX
+       SHRQ $51,DX
+       ANDQ SI,R9
+       ADDQ BX,DX
+       MOVQ DX,R10
+       SHRQ $51,DX
+       ANDQ SI,AX
+       IMUL3Q $19,DX,DX
+       ADDQ DX,R8
+       ANDQ SI,R10
+       MOVQ R8,0(DI)
+       MOVQ CX,8(DI)
+       MOVQ R9,16(DI)
+       MOVQ AX,24(DI)
+       MOVQ R10,32(DI)
+       RET
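mul above is a schoolbook product over the five 51-bit limbs: any partial product whose limb indices sum to 5 or more is pre-multiplied by 19 (the IMUL3Q $19 instructions) so it can be folded into a lower column, because 2^255 ≡ 19 (mod p). A portable Go sketch of the same idea, assuming inputs already reduced below 2^52 per limb; `mul51` and `mask` are illustrative names, not from the sources:

```go
package curve25519sketch

import "math/bits"

// mul51 multiplies two field elements in radix 2^51, folding high
// columns down by 19 the way the assembly's IMUL3Q $19 does.
// Inputs are assumed reduced below 2^52 per limb so no 64-bit
// accumulator overflows; the result is nearly reduced, as in the
// assembly (full canonicalization is left to freeze).
func mul51(a, b *[5]uint64) [5]uint64 {
	const mask = uint64(1)<<51 - 1
	var hi, lo [5]uint64 // 128-bit accumulator per output column
	for i := 0; i < 5; i++ {
		for j := 0; j < 5; j++ {
			ai := a[i]
			if i+j >= 5 {
				ai *= 19 // 2^255 ≡ 19 (mod p) folds the column down
			}
			h, l := bits.Mul64(ai, b[j])
			var c uint64
			k := (i + j) % 5
			lo[k], c = bits.Add64(lo[k], l, 0)
			hi[k] += h + c
		}
	}
	// (hi<<13 | lo>>51) is the 128-bit column shifted right by 51,
	// matching the SHLQ $13,hi:lo / ANDQ $REDMASK51 pairs above.
	var r, carry [5]uint64
	for k := 0; k < 5; k++ {
		carry[k] = hi[k]<<13 | lo[k]>>51
		r[k] = lo[k] & mask
	}
	r[1] += carry[0]
	r[2] += carry[1]
	r[3] += carry[2]
	r[4] += carry[3]
	r[0] += 19 * carry[4]
	for k := 0; k < 4; k++ { // final light carry pass
		r[k+1] += r[k] >> 51
		r[k] &= mask
	}
	r[0] += 19 * (r[4] >> 51)
	r[4] &= mask
	return r
}
```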
diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
new file mode 100644 (file)
index 0000000..4e864a8
--- /dev/null
@@ -0,0 +1,132 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func square(out, in *[5]uint64)
+TEXT ·square(SB),7,$0-16
+       MOVQ out+0(FP), DI
+       MOVQ in+8(FP), SI
+
+       MOVQ 0(SI),AX
+       MULQ 0(SI)
+       MOVQ AX,CX
+       MOVQ DX,R8
+       MOVQ 0(SI),AX
+       SHLQ $1,AX
+       MULQ 8(SI)
+       MOVQ AX,R9
+       MOVQ DX,R10
+       MOVQ 0(SI),AX
+       SHLQ $1,AX
+       MULQ 16(SI)
+       MOVQ AX,R11
+       MOVQ DX,R12
+       MOVQ 0(SI),AX
+       SHLQ $1,AX
+       MULQ 24(SI)
+       MOVQ AX,R13
+       MOVQ DX,R14
+       MOVQ 0(SI),AX
+       SHLQ $1,AX
+       MULQ 32(SI)
+       MOVQ AX,R15
+       MOVQ DX,BX
+       MOVQ 8(SI),AX
+       MULQ 8(SI)
+       ADDQ AX,R11
+       ADCQ DX,R12
+       MOVQ 8(SI),AX
+       SHLQ $1,AX
+       MULQ 16(SI)
+       ADDQ AX,R13
+       ADCQ DX,R14
+       MOVQ 8(SI),AX
+       SHLQ $1,AX
+       MULQ 24(SI)
+       ADDQ AX,R15
+       ADCQ DX,BX
+       MOVQ 8(SI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 32(SI)
+       ADDQ AX,CX
+       ADCQ DX,R8
+       MOVQ 16(SI),AX
+       MULQ 16(SI)
+       ADDQ AX,R15
+       ADCQ DX,BX
+       MOVQ 16(SI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 24(SI)
+       ADDQ AX,CX
+       ADCQ DX,R8
+       MOVQ 16(SI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 32(SI)
+       ADDQ AX,R9
+       ADCQ DX,R10
+       MOVQ 24(SI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 24(SI)
+       ADDQ AX,R9
+       ADCQ DX,R10
+       MOVQ 24(SI),DX
+       IMUL3Q $38,DX,AX
+       MULQ 32(SI)
+       ADDQ AX,R11
+       ADCQ DX,R12
+       MOVQ 32(SI),DX
+       IMUL3Q $19,DX,AX
+       MULQ 32(SI)
+       ADDQ AX,R13
+       ADCQ DX,R14
+       MOVQ $REDMASK51,SI
+       SHLQ $13,R8:CX
+       ANDQ SI,CX
+       SHLQ $13,R10:R9
+       ANDQ SI,R9
+       ADDQ R8,R9
+       SHLQ $13,R12:R11
+       ANDQ SI,R11
+       ADDQ R10,R11
+       SHLQ $13,R14:R13
+       ANDQ SI,R13
+       ADDQ R12,R13
+       SHLQ $13,BX:R15
+       ANDQ SI,R15
+       ADDQ R14,R15
+       IMUL3Q $19,BX,DX
+       ADDQ DX,CX
+       MOVQ CX,DX
+       SHRQ $51,DX
+       ADDQ R9,DX
+       ANDQ SI,CX
+       MOVQ DX,R8
+       SHRQ $51,DX
+       ADDQ R11,DX
+       ANDQ SI,R8
+       MOVQ DX,R9
+       SHRQ $51,DX
+       ADDQ R13,DX
+       ANDQ SI,R9
+       MOVQ DX,AX
+       SHRQ $51,DX
+       ADDQ R15,DX
+       ANDQ SI,AX
+       MOVQ DX,R10
+       SHRQ $51,DX
+       IMUL3Q $19,DX,DX
+       ADDQ DX,CX
+       ANDQ SI,R10
+       MOVQ CX,0(DI)
+       MOVQ R8,8(DI)
+       MOVQ R9,16(DI)
+       MOVQ AX,24(DI)
+       MOVQ R10,32(DI)
+       RET
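square is the same multiply specialized for a == b: symmetric cross terms are doubled up front (the SHLQ $1 instructions) and the high-column folds use 38 = 2·19 instead of 19. Functionally it could be replaced by the generic multiply — in terms of the hypothetical `mul51` sketch above:

```go
// square51 shows the relationship only; the dedicated assembly
// exists purely to halve the number of 64x64 multiplications.
func square51(a *[5]uint64) [5]uint64 {
	return mul51(a, a)
}
```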
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
new file mode 100644 (file)
index 0000000..f1d9567
--- /dev/null
@@ -0,0 +1,181 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ed25519 implements the Ed25519 signature algorithm. See
+// http://ed25519.cr.yp.to/.
+//
+// These functions are also compatible with the “Ed25519” function defined in
+// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05.
+package ed25519
+
+// This code is a port of the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+import (
+       "crypto"
+       cryptorand "crypto/rand"
+       "crypto/sha512"
+       "crypto/subtle"
+       "errors"
+       "io"
+       "strconv"
+
+       "golang.org/x/crypto/ed25519/internal/edwards25519"
+)
+
+const (
+       // PublicKeySize is the size, in bytes, of public keys as used in this package.
+       PublicKeySize = 32
+       // PrivateKeySize is the size, in bytes, of private keys as used in this package.
+       PrivateKeySize = 64
+       // SignatureSize is the size, in bytes, of signatures generated and verified by this package.
+       SignatureSize = 64
+)
+
+// PublicKey is the type of Ed25519 public keys.
+type PublicKey []byte
+
+// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
+type PrivateKey []byte
+
+// Public returns the PublicKey corresponding to priv.
+func (priv PrivateKey) Public() crypto.PublicKey {
+       publicKey := make([]byte, PublicKeySize)
+       copy(publicKey, priv[32:])
+       return PublicKey(publicKey)
+}
+
+// Sign signs the given message with priv.
+// Ed25519 performs two passes over messages to be signed and therefore cannot
+// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
+// indicate the message hasn't been hashed. This can be achieved by passing
+// crypto.Hash(0) as the value for opts.
+func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
+       if opts.HashFunc() != crypto.Hash(0) {
+               return nil, errors.New("ed25519: cannot sign hashed message")
+       }
+
+       return Sign(priv, message), nil
+}
+
+// GenerateKey generates a public/private key pair using entropy from rand.
+// If rand is nil, crypto/rand.Reader will be used.
+func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {
+       if rand == nil {
+               rand = cryptorand.Reader
+       }
+
+       privateKey = make([]byte, PrivateKeySize)
+       publicKey = make([]byte, PublicKeySize)
+       _, err = io.ReadFull(rand, privateKey[:32])
+       if err != nil {
+               return nil, nil, err
+       }
+
+       digest := sha512.Sum512(privateKey[:32])
+       digest[0] &= 248
+       digest[31] &= 127
+       digest[31] |= 64
+
+       var A edwards25519.ExtendedGroupElement
+       var hBytes [32]byte
+       copy(hBytes[:], digest[:])
+       edwards25519.GeScalarMultBase(&A, &hBytes)
+       var publicKeyBytes [32]byte
+       A.ToBytes(&publicKeyBytes)
+
+       copy(privateKey[32:], publicKeyBytes[:])
+       copy(publicKey, publicKeyBytes[:])
+
+       return publicKey, privateKey, nil
+}
+
+// Sign signs the message with privateKey and returns a signature. It will
+// panic if len(privateKey) is not PrivateKeySize.
+func Sign(privateKey PrivateKey, message []byte) []byte {
+       if l := len(privateKey); l != PrivateKeySize {
+               panic("ed25519: bad private key length: " + strconv.Itoa(l))
+       }
+
+       h := sha512.New()
+       h.Write(privateKey[:32])
+
+       var digest1, messageDigest, hramDigest [64]byte
+       var expandedSecretKey [32]byte
+       h.Sum(digest1[:0])
+       copy(expandedSecretKey[:], digest1[:])
+       expandedSecretKey[0] &= 248
+       expandedSecretKey[31] &= 63
+       expandedSecretKey[31] |= 64
+
+       h.Reset()
+       h.Write(digest1[32:])
+       h.Write(message)
+       h.Sum(messageDigest[:0])
+
+       var messageDigestReduced [32]byte
+       edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
+       var R edwards25519.ExtendedGroupElement
+       edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
+
+       var encodedR [32]byte
+       R.ToBytes(&encodedR)
+
+       h.Reset()
+       h.Write(encodedR[:])
+       h.Write(privateKey[32:])
+       h.Write(message)
+       h.Sum(hramDigest[:0])
+       var hramDigestReduced [32]byte
+       edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
+
+       var s [32]byte
+       edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
+
+       signature := make([]byte, SignatureSize)
+       copy(signature[:], encodedR[:])
+       copy(signature[32:], s[:])
+
+       return signature
+}
+
+// Verify reports whether sig is a valid signature of message by publicKey. It
+// will panic if len(publicKey) is not PublicKeySize.
+func Verify(publicKey PublicKey, message, sig []byte) bool {
+       if l := len(publicKey); l != PublicKeySize {
+               panic("ed25519: bad public key length: " + strconv.Itoa(l))
+       }
+
+       if len(sig) != SignatureSize || sig[63]&224 != 0 {
+               return false
+       }
+
+       var A edwards25519.ExtendedGroupElement
+       var publicKeyBytes [32]byte
+       copy(publicKeyBytes[:], publicKey)
+       if !A.FromBytes(&publicKeyBytes) {
+               return false
+       }
+       edwards25519.FeNeg(&A.X, &A.X)
+       edwards25519.FeNeg(&A.T, &A.T)
+
+       h := sha512.New()
+       h.Write(sig[:32])
+       h.Write(publicKey[:])
+       h.Write(message)
+       var digest [64]byte
+       h.Sum(digest[:0])
+
+       var hReduced [32]byte
+       edwards25519.ScReduce(&hReduced, &digest)
+
+       var R edwards25519.ProjectiveGroupElement
+       var b [32]byte
+       copy(b[:], sig[32:])
+       edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b)
+
+       var checkR [32]byte
+       R.ToBytes(&checkR)
+       return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1
+}
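The exported surface of the package above is small: GenerateKey, Sign, Verify, plus the crypto.Signer implementation on PrivateKey. A minimal round-trip using only the APIs defined in this file:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// A nil reader selects crypto/rand.Reader, per GenerateKey's doc.
	pub, priv, err := ed25519.GenerateKey(nil)
	if err != nil {
		panic(err)
	}

	msg := []byte("hello, ed25519")
	sig := ed25519.Sign(priv, msg)             // 64-byte signature
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}
```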
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
new file mode 100644 (file)
index 0000000..e39f086
--- /dev/null
@@ -0,0 +1,1422 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// These values are from the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+// d is a constant in the Edwards curve equation.
+var d = FieldElement{
+       -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116,
+}
+
+// d2 is 2*d.
+var d2 = FieldElement{
+       -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199,
+}
+
+// SqrtM1 is the square-root of -1 in the field.
+var SqrtM1 = FieldElement{
+       -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482,
+}
+
+// A is a constant in the Montgomery-form of curve25519.
+var A = FieldElement{
+       486662, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+}
+
+// bi contains precomputed multiples of the base-point. See the Ed25519 paper
+// for a discussion about how these values are used.
+var bi = [8]PreComputedGroupElement{
+       {
+               FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
+               FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
+               FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
+       },
+       {
+               FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
+               FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
+               FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
+       },
+       {
+               FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
+               FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
+               FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
+       },
+       {
+               FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
+               FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
+               FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
+       },
+       {
+               FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877},
+               FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951},
+               FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784},
+       },
+       {
+               FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436},
+               FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918},
+               FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877},
+       },
+       {
+               FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800},
+               FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305},
+               FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300},
+       },
+       {
+               FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876},
+               FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619},
+               FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683},
+       },
+}
+
+// base contains precomputed multiples of the base-point. See the Ed25519 paper
+// for a discussion about how these values are used.
+var base = [32][8]PreComputedGroupElement{
+       {
+               {
+                       FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605},
+                       FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378},
+                       FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546},
+               },
+               {
+                       FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303},
+                       FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081},
+                       FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697},
+               },
+               {
+                       FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024},
+                       FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574},
+                       FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357},
+               },
+               {
+                       FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540},
+                       FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397},
+                       FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325},
+               },
+               {
+                       FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380},
+                       FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306},
+                       FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942},
+               },
+               {
+                       FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777},
+                       FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737},
+                       FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652},
+               },
+               {
+                       FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766},
+                       FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701},
+                       FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300},
+               },
+               {
+                       FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726},
+                       FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955},
+                       FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425},
+               },
+       },
+       {
+               {
+                       FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171},
+                       FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510},
+                       FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660},
+               },
+               {
+                       FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639},
+                       FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963},
+                       FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950},
+               },
+               {
+                       FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568},
+                       FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335},
+                       FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628},
+               },
+               {
+                       FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007},
+                       FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772},
+                       FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653},
+               },
+               {
+                       FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567},
+                       FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686},
+                       FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372},
+               },
+               {
+                       FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887},
+                       FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954},
+                       FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953},
+               },
+               {
+                       FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833},
+                       FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532},
+                       FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876},
+               },
+               {
+                       FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268},
+                       FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214},
+                       FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038},
+               },
+       },
+       {
+               {
+                       FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800},
+                       FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645},
+                       FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664},
+               },
+               {
+                       FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933},
+                       FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182},
+                       FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222},
+               },
+               {
+                       FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991},
+                       FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880},
+                       FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092},
+               },
+               {
+                       FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295},
+                       FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788},
+                       FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553},
+               },
+               {
+                       FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026},
+                       FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347},
+                       FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033},
+               },
+               {
+                       FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395},
+                       FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278},
+                       FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890},
+               },
+               {
+                       FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995},
+                       FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596},
+                       FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891},
+               },
+               {
+                       FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060},
+                       FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608},
+                       FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606},
+               },
+       },
+       {
+               {
+                       FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389},
+                       FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016},
+                       FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341},
+               },
+               {
+                       FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505},
+                       FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553},
+                       FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655},
+               },
+               {
+                       FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220},
+                       FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631},
+                       FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099},
+               },
+               {
+                       FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556},
+                       FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749},
+                       FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930},
+               },
+               {
+                       FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391},
+                       FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253},
+                       FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066},
+               },
+               {
+                       FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958},
+                       FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082},
+                       FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383},
+               },
+               {
+                       FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521},
+                       FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807},
+                       FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948},
+               },
+               {
+                       FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134},
+                       FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455},
+                       FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629},
+               },
+       },
+       {
+               {
+                       FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069},
+                       FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746},
+                       FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919},
+               },
+               {
+                       FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837},
+                       FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906},
+                       FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771},
+               },
+               {
+                       FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817},
+                       FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098},
+                       FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409},
+               },
+               {
+                       FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504},
+                       FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727},
+                       FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420},
+               },
+               {
+                       FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003},
+                       FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605},
+                       FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384},
+               },
+               {
+                       FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701},
+                       FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683},
+                       FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708},
+               },
+               {
+                       FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563},
+                       FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260},
+                       FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387},
+               },
+               {
+                       FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672},
+                       FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686},
+                       FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665},
+               },
+       },
+       {
+               {
+                       FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182},
+                       FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277},
+                       FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628},
+               },
+               {
+                       FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474},
+                       FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539},
+                       FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822},
+               },
+               {
+                       FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970},
+                       FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756},
+                       FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508},
+               },
+               {
+                       FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683},
+                       FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655},
+                       FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158},
+               },
+               {
+                       FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125},
+                       FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839},
+                       FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664},
+               },
+               {
+                       FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294},
+                       FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899},
+                       FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070},
+               },
+               {
+                       FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294},
+                       FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949},
+                       FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083},
+               },
+               {
+                       FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420},
+                       FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940},
+                       FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396},
+               },
+       },
+       {
+               {
+                       FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567},
+                       FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127},
+                       FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294},
+               },
+               {
+                       FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887},
+                       FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964},
+                       FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195},
+               },
+               {
+                       FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244},
+                       FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999},
+                       FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762},
+               },
+               {
+                       FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274},
+                       FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236},
+                       FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605},
+               },
+               {
+                       FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761},
+                       FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884},
+                       FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482},
+               },
+               {
+                       FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638},
+                       FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490},
+                       FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170},
+               },
+               {
+                       FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736},
+                       FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124},
+                       FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392},
+               },
+               {
+                       FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029},
+                       FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048},
+                       FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958},
+               },
+       },
+       {
+               {
+                       FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593},
+                       FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071},
+                       FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692},
+               },
+               {
+                       FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687},
+                       FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441},
+                       FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001},
+               },
+               {
+                       FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460},
+                       FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007},
+                       FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762},
+               },
+               {
+                       FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005},
+                       FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674},
+                       FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035},
+               },
+               {
+                       FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590},
+                       FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957},
+                       FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812},
+               },
+               {
+                       FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740},
+                       FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122},
+                       FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158},
+               },
+               {
+                       FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885},
+                       FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140},
+                       FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857},
+               },
+               {
+                       FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155},
+                       FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260},
+                       FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483},
+               },
+       },
+       {
+               {
+                       FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677},
+                       FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815},
+                       FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751},
+               },
+               {
+                       FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203},
+                       FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208},
+                       FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230},
+               },
+               {
+                       FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850},
+                       FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389},
+                       FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968},
+               },
+               {
+                       FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689},
+                       FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880},
+                       FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304},
+               },
+               {
+                       FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632},
+                       FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412},
+                       FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566},
+               },
+               {
+                       FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038},
+                       FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232},
+                       FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943},
+               },
+               {
+                       FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856},
+                       FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738},
+                       FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971},
+               },
+               {
+                       FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718},
+                       FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697},
+                       FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883},
+               },
+       },
+       {
+               {
+                       FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912},
+                       FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358},
+                       FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849},
+               },
+               {
+                       FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307},
+                       FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977},
+                       FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335},
+               },
+               {
+                       FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644},
+                       FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616},
+                       FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735},
+               },
+               {
+                       FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099},
+                       FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341},
+                       FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336},
+               },
+               {
+                       FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646},
+                       FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425},
+                       FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388},
+               },
+               {
+                       FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743},
+                       FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822},
+                       FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462},
+               },
+               {
+                       FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985},
+                       FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702},
+                       FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797},
+               },
+               {
+                       FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293},
+                       FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100},
+                       FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688},
+               },
+       },
+       {
+               {
+                       FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186},
+                       FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610},
+                       FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707},
+               },
+               {
+                       FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220},
+                       FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025},
+                       FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044},
+               },
+               {
+                       FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992},
+                       FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027},
+                       FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197},
+               },
+               {
+                       FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901},
+                       FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952},
+                       FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878},
+               },
+               {
+                       FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390},
+                       FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730},
+                       FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730},
+               },
+               {
+                       FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180},
+                       FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272},
+                       FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715},
+               },
+               {
+                       FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970},
+                       FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772},
+                       FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865},
+               },
+               {
+                       FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750},
+                       FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373},
+                       FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348},
+               },
+       },
+       {
+               {
+                       FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144},
+                       FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195},
+                       FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086},
+               },
+               {
+                       FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684},
+                       FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518},
+                       FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233},
+               },
+               {
+                       FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793},
+                       FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794},
+                       FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435},
+               },
+               {
+                       FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921},
+                       FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518},
+                       FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563},
+               },
+               {
+                       FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278},
+                       FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024},
+                       FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030},
+               },
+               {
+                       FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783},
+                       FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717},
+                       FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844},
+               },
+               {
+                       FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333},
+                       FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048},
+                       FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760},
+               },
+               {
+                       FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760},
+                       FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757},
+                       FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112},
+               },
+       },
+       {
+               {
+                       FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468},
+                       FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184},
+                       FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289},
+               },
+               {
+                       FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066},
+                       FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882},
+                       FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226},
+               },
+               {
+                       FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101},
+                       FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279},
+                       FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811},
+               },
+               {
+                       FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709},
+                       FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714},
+                       FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121},
+               },
+               {
+                       FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464},
+                       FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847},
+                       FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400},
+               },
+               {
+                       FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414},
+                       FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158},
+                       FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045},
+               },
+               {
+                       FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415},
+                       FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459},
+                       FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079},
+               },
+               {
+                       FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412},
+                       FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743},
+                       FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836},
+               },
+       },
+       {
+               {
+                       FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022},
+                       FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429},
+                       FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065},
+               },
+               {
+                       FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861},
+                       FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000},
+                       FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101},
+               },
+               {
+                       FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815},
+                       FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642},
+                       FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966},
+               },
+               {
+                       FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574},
+                       FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742},
+                       FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689},
+               },
+               {
+                       FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020},
+                       FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772},
+                       FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982},
+               },
+               {
+                       FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953},
+                       FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218},
+                       FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265},
+               },
+               {
+                       FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073},
+                       FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325},
+                       FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798},
+               },
+               {
+                       FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870},
+                       FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863},
+                       FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927},
+               },
+       },
+       {
+               {
+                       FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267},
+                       FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663},
+                       FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862},
+               },
+               {
+                       FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673},
+                       FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943},
+                       FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020},
+               },
+               {
+                       FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238},
+                       FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064},
+                       FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795},
+               },
+               {
+                       FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052},
+                       FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904},
+                       FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531},
+               },
+               {
+                       FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979},
+                       FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841},
+                       FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431},
+               },
+               {
+                       FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324},
+                       FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940},
+                       FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320},
+               },
+               {
+                       FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184},
+                       FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114},
+                       FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878},
+               },
+               {
+                       FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784},
+                       FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091},
+                       FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585},
+               },
+       },
+       {
+               {
+                       FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208},
+                       FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864},
+                       FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661},
+               },
+               {
+                       FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233},
+                       FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212},
+                       FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525},
+               },
+               {
+                       FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068},
+                       FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397},
+                       FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988},
+               },
+               {
+                       FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889},
+                       FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038},
+                       FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697},
+               },
+               {
+                       FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875},
+                       FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905},
+                       FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656},
+               },
+               {
+                       FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818},
+                       FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714},
+                       FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203},
+               },
+               {
+                       FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931},
+                       FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024},
+                       FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084},
+               },
+               {
+                       FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204},
+                       FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817},
+                       FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667},
+               },
+       },
+       {
+               {
+                       FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504},
+                       FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768},
+                       FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255},
+               },
+               {
+                       FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790},
+                       FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438},
+                       FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333},
+               },
+               {
+                       FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971},
+                       FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905},
+                       FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409},
+               },
+               {
+                       FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409},
+                       FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499},
+                       FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363},
+               },
+               {
+                       FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664},
+                       FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324},
+                       FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940},
+               },
+               {
+                       FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990},
+                       FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914},
+                       FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290},
+               },
+               {
+                       FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257},
+                       FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433},
+                       FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236},
+               },
+               {
+                       FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045},
+                       FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093},
+                       FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347},
+               },
+       },
+       {
+               {
+                       FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191},
+                       FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507},
+                       FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906},
+               },
+               {
+                       FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018},
+                       FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109},
+                       FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926},
+               },
+               {
+                       FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528},
+                       FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625},
+                       FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286},
+               },
+               {
+                       FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033},
+                       FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866},
+                       FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896},
+               },
+               {
+                       FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075},
+                       FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347},
+                       FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437},
+               },
+               {
+                       FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165},
+                       FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588},
+                       FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193},
+               },
+               {
+                       FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017},
+                       FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883},
+                       FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961},
+               },
+               {
+                       FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043},
+                       FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663},
+                       FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362},
+               },
+       },
+       {
+               {
+                       FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860},
+                       FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466},
+                       FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063},
+               },
+               {
+                       FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997},
+                       FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295},
+                       FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369},
+               },
+               {
+                       FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385},
+                       FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109},
+                       FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906},
+               },
+               {
+                       FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424},
+                       FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185},
+                       FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962},
+               },
+               {
+                       FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325},
+                       FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593},
+                       FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404},
+               },
+               {
+                       FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644},
+                       FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801},
+                       FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804},
+               },
+               {
+                       FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884},
+                       FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577},
+                       FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849},
+               },
+               {
+                       FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473},
+                       FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644},
+                       FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319},
+               },
+       },
+       {
+               {
+                       FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599},
+                       FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768},
+                       FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084},
+               },
+               {
+                       FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328},
+                       FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369},
+                       FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920},
+               },
+               {
+                       FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815},
+                       FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025},
+                       FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397},
+               },
+               {
+                       FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448},
+                       FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981},
+                       FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165},
+               },
+               {
+                       FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501},
+                       FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073},
+                       FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861},
+               },
+               {
+                       FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845},
+                       FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211},
+                       FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870},
+               },
+               {
+                       FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096},
+                       FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803},
+                       FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168},
+               },
+               {
+                       FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965},
+                       FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505},
+                       FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598},
+               },
+       },
+       {
+               {
+                       FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782},
+                       FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900},
+                       FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479},
+               },
+               {
+                       FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208},
+                       FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232},
+                       FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719},
+               },
+               {
+                       FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271},
+                       FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326},
+                       FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132},
+               },
+               {
+                       FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300},
+                       FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570},
+                       FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670},
+               },
+               {
+                       FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994},
+                       FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913},
+                       FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317},
+               },
+               {
+                       FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730},
+                       FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096},
+                       FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078},
+               },
+               {
+                       FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411},
+                       FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905},
+                       FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654},
+               },
+               {
+                       FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870},
+                       FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498},
+                       FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579},
+               },
+       },
+       {
+               {
+                       FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677},
+                       FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647},
+                       FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743},
+               },
+               {
+                       FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468},
+                       FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375},
+                       FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155},
+               },
+               {
+                       FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725},
+                       FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612},
+                       FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943},
+               },
+               {
+                       FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944},
+                       FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928},
+                       FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406},
+               },
+               {
+                       FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139},
+                       FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963},
+                       FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693},
+               },
+               {
+                       FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734},
+                       FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680},
+                       FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410},
+               },
+               {
+                       FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931},
+                       FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654},
+                       FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710},
+               },
+               {
+                       FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180},
+                       FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684},
+                       FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895},
+               },
+       },
+       {
+               {
+                       FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501},
+                       FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413},
+                       FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880},
+               },
+               {
+                       FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874},
+                       FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962},
+                       FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899},
+               },
+               {
+                       FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152},
+                       FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063},
+                       FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080},
+               },
+               {
+                       FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146},
+                       FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183},
+                       FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133},
+               },
+               {
+                       FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421},
+                       FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622},
+                       FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197},
+               },
+               {
+                       FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663},
+                       FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753},
+                       FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755},
+               },
+               {
+                       FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862},
+                       FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118},
+                       FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171},
+               },
+               {
+                       FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380},
+                       FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824},
+                       FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270},
+               },
+       },
+       {
+               {
+                       FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438},
+                       FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584},
+                       FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562},
+               },
+               {
+                       FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471},
+                       FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610},
+                       FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269},
+               },
+               {
+                       FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650},
+                       FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369},
+                       FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461},
+               },
+               {
+                       FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462},
+                       FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793},
+                       FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218},
+               },
+               {
+                       FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226},
+                       FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019},
+                       FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037},
+               },
+               {
+                       FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171},
+                       FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132},
+                       FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841},
+               },
+               {
+                       FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181},
+                       FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210},
+                       FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040},
+               },
+               {
+                       FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935},
+                       FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105},
+                       FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814},
+               },
+       },
+       {
+               {
+                       FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852},
+                       FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581},
+                       FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646},
+               },
+               {
+                       FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844},
+                       FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025},
+                       FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453},
+               },
+               {
+                       FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068},
+                       FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192},
+                       FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921},
+               },
+               {
+                       FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259},
+                       FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426},
+                       FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072},
+               },
+               {
+                       FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305},
+                       FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832},
+                       FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943},
+               },
+               {
+                       FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011},
+                       FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447},
+                       FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494},
+               },
+               {
+                       FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245},
+                       FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859},
+                       FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915},
+               },
+               {
+                       FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707},
+                       FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848},
+                       FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224},
+               },
+       },
+       {
+               {
+                       FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391},
+                       FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215},
+                       FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101},
+               },
+               {
+                       FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713},
+                       FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849},
+                       FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930},
+               },
+               {
+                       FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940},
+                       FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031},
+                       FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404},
+               },
+               {
+                       FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243},
+                       FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116},
+                       FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525},
+               },
+               {
+                       FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509},
+                       FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883},
+                       FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865},
+               },
+               {
+                       FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660},
+                       FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273},
+                       FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138},
+               },
+               {
+                       FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560},
+                       FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135},
+                       FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941},
+               },
+               {
+                       FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739},
+                       FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756},
+                       FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819},
+               },
+       },
+       {
+               {
+                       FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347},
+                       FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028},
+                       FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075},
+               },
+               {
+                       FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799},
+                       FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609},
+                       FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817},
+               },
+               {
+                       FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989},
+                       FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523},
+                       FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278},
+               },
+               {
+                       FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045},
+                       FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377},
+                       FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480},
+               },
+               {
+                       FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016},
+                       FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426},
+                       FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525},
+               },
+               {
+                       FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396},
+                       FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080},
+                       FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892},
+               },
+               {
+                       FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275},
+                       FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074},
+                       FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140},
+               },
+               {
+                       FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717},
+                       FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101},
+                       FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127},
+               },
+       },
+       {
+               {
+                       FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632},
+                       FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415},
+                       FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160},
+               },
+               {
+                       FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876},
+                       FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625},
+                       FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478},
+               },
+               {
+                       FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164},
+                       FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595},
+                       FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248},
+               },
+               {
+                       FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858},
+                       FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193},
+                       FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184},
+               },
+               {
+                       FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942},
+                       FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635},
+                       FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948},
+               },
+               {
+                       FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935},
+                       FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415},
+                       FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416},
+               },
+               {
+                       FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018},
+                       FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778},
+                       FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659},
+               },
+               {
+                       FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385},
+                       FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503},
+                       FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329},
+               },
+       },
+       {
+               {
+                       FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056},
+                       FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838},
+                       FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948},
+               },
+               {
+                       FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691},
+                       FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118},
+                       FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517},
+               },
+               {
+                       FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269},
+                       FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904},
+                       FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589},
+               },
+               {
+                       FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193},
+                       FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910},
+                       FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930},
+               },
+               {
+                       FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667},
+                       FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481},
+                       FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876},
+               },
+               {
+                       FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640},
+                       FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278},
+                       FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112},
+               },
+               {
+                       FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272},
+                       FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012},
+                       FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221},
+               },
+               {
+                       FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046},
+                       FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345},
+                       FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310},
+               },
+       },
+       {
+               {
+                       FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937},
+                       FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636},
+                       FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008},
+               },
+               {
+                       FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429},
+                       FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576},
+                       FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066},
+               },
+               {
+                       FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490},
+                       FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104},
+                       FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053},
+               },
+               {
+                       FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275},
+                       FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511},
+                       FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095},
+               },
+               {
+                       FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439},
+                       FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939},
+                       FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424},
+               },
+               {
+                       FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310},
+                       FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608},
+                       FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079},
+               },
+               {
+                       FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101},
+                       FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418},
+                       FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576},
+               },
+               {
+                       FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356},
+                       FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996},
+                       FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099},
+               },
+       },
+       {
+               {
+                       FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728},
+                       FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658},
+                       FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242},
+               },
+               {
+                       FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001},
+                       FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766},
+                       FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373},
+               },
+               {
+                       FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458},
+                       FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628},
+                       FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657},
+               },
+               {
+                       FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062},
+                       FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616},
+                       FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014},
+               },
+               {
+                       FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383},
+                       FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814},
+                       FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718},
+               },
+               {
+                       FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417},
+                       FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222},
+                       FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444},
+               },
+               {
+                       FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597},
+                       FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970},
+                       FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799},
+               },
+               {
+                       FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647},
+                       FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511},
+                       FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032},
+               },
+       },
+       {
+               {
+                       FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834},
+                       FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461},
+                       FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062},
+               },
+               {
+                       FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516},
+                       FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547},
+                       FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240},
+               },
+               {
+                       FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038},
+                       FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741},
+                       FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103},
+               },
+               {
+                       FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747},
+                       FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323},
+                       FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016},
+               },
+               {
+                       FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373},
+                       FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228},
+                       FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141},
+               },
+               {
+                       FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399},
+                       FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831},
+                       FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376},
+               },
+               {
+                       FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313},
+                       FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958},
+                       FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577},
+               },
+               {
+                       FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743},
+                       FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684},
+                       FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476},
+               },
+       },
+}
diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
new file mode 100644 (file)
index 0000000..5f8b994
--- /dev/null
@@ -0,0 +1,1771 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package edwards25519
+
+// This code is a port of the public domain, “ref10” implementation of ed25519
+// from SUPERCOP.
+
+// FieldElement represents an element of the field GF(2^255 - 19).  An element
+// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+// t[3]+2^102 t[4]+...+2^230 t[9].  Bounds on each t[i] vary depending on
+// context.
+type FieldElement [10]int32
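+
+// Editorial example (not part of the upstream file): in the representation
+// described above, even-indexed limbs carry 26 bits and odd-indexed limbs 25
+// bits, so the bit offsets run 0, 26, 51, 77, ... For instance, the integer
+// 1 + 5*2^26 + 7*2^51 is represented by the (hypothetical) element below.
+var feExampleSmall = FieldElement{1, 5, 7, 0, 0, 0, 0, 0, 0, 0}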
+
+var zero FieldElement
+
+// FeZero sets fe = 0.
+func FeZero(fe *FieldElement) {
+       copy(fe[:], zero[:])
+}
+
+// FeOne sets fe = 1.
+func FeOne(fe *FieldElement) {
+       FeZero(fe)
+       fe[0] = 1
+}
+
+// FeAdd sets dst = a + b, limb by limb; no carrying is performed.
+func FeAdd(dst, a, b *FieldElement) {
+       dst[0] = a[0] + b[0]
+       dst[1] = a[1] + b[1]
+       dst[2] = a[2] + b[2]
+       dst[3] = a[3] + b[3]
+       dst[4] = a[4] + b[4]
+       dst[5] = a[5] + b[5]
+       dst[6] = a[6] + b[6]
+       dst[7] = a[7] + b[7]
+       dst[8] = a[8] + b[8]
+       dst[9] = a[9] + b[9]
+}
+
+// FeSub sets dst = a - b, limb by limb; no carrying is performed.
+func FeSub(dst, a, b *FieldElement) {
+       dst[0] = a[0] - b[0]
+       dst[1] = a[1] - b[1]
+       dst[2] = a[2] - b[2]
+       dst[3] = a[3] - b[3]
+       dst[4] = a[4] - b[4]
+       dst[5] = a[5] - b[5]
+       dst[6] = a[6] - b[6]
+       dst[7] = a[7] - b[7]
+       dst[8] = a[8] - b[8]
+       dst[9] = a[9] - b[9]
+}
+
+// FeCopy sets dst = src.
+func FeCopy(dst, src *FieldElement) {
+       copy(dst[:], src[:])
+}
+
+// FeCMove sets f = g if b == 1 and leaves f unchanged if b == 0, in constant
+// time: b is expanded into an all-zeros or all-ones mask, so the same
+// instructions execute regardless of b.
+//
+// Preconditions: b in {0,1}.
+func FeCMove(f, g *FieldElement, b int32) {
+       b = -b
+       f[0] ^= b & (f[0] ^ g[0])
+       f[1] ^= b & (f[1] ^ g[1])
+       f[2] ^= b & (f[2] ^ g[2])
+       f[3] ^= b & (f[3] ^ g[3])
+       f[4] ^= b & (f[4] ^ g[4])
+       f[5] ^= b & (f[5] ^ g[5])
+       f[6] ^= b & (f[6] ^ g[6])
+       f[7] ^= b & (f[7] ^ g[7])
+       f[8] ^= b & (f[8] ^ g[8])
+       f[9] ^= b & (f[9] ^ g[9])
+}
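+
+// Editorial sketch (not part of the upstream file): the masking trick above
+// in scalar form. With b in {0,1}, -b is all zeros or all ones, and
+// x ^ (mask & (x ^ y)) yields x when the mask is zero and y when it is all
+// ones, with no data-dependent branch. cselect32 is a hypothetical helper
+// for illustration only.
+func cselect32(x, y, b int32) int32 {
+       mask := -b
+       return x ^ (mask & (x ^ y))
+}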
+
+// load3 reads a 24-bit little-endian value from in.
+func load3(in []byte) int64 {
+       var r int64
+       r = int64(in[0])
+       r |= int64(in[1]) << 8
+       r |= int64(in[2]) << 16
+       return r
+}
+
+// load4 reads a 32-bit little-endian value from in.
+func load4(in []byte) int64 {
+       var r int64
+       r = int64(in[0])
+       r |= int64(in[1]) << 8
+       r |= int64(in[2]) << 16
+       r |= int64(in[3]) << 24
+       return r
+}
+
+// FeFromBytes unpacks the little-endian encoding in src into the ten limbs of
+// dst, masking off the top bit of the final byte so that only 255 bits are
+// used.
+func FeFromBytes(dst *FieldElement, src *[32]byte) {
+       h0 := load4(src[:])
+       h1 := load3(src[4:]) << 6
+       h2 := load3(src[7:]) << 5
+       h3 := load3(src[10:]) << 3
+       h4 := load3(src[13:]) << 2
+       h5 := load4(src[16:])
+       h6 := load3(src[20:]) << 7
+       h7 := load3(src[23:]) << 5
+       h8 := load3(src[26:]) << 4
+       h9 := (load3(src[29:]) & 8388607) << 2
+
+       FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
+}
+
+// FeToBytes marshals h to s.
+// Preconditions:
+//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func FeToBytes(s *[32]byte, h *FieldElement) {
+       var carry [10]int32
+
+       q := (19*h[9] + (1 << 24)) >> 25
+       q = (h[0] + q) >> 26
+       q = (h[1] + q) >> 25
+       q = (h[2] + q) >> 26
+       q = (h[3] + q) >> 25
+       q = (h[4] + q) >> 26
+       q = (h[5] + q) >> 25
+       q = (h[6] + q) >> 26
+       q = (h[7] + q) >> 25
+       q = (h[8] + q) >> 26
+       q = (h[9] + q) >> 25
+
+       // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+       h[0] += 19 * q
+       // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+       carry[0] = h[0] >> 26
+       h[1] += carry[0]
+       h[0] -= carry[0] << 26
+       carry[1] = h[1] >> 25
+       h[2] += carry[1]
+       h[1] -= carry[1] << 25
+       carry[2] = h[2] >> 26
+       h[3] += carry[2]
+       h[2] -= carry[2] << 26
+       carry[3] = h[3] >> 25
+       h[4] += carry[3]
+       h[3] -= carry[3] << 25
+       carry[4] = h[4] >> 26
+       h[5] += carry[4]
+       h[4] -= carry[4] << 26
+       carry[5] = h[5] >> 25
+       h[6] += carry[5]
+       h[5] -= carry[5] << 25
+       carry[6] = h[6] >> 26
+       h[7] += carry[6]
+       h[6] -= carry[6] << 26
+       carry[7] = h[7] >> 25
+       h[8] += carry[7]
+       h[7] -= carry[7] << 25
+       carry[8] = h[8] >> 26
+       h[9] += carry[8]
+       h[8] -= carry[8] << 26
+       carry[9] = h[9] >> 25
+       h[9] -= carry[9] << 25
+       // h10 = carry9
+
+       // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+       // Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+       // evidently 2^255 h10-2^255 q = 0.
+       // Goal: Output h[0]+...+2^230 h[9].
+
+       s[0] = byte(h[0] >> 0)
+       s[1] = byte(h[0] >> 8)
+       s[2] = byte(h[0] >> 16)
+       s[3] = byte((h[0] >> 24) | (h[1] << 2))
+       s[4] = byte(h[1] >> 6)
+       s[5] = byte(h[1] >> 14)
+       s[6] = byte((h[1] >> 22) | (h[2] << 3))
+       s[7] = byte(h[2] >> 5)
+       s[8] = byte(h[2] >> 13)
+       s[9] = byte((h[2] >> 21) | (h[3] << 5))
+       s[10] = byte(h[3] >> 3)
+       s[11] = byte(h[3] >> 11)
+       s[12] = byte((h[3] >> 19) | (h[4] << 6))
+       s[13] = byte(h[4] >> 2)
+       s[14] = byte(h[4] >> 10)
+       s[15] = byte(h[4] >> 18)
+       s[16] = byte(h[5] >> 0)
+       s[17] = byte(h[5] >> 8)
+       s[18] = byte(h[5] >> 16)
+       s[19] = byte((h[5] >> 24) | (h[6] << 1))
+       s[20] = byte(h[6] >> 7)
+       s[21] = byte(h[6] >> 15)
+       s[22] = byte((h[6] >> 23) | (h[7] << 3))
+       s[23] = byte(h[7] >> 5)
+       s[24] = byte(h[7] >> 13)
+       s[25] = byte((h[7] >> 21) | (h[8] << 4))
+       s[26] = byte(h[8] >> 4)
+       s[27] = byte(h[8] >> 12)
+       s[28] = byte((h[8] >> 20) | (h[9] << 6))
+       s[29] = byte(h[9] >> 2)
+       s[30] = byte(h[9] >> 10)
+       s[31] = byte(h[9] >> 18)
+}
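+
+// Editorial sketch (not part of the upstream file): FeToBytes produces the
+// unique canonical 32-byte encoding, so decoding a canonical encoding and
+// re-encoding it must be the identity. feRoundTripExample is a hypothetical
+// self-check using the encoding of the element 1.
+func feRoundTripExample() bool {
+       var in, out [32]byte
+       in[0] = 1 // little-endian canonical encoding of 1
+       var fe FieldElement
+       FeFromBytes(&fe, &in)
+       FeToBytes(&out, &fe)
+       return in == out
+}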
+
+// FeIsNegative returns the low bit of f's canonical encoding, the usual sign
+// convention for field elements.
+func FeIsNegative(f *FieldElement) byte {
+       var s [32]byte
+       FeToBytes(&s, f)
+       return s[0] & 1
+}
+
+// FeIsNonZero returns 1 if f != 0 and 0 otherwise, in constant time.
+func FeIsNonZero(f *FieldElement) int32 {
+       var s [32]byte
+       FeToBytes(&s, f)
+       var x uint8
+       for _, b := range s {
+               x |= b
+       }
+       x |= x >> 4
+       x |= x >> 2
+       x |= x >> 1
+       return int32(x & 1)
+}
+
+// FeNeg sets h = -f
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func FeNeg(h, f *FieldElement) {
+       h[0] = -f[0]
+       h[1] = -f[1]
+       h[2] = -f[2]
+       h[3] = -f[3]
+       h[4] = -f[4]
+       h[5] = -f[5]
+       h[6] = -f[6]
+       h[7] = -f[7]
+       h[8] = -f[8]
+       h[9] = -f[9]
+}
+
+// FeCombine runs the carry chain below to reduce the unreduced 64-bit limbs
+// h0..h9 into int32 range and stores the result in h.
+func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
+       var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64
+
+       /*
+         |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
+           i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
+         |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
+           i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
+       */
+
+       c0 = (h0 + (1 << 25)) >> 26
+       h1 += c0
+       h0 -= c0 << 26
+       c4 = (h4 + (1 << 25)) >> 26
+       h5 += c4
+       h4 -= c4 << 26
+       /* |h0| <= 2^25 */
+       /* |h4| <= 2^25 */
+       /* |h1| <= 1.51*2^58 */
+       /* |h5| <= 1.51*2^58 */
+
+       c1 = (h1 + (1 << 24)) >> 25
+       h2 += c1
+       h1 -= c1 << 25
+       c5 = (h5 + (1 << 24)) >> 25
+       h6 += c5
+       h5 -= c5 << 25
+       /* |h1| <= 2^24; from now on fits into int32 */
+       /* |h5| <= 2^24; from now on fits into int32 */
+       /* |h2| <= 1.21*2^59 */
+       /* |h6| <= 1.21*2^59 */
+
+       c2 = (h2 + (1 << 25)) >> 26
+       h3 += c2
+       h2 -= c2 << 26
+       c6 = (h6 + (1 << 25)) >> 26
+       h7 += c6
+       h6 -= c6 << 26
+       /* |h2| <= 2^25; from now on fits into int32 unchanged */
+       /* |h6| <= 2^25; from now on fits into int32 unchanged */
+       /* |h3| <= 1.51*2^58 */
+       /* |h7| <= 1.51*2^58 */
+
+       c3 = (h3 + (1 << 24)) >> 25
+       h4 += c3
+       h3 -= c3 << 25
+       c7 = (h7 + (1 << 24)) >> 25
+       h8 += c7
+       h7 -= c7 << 25
+       /* |h3| <= 2^24; from now on fits into int32 unchanged */
+       /* |h7| <= 2^24; from now on fits into int32 unchanged */
+       /* |h4| <= 1.52*2^33 */
+       /* |h8| <= 1.52*2^33 */
+
+       c4 = (h4 + (1 << 25)) >> 26
+       h5 += c4
+       h4 -= c4 << 26
+       c8 = (h8 + (1 << 25)) >> 26
+       h9 += c8
+       h8 -= c8 << 26
+       /* |h4| <= 2^25; from now on fits into int32 unchanged */
+       /* |h8| <= 2^25; from now on fits into int32 unchanged */
+       /* |h5| <= 1.01*2^24 */
+       /* |h9| <= 1.51*2^58 */
+
+       c9 = (h9 + (1 << 24)) >> 25
+       h0 += c9 * 19
+       h9 -= c9 << 25
+       /* |h9| <= 2^24; from now on fits into int32 unchanged */
+       /* |h0| <= 1.8*2^37 */
+
+       c0 = (h0 + (1 << 25)) >> 26
+       h1 += c0
+       h0 -= c0 << 26
+       /* |h0| <= 2^25; from now on fits into int32 unchanged */
+       /* |h1| <= 1.01*2^24 */
+
+       h[0] = int32(h0)
+       h[1] = int32(h1)
+       h[2] = int32(h2)
+       h[3] = int32(h3)
+       h[4] = int32(h4)
+       h[5] = int32(h5)
+       h[6] = int32(h6)
+       h[7] = int32(h7)
+       h[8] = int32(h8)
+       h[9] = int32(h9)
+}
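+
+// Editorial sketch (not part of the upstream file): the rounding-carry step
+// repeated throughout FeCombine. For a 2^26-radix limb, c is the quotient of
+// h by 2^26 rounded to nearest, leaving |lo| <= 2^25, which is the bound the
+// comments above track. carry26Example is a hypothetical helper.
+func carry26Example(h int64) (lo, c int64) {
+       c = (h + (1 << 25)) >> 26 // round h/2^26 to nearest
+       lo = h - c<<26            // remainder in [-2^25, 2^25)
+       return
+}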
+
+// FeMul calculates h = f * g
+// Can overlap h with f or g.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//    |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Notes on implementation strategy:
+//
+// Using schoolbook multiplication.
+// Karatsuba would save a little in some cost models.
+//
+// Most multiplications by 2 and 19 are 32-bit precomputations;
+// cheaper than 64-bit postcomputations.
+//
+// There is one remaining multiplication by 19 in the carry chain;
+// one *19 precomputation can be merged into this,
+// but the resulting data flow is considerably less clean.
+//
+// There are 12 carries below.
+// 10 of them are 2-way parallelizable and vectorizable.
+// Can get away with 11 carries, but then data flow is much deeper.
+//
+// With tighter constraints on inputs, can squeeze carries into int32.
+func FeMul(h, f, g *FieldElement) {
+       f0 := int64(f[0])
+       f1 := int64(f[1])
+       f2 := int64(f[2])
+       f3 := int64(f[3])
+       f4 := int64(f[4])
+       f5 := int64(f[5])
+       f6 := int64(f[6])
+       f7 := int64(f[7])
+       f8 := int64(f[8])
+       f9 := int64(f[9])
+
+       f1_2 := int64(2 * f[1])
+       f3_2 := int64(2 * f[3])
+       f5_2 := int64(2 * f[5])
+       f7_2 := int64(2 * f[7])
+       f9_2 := int64(2 * f[9])
+
+       g0 := int64(g[0])
+       g1 := int64(g[1])
+       g2 := int64(g[2])
+       g3 := int64(g[3])
+       g4 := int64(g[4])
+       g5 := int64(g[5])
+       g6 := int64(g[6])
+       g7 := int64(g[7])
+       g8 := int64(g[8])
+       g9 := int64(g[9])
+
+       g1_19 := int64(19 * g[1]) /* 1.4*2^29 */
+       g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */
+       g3_19 := int64(19 * g[3])
+       g4_19 := int64(19 * g[4])
+       g5_19 := int64(19 * g[5])
+       g6_19 := int64(19 * g[6])
+       g7_19 := int64(19 * g[7])
+       g8_19 := int64(19 * g[8])
+       g9_19 := int64(19 * g[9])
+
+       h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19
+       h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19
+       h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19
+       h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19
+       h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19
+       h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19
+       h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19
+       h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19
+       h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19
+       h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0
+
+       FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
+}
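+
+// Editorial note (not part of the upstream file): the *19 terms above
+// implement the reduction 2^255 = 19 (mod p). A product f[i]*g[j] with
+// i+j >= 10 overflows the ten limbs and is folded back with a factor of 19,
+// and the f1_2-style doublings compensate for the alternating 26/25-bit
+// radix. As a hypothetical smoke test, multiplying by the identity element
+// must be a no-op:
+func feMulIdentityExample(f *FieldElement) FieldElement {
+       var one, h FieldElement
+       FeOne(&one)
+       FeMul(&h, f, &one)
+       return h // represents the same element as *f
+}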
+
+// feSquare computes the unreduced partial products of f*f; callers reduce the
+// result with FeCombine.
+func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
+       f0 := int64(f[0])
+       f1 := int64(f[1])
+       f2 := int64(f[2])
+       f3 := int64(f[3])
+       f4 := int64(f[4])
+       f5 := int64(f[5])
+       f6 := int64(f[6])
+       f7 := int64(f[7])
+       f8 := int64(f[8])
+       f9 := int64(f[9])
+       f0_2 := int64(2 * f[0])
+       f1_2 := int64(2 * f[1])
+       f2_2 := int64(2 * f[2])
+       f3_2 := int64(2 * f[3])
+       f4_2 := int64(2 * f[4])
+       f5_2 := int64(2 * f[5])
+       f6_2 := int64(2 * f[6])
+       f7_2 := int64(2 * f[7])
+       f5_38 := 38 * f5 // 1.31*2^30
+       f6_19 := 19 * f6 // 1.31*2^30
+       f7_38 := 38 * f7 // 1.31*2^30
+       f8_19 := 19 * f8 // 1.31*2^30
+       f9_38 := 38 * f9 // 1.31*2^30
+
+       h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38
+       h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19
+       h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19
+       h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38
+       h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38
+       h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19
+       h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19
+       h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38
+       h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38
+       h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5
+
+       return
+}
+
+// FeSquare calculates h = f*f. Can overlap h with f.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func FeSquare(h, f *FieldElement) {
+       h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
+       FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
+}
+
+// FeSquare2 sets h = 2 * f * f
+//
+// Can overlap h with f.
+//
+// Preconditions:
+//    |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc.
+//
+// See fe_mul.c (in the ref10 implementation of Ed25519) for a discussion
+// of the implementation strategy.
+func FeSquare2(h, f *FieldElement) {
+       h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f)
+
+       h0 += h0
+       h1 += h1
+       h2 += h2
+       h3 += h3
+       h4 += h4
+       h5 += h5
+       h6 += h6
+       h7 += h7
+       h8 += h8
+       h9 += h9
+
+       FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9)
+}
+
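+// FeInvert sets out = z^-1 (mod p) by computing z^(p-2) with a fixed
+// addition chain, where p = 2^255 - 19 (Fermat's little theorem). The
+// comments track which bits of the exponent have been handled so far.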
+func FeInvert(out, z *FieldElement) {
+       var t0, t1, t2, t3 FieldElement
+       var i int
+
+       FeSquare(&t0, z)        // 2^1
+       FeSquare(&t1, &t0)      // 2^2
+       for i = 1; i < 2; i++ { // 2^3
+               FeSquare(&t1, &t1)
+       }
+       FeMul(&t1, z, &t1)      // 2^3 + 2^0
+       FeMul(&t0, &t0, &t1)    // 2^3 + 2^1 + 2^0
+       FeSquare(&t2, &t0)      // 2^4 + 2^2 + 2^1
+       FeMul(&t1, &t1, &t2)    // 2^4 + 2^3 + 2^2 + 2^1 + 2^0
+       FeSquare(&t2, &t1)      // 5,4,3,2,1
+       for i = 1; i < 5; i++ { // 9,8,7,6,5
+               FeSquare(&t2, &t2)
+       }
+       FeMul(&t1, &t2, &t1)     // 9,8,7,6,5,4,3,2,1,0
+       FeSquare(&t2, &t1)       // 10..1
+       for i = 1; i < 10; i++ { // 19..10
+               FeSquare(&t2, &t2)
+       }
+       FeMul(&t2, &t2, &t1)     // 19..0
+       FeSquare(&t3, &t2)       // 20..1
+       for i = 1; i < 20; i++ { // 39..20
+               FeSquare(&t3, &t3)
+       }
+       FeMul(&t2, &t3, &t2)     // 39..0
+       FeSquare(&t2, &t2)       // 40..1
+       for i = 1; i < 10; i++ { // 49..10
+               FeSquare(&t2, &t2)
+       }
+       FeMul(&t1, &t2, &t1)     // 49..0
+       FeSquare(&t2, &t1)       // 50..1
+       for i = 1; i < 50; i++ { // 99..50
+               FeSquare(&t2, &t2)
+       }
+       FeMul(&t2, &t2, &t1)      // 99..0
+       FeSquare(&t3, &t2)        // 100..1
+       for i = 1; i < 100; i++ { // 199..100
+               FeSquare(&t3, &t3)
+       }
+       FeMul(&t2, &t3, &t2)     // 199..0
+       FeSquare(&t2, &t2)       // 200..1
+       for i = 1; i < 50; i++ { // 249..50
+               FeSquare(&t2, &t2)
+       }
+       FeMul(&t1, &t2, &t1)    // 249..0
+       FeSquare(&t1, &t1)      // 250..1
+       for i = 1; i < 5; i++ { // 254..5
+               FeSquare(&t1, &t1)
+       }
+       FeMul(out, &t1, &t0) // 254..5,3,1,0
+}
+
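+// fePow22523 sets out = z^((p-5)/8) = z^(2^252-3), the exponentiation used
+// below when recovering a square root in ExtendedGroupElement.FromBytes.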
+func fePow22523(out, z *FieldElement) {
+       var t0, t1, t2 FieldElement
+       var i int
+
+       FeSquare(&t0, z)
+       FeSquare(&t1, &t0)
+       for i = 1; i < 2; i++ {
+               FeSquare(&t1, &t1)
+       }
+       FeMul(&t1, z, &t1)
+       FeMul(&t0, &t0, &t1)
+       FeSquare(&t0, &t0)
+       FeMul(&t0, &t1, &t0)
+       FeSquare(&t1, &t0)
+       for i = 1; i < 5; i++ {
+               FeSquare(&t1, &t1)
+       }
+       FeMul(&t0, &t1, &t0)
+       FeSquare(&t1, &t0)
+       for i = 1; i < 10; i++ {
+               FeSquare(&t1, &t1)
+       }
+       FeMul(&t1, &t1, &t0)
+       FeSquare(&t2, &t1)
+       for i = 1; i < 20; i++ {
+               FeSquare(&t2, &t2)
+       }
+       FeMul(&t1, &t2, &t1)
+       FeSquare(&t1, &t1)
+       for i = 1; i < 10; i++ {
+               FeSquare(&t1, &t1)
+       }
+       FeMul(&t0, &t1, &t0)
+       FeSquare(&t1, &t0)
+       for i = 1; i < 50; i++ {
+               FeSquare(&t1, &t1)
+       }
+       FeMul(&t1, &t1, &t0)
+       FeSquare(&t2, &t1)
+       for i = 1; i < 100; i++ {
+               FeSquare(&t2, &t2)
+       }
+       FeMul(&t1, &t2, &t1)
+       FeSquare(&t1, &t1)
+       for i = 1; i < 50; i++ {
+               FeSquare(&t1, &t1)
+       }
+       FeMul(&t0, &t1, &t0)
+       FeSquare(&t0, &t0)
+       for i = 1; i < 2; i++ {
+               FeSquare(&t0, &t0)
+       }
+       FeMul(out, &t0, z)
+}
+
+// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 *
+// y^2 where d = -121665/121666.
+//
+// Several representations are used:
+//   ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z
+//   ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT
+//   CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T
+//   PreComputedGroupElement: (y+x,y-x,2dxy)
+//   CachedGroupElement: (Y+X,Y-X,Z,2dT) for (X:Y:Z:T) in extended coordinates
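+//
+// A typical conversion cycle, as used by the scalar multiplication loops
+// below (all names are defined in this file):
+//
+//     var p ExtendedGroupElement
+//     var c CompletedGroupElement
+//     p.Zero()         // the identity element (0:1:1:0)
+//     p.Double(&c)     // doubling yields a CompletedGroupElement
+//     c.ToExtended(&p) // convert back to extended coordinates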
+
+type ProjectiveGroupElement struct {
+       X, Y, Z FieldElement
+}
+
+type ExtendedGroupElement struct {
+       X, Y, Z, T FieldElement
+}
+
+type CompletedGroupElement struct {
+       X, Y, Z, T FieldElement
+}
+
+type PreComputedGroupElement struct {
+       yPlusX, yMinusX, xy2d FieldElement
+}
+
+type CachedGroupElement struct {
+       yPlusX, yMinusX, Z, T2d FieldElement
+}
+
+func (p *ProjectiveGroupElement) Zero() {
+       FeZero(&p.X)
+       FeOne(&p.Y)
+       FeOne(&p.Z)
+}
+
+func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) {
+       var t0 FieldElement
+
+       FeSquare(&r.X, &p.X)
+       FeSquare(&r.Z, &p.Y)
+       FeSquare2(&r.T, &p.Z)
+       FeAdd(&r.Y, &p.X, &p.Y)
+       FeSquare(&t0, &r.Y)
+       FeAdd(&r.Y, &r.Z, &r.X)
+       FeSub(&r.Z, &r.Z, &r.X)
+       FeSub(&r.X, &t0, &r.Y)
+       FeSub(&r.T, &r.T, &r.Z)
+}
+
+func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) {
+       var recip, x, y FieldElement
+
+       FeInvert(&recip, &p.Z)
+       FeMul(&x, &p.X, &recip)
+       FeMul(&y, &p.Y, &recip)
+       FeToBytes(s, &y)
+       s[31] ^= FeIsNegative(&x) << 7
+}
+
+func (p *ExtendedGroupElement) Zero() {
+       FeZero(&p.X)
+       FeOne(&p.Y)
+       FeOne(&p.Z)
+       FeZero(&p.T)
+}
+
+func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) {
+       var q ProjectiveGroupElement
+       p.ToProjective(&q)
+       q.Double(r)
+}
+
+func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) {
+       FeAdd(&r.yPlusX, &p.Y, &p.X)
+       FeSub(&r.yMinusX, &p.Y, &p.X)
+       FeCopy(&r.Z, &p.Z)
+       FeMul(&r.T2d, &p.T, &d2)
+}
+
+func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) {
+       FeCopy(&r.X, &p.X)
+       FeCopy(&r.Y, &p.Y)
+       FeCopy(&r.Z, &p.Z)
+}
+
+func (p *ExtendedGroupElement) ToBytes(s *[32]byte) {
+       var recip, x, y FieldElement
+
+       FeInvert(&recip, &p.Z)
+       FeMul(&x, &p.X, &recip)
+       FeMul(&y, &p.Y, &recip)
+       FeToBytes(s, &y)
+       s[31] ^= FeIsNegative(&x) << 7
+}
+
+func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool {
+       var u, v, v3, vxx, check FieldElement
+
+       FeFromBytes(&p.Y, s)
+       FeOne(&p.Z)
+       FeSquare(&u, &p.Y)
+       FeMul(&v, &u, &d)
+       FeSub(&u, &u, &p.Z) // u = y^2-1
+       FeAdd(&v, &v, &p.Z) // v = dy^2+1
+
+       FeSquare(&v3, &v)
+       FeMul(&v3, &v3, &v) // v3 = v^3
+       FeSquare(&p.X, &v3)
+       FeMul(&p.X, &p.X, &v)
+       FeMul(&p.X, &p.X, &u) // x = uv^7
+
+       fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8)
+       FeMul(&p.X, &p.X, &v3)
+       FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8)
+
+       FeSquare(&vxx, &p.X)
+       FeMul(&vxx, &vxx, &v)
+       FeSub(&check, &vxx, &u) // vx^2-u
+       if FeIsNonZero(&check) == 1 {
+               FeAdd(&check, &vxx, &u) // vx^2+u
+               if FeIsNonZero(&check) == 1 {
+                       return false
+               }
+               FeMul(&p.X, &p.X, &SqrtM1)
+       }
+
+       if FeIsNegative(&p.X) != (s[31] >> 7) {
+               FeNeg(&p.X, &p.X)
+       }
+
+       FeMul(&p.T, &p.X, &p.Y)
+       return true
+}
+
+func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) {
+       FeMul(&r.X, &p.X, &p.T)
+       FeMul(&r.Y, &p.Y, &p.Z)
+       FeMul(&r.Z, &p.Z, &p.T)
+}
+
+func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) {
+       FeMul(&r.X, &p.X, &p.T)
+       FeMul(&r.Y, &p.Y, &p.Z)
+       FeMul(&r.Z, &p.Z, &p.T)
+       FeMul(&r.T, &p.X, &p.Y)
+}
+
+func (p *PreComputedGroupElement) Zero() {
+       FeOne(&p.yPlusX)
+       FeOne(&p.yMinusX)
+       FeZero(&p.xy2d)
+}
+
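+// geAdd sets r = p + q.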
+func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
+       var t0 FieldElement
+
+       FeAdd(&r.X, &p.Y, &p.X)
+       FeSub(&r.Y, &p.Y, &p.X)
+       FeMul(&r.Z, &r.X, &q.yPlusX)
+       FeMul(&r.Y, &r.Y, &q.yMinusX)
+       FeMul(&r.T, &q.T2d, &p.T)
+       FeMul(&r.X, &p.Z, &q.Z)
+       FeAdd(&t0, &r.X, &r.X)
+       FeSub(&r.X, &r.Z, &r.Y)
+       FeAdd(&r.Y, &r.Z, &r.Y)
+       FeAdd(&r.Z, &t0, &r.T)
+       FeSub(&r.T, &t0, &r.T)
+}
+
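+// geSub sets r = p - q.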
+func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) {
+       var t0 FieldElement
+
+       FeAdd(&r.X, &p.Y, &p.X)
+       FeSub(&r.Y, &p.Y, &p.X)
+       FeMul(&r.Z, &r.X, &q.yMinusX)
+       FeMul(&r.Y, &r.Y, &q.yPlusX)
+       FeMul(&r.T, &q.T2d, &p.T)
+       FeMul(&r.X, &p.Z, &q.Z)
+       FeAdd(&t0, &r.X, &r.X)
+       FeSub(&r.X, &r.Z, &r.Y)
+       FeAdd(&r.Y, &r.Z, &r.Y)
+       FeSub(&r.Z, &t0, &r.T)
+       FeAdd(&r.T, &t0, &r.T)
+}
+
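+// geMixedAdd sets r = p + q, where q is in precomputed (y+x, y-x, 2dxy) form.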
+func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
+       var t0 FieldElement
+
+       FeAdd(&r.X, &p.Y, &p.X)
+       FeSub(&r.Y, &p.Y, &p.X)
+       FeMul(&r.Z, &r.X, &q.yPlusX)
+       FeMul(&r.Y, &r.Y, &q.yMinusX)
+       FeMul(&r.T, &q.xy2d, &p.T)
+       FeAdd(&t0, &p.Z, &p.Z)
+       FeSub(&r.X, &r.Z, &r.Y)
+       FeAdd(&r.Y, &r.Z, &r.Y)
+       FeAdd(&r.Z, &t0, &r.T)
+       FeSub(&r.T, &t0, &r.T)
+}
+
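+// geMixedSub sets r = p - q, where q is in precomputed (y+x, y-x, 2dxy) form.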
+func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) {
+       var t0 FieldElement
+
+       FeAdd(&r.X, &p.Y, &p.X)
+       FeSub(&r.Y, &p.Y, &p.X)
+       FeMul(&r.Z, &r.X, &q.yMinusX)
+       FeMul(&r.Y, &r.Y, &q.yPlusX)
+       FeMul(&r.T, &q.xy2d, &p.T)
+       FeAdd(&t0, &p.Z, &p.Z)
+       FeSub(&r.X, &r.Z, &r.Y)
+       FeAdd(&r.Y, &r.Z, &r.Y)
+       FeSub(&r.Z, &t0, &r.T)
+       FeAdd(&r.T, &t0, &r.T)
+}
+
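+// slide computes a signed sliding-window representation of a: on return,
+// a = sum(r[i]*2^i) where every r[i] is zero or odd with |r[i]| <= 15, so
+// the double-and-add loop in GeDoubleScalarMultVartime can skip most adds.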
+func slide(r *[256]int8, a *[32]byte) {
+       for i := range r {
+               r[i] = int8(1 & (a[i>>3] >> uint(i&7)))
+       }
+
+       for i := range r {
+               if r[i] != 0 {
+                       for b := 1; b <= 6 && i+b < 256; b++ {
+                               if r[i+b] != 0 {
+                                       if r[i]+(r[i+b]<<uint(b)) <= 15 {
+                                               r[i] += r[i+b] << uint(b)
+                                               r[i+b] = 0
+                                       } else if r[i]-(r[i+b]<<uint(b)) >= -15 {
+                                               r[i] -= r[i+b] << uint(b)
+                                               for k := i + b; k < 256; k++ {
+                                                       if r[k] == 0 {
+                                                               r[k] = 1
+                                                               break
+                                                       }
+                                                       r[k] = 0
+                                               }
+                                       } else {
+                                               break
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
+// GeDoubleScalarMultVartime sets r = a*A + b*B, where
+//   a = a[0]+256*a[1]+...+256^31*a[31],
+//   b = b[0]+256*b[1]+...+256^31*b[31],
+// and B is the Ed25519 base point (x,4/5) with x positive.
+//
+// As the Vartime suffix indicates, the running time depends on the inputs,
+// so this must only be used with public values, e.g. during signature
+// verification.
+func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) {
+       var aSlide, bSlide [256]int8
+       var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A
+       var t CompletedGroupElement
+       var u, A2 ExtendedGroupElement
+       var i int
+
+       slide(&aSlide, a)
+       slide(&bSlide, b)
+
+       A.ToCached(&Ai[0])
+       A.Double(&t)
+       t.ToExtended(&A2)
+
+       for i := 0; i < 7; i++ {
+               geAdd(&t, &A2, &Ai[i])
+               t.ToExtended(&u)
+               u.ToCached(&Ai[i+1])
+       }
+
+       r.Zero()
+
+       for i = 255; i >= 0; i-- {
+               if aSlide[i] != 0 || bSlide[i] != 0 {
+                       break
+               }
+       }
+
+       for ; i >= 0; i-- {
+               r.Double(&t)
+
+               if aSlide[i] > 0 {
+                       t.ToExtended(&u)
+                       geAdd(&t, &u, &Ai[aSlide[i]/2])
+               } else if aSlide[i] < 0 {
+                       t.ToExtended(&u)
+                       geSub(&t, &u, &Ai[(-aSlide[i])/2])
+               }
+
+               if bSlide[i] > 0 {
+                       t.ToExtended(&u)
+                       geMixedAdd(&t, &u, &bi[bSlide[i]/2])
+               } else if bSlide[i] < 0 {
+                       t.ToExtended(&u)
+                       geMixedSub(&t, &u, &bi[(-bSlide[i])/2])
+               }
+
+               t.ToProjective(r)
+       }
+}
+
+// equal returns 1 if b == c and 0 otherwise, assuming that b and c are
+// non-negative.
+func equal(b, c int32) int32 {
+       x := uint32(b ^ c)
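+       // If b == c then x is 0 and the decrement wraps around to 2^32 - 1,
+       // whose top bit is 1. Otherwise 0 < x < 2^31 and the top bit is 0.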
+       x--
+       return int32(x >> 31)
+}
+
+// negative returns 1 if b < 0 and 0 otherwise.
+func negative(b int32) int32 {
+       return (b >> 31) & 1
+}
+
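+// PreComputedGroupElementCMove conditionally sets t = u in constant time.
+// b must be 0 (t is left unchanged) or 1 (t is replaced by u).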
+func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) {
+       FeCMove(&t.yPlusX, &u.yPlusX, b)
+       FeCMove(&t.yMinusX, &u.yMinusX, b)
+       FeCMove(&t.xy2d, &u.xy2d, b)
+}
+
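+// selectPoint sets t to the table entry for the signed digit b at window
+// position pos, in constant time: it scans all eight entries of base[pos]
+// with conditional moves and conditionally negates the result for b < 0.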
+func selectPoint(t *PreComputedGroupElement, pos int32, b int32) {
+       var minusT PreComputedGroupElement
+       bNegative := negative(b)
+       bAbs := b - (((-bNegative) & b) << 1)
+
+       t.Zero()
+       for i := int32(0); i < 8; i++ {
+               PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1))
+       }
+       FeCopy(&minusT.yPlusX, &t.yMinusX)
+       FeCopy(&minusT.yMinusX, &t.yPlusX)
+       FeNeg(&minusT.xy2d, &t.xy2d)
+       PreComputedGroupElementCMove(t, &minusT, bNegative)
+}
+
+// GeScalarMultBase computes h = a*B, where
+//   a = a[0]+256*a[1]+...+256^31 a[31]
+//   B is the Ed25519 base point (x,4/5) with x positive.
+//
+// Preconditions:
+//   a[31] <= 127
+func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) {
+       var e [64]int8
+
+       for i, v := range a {
+               e[2*i] = int8(v & 15)
+               e[2*i+1] = int8((v >> 4) & 15)
+       }
+
+       // each e[i] is between 0 and 15 and e[63] is between 0 and 7.
+
+       carry := int8(0)
+       for i := 0; i < 63; i++ {
+               e[i] += carry
+               carry = (e[i] + 8) >> 4
+               e[i] -= carry << 4
+       }
+       e[63] += carry
+       // each e[i] is between -8 and 8.
+
+       h.Zero()
+       var t PreComputedGroupElement
+       var r CompletedGroupElement
+       for i := int32(1); i < 64; i += 2 {
+               selectPoint(&t, i/2, int32(e[i]))
+               geMixedAdd(&r, h, &t)
+               r.ToExtended(h)
+       }
+
+       var s ProjectiveGroupElement
+
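+       // h now holds the contribution of the odd nibble digits. The four
+       // doublings below multiply it by 16, after which the even digits are
+       // accumulated at their own scale using the same table.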
+       h.Double(&r)
+       r.ToProjective(&s)
+       s.Double(&r)
+       r.ToProjective(&s)
+       s.Double(&r)
+       r.ToProjective(&s)
+       s.Double(&r)
+       r.ToExtended(h)
+
+       for i := int32(0); i < 64; i += 2 {
+               selectPoint(&t, i/2, int32(e[i]))
+               geMixedAdd(&r, h, &t)
+               r.ToExtended(h)
+       }
+}
+
+// The scalars are GF(2^252 + 27742317777372353535851937790883648493).
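+//
+// ScMulAdd and ScReduce below work on scalars split into 21-bit limbs
+// (2097151 is the mask 2^21 - 1), so a = a0 + a1*2^21 + ... + a11*2^231.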
+
+// ScMulAdd computes s = (ab + c) mod l.
+//
+// Input:
+//   a[0]+256*a[1]+...+256^31*a[31] = a
+//   b[0]+256*b[1]+...+256^31*b[31] = b
+//   c[0]+256*c[1]+...+256^31*c[31] = c
+//
+// Output:
+//   s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l
+//   where l = 2^252 + 27742317777372353535851937790883648493.
+func ScMulAdd(s, a, b, c *[32]byte) {
+       a0 := 2097151 & load3(a[:])
+       a1 := 2097151 & (load4(a[2:]) >> 5)
+       a2 := 2097151 & (load3(a[5:]) >> 2)
+       a3 := 2097151 & (load4(a[7:]) >> 7)
+       a4 := 2097151 & (load4(a[10:]) >> 4)
+       a5 := 2097151 & (load3(a[13:]) >> 1)
+       a6 := 2097151 & (load4(a[15:]) >> 6)
+       a7 := 2097151 & (load3(a[18:]) >> 3)
+       a8 := 2097151 & load3(a[21:])
+       a9 := 2097151 & (load4(a[23:]) >> 5)
+       a10 := 2097151 & (load3(a[26:]) >> 2)
+       a11 := (load4(a[28:]) >> 7)
+       b0 := 2097151 & load3(b[:])
+       b1 := 2097151 & (load4(b[2:]) >> 5)
+       b2 := 2097151 & (load3(b[5:]) >> 2)
+       b3 := 2097151 & (load4(b[7:]) >> 7)
+       b4 := 2097151 & (load4(b[10:]) >> 4)
+       b5 := 2097151 & (load3(b[13:]) >> 1)
+       b6 := 2097151 & (load4(b[15:]) >> 6)
+       b7 := 2097151 & (load3(b[18:]) >> 3)
+       b8 := 2097151 & load3(b[21:])
+       b9 := 2097151 & (load4(b[23:]) >> 5)
+       b10 := 2097151 & (load3(b[26:]) >> 2)
+       b11 := (load4(b[28:]) >> 7)
+       c0 := 2097151 & load3(c[:])
+       c1 := 2097151 & (load4(c[2:]) >> 5)
+       c2 := 2097151 & (load3(c[5:]) >> 2)
+       c3 := 2097151 & (load4(c[7:]) >> 7)
+       c4 := 2097151 & (load4(c[10:]) >> 4)
+       c5 := 2097151 & (load3(c[13:]) >> 1)
+       c6 := 2097151 & (load4(c[15:]) >> 6)
+       c7 := 2097151 & (load3(c[18:]) >> 3)
+       c8 := 2097151 & load3(c[21:])
+       c9 := 2097151 & (load4(c[23:]) >> 5)
+       c10 := 2097151 & (load3(c[26:]) >> 2)
+       c11 := (load4(c[28:]) >> 7)
+       var carry [23]int64
+
+       s0 := c0 + a0*b0
+       s1 := c1 + a0*b1 + a1*b0
+       s2 := c2 + a0*b2 + a1*b1 + a2*b0
+       s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0
+       s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0
+       s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0
+       s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0
+       s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0
+       s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0
+       s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0
+       s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0
+       s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0
+       s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1
+       s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2
+       s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3
+       s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4
+       s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5
+       s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6
+       s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7
+       s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8
+       s20 := a9*b11 + a10*b10 + a11*b9
+       s21 := a10*b11 + a11*b10
+       s22 := a11 * b11
+       s23 := int64(0)
+
+       carry[0] = (s0 + (1 << 20)) >> 21
+       s1 += carry[0]
+       s0 -= carry[0] << 21
+       carry[2] = (s2 + (1 << 20)) >> 21
+       s3 += carry[2]
+       s2 -= carry[2] << 21
+       carry[4] = (s4 + (1 << 20)) >> 21
+       s5 += carry[4]
+       s4 -= carry[4] << 21
+       carry[6] = (s6 + (1 << 20)) >> 21
+       s7 += carry[6]
+       s6 -= carry[6] << 21
+       carry[8] = (s8 + (1 << 20)) >> 21
+       s9 += carry[8]
+       s8 -= carry[8] << 21
+       carry[10] = (s10 + (1 << 20)) >> 21
+       s11 += carry[10]
+       s10 -= carry[10] << 21
+       carry[12] = (s12 + (1 << 20)) >> 21
+       s13 += carry[12]
+       s12 -= carry[12] << 21
+       carry[14] = (s14 + (1 << 20)) >> 21
+       s15 += carry[14]
+       s14 -= carry[14] << 21
+       carry[16] = (s16 + (1 << 20)) >> 21
+       s17 += carry[16]
+       s16 -= carry[16] << 21
+       carry[18] = (s18 + (1 << 20)) >> 21
+       s19 += carry[18]
+       s18 -= carry[18] << 21
+       carry[20] = (s20 + (1 << 20)) >> 21
+       s21 += carry[20]
+       s20 -= carry[20] << 21
+       carry[22] = (s22 + (1 << 20)) >> 21
+       s23 += carry[22]
+       s22 -= carry[22] << 21
+
+       carry[1] = (s1 + (1 << 20)) >> 21
+       s2 += carry[1]
+       s1 -= carry[1] << 21
+       carry[3] = (s3 + (1 << 20)) >> 21
+       s4 += carry[3]
+       s3 -= carry[3] << 21
+       carry[5] = (s5 + (1 << 20)) >> 21
+       s6 += carry[5]
+       s5 -= carry[5] << 21
+       carry[7] = (s7 + (1 << 20)) >> 21
+       s8 += carry[7]
+       s7 -= carry[7] << 21
+       carry[9] = (s9 + (1 << 20)) >> 21
+       s10 += carry[9]
+       s9 -= carry[9] << 21
+       carry[11] = (s11 + (1 << 20)) >> 21
+       s12 += carry[11]
+       s11 -= carry[11] << 21
+       carry[13] = (s13 + (1 << 20)) >> 21
+       s14 += carry[13]
+       s13 -= carry[13] << 21
+       carry[15] = (s15 + (1 << 20)) >> 21
+       s16 += carry[15]
+       s15 -= carry[15] << 21
+       carry[17] = (s17 + (1 << 20)) >> 21
+       s18 += carry[17]
+       s17 -= carry[17] << 21
+       carry[19] = (s19 + (1 << 20)) >> 21
+       s20 += carry[19]
+       s19 -= carry[19] << 21
+       carry[21] = (s21 + (1 << 20)) >> 21
+       s22 += carry[21]
+       s21 -= carry[21] << 21
+
+       s11 += s23 * 666643
+       s12 += s23 * 470296
+       s13 += s23 * 654183
+       s14 -= s23 * 997805
+       s15 += s23 * 136657
+       s16 -= s23 * 683901
+       s23 = 0
+
+       s10 += s22 * 666643
+       s11 += s22 * 470296
+       s12 += s22 * 654183
+       s13 -= s22 * 997805
+       s14 += s22 * 136657
+       s15 -= s22 * 683901
+       s22 = 0
+
+       s9 += s21 * 666643
+       s10 += s21 * 470296
+       s11 += s21 * 654183
+       s12 -= s21 * 997805
+       s13 += s21 * 136657
+       s14 -= s21 * 683901
+       s21 = 0
+
+       s8 += s20 * 666643
+       s9 += s20 * 470296
+       s10 += s20 * 654183
+       s11 -= s20 * 997805
+       s12 += s20 * 136657
+       s13 -= s20 * 683901
+       s20 = 0
+
+       s7 += s19 * 666643
+       s8 += s19 * 470296
+       s9 += s19 * 654183
+       s10 -= s19 * 997805
+       s11 += s19 * 136657
+       s12 -= s19 * 683901
+       s19 = 0
+
+       s6 += s18 * 666643
+       s7 += s18 * 470296
+       s8 += s18 * 654183
+       s9 -= s18 * 997805
+       s10 += s18 * 136657
+       s11 -= s18 * 683901
+       s18 = 0
+
+       carry[6] = (s6 + (1 << 20)) >> 21
+       s7 += carry[6]
+       s6 -= carry[6] << 21
+       carry[8] = (s8 + (1 << 20)) >> 21
+       s9 += carry[8]
+       s8 -= carry[8] << 21
+       carry[10] = (s10 + (1 << 20)) >> 21
+       s11 += carry[10]
+       s10 -= carry[10] << 21
+       carry[12] = (s12 + (1 << 20)) >> 21
+       s13 += carry[12]
+       s12 -= carry[12] << 21
+       carry[14] = (s14 + (1 << 20)) >> 21
+       s15 += carry[14]
+       s14 -= carry[14] << 21
+       carry[16] = (s16 + (1 << 20)) >> 21
+       s17 += carry[16]
+       s16 -= carry[16] << 21
+
+       carry[7] = (s7 + (1 << 20)) >> 21
+       s8 += carry[7]
+       s7 -= carry[7] << 21
+       carry[9] = (s9 + (1 << 20)) >> 21
+       s10 += carry[9]
+       s9 -= carry[9] << 21
+       carry[11] = (s11 + (1 << 20)) >> 21
+       s12 += carry[11]
+       s11 -= carry[11] << 21
+       carry[13] = (s13 + (1 << 20)) >> 21
+       s14 += carry[13]
+       s13 -= carry[13] << 21
+       carry[15] = (s15 + (1 << 20)) >> 21
+       s16 += carry[15]
+       s15 -= carry[15] << 21
+
+       s5 += s17 * 666643
+       s6 += s17 * 470296
+       s7 += s17 * 654183
+       s8 -= s17 * 997805
+       s9 += s17 * 136657
+       s10 -= s17 * 683901
+       s17 = 0
+
+       s4 += s16 * 666643
+       s5 += s16 * 470296
+       s6 += s16 * 654183
+       s7 -= s16 * 997805
+       s8 += s16 * 136657
+       s9 -= s16 * 683901
+       s16 = 0
+
+       s3 += s15 * 666643
+       s4 += s15 * 470296
+       s5 += s15 * 654183
+       s6 -= s15 * 997805
+       s7 += s15 * 136657
+       s8 -= s15 * 683901
+       s15 = 0
+
+       s2 += s14 * 666643
+       s3 += s14 * 470296
+       s4 += s14 * 654183
+       s5 -= s14 * 997805
+       s6 += s14 * 136657
+       s7 -= s14 * 683901
+       s14 = 0
+
+       s1 += s13 * 666643
+       s2 += s13 * 470296
+       s3 += s13 * 654183
+       s4 -= s13 * 997805
+       s5 += s13 * 136657
+       s6 -= s13 * 683901
+       s13 = 0
+
+       s0 += s12 * 666643
+       s1 += s12 * 470296
+       s2 += s12 * 654183
+       s3 -= s12 * 997805
+       s4 += s12 * 136657
+       s5 -= s12 * 683901
+       s12 = 0
+
+       carry[0] = (s0 + (1 << 20)) >> 21
+       s1 += carry[0]
+       s0 -= carry[0] << 21
+       carry[2] = (s2 + (1 << 20)) >> 21
+       s3 += carry[2]
+       s2 -= carry[2] << 21
+       carry[4] = (s4 + (1 << 20)) >> 21
+       s5 += carry[4]
+       s4 -= carry[4] << 21
+       carry[6] = (s6 + (1 << 20)) >> 21
+       s7 += carry[6]
+       s6 -= carry[6] << 21
+       carry[8] = (s8 + (1 << 20)) >> 21
+       s9 += carry[8]
+       s8 -= carry[8] << 21
+       carry[10] = (s10 + (1 << 20)) >> 21
+       s11 += carry[10]
+       s10 -= carry[10] << 21
+
+       carry[1] = (s1 + (1 << 20)) >> 21
+       s2 += carry[1]
+       s1 -= carry[1] << 21
+       carry[3] = (s3 + (1 << 20)) >> 21
+       s4 += carry[3]
+       s3 -= carry[3] << 21
+       carry[5] = (s5 + (1 << 20)) >> 21
+       s6 += carry[5]
+       s5 -= carry[5] << 21
+       carry[7] = (s7 + (1 << 20)) >> 21
+       s8 += carry[7]
+       s7 -= carry[7] << 21
+       carry[9] = (s9 + (1 << 20)) >> 21
+       s10 += carry[9]
+       s9 -= carry[9] << 21
+       carry[11] = (s11 + (1 << 20)) >> 21
+       s12 += carry[11]
+       s11 -= carry[11] << 21
+
+       s0 += s12 * 666643
+       s1 += s12 * 470296
+       s2 += s12 * 654183
+       s3 -= s12 * 997805
+       s4 += s12 * 136657
+       s5 -= s12 * 683901
+       s12 = 0
+
+       carry[0] = s0 >> 21
+       s1 += carry[0]
+       s0 -= carry[0] << 21
+       carry[1] = s1 >> 21
+       s2 += carry[1]
+       s1 -= carry[1] << 21
+       carry[2] = s2 >> 21
+       s3 += carry[2]
+       s2 -= carry[2] << 21
+       carry[3] = s3 >> 21
+       s4 += carry[3]
+       s3 -= carry[3] << 21
+       carry[4] = s4 >> 21
+       s5 += carry[4]
+       s4 -= carry[4] << 21
+       carry[5] = s5 >> 21
+       s6 += carry[5]
+       s5 -= carry[5] << 21
+       carry[6] = s6 >> 21
+       s7 += carry[6]
+       s6 -= carry[6] << 21
+       carry[7] = s7 >> 21
+       s8 += carry[7]
+       s7 -= carry[7] << 21
+       carry[8] = s8 >> 21
+       s9 += carry[8]
+       s8 -= carry[8] << 21
+       carry[9] = s9 >> 21
+       s10 += carry[9]
+       s9 -= carry[9] << 21
+       carry[10] = s10 >> 21
+       s11 += carry[10]
+       s10 -= carry[10] << 21
+       carry[11] = s11 >> 21
+       s12 += carry[11]
+       s11 -= carry[11] << 21
+
+       s0 += s12 * 666643
+       s1 += s12 * 470296
+       s2 += s12 * 654183
+       s3 -= s12 * 997805
+       s4 += s12 * 136657
+       s5 -= s12 * 683901
+       s12 = 0
+
+       carry[0] = s0 >> 21
+       s1 += carry[0]
+       s0 -= carry[0] << 21
+       carry[1] = s1 >> 21
+       s2 += carry[1]
+       s1 -= carry[1] << 21
+       carry[2] = s2 >> 21
+       s3 += carry[2]
+       s2 -= carry[2] << 21
+       carry[3] = s3 >> 21
+       s4 += carry[3]
+       s3 -= carry[3] << 21
+       carry[4] = s4 >> 21
+       s5 += carry[4]
+       s4 -= carry[4] << 21
+       carry[5] = s5 >> 21
+       s6 += carry[5]
+       s5 -= carry[5] << 21
+       carry[6] = s6 >> 21
+       s7 += carry[6]
+       s6 -= carry[6] << 21
+       carry[7] = s7 >> 21
+       s8 += carry[7]
+       s7 -= carry[7] << 21
+       carry[8] = s8 >> 21
+       s9 += carry[8]
+       s8 -= carry[8] << 21
+       carry[9] = s9 >> 21
+       s10 += carry[9]
+       s9 -= carry[9] << 21
+       carry[10] = s10 >> 21
+       s11 += carry[10]
+       s10 -= carry[10] << 21
+
+       s[0] = byte(s0 >> 0)
+       s[1] = byte(s0 >> 8)
+       s[2] = byte((s0 >> 16) | (s1 << 5))
+       s[3] = byte(s1 >> 3)
+       s[4] = byte(s1 >> 11)
+       s[5] = byte((s1 >> 19) | (s2 << 2))
+       s[6] = byte(s2 >> 6)
+       s[7] = byte((s2 >> 14) | (s3 << 7))
+       s[8] = byte(s3 >> 1)
+       s[9] = byte(s3 >> 9)
+       s[10] = byte((s3 >> 17) | (s4 << 4))
+       s[11] = byte(s4 >> 4)
+       s[12] = byte(s4 >> 12)
+       s[13] = byte((s4 >> 20) | (s5 << 1))
+       s[14] = byte(s5 >> 7)
+       s[15] = byte((s5 >> 15) | (s6 << 6))
+       s[16] = byte(s6 >> 2)
+       s[17] = byte(s6 >> 10)
+       s[18] = byte((s6 >> 18) | (s7 << 3))
+       s[19] = byte(s7 >> 5)
+       s[20] = byte(s7 >> 13)
+       s[21] = byte(s8 >> 0)
+       s[22] = byte(s8 >> 8)
+       s[23] = byte((s8 >> 16) | (s9 << 5))
+       s[24] = byte(s9 >> 3)
+       s[25] = byte(s9 >> 11)
+       s[26] = byte((s9 >> 19) | (s10 << 2))
+       s[27] = byte(s10 >> 6)
+       s[28] = byte((s10 >> 14) | (s11 << 7))
+       s[29] = byte(s11 >> 1)
+       s[30] = byte(s11 >> 9)
+       s[31] = byte(s11 >> 17)
+}
+
+// ScReduce reduces a 64-byte scalar modulo the group order l, writing the
+// 32-byte result to out.
+//
+// Input:
+//   s[0]+256*s[1]+...+256^63*s[63] = s
+//
+// Output:
+//   s[0]+256*s[1]+...+256^31*s[31] = s mod l
+//   where l = 2^252 + 27742317777372353535851937790883648493.
+func ScReduce(out *[32]byte, s *[64]byte) {
+       s0 := 2097151 & load3(s[:])
+       s1 := 2097151 & (load4(s[2:]) >> 5)
+       s2 := 2097151 & (load3(s[5:]) >> 2)
+       s3 := 2097151 & (load4(s[7:]) >> 7)
+       s4 := 2097151 & (load4(s[10:]) >> 4)
+       s5 := 2097151 & (load3(s[13:]) >> 1)
+       s6 := 2097151 & (load4(s[15:]) >> 6)
+       s7 := 2097151 & (load3(s[18:]) >> 3)
+       s8 := 2097151 & load3(s[21:])
+       s9 := 2097151 & (load4(s[23:]) >> 5)
+       s10 := 2097151 & (load3(s[26:]) >> 2)
+       s11 := 2097151 & (load4(s[28:]) >> 7)
+       s12 := 2097151 & (load4(s[31:]) >> 4)
+       s13 := 2097151 & (load3(s[34:]) >> 1)
+       s14 := 2097151 & (load4(s[36:]) >> 6)
+       s15 := 2097151 & (load3(s[39:]) >> 3)
+       s16 := 2097151 & load3(s[42:])
+       s17 := 2097151 & (load4(s[44:]) >> 5)
+       s18 := 2097151 & (load3(s[47:]) >> 2)
+       s19 := 2097151 & (load4(s[49:]) >> 7)
+       s20 := 2097151 & (load4(s[52:]) >> 4)
+       s21 := 2097151 & (load3(s[55:]) >> 1)
+       s22 := 2097151 & (load4(s[57:]) >> 6)
+       s23 := (load4(s[60:]) >> 3)
+
+       s11 += s23 * 666643
+       s12 += s23 * 470296
+       s13 += s23 * 654183
+       s14 -= s23 * 997805
+       s15 += s23 * 136657
+       s16 -= s23 * 683901
+       s23 = 0
+
+       s10 += s22 * 666643
+       s11 += s22 * 470296
+       s12 += s22 * 654183
+       s13 -= s22 * 997805
+       s14 += s22 * 136657
+       s15 -= s22 * 683901
+       s22 = 0
+
+       s9 += s21 * 666643
+       s10 += s21 * 470296
+       s11 += s21 * 654183
+       s12 -= s21 * 997805
+       s13 += s21 * 136657
+       s14 -= s21 * 683901
+       s21 = 0
+
+       s8 += s20 * 666643
+       s9 += s20 * 470296
+       s10 += s20 * 654183
+       s11 -= s20 * 997805
+       s12 += s20 * 136657
+       s13 -= s20 * 683901
+       s20 = 0
+
+       s7 += s19 * 666643
+       s8 += s19 * 470296
+       s9 += s19 * 654183
+       s10 -= s19 * 997805
+       s11 += s19 * 136657
+       s12 -= s19 * 683901
+       s19 = 0
+
+       s6 += s18 * 666643
+       s7 += s18 * 470296
+       s8 += s18 * 654183
+       s9 -= s18 * 997805
+       s10 += s18 * 136657
+       s11 -= s18 * 683901
+       s18 = 0
+
+       var carry [17]int64
+
+       carry[6] = (s6 + (1 << 20)) >> 21
+       s7 += carry[6]
+       s6 -= carry[6] << 21
+       carry[8] = (s8 + (1 << 20)) >> 21
+       s9 += carry[8]
+       s8 -= carry[8] << 21
+       carry[10] = (s10 + (1 << 20)) >> 21
+       s11 += carry[10]
+       s10 -= carry[10] << 21
+       carry[12] = (s12 + (1 << 20)) >> 21
+       s13 += carry[12]
+       s12 -= carry[12] << 21
+       carry[14] = (s14 + (1 << 20)) >> 21
+       s15 += carry[14]
+       s14 -= carry[14] << 21
+       carry[16] = (s16 + (1 << 20)) >> 21
+       s17 += carry[16]
+       s16 -= carry[16] << 21
+
+       carry[7] = (s7 + (1 << 20)) >> 21
+       s8 += carry[7]
+       s7 -= carry[7] << 21
+       carry[9] = (s9 + (1 << 20)) >> 21
+       s10 += carry[9]
+       s9 -= carry[9] << 21
+       carry[11] = (s11 + (1 << 20)) >> 21
+       s12 += carry[11]
+       s11 -= carry[11] << 21
+       carry[13] = (s13 + (1 << 20)) >> 21
+       s14 += carry[13]
+       s13 -= carry[13] << 21
+       carry[15] = (s15 + (1 << 20)) >> 21
+       s16 += carry[15]
+       s15 -= carry[15] << 21
+
+       s5 += s17 * 666643
+       s6 += s17 * 470296
+       s7 += s17 * 654183
+       s8 -= s17 * 997805
+       s9 += s17 * 136657
+       s10 -= s17 * 683901
+       s17 = 0
+
+       s4 += s16 * 666643
+       s5 += s16 * 470296
+       s6 += s16 * 654183
+       s7 -= s16 * 997805
+       s8 += s16 * 136657
+       s9 -= s16 * 683901
+       s16 = 0
+
+       s3 += s15 * 666643
+       s4 += s15 * 470296
+       s5 += s15 * 654183
+       s6 -= s15 * 997805
+       s7 += s15 * 136657
+       s8 -= s15 * 683901
+       s15 = 0
+
+       s2 += s14 * 666643
+       s3 += s14 * 470296
+       s4 += s14 * 654183
+       s5 -= s14 * 997805
+       s6 += s14 * 136657
+       s7 -= s14 * 683901
+       s14 = 0
+
+       s1 += s13 * 666643
+       s2 += s13 * 470296
+       s3 += s13 * 654183
+       s4 -= s13 * 997805
+       s5 += s13 * 136657
+       s6 -= s13 * 683901
+       s13 = 0
+
+       s0 += s12 * 666643
+       s1 += s12 * 470296
+       s2 += s12 * 654183
+       s3 -= s12 * 997805
+       s4 += s12 * 136657
+       s5 -= s12 * 683901
+       s12 = 0
+
+       carry[0] = (s0 + (1 << 20)) >> 21
+       s1 += carry[0]
+       s0 -= carry[0] << 21
+       carry[2] = (s2 + (1 << 20)) >> 21
+       s3 += carry[2]
+       s2 -= carry[2] << 21
+       carry[4] = (s4 + (1 << 20)) >> 21
+       s5 += carry[4]
+       s4 -= carry[4] << 21
+       carry[6] = (s6 + (1 << 20)) >> 21
+       s7 += carry[6]
+       s6 -= carry[6] << 21
+       carry[8] = (s8 + (1 << 20)) >> 21
+       s9 += carry[8]
+       s8 -= carry[8] << 21
+       carry[10] = (s10 + (1 << 20)) >> 21
+       s11 += carry[10]
+       s10 -= carry[10] << 21
+
+       carry[1] = (s1 + (1 << 20)) >> 21
+       s2 += carry[1]
+       s1 -= carry[1] << 21
+       carry[3] = (s3 + (1 << 20)) >> 21
+       s4 += carry[3]
+       s3 -= carry[3] << 21
+       carry[5] = (s5 + (1 << 20)) >> 21
+       s6 += carry[5]
+       s5 -= carry[5] << 21
+       carry[7] = (s7 + (1 << 20)) >> 21
+       s8 += carry[7]
+       s7 -= carry[7] << 21
+       carry[9] = (s9 + (1 << 20)) >> 21
+       s10 += carry[9]
+       s9 -= carry[9] << 21
+       carry[11] = (s11 + (1 << 20)) >> 21
+       s12 += carry[11]
+       s11 -= carry[11] << 21
+
+       s0 += s12 * 666643
+       s1 += s12 * 470296
+       s2 += s12 * 654183
+       s3 -= s12 * 997805
+       s4 += s12 * 136657
+       s5 -= s12 * 683901
+       s12 = 0
+
+       carry[0] = s0 >> 21
+       s1 += carry[0]
+       s0 -= carry[0] << 21
+       carry[1] = s1 >> 21
+       s2 += carry[1]
+       s1 -= carry[1] << 21
+       carry[2] = s2 >> 21
+       s3 += carry[2]
+       s2 -= carry[2] << 21
+       carry[3] = s3 >> 21
+       s4 += carry[3]
+       s3 -= carry[3] << 21
+       carry[4] = s4 >> 21
+       s5 += carry[4]
+       s4 -= carry[4] << 21
+       carry[5] = s5 >> 21
+       s6 += carry[5]
+       s5 -= carry[5] << 21
+       carry[6] = s6 >> 21
+       s7 += carry[6]
+       s6 -= carry[6] << 21
+       carry[7] = s7 >> 21
+       s8 += carry[7]
+       s7 -= carry[7] << 21
+       carry[8] = s8 >> 21
+       s9 += carry[8]
+       s8 -= carry[8] << 21
+       carry[9] = s9 >> 21
+       s10 += carry[9]
+       s9 -= carry[9] << 21
+       carry[10] = s10 >> 21
+       s11 += carry[10]
+       s10 -= carry[10] << 21
+       carry[11] = s11 >> 21
+       s12 += carry[11]
+       s11 -= carry[11] << 21
+
+       s0 += s12 * 666643
+       s1 += s12 * 470296
+       s2 += s12 * 654183
+       s3 -= s12 * 997805
+       s4 += s12 * 136657
+       s5 -= s12 * 683901
+       s12 = 0
+
+       carry[0] = s0 >> 21
+       s1 += carry[0]
+       s0 -= carry[0] << 21
+       carry[1] = s1 >> 21
+       s2 += carry[1]
+       s1 -= carry[1] << 21
+       carry[2] = s2 >> 21
+       s3 += carry[2]
+       s2 -= carry[2] << 21
+       carry[3] = s3 >> 21
+       s4 += carry[3]
+       s3 -= carry[3] << 21
+       carry[4] = s4 >> 21
+       s5 += carry[4]
+       s4 -= carry[4] << 21
+       carry[5] = s5 >> 21
+       s6 += carry[5]
+       s5 -= carry[5] << 21
+       carry[6] = s6 >> 21
+       s7 += carry[6]
+       s6 -= carry[6] << 21
+       carry[7] = s7 >> 21
+       s8 += carry[7]
+       s7 -= carry[7] << 21
+       carry[8] = s8 >> 21
+       s9 += carry[8]
+       s8 -= carry[8] << 21
+       carry[9] = s9 >> 21
+       s10 += carry[9]
+       s9 -= carry[9] << 21
+       carry[10] = s10 >> 21
+       s11 += carry[10]
+       s10 -= carry[10] << 21
+
+       out[0] = byte(s0 >> 0)
+       out[1] = byte(s0 >> 8)
+       out[2] = byte((s0 >> 16) | (s1 << 5))
+       out[3] = byte(s1 >> 3)
+       out[4] = byte(s1 >> 11)
+       out[5] = byte((s1 >> 19) | (s2 << 2))
+       out[6] = byte(s2 >> 6)
+       out[7] = byte((s2 >> 14) | (s3 << 7))
+       out[8] = byte(s3 >> 1)
+       out[9] = byte(s3 >> 9)
+       out[10] = byte((s3 >> 17) | (s4 << 4))
+       out[11] = byte(s4 >> 4)
+       out[12] = byte(s4 >> 12)
+       out[13] = byte((s4 >> 20) | (s5 << 1))
+       out[14] = byte(s5 >> 7)
+       out[15] = byte((s5 >> 15) | (s6 << 6))
+       out[16] = byte(s6 >> 2)
+       out[17] = byte(s6 >> 10)
+       out[18] = byte((s6 >> 18) | (s7 << 3))
+       out[19] = byte(s7 >> 5)
+       out[20] = byte(s7 >> 13)
+       out[21] = byte(s8 >> 0)
+       out[22] = byte(s8 >> 8)
+       out[23] = byte((s8 >> 16) | (s9 << 5))
+       out[24] = byte(s9 >> 3)
+       out[25] = byte(s9 >> 11)
+       out[26] = byte((s9 >> 19) | (s10 << 2))
+       out[27] = byte(s10 >> 6)
+       out[28] = byte((s10 >> 14) | (s11 << 7))
+       out[29] = byte(s11 >> 1)
+       out[30] = byte(s11 >> 9)
+       out[31] = byte(s11 >> 17)
+}
diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go
new file mode 100644 (file)
index 0000000..6931b51
--- /dev/null
@@ -0,0 +1,98 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "io"
+       "sync"
+)
+
+// buffer provides a linked-list buffer for data exchange between a
+// producer and a consumer. Its capacity is effectively unbounded, since it
+// links together the byte slices passed to write rather than copying them
+// into a fixed-size store.
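+//
+// A minimal producer/consumer sketch (newCond is defined elsewhere in this
+// package):
+//
+//     b := newBuffer()
+//     b.write([]byte("hello")) // producer hands off a slice
+//     p := make([]byte, 5)
+//     n, _ := b.Read(p)        // consumer receives the bytes; n == 5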
+type buffer struct {
+       // protects concurrent access to head, tail and closed
+       *sync.Cond
+
+       head *element // the buffer that will be read first
+       tail *element // the buffer that will be read last
+
+       closed bool
+}
+
+// An element represents a single link in a linked list.
+type element struct {
+       buf  []byte
+       next *element
+}
+
+// newBuffer returns an empty buffer that is not closed.
+func newBuffer() *buffer {
+       e := new(element)
+       b := &buffer{
+               Cond: newCond(),
+               head: e,
+               tail: e,
+       }
+       return b
+}
+
+// write makes buf available for Read to receive.
+// buf must not be modified after the call to write.
+func (b *buffer) write(buf []byte) {
+       b.Cond.L.Lock()
+       e := &element{buf: buf}
+       b.tail.next = e
+       b.tail = e
+       b.Cond.Signal()
+       b.Cond.L.Unlock()
+}
+
+// eof closes the buffer. Reads from the buffer once all
+// the data has been consumed will receive io.EOF.
+func (b *buffer) eof() error {
+       b.Cond.L.Lock()
+       b.closed = true
+       b.Cond.Signal()
+       b.Cond.L.Unlock()
+       return nil
+}
+
+// Read reads data from the internal buffer into buf. Reads will block
+// if no data is available, or until the buffer is closed.
+func (b *buffer) Read(buf []byte) (n int, err error) {
+       b.Cond.L.Lock()
+       defer b.Cond.L.Unlock()
+
+       for len(buf) > 0 {
+               // if there is data in b.head, copy it
+               if len(b.head.buf) > 0 {
+                       r := copy(buf, b.head.buf)
+                       buf, b.head.buf = buf[r:], b.head.buf[r:]
+                       n += r
+                       continue
+               }
+               // if there is a next buffer, make it the head
+               if len(b.head.buf) == 0 && b.head != b.tail {
+                       b.head = b.head.next
+                       continue
+               }
+
+               // if at least one byte has been copied, return
+               if n > 0 {
+                       break
+               }
+
+               // if nothing was read, and there is nothing outstanding
+               // check to see if the buffer is closed.
+               if b.closed {
+                       err = io.EOF
+                       break
+               }
+               // out of buffers, wait for producer
+               b.Cond.Wait()
+       }
+       return
+}
diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go
new file mode 100644 (file)
index 0000000..6331c94
--- /dev/null
@@ -0,0 +1,503 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "bytes"
+       "errors"
+       "fmt"
+       "io"
+       "net"
+       "sort"
+       "time"
+)
+
+// These constants from [PROTOCOL.certkeys] represent the algorithm names
+// for certificate types supported by this package.
+const (
+       CertAlgoRSAv01      = "ssh-rsa-cert-v01@openssh.com"
+       CertAlgoDSAv01      = "ssh-dss-cert-v01@openssh.com"
+       CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com"
+       CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com"
+       CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com"
+       CertAlgoED25519v01  = "ssh-ed25519-cert-v01@openssh.com"
+)
+
+// Certificate types distinguish between host and user
+// certificates. The values can be set in the CertType field of
+// Certificate.
+const (
+       UserCert = 1
+       HostCert = 2
+)
+
+// Signature represents a cryptographic signature.
+type Signature struct {
+       Format string
+       Blob   []byte
+}
+
+// CertTimeInfinity can be used for Certificate.ValidBefore to indicate that
+// a certificate does not expire.
+const CertTimeInfinity = 1<<64 - 1
+
+// A Certificate represents an OpenSSH certificate as defined in
+// [PROTOCOL.certkeys]?rev=1.8.
+type Certificate struct {
+       Nonce           []byte
+       Key             PublicKey
+       Serial          uint64
+       CertType        uint32
+       KeyId           string
+       ValidPrincipals []string
+       ValidAfter      uint64
+       ValidBefore     uint64
+       Permissions
+       Reserved     []byte
+       SignatureKey PublicKey
+       Signature    *Signature
+}
+
+// genericCertData holds the key-independent part of the certificate data.
+// Overall, certificates contain an nonce, public key fields and
+// key-independent fields.
+type genericCertData struct {
+       Serial          uint64
+       CertType        uint32
+       KeyId           string
+       ValidPrincipals []byte
+       ValidAfter      uint64
+       ValidBefore     uint64
+       CriticalOptions []byte
+       Extensions      []byte
+       Reserved        []byte
+       SignatureKey    []byte
+       Signature       []byte
+}
+
+func marshalStringList(namelist []string) []byte {
+       var to []byte
+       for _, name := range namelist {
+               s := struct{ N string }{name}
+               to = append(to, Marshal(&s)...)
+       }
+       return to
+}
+
+type optionsTuple struct {
+       Key   string
+       Value []byte
+}
+
+type optionsTupleValue struct {
+       Value string
+}
+
+// marshalTuples serializes a map of critical options or extensions.
+// Per [PROTOCOL.certkeys] and the OpenSSH implementation (see issue
+// #10569), a non-empty string value needs two length prefixes.
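+//
+// For example, the critical option {"force-command": "ls"} is encoded as
+// string("force-command") followed by string(string("ls")), i.e. the value
+// carries an inner length prefix inside the outer one.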
+func marshalTuples(tups map[string]string) []byte {
+       keys := make([]string, 0, len(tups))
+       for key := range tups {
+               keys = append(keys, key)
+       }
+       sort.Strings(keys)
+
+       var ret []byte
+       for _, key := range keys {
+               s := optionsTuple{Key: key}
+               if value := tups[key]; len(value) > 0 {
+                       s.Value = Marshal(&optionsTupleValue{value})
+               }
+               ret = append(ret, Marshal(&s)...)
+       }
+       return ret
+}
+
+// parseTuples is the inverse of marshalTuples. Per [PROTOCOL.certkeys] and
+// the OpenSSH implementation (see issue #10569), a non-empty option value
+// carries two length prefixes.
+func parseTuples(in []byte) (map[string]string, error) {
+       tups := map[string]string{}
+       var lastKey string
+       var haveLastKey bool
+
+       for len(in) > 0 {
+               var key, val, extra []byte
+               var ok bool
+
+               if key, in, ok = parseString(in); !ok {
+                       return nil, errShortRead
+               }
+               keyStr := string(key)
+               // according to [PROTOCOL.certkeys], the names must be in
+               // lexical order.
+               if haveLastKey && keyStr <= lastKey {
+                       return nil, fmt.Errorf("ssh: certificate options are not in lexical order")
+               }
+               lastKey, haveLastKey = keyStr, true
+               // the next field is a data field, which if non-empty has a string embedded
+               if val, in, ok = parseString(in); !ok {
+                       return nil, errShortRead
+               }
+               if len(val) > 0 {
+                       val, extra, ok = parseString(val)
+                       if !ok {
+                               return nil, errShortRead
+                       }
+                       if len(extra) > 0 {
+                               return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value")
+                       }
+                       tups[keyStr] = string(val)
+               } else {
+                       tups[keyStr] = ""
+               }
+       }
+       return tups, nil
+}
+
+func parseCert(in []byte, privAlgo string) (*Certificate, error) {
+       nonce, rest, ok := parseString(in)
+       if !ok {
+               return nil, errShortRead
+       }
+
+       key, rest, err := parsePubKey(rest, privAlgo)
+       if err != nil {
+               return nil, err
+       }
+
+       var g genericCertData
+       if err := Unmarshal(rest, &g); err != nil {
+               return nil, err
+       }
+
+       c := &Certificate{
+               Nonce:       nonce,
+               Key:         key,
+               Serial:      g.Serial,
+               CertType:    g.CertType,
+               KeyId:       g.KeyId,
+               ValidAfter:  g.ValidAfter,
+               ValidBefore: g.ValidBefore,
+       }
+
+       for principals := g.ValidPrincipals; len(principals) > 0; {
+               principal, rest, ok := parseString(principals)
+               if !ok {
+                       return nil, errShortRead
+               }
+               c.ValidPrincipals = append(c.ValidPrincipals, string(principal))
+               principals = rest
+       }
+
+       c.CriticalOptions, err = parseTuples(g.CriticalOptions)
+       if err != nil {
+               return nil, err
+       }
+       c.Extensions, err = parseTuples(g.Extensions)
+       if err != nil {
+               return nil, err
+       }
+       c.Reserved = g.Reserved
+       k, err := ParsePublicKey(g.SignatureKey)
+       if err != nil {
+               return nil, err
+       }
+
+       c.SignatureKey = k
+       c.Signature, rest, ok = parseSignatureBody(g.Signature)
+       if !ok || len(rest) > 0 {
+               return nil, errors.New("ssh: signature parse error")
+       }
+
+       return c, nil
+}
+
+type openSSHCertSigner struct {
+       pub    *Certificate
+       signer Signer
+}
+
+// NewCertSigner returns a Signer that signs with the given Certificate, whose
+// private key is held by signer. It returns an error if the public key in cert
+// doesn't match the key used by signer.
+func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) {
+       if !bytes.Equal(cert.Key.Marshal(), signer.PublicKey().Marshal()) {
+               return nil, errors.New("ssh: signer and cert have different public key")
+       }
+
+       return &openSSHCertSigner{cert, signer}, nil
+}
+
+func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+       return s.signer.Sign(rand, data)
+}
+
+func (s *openSSHCertSigner) PublicKey() PublicKey {
+       return s.pub
+}
+
+const sourceAddressCriticalOption = "source-address"
+
+// CertChecker does the work of verifying a certificate. Its methods
+// can be plugged into ClientConfig.HostKeyCallback and
+// ServerConfig.PublicKeyCallback. For the CertChecker to work,
+// minimally, the IsAuthority callback should be set.
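+//
+// For example, a server that accepts user certificates signed by a single
+// trusted CA key might be configured as below (caPub is a hypothetical,
+// previously loaded PublicKey):
+//
+//     checker := &CertChecker{
+//             IsAuthority: func(auth PublicKey) bool {
+//                     return bytes.Equal(auth.Marshal(), caPub.Marshal())
+//             },
+//     }
+//     config := &ServerConfig{PublicKeyCallback: checker.Authenticate}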
+type CertChecker struct {
+       // SupportedCriticalOptions lists the CriticalOptions that the
+       // server application layer understands. These are only used
+       // for user certificates.
+       SupportedCriticalOptions []string
+
+       // IsAuthority should return true if the key is recognized as
+       // an authority. This allows for certificates to be signed by other
+       // certificates.
+       IsAuthority func(auth PublicKey) bool
+
+       // Clock is used for verifying time stamps. If nil, time.Now
+       // is used.
+       Clock func() time.Time
+
+       // UserKeyFallback is called when CertChecker.Authenticate encounters a
+       // public key that is not a certificate. It must implement validation
+       // of user keys or else, if nil, all such keys are rejected.
+       UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+
+       // HostKeyFallback is called when CertChecker.CheckHostKey encounters a
+       // public key that is not a certificate. It must implement host key
+       // validation or else, if nil, all such keys are rejected.
+       HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error
+
+       // IsRevoked is called for each certificate so that revocation checking
+       // can be implemented. It should return true if the given certificate
+       // is revoked and false otherwise. If nil, no certificates are
+       // considered to have been revoked.
+       IsRevoked func(cert *Certificate) bool
+}
+
+// CheckHostKey checks a host key certificate. This method can be
+// plugged into ClientConfig.HostKeyCallback.
+func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error {
+       cert, ok := key.(*Certificate)
+       if !ok {
+               if c.HostKeyFallback != nil {
+                       return c.HostKeyFallback(addr, remote, key)
+               }
+               return errors.New("ssh: non-certificate host key")
+       }
+       if cert.CertType != HostCert {
+               return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType)
+       }
+
+       return c.CheckCert(addr, cert)
+}
+
+// Authenticate checks a user certificate. Authenticate can be used as
+// a value for ServerConfig.PublicKeyCallback.
+func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) {
+       cert, ok := pubKey.(*Certificate)
+       if !ok {
+               if c.UserKeyFallback != nil {
+                       return c.UserKeyFallback(conn, pubKey)
+               }
+               return nil, errors.New("ssh: normal key pairs not accepted")
+       }
+
+       if cert.CertType != UserCert {
+               return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType)
+       }
+
+       if err := c.CheckCert(conn.User(), cert); err != nil {
+               return nil, err
+       }
+
+       return &cert.Permissions, nil
+}
+
+// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and
+// the signature of the certificate.
+func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
+       if c.IsRevoked != nil && c.IsRevoked(cert) {
+               return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial)
+       }
+
+       for opt := range cert.CriticalOptions {
+               // sourceAddressCriticalOption will be enforced by
+               // serverAuthenticate
+               if opt == sourceAddressCriticalOption {
+                       continue
+               }
+
+               found := false
+               for _, supp := range c.SupportedCriticalOptions {
+                       if supp == opt {
+                               found = true
+                               break
+                       }
+               }
+               if !found {
+                       return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt)
+               }
+       }
+
+       if len(cert.ValidPrincipals) > 0 {
+               // By default, certs are valid for all users/hosts.
+               found := false
+               for _, p := range cert.ValidPrincipals {
+                       if p == principal {
+                               found = true
+                               break
+                       }
+               }
+               if !found {
+                       return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals)
+               }
+       }
+
+       if !c.IsAuthority(cert.SignatureKey) {
+               return fmt.Errorf("ssh: certificate signed by unrecognized authority")
+       }
+
+       clock := c.Clock
+       if clock == nil {
+               clock = time.Now
+       }
+
+       unixNow := clock().Unix()
+       if after := int64(cert.ValidAfter); after < 0 || unixNow < after {
+               return fmt.Errorf("ssh: cert is not yet valid")
+       }
+       if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
+               return fmt.Errorf("ssh: cert has expired")
+       }
+       if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil {
+               return fmt.Errorf("ssh: certificate signature does not verify")
+       }
+
+       return nil
+}
+
+// SignCert sets c.SignatureKey to the authority's public key and stores a
+// signature, computed by the authority, in the certificate.
+func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
+       c.Nonce = make([]byte, 32)
+       if _, err := io.ReadFull(rand, c.Nonce); err != nil {
+               return err
+       }
+       c.SignatureKey = authority.PublicKey()
+
+       sig, err := authority.Sign(rand, c.bytesForSigning())
+       if err != nil {
+               return err
+       }
+       c.Signature = sig
+       return nil
+}
+
+var certAlgoNames = map[string]string{
+       KeyAlgoRSA:      CertAlgoRSAv01,
+       KeyAlgoDSA:      CertAlgoDSAv01,
+       KeyAlgoECDSA256: CertAlgoECDSA256v01,
+       KeyAlgoECDSA384: CertAlgoECDSA384v01,
+       KeyAlgoECDSA521: CertAlgoECDSA521v01,
+       KeyAlgoED25519:  CertAlgoED25519v01,
+}
+
+// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
+// Panics if a non-certificate algorithm is passed.
+func certToPrivAlgo(algo string) string {
+       for privAlgo, pubAlgo := range certAlgoNames {
+               if pubAlgo == algo {
+                       return privAlgo
+               }
+       }
+       panic("unknown cert algorithm")
+}
+
+func (cert *Certificate) bytesForSigning() []byte {
+       c2 := *cert
+       c2.Signature = nil
+       out := c2.Marshal()
+       // Drop trailing signature length.
+       return out[:len(out)-4]
+}
+
+// Marshal serializes c into OpenSSH's wire format. It is part of the
+// PublicKey interface.
+func (c *Certificate) Marshal() []byte {
+       generic := genericCertData{
+               Serial:          c.Serial,
+               CertType:        c.CertType,
+               KeyId:           c.KeyId,
+               ValidPrincipals: marshalStringList(c.ValidPrincipals),
+               ValidAfter:      uint64(c.ValidAfter),
+               ValidBefore:     uint64(c.ValidBefore),
+               CriticalOptions: marshalTuples(c.CriticalOptions),
+               Extensions:      marshalTuples(c.Extensions),
+               Reserved:        c.Reserved,
+               SignatureKey:    c.SignatureKey.Marshal(),
+       }
+       if c.Signature != nil {
+               generic.Signature = Marshal(c.Signature)
+       }
+       genericBytes := Marshal(&generic)
+       keyBytes := c.Key.Marshal()
+       _, keyBytes, _ = parseString(keyBytes)
+       prefix := Marshal(&struct {
+               Name  string
+               Nonce []byte
+               Key   []byte `ssh:"rest"`
+       }{c.Type(), c.Nonce, keyBytes})
+
+       result := make([]byte, 0, len(prefix)+len(genericBytes))
+       result = append(result, prefix...)
+       result = append(result, genericBytes...)
+       return result
+}
+
+// Type returns the key name. It is part of the PublicKey interface.
+func (c *Certificate) Type() string {
+       algo, ok := certAlgoNames[c.Key.Type()]
+       if !ok {
+               panic("unknown cert key type " + c.Key.Type())
+       }
+       return algo
+}
+
+// Verify verifies a signature against the certificate's public
+// key. It is part of the PublicKey interface.
+func (c *Certificate) Verify(data []byte, sig *Signature) error {
+       return c.Key.Verify(data, sig)
+}
+
+func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) {
+       format, in, ok := parseString(in)
+       if !ok {
+               return
+       }
+
+       out = &Signature{
+               Format: string(format),
+       }
+
+       if out.Blob, in, ok = parseString(in); !ok {
+               return
+       }
+
+       return out, in, ok
+}
+
+func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) {
+       sigBytes, rest, ok := parseString(in)
+       if !ok {
+               return
+       }
+
+       out, trailing, ok := parseSignatureBody(sigBytes)
+       if !ok || len(trailing) > 0 {
+               return nil, nil, false
+       }
+       return
+}
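The signing and checking halves above compose into a round trip: an authority signs a user certificate with SignCert, and the checking method shown above (CertChecker.CheckCert) replays the critical-option, principal, validity-window and signature checks. A minimal sketch against this package's exported API; the key sizes, key ID and principal are illustrative, and error handling is abbreviated:

    package main

    import (
        "bytes"
        "crypto/rand"
        "crypto/rsa"
        "fmt"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        // Authority key pair used to sign certificates (errors elided for brevity).
        caKey, _ := rsa.GenerateKey(rand.Reader, 2048)
        caSigner, _ := ssh.NewSignerFromKey(caKey)

        // User key to be certified.
        userKey, _ := rsa.GenerateKey(rand.Reader, 2048)
        userPub, _ := ssh.NewPublicKey(&userKey.PublicKey)

        cert := &ssh.Certificate{
            Key:             userPub,
            Serial:          1,
            CertType:        ssh.UserCert,
            KeyId:           "example-key", // illustrative ID
            ValidPrincipals: []string{"alice"},
            ValidBefore:     ssh.CertTimeInfinity,
        }
        // SignCert fills in Nonce, SignatureKey and Signature.
        if err := cert.SignCert(rand.Reader, caSigner); err != nil {
            panic(err)
        }

        // CheckCert runs the critical-option, principal, validity-window and
        // signature checks shown above; IsAuthority pins trust to the CA key.
        checker := &ssh.CertChecker{
            IsAuthority: func(auth ssh.PublicKey) bool {
                return bytes.Equal(auth.Marshal(), caSigner.PublicKey().Marshal())
            },
        }
        if err := checker.CheckCert("alice", cert); err != nil {
            panic(err)
        }
        fmt.Println("certificate accepted for principal alice")
    }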
diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go
new file mode 100644 (file)
index 0000000..195530e
--- /dev/null
@@ -0,0 +1,633 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "encoding/binary"
+       "errors"
+       "fmt"
+       "io"
+       "log"
+       "sync"
+)
+
+const (
+       minPacketLength = 9
+       // channelMaxPacket contains the maximum number of bytes that will be
+       // sent in a single packet. As per RFC 4253, section 6.1, 32k is also
+       // the minimum.
+       channelMaxPacket = 1 << 15
+       // We follow OpenSSH here.
+       channelWindowSize = 64 * channelMaxPacket
+)
+
+// NewChannel represents an incoming request to a channel. It must either be
+// accepted for use by calling Accept, or rejected by calling Reject.
+type NewChannel interface {
+       // Accept accepts the channel creation request. It returns the Channel
+       // and a Go channel containing SSH requests. The Go channel must be
+       // serviced otherwise the Channel will hang.
+       Accept() (Channel, <-chan *Request, error)
+
+       // Reject rejects the channel creation request. After calling
+       // this, no other methods on the Channel may be called.
+       Reject(reason RejectionReason, message string) error
+
+       // ChannelType returns the type of the channel, as supplied by the
+       // client.
+       ChannelType() string
+
+       // ExtraData returns the arbitrary payload for this channel, as supplied
+       // by the client. This data is specific to the channel type.
+       ExtraData() []byte
+}
+
+// A Channel is an ordered, reliable, flow-controlled, duplex stream
+// that is multiplexed over an SSH connection.
+type Channel interface {
+       // Read reads up to len(data) bytes from the channel.
+       Read(data []byte) (int, error)
+
+       // Write writes len(data) bytes to the channel.
+       Write(data []byte) (int, error)
+
+       // Close signals end of channel use. No data may be sent after this
+       // call.
+       Close() error
+
+       // CloseWrite signals the end of sending in-band
+       // data. Requests may still be sent, and the other side may
+       // still send data.
+       CloseWrite() error
+
+       // SendRequest sends a channel request.  If wantReply is true,
+       // it will wait for a reply and return the result as a
+       // boolean, otherwise the return value will be false. Channel
+       // requests are out-of-band messages so they may be sent even
+       // if the data stream is closed or blocked by flow control.
+       // If the channel is closed before a reply is returned, io.EOF
+       // is returned.
+       SendRequest(name string, wantReply bool, payload []byte) (bool, error)
+
+       // Stderr returns an io.ReadWriter that writes to this channel
+       // with the extended data type set to stderr. Stderr may
+       // safely be read and written from a different goroutine than
+       // Read and Write respectively.
+       Stderr() io.ReadWriter
+}
+
+// Request is a request sent outside of the normal stream of
+// data. Requests can either be specific to an SSH channel, or they
+// can be global.
+type Request struct {
+       Type      string
+       WantReply bool
+       Payload   []byte
+
+       ch  *channel
+       mux *mux
+}
+
+// Reply sends a response to a request. It must be called for all requests
+// where WantReply is true and is a no-op otherwise. The payload argument is
+// ignored for replies to channel-specific requests.
+func (r *Request) Reply(ok bool, payload []byte) error {
+       if !r.WantReply {
+               return nil
+       }
+
+       if r.ch == nil {
+               return r.mux.ackRequest(ok, payload)
+       }
+
+       return r.ch.ackRequest(ok)
+}
+
+// RejectionReason is an enumeration used when rejecting channel creation
+// requests. See RFC 4254, section 5.1.
+type RejectionReason uint32
+
+const (
+       Prohibited RejectionReason = iota + 1
+       ConnectionFailed
+       UnknownChannelType
+       ResourceShortage
+)
+
+// String converts the rejection reason to human-readable form.
+func (r RejectionReason) String() string {
+       switch r {
+       case Prohibited:
+               return "administratively prohibited"
+       case ConnectionFailed:
+               return "connect failed"
+       case UnknownChannelType:
+               return "unknown channel type"
+       case ResourceShortage:
+               return "resource shortage"
+       }
+       return fmt.Sprintf("unknown reason %d", int(r))
+}
+
+func min(a uint32, b int) uint32 {
+       if a < uint32(b) {
+               return a
+       }
+       return uint32(b)
+}
+
+type channelDirection uint8
+
+const (
+       channelInbound channelDirection = iota
+       channelOutbound
+)
+
+// channel is an implementation of the Channel interface that works
+// with the mux class.
+type channel struct {
+       // R/O after creation
+       chanType          string
+       extraData         []byte
+       localId, remoteId uint32
+
+       // maxIncomingPayload and maxRemotePayload are the maximum
+       // payload sizes of normal and extended data packets for
+       // receiving and sending, respectively. The wire packet will
+       // be 9 or 13 bytes larger (excluding encryption overhead).
+       maxIncomingPayload uint32
+       maxRemotePayload   uint32
+
+       mux *mux
+
+       // decided is set to true if an accept or reject message has been sent
+       // (for outbound channels) or received (for inbound channels).
+       decided bool
+
+       // direction contains either channelOutbound, for channels created
+       // locally, or channelInbound, for channels created by the peer.
+       direction channelDirection
+
+       // Pending internal channel messages.
+       msg chan interface{}
+
+       // Since requests have no ID, there can be only one request
+       // with WantReply=true outstanding.  This lock is held by a
+       // goroutine that has such an outgoing request pending.
+       sentRequestMu sync.Mutex
+
+       incomingRequests chan *Request
+
+       sentEOF bool
+
+       // thread-safe data
+       remoteWin  window
+       pending    *buffer
+       extPending *buffer
+
+       // windowMu protects myWindow, the flow-control window.
+       windowMu sync.Mutex
+       myWindow uint32
+
+       // writeMu serializes calls to mux.conn.writePacket() and
+       // protects sentClose and packetPool. This mutex must be
+       // different from windowMu, as writePacket can block if there
+       // is a key exchange pending.
+       writeMu   sync.Mutex
+       sentClose bool
+
+       // packetPool has a buffer for each extended channel ID to
+       // save allocations during writes.
+       packetPool map[uint32][]byte
+}
+
+// writePacket sends a packet. If the packet is a channel close, it updates
+// sentClose. This method takes the lock c.writeMu.
+func (c *channel) writePacket(packet []byte) error {
+       c.writeMu.Lock()
+       if c.sentClose {
+               c.writeMu.Unlock()
+               return io.EOF
+       }
+       c.sentClose = (packet[0] == msgChannelClose)
+       err := c.mux.conn.writePacket(packet)
+       c.writeMu.Unlock()
+       return err
+}
+
+func (c *channel) sendMessage(msg interface{}) error {
+       if debugMux {
+               log.Printf("send(%d): %#v", c.mux.chanList.offset, msg)
+       }
+
+       p := Marshal(msg)
+       binary.BigEndian.PutUint32(p[1:], c.remoteId)
+       return c.writePacket(p)
+}
+
+// WriteExtended writes data to a specific extended stream. These streams are
+// used, for example, for stderr.
+func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) {
+       if c.sentEOF {
+               return 0, io.EOF
+       }
+       // 1 byte message type, 4 bytes remoteId, 4 bytes data length
+       opCode := byte(msgChannelData)
+       headerLength := uint32(9)
+       if extendedCode > 0 {
+               headerLength += 4
+               opCode = msgChannelExtendedData
+       }
+
+       c.writeMu.Lock()
+       packet := c.packetPool[extendedCode]
+       // We don't remove the buffer from packetPool, so
+       // WriteExtended calls from different goroutines will be
+       // flagged as errors by the race detector.
+       c.writeMu.Unlock()
+
+       for len(data) > 0 {
+               space := min(c.maxRemotePayload, len(data))
+               if space, err = c.remoteWin.reserve(space); err != nil {
+                       return n, err
+               }
+               if want := headerLength + space; uint32(cap(packet)) < want {
+                       packet = make([]byte, want)
+               } else {
+                       packet = packet[:want]
+               }
+
+               todo := data[:space]
+
+               packet[0] = opCode
+               binary.BigEndian.PutUint32(packet[1:], c.remoteId)
+               if extendedCode > 0 {
+                       binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode))
+               }
+               binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo)))
+               copy(packet[headerLength:], todo)
+               if err = c.writePacket(packet); err != nil {
+                       return n, err
+               }
+
+               n += len(todo)
+               data = data[len(todo):]
+       }
+
+       c.writeMu.Lock()
+       c.packetPool[extendedCode] = packet
+       c.writeMu.Unlock()
+
+       return n, err
+}
+
+func (c *channel) handleData(packet []byte) error {
+       headerLen := 9
+       isExtendedData := packet[0] == msgChannelExtendedData
+       if isExtendedData {
+               headerLen = 13
+       }
+       if len(packet) < headerLen {
+               // malformed data packet
+               return parseError(packet[0])
+       }
+
+       var extended uint32
+       if isExtendedData {
+               extended = binary.BigEndian.Uint32(packet[5:])
+       }
+
+       length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen])
+       if length == 0 {
+               return nil
+       }
+       if length > c.maxIncomingPayload {
+               // TODO(hanwen): should send Disconnect?
+               return errors.New("ssh: incoming packet exceeds maximum payload size")
+       }
+
+       data := packet[headerLen:]
+       if length != uint32(len(data)) {
+               return errors.New("ssh: wrong packet length")
+       }
+
+       c.windowMu.Lock()
+       if c.myWindow < length {
+               c.windowMu.Unlock()
+               // TODO(hanwen): should send Disconnect with reason?
+               return errors.New("ssh: remote side wrote too much")
+       }
+       c.myWindow -= length
+       c.windowMu.Unlock()
+
+       if extended == 1 {
+               c.extPending.write(data)
+       } else if extended > 0 {
+               // discard other extended data.
+       } else {
+               c.pending.write(data)
+       }
+       return nil
+}
+
+func (c *channel) adjustWindow(n uint32) error {
+       c.windowMu.Lock()
+       // Since myWindow is managed on our side, and can never exceed
+       // the initial window setting, we don't worry about overflow.
+       c.myWindow += uint32(n)
+       c.windowMu.Unlock()
+       return c.sendMessage(windowAdjustMsg{
+               AdditionalBytes: uint32(n),
+       })
+}
+
+func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) {
+       switch extended {
+       case 1:
+               n, err = c.extPending.Read(data)
+       case 0:
+               n, err = c.pending.Read(data)
+       default:
+               return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended)
+       }
+
+       if n > 0 {
+               err = c.adjustWindow(uint32(n))
+               // sendWindowAdjust can return io.EOF if the remote
+               // peer has closed the connection, however we want to
+               // defer forwarding io.EOF to the caller of Read until
+               // the buffer has been drained.
+               if n > 0 && err == io.EOF {
+                       err = nil
+               }
+       }
+
+       return n, err
+}
+
+func (c *channel) close() {
+       c.pending.eof()
+       c.extPending.eof()
+       close(c.msg)
+       close(c.incomingRequests)
+       c.writeMu.Lock()
+       // This is not necessary for a normal channel teardown, but if
+       // there was another error, it is.
+       c.sentClose = true
+       c.writeMu.Unlock()
+       // Unblock writers.
+       c.remoteWin.close()
+}
+
+// responseMessageReceived is called when a success or failure message is
+// received on a channel to check that such a message is reasonable for the
+// given channel.
+func (c *channel) responseMessageReceived() error {
+       if c.direction == channelInbound {
+               return errors.New("ssh: channel response message received on inbound channel")
+       }
+       if c.decided {
+               return errors.New("ssh: duplicate response received for channel")
+       }
+       c.decided = true
+       return nil
+}
+
+func (c *channel) handlePacket(packet []byte) error {
+       switch packet[0] {
+       case msgChannelData, msgChannelExtendedData:
+               return c.handleData(packet)
+       case msgChannelClose:
+               c.sendMessage(channelCloseMsg{PeersId: c.remoteId})
+               c.mux.chanList.remove(c.localId)
+               c.close()
+               return nil
+       case msgChannelEOF:
+               // RFC 4254 is mute on how EOF affects dataExt messages but
+               // it is logical to signal EOF at the same time.
+               c.extPending.eof()
+               c.pending.eof()
+               return nil
+       }
+
+       decoded, err := decode(packet)
+       if err != nil {
+               return err
+       }
+
+       switch msg := decoded.(type) {
+       case *channelOpenFailureMsg:
+               if err := c.responseMessageReceived(); err != nil {
+                       return err
+               }
+               c.mux.chanList.remove(msg.PeersId)
+               c.msg <- msg
+       case *channelOpenConfirmMsg:
+               if err := c.responseMessageReceived(); err != nil {
+                       return err
+               }
+               if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
+                       return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize)
+               }
+               c.remoteId = msg.MyId
+               c.maxRemotePayload = msg.MaxPacketSize
+               c.remoteWin.add(msg.MyWindow)
+               c.msg <- msg
+       case *windowAdjustMsg:
+               if !c.remoteWin.add(msg.AdditionalBytes) {
+                       return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes)
+               }
+       case *channelRequestMsg:
+               req := Request{
+                       Type:      msg.Request,
+                       WantReply: msg.WantReply,
+                       Payload:   msg.RequestSpecificData,
+                       ch:        c,
+               }
+
+               c.incomingRequests <- &req
+       default:
+               c.msg <- msg
+       }
+       return nil
+}
+
+func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel {
+       ch := &channel{
+               remoteWin:        window{Cond: newCond()},
+               myWindow:         channelWindowSize,
+               pending:          newBuffer(),
+               extPending:       newBuffer(),
+               direction:        direction,
+               incomingRequests: make(chan *Request, chanSize),
+               msg:              make(chan interface{}, chanSize),
+               chanType:         chanType,
+               extraData:        extraData,
+               mux:              m,
+               packetPool:       make(map[uint32][]byte),
+       }
+       ch.localId = m.chanList.add(ch)
+       return ch
+}
+
+var errUndecided = errors.New("ssh: must Accept or Reject channel")
+var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once")
+
+type extChannel struct {
+       code uint32
+       ch   *channel
+}
+
+func (e *extChannel) Write(data []byte) (n int, err error) {
+       return e.ch.WriteExtended(data, e.code)
+}
+
+func (e *extChannel) Read(data []byte) (n int, err error) {
+       return e.ch.ReadExtended(data, e.code)
+}
+
+func (c *channel) Accept() (Channel, <-chan *Request, error) {
+       if c.decided {
+               return nil, nil, errDecidedAlready
+       }
+       c.maxIncomingPayload = channelMaxPacket
+       confirm := channelOpenConfirmMsg{
+               PeersId:       c.remoteId,
+               MyId:          c.localId,
+               MyWindow:      c.myWindow,
+               MaxPacketSize: c.maxIncomingPayload,
+       }
+       c.decided = true
+       if err := c.sendMessage(confirm); err != nil {
+               return nil, nil, err
+       }
+
+       return c, c.incomingRequests, nil
+}
+
+func (ch *channel) Reject(reason RejectionReason, message string) error {
+       if ch.decided {
+               return errDecidedAlready
+       }
+       reject := channelOpenFailureMsg{
+               PeersId:  ch.remoteId,
+               Reason:   reason,
+               Message:  message,
+               Language: "en",
+       }
+       ch.decided = true
+       return ch.sendMessage(reject)
+}
+
+func (ch *channel) Read(data []byte) (int, error) {
+       if !ch.decided {
+               return 0, errUndecided
+       }
+       return ch.ReadExtended(data, 0)
+}
+
+func (ch *channel) Write(data []byte) (int, error) {
+       if !ch.decided {
+               return 0, errUndecided
+       }
+       return ch.WriteExtended(data, 0)
+}
+
+func (ch *channel) CloseWrite() error {
+       if !ch.decided {
+               return errUndecided
+       }
+       ch.sentEOF = true
+       return ch.sendMessage(channelEOFMsg{
+               PeersId: ch.remoteId})
+}
+
+func (ch *channel) Close() error {
+       if !ch.decided {
+               return errUndecided
+       }
+
+       return ch.sendMessage(channelCloseMsg{
+               PeersId: ch.remoteId})
+}
+
+// Extended returns an io.ReadWriter that sends and receives data on the given
+// SSH extended stream. Such streams are used, for example, for stderr.
+func (ch *channel) Extended(code uint32) io.ReadWriter {
+       if !ch.decided {
+               return nil
+       }
+       return &extChannel{code, ch}
+}
+
+func (ch *channel) Stderr() io.ReadWriter {
+       return ch.Extended(1)
+}
+
+func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
+       if !ch.decided {
+               return false, errUndecided
+       }
+
+       if wantReply {
+               ch.sentRequestMu.Lock()
+               defer ch.sentRequestMu.Unlock()
+       }
+
+       msg := channelRequestMsg{
+               PeersId:             ch.remoteId,
+               Request:             name,
+               WantReply:           wantReply,
+               RequestSpecificData: payload,
+       }
+
+       if err := ch.sendMessage(msg); err != nil {
+               return false, err
+       }
+
+       if wantReply {
+               m, ok := (<-ch.msg)
+               if !ok {
+                       return false, io.EOF
+               }
+               switch m.(type) {
+               case *channelRequestFailureMsg:
+                       return false, nil
+               case *channelRequestSuccessMsg:
+                       return true, nil
+               default:
+                       return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m)
+               }
+       }
+
+       return false, nil
+}
+
+// ackRequest either sends an ack or nack to the channel request.
+func (ch *channel) ackRequest(ok bool) error {
+       if !ch.decided {
+               return errUndecided
+       }
+
+       var msg interface{}
+       if !ok {
+               msg = channelRequestFailureMsg{
+                       PeersId: ch.remoteId,
+               }
+       } else {
+               msg = channelRequestSuccessMsg{
+                       PeersId: ch.remoteId,
+               }
+       }
+       return ch.sendMessage(msg)
+}
+
+func (ch *channel) ChannelType() string {
+       return ch.chanType
+}
+
+func (ch *channel) ExtraData() []byte {
+       return ch.extraData
+}
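NewChannel and Channel above define the accept/reject contract: every open request must be answered with Accept or Reject, and the request stream of an accepted channel must be serviced or the channel hangs. A sketch of a server-side loop under that contract; serviceChannels and the echo behaviour are illustrative, and the channel source (e.g. an established server connection) lies outside this file:

    package sshexample

    import (
        "io"

        "golang.org/x/crypto/ssh"
    )

    // serviceChannels accepts "session" channels and rejects everything else.
    func serviceChannels(chans <-chan ssh.NewChannel) {
        for newCh := range chans {
            if newCh.ChannelType() != "session" {
                // Reject must be called when a channel is not accepted.
                newCh.Reject(ssh.UnknownChannelType, "only session channels are supported")
                continue
            }
            ch, requests, err := newCh.Accept()
            if err != nil {
                continue
            }
            // Drain the request stream; Reply is a no-op when WantReply is false.
            go func(in <-chan *ssh.Request) {
                for req := range in {
                    req.Reply(req.Type == "shell", nil)
                }
            }(requests)
            go func(ch ssh.Channel) {
                defer ch.Close()
                io.Copy(ch, ch) // echo in-band data back to the peer
            }(ch)
        }
    }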
diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go
new file mode 100644 (file)
index 0000000..13484ab
--- /dev/null
@@ -0,0 +1,627 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "crypto/aes"
+       "crypto/cipher"
+       "crypto/des"
+       "crypto/rc4"
+       "crypto/subtle"
+       "encoding/binary"
+       "errors"
+       "fmt"
+       "hash"
+       "io"
+       "io/ioutil"
+)
+
+const (
+       packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher.
+
+       // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations
+       // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC
+       // indicates implementations SHOULD be able to handle larger packet sizes, but then
+       // waffles on about reasonable limits.
+       //
+       // OpenSSH caps their maxPacket at 256kB so we choose to do
+       // the same. maxPacket is also used to ensure that uint32
+       // length fields do not overflow, so it should remain well
+       // below 4G.
+       maxPacket = 256 * 1024
+)
+
+// noneCipher implements cipher.Stream and provides no encryption. It is used
+// by the transport before the first key-exchange.
+type noneCipher struct{}
+
+func (c noneCipher) XORKeyStream(dst, src []byte) {
+       copy(dst, src)
+}
+
+func newAESCTR(key, iv []byte) (cipher.Stream, error) {
+       c, err := aes.NewCipher(key)
+       if err != nil {
+               return nil, err
+       }
+       return cipher.NewCTR(c, iv), nil
+}
+
+func newRC4(key, iv []byte) (cipher.Stream, error) {
+       return rc4.NewCipher(key)
+}
+
+type streamCipherMode struct {
+       keySize    int
+       ivSize     int
+       skip       int
+       createFunc func(key, iv []byte) (cipher.Stream, error)
+}
+
+func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) {
+       if len(key) < c.keySize {
+               panic("ssh: key length too small for cipher")
+       }
+       if len(iv) < c.ivSize {
+               panic("ssh: iv too small for cipher")
+       }
+
+       stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize])
+       if err != nil {
+               return nil, err
+       }
+
+       var streamDump []byte
+       if c.skip > 0 {
+               streamDump = make([]byte, 512)
+       }
+
+       for remainingToDump := c.skip; remainingToDump > 0; {
+               dumpThisTime := remainingToDump
+               if dumpThisTime > len(streamDump) {
+                       dumpThisTime = len(streamDump)
+               }
+               stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime])
+               remainingToDump -= dumpThisTime
+       }
+
+       return stream, nil
+}
+
+// cipherModes documents properties of supported ciphers. Ciphers not included
+// are not supported and will not be negotiated, even if explicitly requested in
+// ClientConfig.Crypto.Ciphers.
+var cipherModes = map[string]*streamCipherMode{
+       // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms
+       // are defined in the order specified in the RFC.
+       "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR},
+       "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR},
+       "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR},
+
+       // Ciphers from RFC4345, which introduces security-improved arcfour ciphers.
+       // They are defined in the order specified in the RFC.
+       "arcfour128": {16, 0, 1536, newRC4},
+       "arcfour256": {32, 0, 1536, newRC4},
+
+       // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
+       // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
+       // RC4) has problems with weak keys, and should be used with caution."
+       // RFC4345 introduces improved versions of Arcfour.
+       "arcfour": {16, 0, 0, newRC4},
+
+       // AES-GCM is not a stream cipher, so it is constructed with a
+       // special case. If we add any more non-stream ciphers, we
+       // should invest in a cleaner way to do this.
+       gcmCipherID: {16, 12, 0, nil},
+
+       // CBC mode is insecure and so is not included in the default config.
+       // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
+       // needed, it's possible to specify a custom Config to enable it.
+       // You should expect that an active attacker can recover plaintext if
+       // you do.
+       aes128cbcID: {16, aes.BlockSize, 0, nil},
+
+       // 3des-cbc is insecure and is disabled by default.
+       tripledescbcID: {24, des.BlockSize, 0, nil},
+}
+
+// prefixLen is the length of the packet prefix that contains the packet length
+// and number of padding bytes.
+const prefixLen = 5
+
+// streamPacketCipher is a packetCipher using a stream cipher.
+type streamPacketCipher struct {
+       mac    hash.Hash
+       cipher cipher.Stream
+       etm    bool
+
+       // The following members are to avoid per-packet allocations.
+       prefix      [prefixLen]byte
+       seqNumBytes [4]byte
+       padding     [2 * packetSizeMultiple]byte
+       packetData  []byte
+       macResult   []byte
+}
+
+// readPacket reads and decrypts a single packet from the reader argument.
+func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+       if _, err := io.ReadFull(r, s.prefix[:]); err != nil {
+               return nil, err
+       }
+
+       var encryptedPaddingLength [1]byte
+       if s.mac != nil && s.etm {
+               copy(encryptedPaddingLength[:], s.prefix[4:5])
+               s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
+       } else {
+               s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+       }
+
+       length := binary.BigEndian.Uint32(s.prefix[0:4])
+       paddingLength := uint32(s.prefix[4])
+
+       var macSize uint32
+       if s.mac != nil {
+               s.mac.Reset()
+               binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
+               s.mac.Write(s.seqNumBytes[:])
+               if s.etm {
+                       s.mac.Write(s.prefix[:4])
+                       s.mac.Write(encryptedPaddingLength[:])
+               } else {
+                       s.mac.Write(s.prefix[:])
+               }
+               macSize = uint32(s.mac.Size())
+       }
+
+       if length <= paddingLength+1 {
+               return nil, errors.New("ssh: invalid packet length, packet too small")
+       }
+
+       if length > maxPacket {
+               return nil, errors.New("ssh: invalid packet length, packet too large")
+       }
+
+       // the maxPacket check above ensures that length-1+macSize
+       // does not overflow.
+       if uint32(cap(s.packetData)) < length-1+macSize {
+               s.packetData = make([]byte, length-1+macSize)
+       } else {
+               s.packetData = s.packetData[:length-1+macSize]
+       }
+
+       if _, err := io.ReadFull(r, s.packetData); err != nil {
+               return nil, err
+       }
+       mac := s.packetData[length-1:]
+       data := s.packetData[:length-1]
+
+       if s.mac != nil && s.etm {
+               s.mac.Write(data)
+       }
+
+       s.cipher.XORKeyStream(data, data)
+
+       if s.mac != nil {
+               if !s.etm {
+                       s.mac.Write(data)
+               }
+               s.macResult = s.mac.Sum(s.macResult[:0])
+               if subtle.ConstantTimeCompare(s.macResult, mac) != 1 {
+                       return nil, errors.New("ssh: MAC failure")
+               }
+       }
+
+       return s.packetData[:length-paddingLength-1], nil
+}
+
+// writePacket encrypts and sends a packet of data to the writer argument.
+func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
+       if len(packet) > maxPacket {
+               return errors.New("ssh: packet too large")
+       }
+
+       aadlen := 0
+       if s.mac != nil && s.etm {
+               // packet length is not encrypted for EtM modes
+               aadlen = 4
+       }
+
+       paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple
+       if paddingLength < 4 {
+               paddingLength += packetSizeMultiple
+       }
+
+       length := len(packet) + 1 + paddingLength
+       binary.BigEndian.PutUint32(s.prefix[:], uint32(length))
+       s.prefix[4] = byte(paddingLength)
+       padding := s.padding[:paddingLength]
+       if _, err := io.ReadFull(rand, padding); err != nil {
+               return err
+       }
+
+       if s.mac != nil {
+               s.mac.Reset()
+               binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum)
+               s.mac.Write(s.seqNumBytes[:])
+
+               if s.etm {
+                       // For EtM algorithms, the packet length must stay unencrypted,
+                       // but the following data (padding length) must be encrypted
+                       s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5])
+               }
+
+               s.mac.Write(s.prefix[:])
+
+               if !s.etm {
+                       // For non-EtM algorithms, the algorithm is applied on unencrypted data
+                       s.mac.Write(packet)
+                       s.mac.Write(padding)
+               }
+       }
+
+       if !(s.mac != nil && s.etm) {
+               // For EtM algorithms, the padding length has already been encrypted
+               // and the packet length must remain unencrypted
+               s.cipher.XORKeyStream(s.prefix[:], s.prefix[:])
+       }
+
+       s.cipher.XORKeyStream(packet, packet)
+       s.cipher.XORKeyStream(padding, padding)
+
+       if s.mac != nil && s.etm {
+               // For EtM algorithms, packet and padding must be encrypted
+               s.mac.Write(packet)
+               s.mac.Write(padding)
+       }
+
+       if _, err := w.Write(s.prefix[:]); err != nil {
+               return err
+       }
+       if _, err := w.Write(packet); err != nil {
+               return err
+       }
+       if _, err := w.Write(padding); err != nil {
+               return err
+       }
+
+       if s.mac != nil {
+               s.macResult = s.mac.Sum(s.macResult[:0])
+               if _, err := w.Write(s.macResult); err != nil {
+                       return err
+               }
+       }
+
+       return nil
+}
+
+type gcmCipher struct {
+       aead   cipher.AEAD
+       prefix [4]byte
+       iv     []byte
+       buf    []byte
+}
+
+func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) {
+       c, err := aes.NewCipher(key)
+       if err != nil {
+               return nil, err
+       }
+
+       aead, err := cipher.NewGCM(c)
+       if err != nil {
+               return nil, err
+       }
+
+       return &gcmCipher{
+               aead: aead,
+               iv:   iv,
+       }, nil
+}
+
+const gcmTagSize = 16
+
+func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
+       // Pad out to multiple of 16 bytes. This is different from the
+       // stream cipher because that encrypts the length too.
+       padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple)
+       if padding < 4 {
+               padding += packetSizeMultiple
+       }
+
+       length := uint32(len(packet) + int(padding) + 1)
+       binary.BigEndian.PutUint32(c.prefix[:], length)
+       if _, err := w.Write(c.prefix[:]); err != nil {
+               return err
+       }
+
+       if cap(c.buf) < int(length) {
+               c.buf = make([]byte, length)
+       } else {
+               c.buf = c.buf[:length]
+       }
+
+       c.buf[0] = padding
+       copy(c.buf[1:], packet)
+       if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil {
+               return err
+       }
+       c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:])
+       if _, err := w.Write(c.buf); err != nil {
+               return err
+       }
+       c.incIV()
+
+       return nil
+}
+
+func (c *gcmCipher) incIV() {
+       for i := 4 + 7; i >= 4; i-- {
+               c.iv[i]++
+               if c.iv[i] != 0 {
+                       break
+               }
+       }
+}
+
+func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+       if _, err := io.ReadFull(r, c.prefix[:]); err != nil {
+               return nil, err
+       }
+       length := binary.BigEndian.Uint32(c.prefix[:])
+       if length > maxPacket {
+               return nil, errors.New("ssh: max packet length exceeded.")
+       }
+
+       if cap(c.buf) < int(length+gcmTagSize) {
+               c.buf = make([]byte, length+gcmTagSize)
+       } else {
+               c.buf = c.buf[:length+gcmTagSize]
+       }
+
+       if _, err := io.ReadFull(r, c.buf); err != nil {
+               return nil, err
+       }
+
+       plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:])
+       if err != nil {
+               return nil, err
+       }
+       c.incIV()
+
+       padding := plain[0]
+       if padding < 4 || padding >= 20 {
+               return nil, fmt.Errorf("ssh: illegal padding %d", padding)
+       }
+
+       if int(padding+1) >= len(plain) {
+               return nil, fmt.Errorf("ssh: padding %d too large", padding)
+       }
+       plain = plain[1 : length-uint32(padding)]
+       return plain, nil
+}
+
+// cbcCipher implements the aes128-cbc cipher defined in RFC 4253, section 6.1.
+type cbcCipher struct {
+       mac       hash.Hash
+       macSize   uint32
+       decrypter cipher.BlockMode
+       encrypter cipher.BlockMode
+
+       // The following members are to avoid per-packet allocations.
+       seqNumBytes [4]byte
+       packetData  []byte
+       macResult   []byte
+
+       // Amount of data we should still read to hide which
+       // verification error triggered.
+       oracleCamouflage uint32
+}
+
+func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+       cbc := &cbcCipher{
+               mac:        macModes[algs.MAC].new(macKey),
+               decrypter:  cipher.NewCBCDecrypter(c, iv),
+               encrypter:  cipher.NewCBCEncrypter(c, iv),
+               packetData: make([]byte, 1024),
+       }
+       if cbc.mac != nil {
+               cbc.macSize = uint32(cbc.mac.Size())
+       }
+
+       return cbc, nil
+}
+
+func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+       c, err := aes.NewCipher(key)
+       if err != nil {
+               return nil, err
+       }
+
+       cbc, err := newCBCCipher(c, iv, key, macKey, algs)
+       if err != nil {
+               return nil, err
+       }
+
+       return cbc, nil
+}
+
+func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) {
+       c, err := des.NewTripleDESCipher(key)
+       if err != nil {
+               return nil, err
+       }
+
+       cbc, err := newCBCCipher(c, iv, key, macKey, algs)
+       if err != nil {
+               return nil, err
+       }
+
+       return cbc, nil
+}
+
+func maxUInt32(a, b int) uint32 {
+       if a > b {
+               return uint32(a)
+       }
+       return uint32(b)
+}
+
+const (
+       cbcMinPacketSizeMultiple = 8
+       cbcMinPacketSize         = 16
+       cbcMinPaddingSize        = 4
+)
+
+// cbcError represents a verification error that may leak information.
+type cbcError string
+
+func (e cbcError) Error() string { return string(e) }
+
+func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) {
+       p, err := c.readPacketLeaky(seqNum, r)
+       if err != nil {
+               if _, ok := err.(cbcError); ok {
+                       // Verification error: read a fixed amount of
+                       // data, to make distinguishing between
+                       // failing MAC and failing length check more
+                       // difficult.
+                       io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage))
+               }
+       }
+       return p, err
+}
+
+func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) {
+       blockSize := c.decrypter.BlockSize()
+
+       // Read the header, which will include some of the subsequent data in the
+       // case of block ciphers - this is copied back to the payload later.
+       // How many bytes of payload/padding will be read with this first read.
+       firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize)
+       firstBlock := c.packetData[:firstBlockLength]
+       if _, err := io.ReadFull(r, firstBlock); err != nil {
+               return nil, err
+       }
+
+       c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength
+
+       c.decrypter.CryptBlocks(firstBlock, firstBlock)
+       length := binary.BigEndian.Uint32(firstBlock[:4])
+       if length > maxPacket {
+               return nil, cbcError("ssh: packet too large")
+       }
+       if length+4 < maxUInt32(cbcMinPacketSize, blockSize) {
+               // The minimum size of a packet is 16 (or the cipher block size, whichever
+               // is larger) bytes.
+               return nil, cbcError("ssh: packet too small")
+       }
+       // The length of the packet (including the length field but not the MAC) must
+       // be a multiple of the block size or 8, whichever is larger.
+       if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 {
+               return nil, cbcError("ssh: invalid packet length multiple")
+       }
+
+       paddingLength := uint32(firstBlock[4])
+       if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 {
+               return nil, cbcError("ssh: invalid packet length")
+       }
+
+       // Positions within the c.packetData buffer:
+       macStart := 4 + length
+       paddingStart := macStart - paddingLength
+
+       // Entire packet size, starting before length, ending at end of mac.
+       entirePacketSize := macStart + c.macSize
+
+       // Ensure c.packetData is large enough for the entire packet data.
+       if uint32(cap(c.packetData)) < entirePacketSize {
+               // Still need to upsize and copy, but this should be rare at runtime, only
+               // on upsizing the packetData buffer.
+               c.packetData = make([]byte, entirePacketSize)
+               copy(c.packetData, firstBlock)
+       } else {
+               c.packetData = c.packetData[:entirePacketSize]
+       }
+
+       if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil {
+               return nil, err
+       } else {
+               c.oracleCamouflage -= uint32(n)
+       }
+
+       remainingCrypted := c.packetData[firstBlockLength:macStart]
+       c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted)
+
+       mac := c.packetData[macStart:]
+       if c.mac != nil {
+               c.mac.Reset()
+               binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
+               c.mac.Write(c.seqNumBytes[:])
+               c.mac.Write(c.packetData[:macStart])
+               c.macResult = c.mac.Sum(c.macResult[:0])
+               if subtle.ConstantTimeCompare(c.macResult, mac) != 1 {
+                       return nil, cbcError("ssh: MAC failure")
+               }
+       }
+
+       return c.packetData[prefixLen:paddingStart], nil
+}
+
+func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error {
+       effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize())
+
+       // Length of encrypted portion of the packet (header, payload, padding).
+       // Enforce minimum padding and packet size.
+       encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPacketSize)
+       // Enforce block size.
+       encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize
+
+       length := encLength - 4
+       paddingLength := int(length) - (1 + len(packet))
+
+       // Overall buffer contains: header, payload, padding, mac.
+       // Space for the MAC is reserved in the capacity but not the slice length.
+       bufferSize := encLength + c.macSize
+       if uint32(cap(c.packetData)) < bufferSize {
+               c.packetData = make([]byte, encLength, bufferSize)
+       } else {
+               c.packetData = c.packetData[:encLength]
+       }
+
+       p := c.packetData
+
+       // Packet header.
+       binary.BigEndian.PutUint32(p, length)
+       p = p[4:]
+       p[0] = byte(paddingLength)
+
+       // Payload.
+       p = p[1:]
+       copy(p, packet)
+
+       // Padding.
+       p = p[len(packet):]
+       if _, err := io.ReadFull(rand, p); err != nil {
+               return err
+       }
+
+       if c.mac != nil {
+               c.mac.Reset()
+               binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum)
+               c.mac.Write(c.seqNumBytes[:])
+               c.mac.Write(c.packetData)
+               // The MAC is now appended into the capacity reserved for it earlier.
+               c.packetData = c.mac.Sum(c.packetData)
+       }
+
+       c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength])
+
+       if _, err := w.Write(c.packetData); err != nil {
+               return err
+       }
+
+       return nil
+}
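writePacket above pads each packet so that the encrypted portion (the 5 prefix bytes plus payload plus padding, minus the 4 unencrypted length bytes in EtM mode) is a multiple of packetSizeMultiple, with never fewer than the 4 padding bytes RFC 4253 requires. A standalone sketch of just that arithmetic with worked cases; sshPadding is an illustrative helper, not part of this file:

    package main

    import "fmt"

    const (
        prefixLen          = 5  // 4-byte packet length + 1-byte padding count
        packetSizeMultiple = 16 // block alignment used by the stream ciphers above
    )

    // sshPadding mirrors writePacket's padding computation. aadLen is 4 for
    // EtM MACs, where the packet length bytes stay unencrypted, and 0 otherwise.
    func sshPadding(payloadLen, aadLen int) int {
        padding := packetSizeMultiple - (prefixLen+payloadLen-aadLen)%packetSizeMultiple
        if padding < 4 {
            padding += packetSizeMultiple
        }
        return padding
    }

    func main() {
        fmt.Println(sshPadding(12, 0)) // 15: 5+12+15 = 32, a multiple of 16
        fmt.Println(sshPadding(27, 0)) // 16: already aligned, but >= 4 bytes are required
        fmt.Println(sshPadding(12, 4)) // 19: 5+12+19-4 = 32 once the 4 length bytes are excluded
    }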
diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go
new file mode 100644 (file)
index 0000000..c97f297
--- /dev/null
@@ -0,0 +1,211 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "errors"
+       "fmt"
+       "net"
+       "sync"
+       "time"
+)
+
+// Client implements a traditional SSH client that supports shells,
+// subprocesses, port forwarding and tunneled dialing.
+type Client struct {
+       Conn
+
+       forwards        forwardList // forwarded tcpip connections from the remote side
+       mu              sync.Mutex
+       channelHandlers map[string]chan NewChannel
+}
+
+// HandleChannelOpen returns a channel on which NewChannel requests
+// for the given type are sent. If the type is already being handled,
+// nil is returned. The channel is closed when the connection is closed.
+func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel {
+       c.mu.Lock()
+       defer c.mu.Unlock()
+       if c.channelHandlers == nil {
+               // The SSH channel has been closed.
+               c := make(chan NewChannel)
+               close(c)
+               return c
+       }
+
+       ch := c.channelHandlers[channelType]
+       if ch != nil {
+               return nil
+       }
+
+       ch = make(chan NewChannel, chanSize)
+       c.channelHandlers[channelType] = ch
+       return ch
+}
+
+// NewClient creates a Client on top of the given connection.
+func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client {
+       conn := &Client{
+               Conn:            c,
+               channelHandlers: make(map[string]chan NewChannel, 1),
+       }
+
+       go conn.handleGlobalRequests(reqs)
+       go conn.handleChannelOpens(chans)
+       go func() {
+               conn.Wait()
+               conn.forwards.closeAll()
+       }()
+       go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip"))
+       return conn
+}
+
+// NewClientConn establishes an authenticated SSH connection using c
+// as the underlying transport.  The Request and NewChannel channels
+// must be serviced or the connection will hang.
+func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) {
+       fullConf := *config
+       fullConf.SetDefaults()
+       conn := &connection{
+               sshConn: sshConn{conn: c},
+       }
+
+       if err := conn.clientHandshake(addr, &fullConf); err != nil {
+               c.Close()
+               return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err)
+       }
+       conn.mux = newMux(conn.transport)
+       return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil
+}
+
+// clientHandshake performs the client side key exchange. See RFC 4253 Section
+// 7.
+func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error {
+       if config.ClientVersion != "" {
+               c.clientVersion = []byte(config.ClientVersion)
+       } else {
+               c.clientVersion = []byte(packageVersion)
+       }
+       var err error
+       c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion)
+       if err != nil {
+               return err
+       }
+
+       c.transport = newClientTransport(
+               newTransport(c.sshConn.conn, config.Rand, true /* is client */),
+               c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr())
+       if err := c.transport.waitSession(); err != nil {
+               return err
+       }
+
+       c.sessionID = c.transport.getSessionID()
+       return c.clientAuthenticate(config)
+}
+
+// verifyHostKeySignature verifies the host key obtained in the key
+// exchange.
+func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error {
+       sig, rest, ok := parseSignatureBody(result.Signature)
+       if len(rest) > 0 || !ok {
+               return errors.New("ssh: signature parse error")
+       }
+
+       return hostKey.Verify(result.H, sig)
+}
+
+// NewSession opens a new Session for this client. (A session is a remote
+// execution of a program.)
+func (c *Client) NewSession() (*Session, error) {
+       ch, in, err := c.OpenChannel("session", nil)
+       if err != nil {
+               return nil, err
+       }
+       return newSession(ch, in)
+}
+
+func (c *Client) handleGlobalRequests(incoming <-chan *Request) {
+       for r := range incoming {
+               // This handles keepalive messages and matches
+               // the behaviour of OpenSSH.
+               r.Reply(false, nil)
+       }
+}
+
+// handleChannelOpens handles channel open messages from the remote side.
+func (c *Client) handleChannelOpens(in <-chan NewChannel) {
+       for ch := range in {
+               c.mu.Lock()
+               handler := c.channelHandlers[ch.ChannelType()]
+               c.mu.Unlock()
+
+               if handler != nil {
+                       handler <- ch
+               } else {
+                       ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType()))
+               }
+       }
+
+       c.mu.Lock()
+       for _, ch := range c.channelHandlers {
+               close(ch)
+       }
+       c.channelHandlers = nil
+       c.mu.Unlock()
+}
+
+// Dial starts a client connection to the given SSH server. It is a
+// convenience function that connects to the given network address,
+// initiates the SSH handshake, and then sets up a Client.  For access
+// to incoming channels and requests, use net.Dial with NewClientConn
+// instead.
+func Dial(network, addr string, config *ClientConfig) (*Client, error) {
+       conn, err := net.DialTimeout(network, addr, config.Timeout)
+       if err != nil {
+               return nil, err
+       }
+       c, chans, reqs, err := NewClientConn(conn, addr, config)
+       if err != nil {
+               return nil, err
+       }
+       return NewClient(c, chans, reqs), nil
+}
+
+// A ClientConfig structure is used to configure a Client. It must not be
+// modified after having been passed to an SSH function.
+type ClientConfig struct {
+       // Config contains configuration that is shared between clients and
+       // servers.
+       Config
+
+       // User contains the username to authenticate as.
+       User string
+
+       // Auth contains possible authentication methods to use with the
+       // server. Only the first instance of a particular RFC 4252 method will
+       // be used during authentication.
+       Auth []AuthMethod
+
+       // HostKeyCallback, if not nil, is called during the cryptographic
+       // handshake to validate the server's host key. A nil HostKeyCallback
+       // implies that all host keys are accepted.
+       HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
+
+       // ClientVersion contains the version identification string that will
+       // be used for the connection. If empty, a reasonable default is used.
+       ClientVersion string
+
+       // HostKeyAlgorithms lists the key types that the client will
+       // accept from the server as host key, in order of
+       // preference. If empty, a reasonable default is used. Any
+       // string returned from PublicKey.Type method may be used, or
+       // any of the CertAlgoXxxx and KeyAlgoXxxx constants.
+       HostKeyAlgorithms []string
+
+       // Timeout is the maximum amount of time for the TCP connection to establish.
+       //
+       // A Timeout of zero means no timeout.
+       Timeout time.Duration
+}
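Dial and ClientConfig above are the usual entry points. A minimal client sketch assuming this API plus Session.Run from the package's session code (not part of this hunk); the host, credentials and command are placeholders:

    package main

    import (
        "bytes"
        "fmt"
        "log"
        "net"
        "time"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        config := &ssh.ClientConfig{
            User: "alice", // placeholder credentials
            Auth: []ssh.AuthMethod{ssh.Password("secret")},
            // Returning nil accepts any host key (the documented behaviour of a
            // nil callback); real clients should verify against known hosts.
            HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
                return nil
            },
            Timeout: 5 * time.Second,
        }

        client, err := ssh.Dial("tcp", "example.com:22", config) // placeholder host
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        session, err := client.NewSession()
        if err != nil {
            log.Fatal(err)
        }
        defer session.Close()

        var out bytes.Buffer
        session.Stdout = &out
        if err := session.Run("uname -a"); err != nil {
            log.Fatal(err)
        }
        fmt.Print(out.String())
    }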
diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go
new file mode 100644 (file)
index 0000000..fd1ec5d
--- /dev/null
@@ -0,0 +1,475 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "bytes"
+       "errors"
+       "fmt"
+       "io"
+)
+
+// clientAuthenticate authenticates with the remote server. See RFC 4252.
+func (c *connection) clientAuthenticate(config *ClientConfig) error {
+       // initiate user auth session
+       if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil {
+               return err
+       }
+       packet, err := c.transport.readPacket()
+       if err != nil {
+               return err
+       }
+       var serviceAccept serviceAcceptMsg
+       if err := Unmarshal(packet, &serviceAccept); err != nil {
+               return err
+       }
+
+       // during the authentication phase the client first attempts the "none" method
+       // then any untried methods suggested by the server.
+       tried := make(map[string]bool)
+       var lastMethods []string
+
+       sessionID := c.transport.getSessionID()
+       for auth := AuthMethod(new(noneAuth)); auth != nil; {
+               ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand)
+               if err != nil {
+                       return err
+               }
+               if ok {
+                       // success
+                       return nil
+               }
+               tried[auth.method()] = true
+               if methods == nil {
+                       methods = lastMethods
+               }
+               lastMethods = methods
+
+               auth = nil
+
+       findNext:
+               for _, a := range config.Auth {
+                       candidateMethod := a.method()
+                       if tried[candidateMethod] {
+                               continue
+                       }
+                       for _, meth := range methods {
+                               if meth == candidateMethod {
+                                       auth = a
+                                       break findNext
+                               }
+                       }
+               }
+       }
+       return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
+}
+
+func keys(m map[string]bool) []string {
+       s := make([]string, 0, len(m))
+
+       for key := range m {
+               s = append(s, key)
+       }
+       return s
+}
+
+// An AuthMethod represents an instance of an RFC 4252 authentication method.
+type AuthMethod interface {
+       // auth authenticates user over transport t.
+       // Returns true if authentication is successful.
+       // If authentication is not successful, a []string of alternative
+       // method names is returned. If the slice is nil, it will be ignored
+       // and the previous set of possible methods will be reused.
+       auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error)
+
+       // method returns the RFC 4252 method name.
+       method() string
+}
+
+// "none" authentication, RFC 4252 section 5.2.
+type noneAuth int
+
+func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+       if err := c.writePacket(Marshal(&userAuthRequestMsg{
+               User:    user,
+               Service: serviceSSH,
+               Method:  "none",
+       })); err != nil {
+               return false, nil, err
+       }
+
+       return handleAuthResponse(c)
+}
+
+func (n *noneAuth) method() string {
+       return "none"
+}
+
+// passwordCallback is an AuthMethod that fetches the password through
+// a function call, e.g. by prompting the user.
+type passwordCallback func() (password string, err error)
+
+func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+       type passwordAuthMsg struct {
+               User     string `sshtype:"50"`
+               Service  string
+               Method   string
+               Reply    bool
+               Password string
+       }
+
+       pw, err := cb()
+       // REVIEW NOTE: is there a need to support skipping a password attempt?
+       // The program may only find out that the user doesn't have a password
+       // when prompting.
+       if err != nil {
+               return false, nil, err
+       }
+
+       if err := c.writePacket(Marshal(&passwordAuthMsg{
+               User:     user,
+               Service:  serviceSSH,
+               Method:   cb.method(),
+               Reply:    false,
+               Password: pw,
+       })); err != nil {
+               return false, nil, err
+       }
+
+       return handleAuthResponse(c)
+}
+
+func (cb passwordCallback) method() string {
+       return "password"
+}
+
+// Password returns an AuthMethod using the given password.
+func Password(secret string) AuthMethod {
+       return passwordCallback(func() (string, error) { return secret, nil })
+}
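+
+// A minimal usage sketch of password authentication, assuming the Dial
+// helper from this package's client.go; the address and credentials are
+// placeholders, and host key verification is elided for brevity:
+//
+//    config := &ClientConfig{
+//        User: "gopher",
+//        Auth: []AuthMethod{Password("hunter2")},
+//    }
+//    client, err := Dial("tcp", "example.com:22", config)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    defer client.Close()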
+
+// PasswordCallback returns an AuthMethod that uses a callback for
+// fetching a password.
+func PasswordCallback(prompt func() (secret string, err error)) AuthMethod {
+       return passwordCallback(prompt)
+}
+
+type publickeyAuthMsg struct {
+       User    string `sshtype:"50"`
+       Service string
+       Method  string
+       // HasSig indicates to the receiver that the auth request is signed and
+       // should be used for authentication of the request.
+       HasSig   bool
+       Algoname string
+       PubKey   []byte
+       // Sig is tagged with "rest" so Marshal will exclude it during
+       // validateKey
+       Sig []byte `ssh:"rest"`
+}
+
+// publicKeyCallback is an AuthMethod that uses a set of key
+// pairs for authentication.
+type publicKeyCallback func() ([]Signer, error)
+
+func (cb publicKeyCallback) method() string {
+       return "publickey"
+}
+
+func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+       // Authentication is performed in two stages. The first stage sends an
+       // enquiry to test if each key is acceptable to the remote. The second
+       // stage attempts to authenticate with the valid keys obtained in the
+       // first stage.
+
+       signers, err := cb()
+       if err != nil {
+               return false, nil, err
+       }
+       var validKeys []Signer
+       for _, signer := range signers {
+               if ok, err := validateKey(signer.PublicKey(), user, c); ok {
+                       validKeys = append(validKeys, signer)
+               } else {
+                       if err != nil {
+                               return false, nil, err
+                       }
+               }
+       }
+
+       // methods that may continue if this auth is not successful.
+       var methods []string
+       for _, signer := range validKeys {
+               pub := signer.PublicKey()
+
+               pubKey := pub.Marshal()
+               sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
+                       User:    user,
+                       Service: serviceSSH,
+                       Method:  cb.method(),
+               }, []byte(pub.Type()), pubKey))
+               if err != nil {
+                       return false, nil, err
+               }
+
+               // manually wrap the serialized signature in a string
+               s := Marshal(sign)
+               sig := make([]byte, stringLength(len(s)))
+               marshalString(sig, s)
+               msg := publickeyAuthMsg{
+                       User:     user,
+                       Service:  serviceSSH,
+                       Method:   cb.method(),
+                       HasSig:   true,
+                       Algoname: pub.Type(),
+                       PubKey:   pubKey,
+                       Sig:      sig,
+               }
+               p := Marshal(&msg)
+               if err := c.writePacket(p); err != nil {
+                       return false, nil, err
+               }
+               var success bool
+               success, methods, err = handleAuthResponse(c)
+               if err != nil {
+                       return false, nil, err
+               }
+               if success {
+                       return success, methods, err
+               }
+       }
+       return false, methods, nil
+}
+
+// validateKey validates that the provided key is acceptable to the server.
+func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
+       pubKey := key.Marshal()
+       msg := publickeyAuthMsg{
+               User:     user,
+               Service:  serviceSSH,
+               Method:   "publickey",
+               HasSig:   false,
+               Algoname: key.Type(),
+               PubKey:   pubKey,
+       }
+       if err := c.writePacket(Marshal(&msg)); err != nil {
+               return false, err
+       }
+
+       return confirmKeyAck(key, c)
+}
+
+func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
+       pubKey := key.Marshal()
+       algoname := key.Type()
+
+       for {
+               packet, err := c.readPacket()
+               if err != nil {
+                       return false, err
+               }
+               switch packet[0] {
+               case msgUserAuthBanner:
+                       // TODO(gpaul): add callback to present the banner to the user
+               case msgUserAuthPubKeyOk:
+                       var msg userAuthPubKeyOkMsg
+                       if err := Unmarshal(packet, &msg); err != nil {
+                               return false, err
+                       }
+                       if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
+                               return false, nil
+                       }
+                       return true, nil
+               case msgUserAuthFailure:
+                       return false, nil
+               default:
+                       return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+               }
+       }
+}
+
+// PublicKeys returns an AuthMethod that uses the given key
+// pairs.
+func PublicKeys(signers ...Signer) AuthMethod {
+       return publicKeyCallback(func() ([]Signer, error) { return signers, nil })
+}
+
+// PublicKeysCallback returns an AuthMethod that runs the given
+// function to obtain a list of key pairs.
+func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod {
+       return publicKeyCallback(getSigners)
+}
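+
+// A minimal sketch of public-key authentication, assuming ParsePrivateKey
+// from this package's keys.go; pemBytes holds a PEM-encoded private key
+// read elsewhere, and the address is a placeholder:
+//
+//    signer, err := ParsePrivateKey(pemBytes)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    config := &ClientConfig{
+//        User: "gopher",
+//        Auth: []AuthMethod{PublicKeys(signer)},
+//    }
+//    client, err := Dial("tcp", "example.com:22", config)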
+
+// handleAuthResponse returns whether the preceding authentication request
+// succeeded, along with a list of remaining authentication methods to try
+// next, and an error if an unexpected response was received.
+func handleAuthResponse(c packetConn) (bool, []string, error) {
+       for {
+               packet, err := c.readPacket()
+               if err != nil {
+                       return false, nil, err
+               }
+
+               switch packet[0] {
+               case msgUserAuthBanner:
+                       // TODO: add callback to present the banner to the user
+               case msgUserAuthFailure:
+                       var msg userAuthFailureMsg
+                       if err := Unmarshal(packet, &msg); err != nil {
+                               return false, nil, err
+                       }
+                       return false, msg.Methods, nil
+               case msgUserAuthSuccess:
+                       return true, nil, nil
+               default:
+                       return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
+               }
+       }
+}
+
+// KeyboardInteractiveChallenge should print questions, optionally
+// disabling echoing (e.g. for passwords), and return all the answers.
+// Challenge may be called multiple times in a single session. After
+// successful authentication, the server may send a challenge with no
+// questions, for which the user and instruction messages should be
+// printed.  RFC 4256 section 3.3 details how the UI should behave for
+// both CLI and GUI environments.
+type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
+
+// KeyboardInteractive returns an AuthMethod using a prompt/response
+// sequence controlled by the server.
+func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod {
+       return challenge
+}
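+
+// A minimal sketch of a challenge callback: it prints each question and
+// reads a one-word answer from standard input. Honoring the echos flags
+// (e.g. disabling terminal echo for passwords) is left out for brevity:
+//
+//    challenge := func(user, instruction string, questions []string, echos []bool) ([]string, error) {
+//        answers := make([]string, len(questions))
+//        for i, q := range questions {
+//            fmt.Print(q)
+//            if _, err := fmt.Scanln(&answers[i]); err != nil {
+//                return nil, err
+//            }
+//        }
+//        return answers, nil
+//    }
+//    config.Auth = append(config.Auth, KeyboardInteractive(challenge))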
+
+func (cb KeyboardInteractiveChallenge) method() string {
+       return "keyboard-interactive"
+}
+
+func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) {
+       type initiateMsg struct {
+               User       string `sshtype:"50"`
+               Service    string
+               Method     string
+               Language   string
+               Submethods string
+       }
+
+       if err := c.writePacket(Marshal(&initiateMsg{
+               User:    user,
+               Service: serviceSSH,
+               Method:  "keyboard-interactive",
+       })); err != nil {
+               return false, nil, err
+       }
+
+       for {
+               packet, err := c.readPacket()
+               if err != nil {
+                       return false, nil, err
+               }
+
+               // Like handleAuthResponse, but with fewer options.
+               switch packet[0] {
+               case msgUserAuthBanner:
+                       // TODO: Print banners during userauth.
+                       continue
+               case msgUserAuthInfoRequest:
+                       // OK
+               case msgUserAuthFailure:
+                       var msg userAuthFailureMsg
+                       if err := Unmarshal(packet, &msg); err != nil {
+                               return false, nil, err
+                       }
+                       return false, msg.Methods, nil
+               case msgUserAuthSuccess:
+                       return true, nil, nil
+               default:
+                       return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
+               }
+
+               var msg userAuthInfoRequestMsg
+               if err := Unmarshal(packet, &msg); err != nil {
+                       return false, nil, err
+               }
+
+               // Manually unpack the prompt/echo pairs.
+               rest := msg.Prompts
+               var prompts []string
+               var echos []bool
+               for i := 0; i < int(msg.NumPrompts); i++ {
+                       prompt, r, ok := parseString(rest)
+                       if !ok || len(r) == 0 {
+                               return false, nil, errors.New("ssh: prompt format error")
+                       }
+                       prompts = append(prompts, string(prompt))
+                       echos = append(echos, r[0] != 0)
+                       rest = r[1:]
+               }
+
+               if len(rest) != 0 {
+                       return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
+               }
+
+               answers, err := cb(msg.User, msg.Instruction, prompts, echos)
+               if err != nil {
+                       return false, nil, err
+               }
+
+               if len(answers) != len(prompts) {
+                       return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback")
+               }
+               responseLength := 1 + 4
+               for _, a := range answers {
+                       responseLength += stringLength(len(a))
+               }
+               serialized := make([]byte, responseLength)
+               p := serialized
+               p[0] = msgUserAuthInfoResponse
+               p = p[1:]
+               p = marshalUint32(p, uint32(len(answers)))
+               for _, a := range answers {
+                       p = marshalString(p, []byte(a))
+               }
+
+               if err := c.writePacket(serialized); err != nil {
+                       return false, nil, err
+               }
+       }
+}
+
+type retryableAuthMethod struct {
+       authMethod AuthMethod
+       maxTries   int
+}
+
+func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) {
+       for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
+               ok, methods, err = r.authMethod.auth(session, user, c, rand)
+               if ok || err != nil { // either success or error terminate
+                       return ok, methods, err
+               }
+       }
+       return ok, methods, err
+}
+
+func (r *retryableAuthMethod) method() string {
+       return r.authMethod.method()
+}
+
+// RetryableAuthMethod is a decorator for other auth methods enabling them to
+// be retried up to maxTries before considering that AuthMethod itself failed.
+// If maxTries is <= 0, it will retry indefinitely.
+//
+// This is useful for interactive clients using challenge/response type
+// authentication (e.g. keyboard-interactive or password) where the user
+// could mistype their response, resulting in the server issuing
+// SSH_MSG_USERAUTH_FAILURE (RFC 4252 section 8 [password] and RFC 4256
+// section 3.4 [keyboard-interactive]). Without this decorator, the
+// non-retryable AuthMethod would be removed from future consideration and
+// never tried again, so the user would never be able to retry their entry.
+func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod {
+       return &retryableAuthMethod{authMethod: auth, maxTries: maxTries}
+}
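+
+// A minimal usage sketch: allow three password attempts before the method
+// is considered failed. promptPassword is a hypothetical func() (string, error)
+// that reads a password from the terminal:
+//
+//    auth := RetryableAuthMethod(PasswordCallback(promptPassword), 3)
+//    config := &ClientConfig{User: "gopher", Auth: []AuthMethod{auth}}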
diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go
new file mode 100644 (file)
index 0000000..8656d0f
--- /dev/null
@@ -0,0 +1,371 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "crypto"
+       "crypto/rand"
+       "fmt"
+       "io"
+       "sync"
+
+       _ "crypto/sha1"
+       _ "crypto/sha256"
+       _ "crypto/sha512"
+)
+
+// These are string constants in the SSH protocol.
+const (
+       compressionNone = "none"
+       serviceUserAuth = "ssh-userauth"
+       serviceSSH      = "ssh-connection"
+)
+
+// supportedCiphers specifies the supported ciphers in preference order.
+var supportedCiphers = []string{
+       "aes128-ctr", "aes192-ctr", "aes256-ctr",
+       "aes128-gcm@openssh.com",
+       "arcfour256", "arcfour128",
+}
+
+// supportedKexAlgos specifies the supported key-exchange algorithms in
+// preference order.
+var supportedKexAlgos = []string{
+       kexAlgoCurve25519SHA256,
+       // P384 and P521 are not constant-time yet, but since we don't
+       // reuse ephemeral keys, using them for ECDH should be OK.
+       kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
+       kexAlgoDH14SHA1, kexAlgoDH1SHA1,
+}
+
+// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
+// of authenticating servers) in preference order.
+var supportedHostKeyAlgos = []string{
+       CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
+       CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
+
+       KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
+       KeyAlgoRSA, KeyAlgoDSA,
+
+       KeyAlgoED25519,
+}
+
+// supportedMACs specifies a default set of MAC algorithms in preference order.
+// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed
+// because they have reached the end of their useful life.
+var supportedMACs = []string{
+       "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96",
+}
+
+var supportedCompressions = []string{compressionNone}
+
+// hashFuncs keeps the mapping of supported algorithms to their respective
+// hashes needed for signature verification.
+var hashFuncs = map[string]crypto.Hash{
+       KeyAlgoRSA:          crypto.SHA1,
+       KeyAlgoDSA:          crypto.SHA1,
+       KeyAlgoECDSA256:     crypto.SHA256,
+       KeyAlgoECDSA384:     crypto.SHA384,
+       KeyAlgoECDSA521:     crypto.SHA512,
+       CertAlgoRSAv01:      crypto.SHA1,
+       CertAlgoDSAv01:      crypto.SHA1,
+       CertAlgoECDSA256v01: crypto.SHA256,
+       CertAlgoECDSA384v01: crypto.SHA384,
+       CertAlgoECDSA521v01: crypto.SHA512,
+}
+
+// unexpectedMessageError results when the SSH message that we received didn't
+// match what we wanted.
+func unexpectedMessageError(expected, got uint8) error {
+       return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected)
+}
+
+// parseError results from a malformed SSH message.
+func parseError(tag uint8) error {
+       return fmt.Errorf("ssh: parse error in message type %d", tag)
+}
+
+func findCommon(what string, client []string, server []string) (common string, err error) {
+       for _, c := range client {
+               for _, s := range server {
+                       if c == s {
+                               return c, nil
+                       }
+               }
+       }
+       return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server)
+}
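+
+// For example, findCommon("cipher", []string{"a", "b"}, []string{"b", "c"})
+// returns "b": the outer loop ranges over the client's list, so preference
+// follows the client's ordering.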
+
+type directionAlgorithms struct {
+       Cipher      string
+       MAC         string
+       Compression string
+}
+
+// rekeyBytes returns the rekeying interval in bytes.
+func (a *directionAlgorithms) rekeyBytes() int64 {
+       // According to RFC 4344, block ciphers should rekey after
+       // 2^(BLOCKSIZE/4) blocks. For all AES flavors, BLOCKSIZE is
+       // 128 bits.
+       switch a.Cipher {
+       case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID:
+               return 16 * (1 << 32)
+
+       }
+
+       // For others, stick with the RFC 4253 recommendation to rekey after 1 GiB of data.
+       return 1 << 30
+}
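+
+// For concreteness: AES has a 128-bit block, so 2^(128/4) = 2^32 blocks of
+// 16 bytes each give the 16 * (1 << 32) = 64 GiB interval returned above;
+// the 1 << 30 fallback is 1 GiB.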
+
+type algorithms struct {
+       kex     string
+       hostKey string
+       w       directionAlgorithms
+       r       directionAlgorithms
+}
+
+func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) {
+       result := &algorithms{}
+
+       result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos)
+       if err != nil {
+               return
+       }
+
+       result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos)
+       if err != nil {
+               return
+       }
+
+       result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer)
+       if err != nil {
+               return
+       }
+
+       result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient)
+       if err != nil {
+               return
+       }
+
+       result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
+       if err != nil {
+               return
+       }
+
+       result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
+       if err != nil {
+               return
+       }
+
+       result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
+       if err != nil {
+               return
+       }
+
+       result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient)
+       if err != nil {
+               return
+       }
+
+       return result, nil
+}
+
+// If RekeyThreshold is too small, we can't make any progress sending
+// data.
+const minRekeyThreshold uint64 = 256
+
+// Config contains configuration data common to both ServerConfig and
+// ClientConfig.
+type Config struct {
+       // Rand provides the source of entropy for cryptographic
+       // primitives. If Rand is nil, the cryptographic random reader
+       // in package crypto/rand will be used.
+       Rand io.Reader
+
+       // The maximum number of bytes sent or received after which a
+       // new key is negotiated. It must be at least 256. If
+       // unspecified, 1 gigabyte is used.
+       RekeyThreshold uint64
+
+       // The allowed key exchange algorithms. If unspecified then a
+       // default set of algorithms is used.
+       KeyExchanges []string
+
+       // The allowed cipher algorithms. If unspecified then a sensible
+       // default is used.
+       Ciphers []string
+
+       // The allowed MAC algorithms. If unspecified then a sensible default
+       // is used.
+       MACs []string
+}
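+
+// A minimal sketch of narrowing the negotiated algorithms; fields left
+// unset keep the package defaults via SetDefaults, and the embedding
+// ClientConfig promotes these fields:
+//
+//    var config ClientConfig
+//    config.Ciphers = []string{"aes256-ctr"}
+//    config.MACs = []string{"hmac-sha2-256"}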
+
+// SetDefaults sets sensible values for unset fields in config. This is
+// exported for testing: Configs passed to SSH functions are copied and have
+// default values set automatically.
+func (c *Config) SetDefaults() {
+       if c.Rand == nil {
+               c.Rand = rand.Reader
+       }
+       if c.Ciphers == nil {
+               c.Ciphers = supportedCiphers
+       }
+       var ciphers []string
+       for _, c := range c.Ciphers {
+               if cipherModes[c] != nil {
+                       // keep only ciphers that have a cipherModes definition
+                       ciphers = append(ciphers, c)
+               }
+       }
+       c.Ciphers = ciphers
+
+       if c.KeyExchanges == nil {
+               c.KeyExchanges = supportedKexAlgos
+       }
+
+       if c.MACs == nil {
+               c.MACs = supportedMACs
+       }
+
+       if c.RekeyThreshold == 0 {
+               // RFC 4253, section 9 suggests rekeying after 1G.
+               c.RekeyThreshold = 1 << 30
+       }
+       if c.RekeyThreshold < minRekeyThreshold {
+               c.RekeyThreshold = minRekeyThreshold
+       }
+}
+
+// buildDataSignedForAuth returns the data that is signed in order to prove
+// possession of a private key. See RFC 4252, section 7.
+func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
+       data := struct {
+               Session []byte
+               Type    byte
+               User    string
+               Service string
+               Method  string
+               Sign    bool
+               Algo    []byte
+               PubKey  []byte
+       }{
+               sessionId,
+               msgUserAuthRequest,
+               req.User,
+               req.Service,
+               req.Method,
+               true,
+               algo,
+               pubKey,
+       }
+       return Marshal(data)
+}
+
+func appendU16(buf []byte, n uint16) []byte {
+       return append(buf, byte(n>>8), byte(n))
+}
+
+func appendU32(buf []byte, n uint32) []byte {
+       return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
+}
+
+func appendU64(buf []byte, n uint64) []byte {
+       return append(buf,
+               byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32),
+               byte(n>>24), byte(n>>16), byte(n>>8), byte(n))
+}
+
+func appendInt(buf []byte, n int) []byte {
+       return appendU32(buf, uint32(n))
+}
+
+func appendString(buf []byte, s string) []byte {
+       buf = appendU32(buf, uint32(len(s)))
+       buf = append(buf, s...)
+       return buf
+}
+
+func appendBool(buf []byte, b bool) []byte {
+       if b {
+               return append(buf, 1)
+       }
+       return append(buf, 0)
+}
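+
+// For example, appendString(nil, "ok") produces the length-prefixed bytes
+// {0x00, 0x00, 0x00, 0x02, 'o', 'k'}, and appendBool(nil, true) produces
+// {0x01}, matching the SSH wire encoding of strings and booleans.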
+
+// newCond is a helper to hide the fact that there is no usable zero
+// value for sync.Cond.
+func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) }
+
+// window represents the buffer available to clients
+// wishing to write to a channel.
+type window struct {
+       *sync.Cond
+       win          uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1
+       writeWaiters int
+       closed       bool
+}
+
+// add adds win to the amount of window available
+// for consumers.
+func (w *window) add(win uint32) bool {
+       // A zero-sized window adjustment is a no-op.
+       if win == 0 {
+               return true
+       }
+       w.L.Lock()
+       if w.win+win < win {
+               w.L.Unlock()
+               return false
+       }
+       w.win += win
+       // It is unusual for multiple goroutines to be attempting to reserve
+       // window space, but it is not impossible. Use broadcast to notify all
+       // waiters that additional window is available.
+       w.Broadcast()
+       w.L.Unlock()
+       return true
+}
+
+// close sets the window to closed, so all reservations fail
+// immediately.
+func (w *window) close() {
+       w.L.Lock()
+       w.closed = true
+       w.Broadcast()
+       w.L.Unlock()
+}
+
+// reserve reserves win from the available window capacity.
+// If no capacity remains, reserve will block. reserve may
+// return less than requested.
+func (w *window) reserve(win uint32) (uint32, error) {
+       var err error
+       w.L.Lock()
+       w.writeWaiters++
+       w.Broadcast()
+       for w.win == 0 && !w.closed {
+               w.Wait()
+       }
+       w.writeWaiters--
+       if w.win < win {
+               win = w.win
+       }
+       w.win -= win
+       if w.closed {
+               err = io.EOF
+       }
+       w.L.Unlock()
+       return win, err
+}
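+
+// A sketch of the intended call pattern for RFC 4254 flow control: a writer
+// reserves capacity before sending channel data, and the peer's
+// window-adjust messages replenish it through add. send is a hypothetical
+// transmit step:
+//
+//    n, err := w.reserve(uint32(len(data)))
+//    if err != nil {
+//        return err // the window was closed
+//    }
+//    send(data[:n]) // n may be smaller than requested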
+
+// waitWriterBlocked waits until some goroutine is blocked for further
+// writes. It is used in tests only.
+func (w *window) waitWriterBlocked() {
+       w.Cond.L.Lock()
+       for w.writeWaiters == 0 {
+               w.Cond.Wait()
+       }
+       w.Cond.L.Unlock()
+}
diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go
new file mode 100644 (file)
index 0000000..e786f2f
--- /dev/null
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "fmt"
+       "net"
+)
+
+// OpenChannelError is returned if the other side rejects an
+// OpenChannel request.
+type OpenChannelError struct {
+       Reason  RejectionReason
+       Message string
+}
+
+func (e *OpenChannelError) Error() string {
+       return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message)
+}
+
+// ConnMetadata holds metadata for the connection.
+type ConnMetadata interface {
+       // User returns the user ID for this connection.
+       User() string
+
+       // SessionID returns the session hash, also denoted by H.
+       SessionID() []byte
+
+       // ClientVersion returns the client's version string as hashed
+       // into the session ID.
+       ClientVersion() []byte
+
+       // ServerVersion returns the server's version string as hashed
+       // into the session ID.
+       ServerVersion() []byte
+
+       // RemoteAddr returns the remote address for this connection.
+       RemoteAddr() net.Addr
+
+       // LocalAddr returns the local address for this connection.
+       LocalAddr() net.Addr
+}
+
+// Conn represents an SSH connection for both server and client roles.
+// Conn is the basis for implementing an application layer, such
+// as ClientConn, which implements the traditional shell access for
+// clients.
+type Conn interface {
+       ConnMetadata
+
+       // SendRequest sends a global request, and returns the
+       // reply. If wantReply is true, it returns the response status
+       // and payload. See also RFC4254, section 4.
+       SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error)
+
+       // OpenChannel tries to open a channel. If the request is
+       // rejected, it returns *OpenChannelError. On success it returns
+       // the SSH Channel and a Go channel for incoming, out-of-band
+       // requests. The Go channel must be serviced, or the
+       // connection will hang.
+       OpenChannel(name string, data []byte) (Channel, <-chan *Request, error)
+
+       // Close closes the underlying network connection.
+       Close() error
+
+       // Wait blocks until the connection has shut down, and returns the
+       // error causing the shutdown.
+       Wait() error
+
+       // TODO(hanwen): consider exposing:
+       //   RequestKeyChange
+       //   Disconnect
+}
+
+// DiscardRequests consumes and rejects all requests from the
+// passed-in channel.
+func DiscardRequests(in <-chan *Request) {
+       for req := range in {
+               if req.WantReply {
+                       req.Reply(false, nil)
+               }
+       }
+}
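+
+// A minimal usage sketch: service the out-of-band request channel returned
+// by OpenChannel so the connection cannot stall. conn is assumed to be an
+// established Conn:
+//
+//    ch, reqs, err := conn.OpenChannel("session", nil)
+//    if err != nil {
+//        return err
+//    }
+//    go DiscardRequests(reqs)
+//    // ... use ch ...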
+
+// A connection represents an incoming connection.
+type connection struct {
+       transport *handshakeTransport
+       sshConn
+
+       // The connection protocol.
+       *mux
+}
+
+func (c *connection) Close() error {
+       return c.sshConn.conn.Close()
+}
+
+// sshConn provides net.Conn metadata, but disallows direct reads and
+// writes.
+type sshConn struct {
+       conn net.Conn
+
+       user          string
+       sessionID     []byte
+       clientVersion []byte
+       serverVersion []byte
+}
+
+func dup(src []byte) []byte {
+       dst := make([]byte, len(src))
+       copy(dst, src)
+       return dst
+}
+
+func (c *sshConn) User() string {
+       return c.user
+}
+
+func (c *sshConn) RemoteAddr() net.Addr {
+       return c.conn.RemoteAddr()
+}
+
+func (c *sshConn) Close() error {
+       return c.conn.Close()
+}
+
+func (c *sshConn) LocalAddr() net.Addr {
+       return c.conn.LocalAddr()
+}
+
+func (c *sshConn) SessionID() []byte {
+       return dup(c.sessionID)
+}
+
+func (c *sshConn) ClientVersion() []byte {
+       return dup(c.clientVersion)
+}
+
+func (c *sshConn) ServerVersion() []byte {
+       return dup(c.serverVersion)
+}
diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go
new file mode 100644 (file)
index 0000000..d6be894
--- /dev/null
@@ -0,0 +1,18 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package ssh implements an SSH client and server.
+
+SSH is a transport security protocol, an authentication protocol and a
+family of application protocols. The most typical application-level
+protocol is a remote shell, and this is specifically implemented. However,
+the multiplexed nature of SSH is exposed to users who wish to support
+other protocols as well.
+
+References:
+  [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
+  [SSH-PARAMETERS]:    http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
+*/
+package ssh // import "golang.org/x/crypto/ssh"
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
new file mode 100644 (file)
index 0000000..8de6506
--- /dev/null
@@ -0,0 +1,625 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "crypto/rand"
+       "errors"
+       "fmt"
+       "io"
+       "log"
+       "net"
+       "sync"
+)
+
+// debugHandshake, if set, prints messages sent and received.  Key
+// exchange messages are printed as if DH were used, so the debug
+// messages are wrong when using ECDH.
+const debugHandshake = false
+
+// chanSize sets the amount of buffering in SSH connections. This is
+// primarily for testing: setting chanSize=0 uncovers deadlocks more
+// quickly.
+const chanSize = 16
+
+// keyingTransport is a packet-based transport that supports key
+// changes. It need not be thread-safe. It should pass through
+// msgNewKeys in both directions.
+type keyingTransport interface {
+       packetConn
+
+       // prepareKeyChange sets up a key change. The key change for a
+       // direction will be effected if a msgNewKeys message is sent
+       // or received.
+       prepareKeyChange(*algorithms, *kexResult) error
+}
+
+// handshakeTransport implements rekeying on top of a keyingTransport
+// and offers a thread-safe writePacket() interface.
+type handshakeTransport struct {
+       conn   keyingTransport
+       config *Config
+
+       serverVersion []byte
+       clientVersion []byte
+
+       // hostKeys is non-empty if we are the server. In that case,
+       // it contains all host keys that can be used to sign the
+       // connection.
+       hostKeys []Signer
+
+       // hostKeyAlgorithms is non-empty if we are the client. In that case,
+       // we accept these key types from the server as host key.
+       hostKeyAlgorithms []string
+
+       // On read error, incoming is closed, and readError is set.
+       incoming  chan []byte
+       readError error
+
+       mu             sync.Mutex
+       writeError     error
+       sentInitPacket []byte
+       sentInitMsg    *kexInitMsg
+       pendingPackets [][]byte // Used when a key exchange is in progress.
+
+       // If the read loop wants to schedule a kex, it pings this
+       // channel, and the write loop will send out a kex
+       // message.
+       requestKex chan struct{}
+
+       // If the other side requests or confirms a kex, its kexInit
+       // packet is sent here for the write loop to find it.
+       startKex chan *pendingKex
+
+       // data for host key checking
+       hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error
+       dialAddress     string
+       remoteAddr      net.Addr
+
+       // Algorithms agreed in the last key exchange.
+       algorithms *algorithms
+
+       readPacketsLeft uint32
+       readBytesLeft   int64
+
+       writePacketsLeft uint32
+       writeBytesLeft   int64
+
+       // The session ID or nil if first kex did not complete yet.
+       sessionID []byte
+}
+
+type pendingKex struct {
+       otherInit []byte
+       done      chan error
+}
+
+func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport {
+       t := &handshakeTransport{
+               conn:          conn,
+               serverVersion: serverVersion,
+               clientVersion: clientVersion,
+               incoming:      make(chan []byte, chanSize),
+               requestKex:    make(chan struct{}, 1),
+               startKex:      make(chan *pendingKex, 1),
+
+               config: config,
+       }
+
+       // We always start with a mandatory key exchange.
+       t.requestKex <- struct{}{}
+       return t
+}
+
+func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport {
+       t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
+       t.dialAddress = dialAddr
+       t.remoteAddr = addr
+       t.hostKeyCallback = config.HostKeyCallback
+       if config.HostKeyAlgorithms != nil {
+               t.hostKeyAlgorithms = config.HostKeyAlgorithms
+       } else {
+               t.hostKeyAlgorithms = supportedHostKeyAlgos
+       }
+       go t.readLoop()
+       go t.kexLoop()
+       return t
+}
+
+func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport {
+       t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion)
+       t.hostKeys = config.hostKeys
+       go t.readLoop()
+       go t.kexLoop()
+       return t
+}
+
+func (t *handshakeTransport) getSessionID() []byte {
+       return t.sessionID
+}
+
+// waitSession waits for the session to be established. This should be
+// the first thing to call after instantiating handshakeTransport.
+func (t *handshakeTransport) waitSession() error {
+       p, err := t.readPacket()
+       if err != nil {
+               return err
+       }
+       if p[0] != msgNewKeys {
+               return fmt.Errorf("ssh: first packet should be msgNewKeys")
+       }
+
+       return nil
+}
+
+func (t *handshakeTransport) id() string {
+       if len(t.hostKeys) > 0 {
+               return "server"
+       }
+       return "client"
+}
+
+func (t *handshakeTransport) printPacket(p []byte, write bool) {
+       action := "got"
+       if write {
+               action = "sent"
+       }
+
+       if p[0] == msgChannelData || p[0] == msgChannelExtendedData {
+               log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p))
+       } else {
+               msg, err := decode(p)
+               log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err)
+       }
+}
+
+func (t *handshakeTransport) readPacket() ([]byte, error) {
+       p, ok := <-t.incoming
+       if !ok {
+               return nil, t.readError
+       }
+       return p, nil
+}
+
+func (t *handshakeTransport) readLoop() {
+       first := true
+       for {
+               p, err := t.readOnePacket(first)
+               first = false
+               if err != nil {
+                       t.readError = err
+                       close(t.incoming)
+                       break
+               }
+               if p[0] == msgIgnore || p[0] == msgDebug {
+                       continue
+               }
+               t.incoming <- p
+       }
+
+       // Stop writers too.
+       t.recordWriteError(t.readError)
+
+       // Unblock the writer in case it is waiting for this.
+       close(t.startKex)
+
+       // Don't close t.requestKex; it's also written to from writePacket.
+}
+
+func (t *handshakeTransport) pushPacket(p []byte) error {
+       if debugHandshake {
+               t.printPacket(p, true)
+       }
+       return t.conn.writePacket(p)
+}
+
+func (t *handshakeTransport) getWriteError() error {
+       t.mu.Lock()
+       defer t.mu.Unlock()
+       return t.writeError
+}
+
+func (t *handshakeTransport) recordWriteError(err error) {
+       t.mu.Lock()
+       defer t.mu.Unlock()
+       if t.writeError == nil && err != nil {
+               t.writeError = err
+       }
+}
+
+func (t *handshakeTransport) requestKeyExchange() {
+       select {
+       case t.requestKex <- struct{}{}:
+       default:
+               // something already requested a kex, so do nothing.
+       }
+}
+
+func (t *handshakeTransport) kexLoop() {
+
+write:
+       for t.getWriteError() == nil {
+               var request *pendingKex
+               var sent bool
+
+               for request == nil || !sent {
+                       var ok bool
+                       select {
+                       case request, ok = <-t.startKex:
+                               if !ok {
+                                       break write
+                               }
+                       case <-t.requestKex:
+                               break
+                       }
+
+                       if !sent {
+                               if err := t.sendKexInit(); err != nil {
+                                       t.recordWriteError(err)
+                                       break
+                               }
+                               sent = true
+                       }
+               }
+
+               if err := t.getWriteError(); err != nil {
+                       if request != nil {
+                               request.done <- err
+                       }
+                       break
+               }
+
+               // We're not servicing t.requestKex, but that is OK:
+               // we never block on sending to t.requestKex.
+
+               // We're not servicing t.startKex, but the remote end
+               // has just sent us a kexInitMsg, so it can't send
+               // another key change request, until we close the done
+               // channel on the pendingKex request.
+
+               err := t.enterKeyExchange(request.otherInit)
+
+               t.mu.Lock()
+               t.writeError = err
+               t.sentInitPacket = nil
+               t.sentInitMsg = nil
+               t.writePacketsLeft = packetRekeyThreshold
+               if t.config.RekeyThreshold > 0 {
+                       t.writeBytesLeft = int64(t.config.RekeyThreshold)
+               } else if t.algorithms != nil {
+                       t.writeBytesLeft = t.algorithms.w.rekeyBytes()
+               }
+
+               // We have completed the key exchange. Since the
+               // reader is still blocked, it is safe to clear out
+               // the requestKex channel. This avoids the situation
+               // where: 1) we consumed our own request for the
+               // initial kex, and 2) the kex from the remote side
+               // caused another send on the requestKex channel.
+       clear:
+               for {
+                       select {
+                       case <-t.requestKex:
+                               //
+                       default:
+                               break clear
+                       }
+               }
+
+               request.done <- t.writeError
+
+               // kex finished. Push packets that we received while
+               // the kex was in progress. Don't look at t.startKex
+               // and don't increment writtenSinceKex: if we trigger
+               // another kex while we are still busy with the last
+               // one, things will become very confusing.
+               for _, p := range t.pendingPackets {
+                       t.writeError = t.pushPacket(p)
+                       if t.writeError != nil {
+                               break
+                       }
+               }
+               t.pendingPackets = t.pendingPackets[:0]
+               t.mu.Unlock()
+       }
+
+       // drain startKex channel. We don't service t.requestKex
+       // because nobody does blocking sends there.
+       go func() {
+               for init := range t.startKex {
+                       init.done <- t.writeError
+               }
+       }()
+
+       // Unblock reader.
+       t.conn.Close()
+}
+
+// The protocol uses uint32 for packet counters, so we can't let them
+// reach 1<<32.  We will actually read and write more packets than
+// this, though: the other side may send more packets, and after we
+// hit this limit on writing we will send a few more packets for the
+// key exchange itself.
+const packetRekeyThreshold = (1 << 31)
+
+func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) {
+       p, err := t.conn.readPacket()
+       if err != nil {
+               return nil, err
+       }
+
+       if t.readPacketsLeft > 0 {
+               t.readPacketsLeft--
+       } else {
+               t.requestKeyExchange()
+       }
+
+       if t.readBytesLeft > 0 {
+               t.readBytesLeft -= int64(len(p))
+       } else {
+               t.requestKeyExchange()
+       }
+
+       if debugHandshake {
+               t.printPacket(p, false)
+       }
+
+       if first && p[0] != msgKexInit {
+               return nil, fmt.Errorf("ssh: first packet should be msgKexInit")
+       }
+
+       if p[0] != msgKexInit {
+               return p, nil
+       }
+
+       firstKex := t.sessionID == nil
+
+       kex := pendingKex{
+               done:      make(chan error, 1),
+               otherInit: p,
+       }
+       t.startKex <- &kex
+       err = <-kex.done
+
+       if debugHandshake {
+               log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err)
+       }
+
+       if err != nil {
+               return nil, err
+       }
+
+       t.readPacketsLeft = packetRekeyThreshold
+       if t.config.RekeyThreshold > 0 {
+               t.readBytesLeft = int64(t.config.RekeyThreshold)
+       } else {
+               t.readBytesLeft = t.algorithms.r.rekeyBytes()
+       }
+
+       // By default, a key exchange is hidden from higher layers by
+       // translating it into msgIgnore.
+       successPacket := []byte{msgIgnore}
+       if firstKex {
+               // sendKexInit() for the first kex waits for
+               // msgNewKeys so the authentication process is
+               // guaranteed to happen over an encrypted transport.
+               successPacket = []byte{msgNewKeys}
+       }
+
+       return successPacket, nil
+}
+
+// sendKexInit sends a key change message.
+func (t *handshakeTransport) sendKexInit() error {
+       t.mu.Lock()
+       defer t.mu.Unlock()
+       if t.sentInitMsg != nil {
+               // kexInits may be sent either in response to the other side,
+               // or because our side wants to initiate a key change, so we
+               // may have already sent a kexInit. In that case, don't send a
+               // second kexInit.
+               return nil
+       }
+
+       msg := &kexInitMsg{
+               KexAlgos:                t.config.KeyExchanges,
+               CiphersClientServer:     t.config.Ciphers,
+               CiphersServerClient:     t.config.Ciphers,
+               MACsClientServer:        t.config.MACs,
+               MACsServerClient:        t.config.MACs,
+               CompressionClientServer: supportedCompressions,
+               CompressionServerClient: supportedCompressions,
+       }
+       io.ReadFull(rand.Reader, msg.Cookie[:])
+
+       if len(t.hostKeys) > 0 {
+               for _, k := range t.hostKeys {
+                       msg.ServerHostKeyAlgos = append(
+                               msg.ServerHostKeyAlgos, k.PublicKey().Type())
+               }
+       } else {
+               msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
+       }
+       packet := Marshal(msg)
+
+       // writePacket destroys the contents, so save a copy.
+       packetCopy := make([]byte, len(packet))
+       copy(packetCopy, packet)
+
+       if err := t.pushPacket(packetCopy); err != nil {
+               return err
+       }
+
+       t.sentInitMsg = msg
+       t.sentInitPacket = packet
+
+       return nil
+}
+
+func (t *handshakeTransport) writePacket(p []byte) error {
+       switch p[0] {
+       case msgKexInit:
+               return errors.New("ssh: only handshakeTransport can send kexInit")
+       case msgNewKeys:
+               return errors.New("ssh: only handshakeTransport can send newKeys")
+       }
+
+       t.mu.Lock()
+       defer t.mu.Unlock()
+       if t.writeError != nil {
+               return t.writeError
+       }
+
+       if t.sentInitMsg != nil {
+               // Copy the packet so the writer can reuse the buffer.
+               cp := make([]byte, len(p))
+               copy(cp, p)
+               t.pendingPackets = append(t.pendingPackets, cp)
+               return nil
+       }
+
+       if t.writeBytesLeft > 0 {
+               t.writeBytesLeft -= int64(len(p))
+       } else {
+               t.requestKeyExchange()
+       }
+
+       if t.writePacketsLeft > 0 {
+               t.writePacketsLeft--
+       } else {
+               t.requestKeyExchange()
+       }
+
+       if err := t.pushPacket(p); err != nil {
+               t.writeError = err
+       }
+
+       return nil
+}
+
+func (t *handshakeTransport) Close() error {
+       return t.conn.Close()
+}
+
+func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
+       if debugHandshake {
+               log.Printf("%s entered key exchange", t.id())
+       }
+
+       otherInit := &kexInitMsg{}
+       if err := Unmarshal(otherInitPacket, otherInit); err != nil {
+               return err
+       }
+
+       magics := handshakeMagics{
+               clientVersion: t.clientVersion,
+               serverVersion: t.serverVersion,
+               clientKexInit: otherInitPacket,
+               serverKexInit: t.sentInitPacket,
+       }
+
+       clientInit := otherInit
+       serverInit := t.sentInitMsg
+       if len(t.hostKeys) == 0 {
+               clientInit, serverInit = serverInit, clientInit
+
+               magics.clientKexInit = t.sentInitPacket
+               magics.serverKexInit = otherInitPacket
+       }
+
+       var err error
+       t.algorithms, err = findAgreedAlgorithms(clientInit, serverInit)
+       if err != nil {
+               return err
+       }
+
+       // We don't send FirstKexFollows, but we handle receiving it.
+       //
+       // RFC 4253 section 7 defines the kex and the agreement method for
+       // first_kex_packet_follows. It states that the guessed packet
+       // should be ignored if the "kex algorithm and/or the host
+       // key algorithm is guessed wrong (server and client have
+       // different preferred algorithm), or if any of the other
+       // algorithms cannot be agreed upon". The other algorithms have
+       // already been checked above so the kex algorithm and host key
+       // algorithm are checked here.
+       if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) {
+               // other side sent a kex message for the wrong algorithm,
+               // which we have to ignore.
+               if _, err := t.conn.readPacket(); err != nil {
+                       return err
+               }
+       }
+
+       kex, ok := kexAlgoMap[t.algorithms.kex]
+       if !ok {
+               return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex)
+       }
+
+       var result *kexResult
+       if len(t.hostKeys) > 0 {
+               result, err = t.server(kex, t.algorithms, &magics)
+       } else {
+               result, err = t.client(kex, t.algorithms, &magics)
+       }
+
+       if err != nil {
+               return err
+       }
+
+       if t.sessionID == nil {
+               t.sessionID = result.H
+       }
+       result.SessionID = t.sessionID
+
+       t.conn.prepareKeyChange(t.algorithms, result)
+       if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil {
+               return err
+       }
+       if packet, err := t.conn.readPacket(); err != nil {
+               return err
+       } else if packet[0] != msgNewKeys {
+               return unexpectedMessageError(msgNewKeys, packet[0])
+       }
+
+       return nil
+}
+
+func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
+       var hostKey Signer
+       for _, k := range t.hostKeys {
+               if algs.hostKey == k.PublicKey().Type() {
+                       hostKey = k
+               }
+       }
+
+       r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
+       return r, err
+}
+
+func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
+       result, err := kex.Client(t.conn, t.config.Rand, magics)
+       if err != nil {
+               return nil, err
+       }
+
+       hostKey, err := ParsePublicKey(result.HostKey)
+       if err != nil {
+               return nil, err
+       }
+
+       if err := verifyHostKeySignature(hostKey, result); err != nil {
+               return nil, err
+       }
+
+       if t.hostKeyCallback != nil {
+               err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey)
+               if err != nil {
+                       return nil, err
+               }
+       }
+
+       return result, nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go
new file mode 100644 (file)
index 0000000..c87fbeb
--- /dev/null
@@ -0,0 +1,540 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "crypto"
+       "crypto/ecdsa"
+       "crypto/elliptic"
+       "crypto/rand"
+       "crypto/subtle"
+       "errors"
+       "io"
+       "math/big"
+
+       "golang.org/x/crypto/curve25519"
+)
+
+const (
+       kexAlgoDH1SHA1          = "diffie-hellman-group1-sha1"
+       kexAlgoDH14SHA1         = "diffie-hellman-group14-sha1"
+       kexAlgoECDH256          = "ecdh-sha2-nistp256"
+       kexAlgoECDH384          = "ecdh-sha2-nistp384"
+       kexAlgoECDH521          = "ecdh-sha2-nistp521"
+       kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
+)
+
+// kexResult captures the outcome of a key exchange.
+type kexResult struct {
+       // Session hash. See also RFC 4253, section 8.
+       H []byte
+
+       // Shared secret. See also RFC 4253, section 8.
+       K []byte
+
+       // Host key as hashed into H.
+       HostKey []byte
+
+       // Signature of H.
+       Signature []byte
+
+       // A cryptographic hash function that matches the security
+       // level of the key exchange algorithm. It is used for
+       // calculating H, and for deriving keys from H and K.
+       Hash crypto.Hash
+
+       // The session ID, which is the first H computed. This is used
+       // to derive key material inside the transport.
+       SessionID []byte
+}
+
+// handshakeMagics contains data that is always included in the
+// session hash.
+type handshakeMagics struct {
+       clientVersion, serverVersion []byte
+       clientKexInit, serverKexInit []byte
+}
+
+func (m *handshakeMagics) write(w io.Writer) {
+       writeString(w, m.clientVersion)
+       writeString(w, m.serverVersion)
+       writeString(w, m.clientKexInit)
+       writeString(w, m.serverKexInit)
+}
+
+// kexAlgorithm abstracts different key exchange algorithms.
+type kexAlgorithm interface {
+       // Server runs server-side key agreement, signing the result
+       // with a hostkey.
+       Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
+
+       // Client runs the client-side key agreement. Caller is
+       // responsible for verifying the host key signature.
+       Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error)
+}
+
+// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
+type dhGroup struct {
+       g, p, pMinus1 *big.Int
+}
+
+func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
+       if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 {
+               return nil, errors.New("ssh: DH parameter out of bounds")
+       }
+       return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil
+}
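+
+// Both sides obtain the same secret because exponentiation commutes:
+// (g^x)^y = (g^y)^x = g^(xy) mod p, where x and y are the parties' private
+// values and g, p describe the group.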
+
+func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
+       hashFunc := crypto.SHA1
+
+       var x *big.Int
+       for {
+               var err error
+               if x, err = rand.Int(randSource, group.pMinus1); err != nil {
+                       return nil, err
+               }
+               if x.Sign() > 0 {
+                       break
+               }
+       }
+
+       X := new(big.Int).Exp(group.g, x, group.p)
+       kexDHInit := kexDHInitMsg{
+               X: X,
+       }
+       if err := c.writePacket(Marshal(&kexDHInit)); err != nil {
+               return nil, err
+       }
+
+       packet, err := c.readPacket()
+       if err != nil {
+               return nil, err
+       }
+
+       var kexDHReply kexDHReplyMsg
+       if err = Unmarshal(packet, &kexDHReply); err != nil {
+               return nil, err
+       }
+
+       kInt, err := group.diffieHellman(kexDHReply.Y, x)
+       if err != nil {
+               return nil, err
+       }
+
+       h := hashFunc.New()
+       magics.write(h)
+       writeString(h, kexDHReply.HostKey)
+       writeInt(h, X)
+       writeInt(h, kexDHReply.Y)
+       K := make([]byte, intLength(kInt))
+       marshalInt(K, kInt)
+       h.Write(K)
+
+       return &kexResult{
+               H:         h.Sum(nil),
+               K:         K,
+               HostKey:   kexDHReply.HostKey,
+               Signature: kexDHReply.Signature,
+               Hash:      crypto.SHA1,
+       }, nil
+}
+
+func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+       hashFunc := crypto.SHA1
+       packet, err := c.readPacket()
+       if err != nil {
+               return
+       }
+       var kexDHInit kexDHInitMsg
+       if err = Unmarshal(packet, &kexDHInit); err != nil {
+               return
+       }
+
+       var y *big.Int
+       for {
+               if y, err = rand.Int(randSource, group.pMinus1); err != nil {
+                       return
+               }
+               if y.Sign() > 0 {
+                       break
+               }
+       }
+
+       Y := new(big.Int).Exp(group.g, y, group.p)
+       kInt, err := group.diffieHellman(kexDHInit.X, y)
+       if err != nil {
+               return nil, err
+       }
+
+       hostKeyBytes := priv.PublicKey().Marshal()
+
+       h := hashFunc.New()
+       magics.write(h)
+       writeString(h, hostKeyBytes)
+       writeInt(h, kexDHInit.X)
+       writeInt(h, Y)
+
+       K := make([]byte, intLength(kInt))
+       marshalInt(K, kInt)
+       h.Write(K)
+
+       H := h.Sum(nil)
+
+       // H is already a hash, but the hostkey signing will apply its
+       // own key-specific hash algorithm.
+       sig, err := signAndMarshal(priv, randSource, H)
+       if err != nil {
+               return nil, err
+       }
+
+       kexDHReply := kexDHReplyMsg{
+               HostKey:   hostKeyBytes,
+               Y:         Y,
+               Signature: sig,
+       }
+       packet = Marshal(&kexDHReply)
+
+       if err = c.writePacket(packet); err != nil {
+               return nil, err
+       }
+
+       return &kexResult{
+               H:         H,
+               K:         K,
+               HostKey:   hostKeyBytes,
+               Signature: sig,
+               Hash:      crypto.SHA1,
+       }, nil
+}
+
+// ecdh performs Elliptic Curve Diffie-Hellman key exchange as
+// described in RFC 5656, section 4.
+type ecdh struct {
+       curve elliptic.Curve
+}
+
+func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
+       ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
+       if err != nil {
+               return nil, err
+       }
+
+       kexInit := kexECDHInitMsg{
+               ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y),
+       }
+
+       serialized := Marshal(&kexInit)
+       if err := c.writePacket(serialized); err != nil {
+               return nil, err
+       }
+
+       packet, err := c.readPacket()
+       if err != nil {
+               return nil, err
+       }
+
+       var reply kexECDHReplyMsg
+       if err = Unmarshal(packet, &reply); err != nil {
+               return nil, err
+       }
+
+       x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey)
+       if err != nil {
+               return nil, err
+       }
+
+       // generate shared secret
+       secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes())
+
+       h := ecHash(kex.curve).New()
+       magics.write(h)
+       writeString(h, reply.HostKey)
+       writeString(h, kexInit.ClientPubKey)
+       writeString(h, reply.EphemeralPubKey)
+       K := make([]byte, intLength(secret))
+       marshalInt(K, secret)
+       h.Write(K)
+
+       return &kexResult{
+               H:         h.Sum(nil),
+               K:         K,
+               HostKey:   reply.HostKey,
+               Signature: reply.Signature,
+               Hash:      ecHash(kex.curve),
+       }, nil
+}
+
+// unmarshalECKey parses and checks an EC key.
+func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) {
+       x, y = elliptic.Unmarshal(curve, pubkey)
+       if x == nil {
+               return nil, nil, errors.New("ssh: elliptic.Unmarshal failure")
+       }
+       if !validateECPublicKey(curve, x, y) {
+               return nil, nil, errors.New("ssh: public key not on curve")
+       }
+       return x, y, nil
+}
+
+// validateECPublicKey checks that the point is a valid public key for
+// the given curve. See [SEC1], 3.2.2
+func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
+       if x.Sign() == 0 && y.Sign() == 0 {
+               return false
+       }
+
+       if x.Cmp(curve.Params().P) >= 0 {
+               return false
+       }
+
+       if y.Cmp(curve.Params().P) >= 0 {
+               return false
+       }
+
+       if !curve.IsOnCurve(x, y) {
+               return false
+       }
+
+       // We don't check if N * PubKey == 0, since
+       //
+       // - the NIST curves have cofactor = 1, so this is implicit.
+       // (We don't foresee an implementation that supports non-NIST
+       // curves.)
+       //
+       // - for ephemeral keys, we don't need to worry about small
+       // subgroup attacks.
+       return true
+}
+
+func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+       packet, err := c.readPacket()
+       if err != nil {
+               return nil, err
+       }
+
+       var kexECDHInit kexECDHInitMsg
+       if err = Unmarshal(packet, &kexECDHInit); err != nil {
+               return nil, err
+       }
+
+       clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey)
+       if err != nil {
+               return nil, err
+       }
+
+       // We could cache this key across multiple users/multiple
+       // connection attempts, but the benefit is small. OpenSSH
+       // generates a new key for each incoming connection.
+       ephKey, err := ecdsa.GenerateKey(kex.curve, rand)
+       if err != nil {
+               return nil, err
+       }
+
+       hostKeyBytes := priv.PublicKey().Marshal()
+
+       serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y)
+
+       // generate shared secret
+       secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes())
+
+       h := ecHash(kex.curve).New()
+       magics.write(h)
+       writeString(h, hostKeyBytes)
+       writeString(h, kexECDHInit.ClientPubKey)
+       writeString(h, serializedEphKey)
+
+       K := make([]byte, intLength(secret))
+       marshalInt(K, secret)
+       h.Write(K)
+
+       H := h.Sum(nil)
+
+       // H is already a hash, but the hostkey signing will apply its
+       // own key-specific hash algorithm.
+       sig, err := signAndMarshal(priv, rand, H)
+       if err != nil {
+               return nil, err
+       }
+
+       reply := kexECDHReplyMsg{
+               EphemeralPubKey: serializedEphKey,
+               HostKey:         hostKeyBytes,
+               Signature:       sig,
+       }
+
+       serialized := Marshal(&reply)
+       if err := c.writePacket(serialized); err != nil {
+               return nil, err
+       }
+
+       return &kexResult{
+               H:         H,
+               K:         K,
+               HostKey:   reply.HostKey,
+               Signature: sig,
+               Hash:      ecHash(kex.curve),
+       }, nil
+}
+
+var kexAlgoMap = map[string]kexAlgorithm{}
+
+func init() {
+       // This is the group called diffie-hellman-group1-sha1 in RFC
+       // 4253 and Oakley Group 2 in RFC 2409.
+       p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
+       kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
+               g:       new(big.Int).SetInt64(2),
+               p:       p,
+               pMinus1: new(big.Int).Sub(p, bigOne),
+       }
+
+       // This is the group called diffie-hellman-group14-sha1 in RFC
+       // 4253 and Oakley Group 14 in RFC 3526.
+       p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
+
+       kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
+               g:       new(big.Int).SetInt64(2),
+               p:       p,
+               pMinus1: new(big.Int).Sub(p, bigOne),
+       }
+
+       kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
+       kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
+       kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
+       kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
+}
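+
+// Selection sketch (editor's illustration, not part of the upstream file;
+// the real negotiation happens in the handshake code, and conn, magics and
+// chosenAlgo are hypothetical placeholders for values produced there):
+//
+//        kex, ok := kexAlgoMap[chosenAlgo]
+//        if !ok {
+//                // algorithm was never registered; abort the handshake
+//        }
+//        result, err := kex.Client(conn, rand.Reader, magics)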
+
+// curve25519sha256 implements the curve25519-sha256@libssh.org key
+// agreement protocol, as described in
+// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
+type curve25519sha256 struct{}
+
+type curve25519KeyPair struct {
+       priv [32]byte
+       pub  [32]byte
+}
+
+func (kp *curve25519KeyPair) generate(rand io.Reader) error {
+       if _, err := io.ReadFull(rand, kp.priv[:]); err != nil {
+               return err
+       }
+       curve25519.ScalarBaseMult(&kp.pub, &kp.priv)
+       return nil
+}
+
+// curve25519Zeros is just an array of 32 zero bytes so that we have something
+// convenient to compare against in order to reject curve25519 points with the
+// wrong order.
+var curve25519Zeros [32]byte
+
+func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) {
+       var kp curve25519KeyPair
+       if err := kp.generate(rand); err != nil {
+               return nil, err
+       }
+       if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil {
+               return nil, err
+       }
+
+       packet, err := c.readPacket()
+       if err != nil {
+               return nil, err
+       }
+
+       var reply kexECDHReplyMsg
+       if err = Unmarshal(packet, &reply); err != nil {
+               return nil, err
+       }
+       if len(reply.EphemeralPubKey) != 32 {
+               return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
+       }
+
+       var servPub, secret [32]byte
+       copy(servPub[:], reply.EphemeralPubKey)
+       curve25519.ScalarMult(&secret, &kp.priv, &servPub)
+       if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
+               return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
+       }
+
+       h := crypto.SHA256.New()
+       magics.write(h)
+       writeString(h, reply.HostKey)
+       writeString(h, kp.pub[:])
+       writeString(h, reply.EphemeralPubKey)
+
+       kInt := new(big.Int).SetBytes(secret[:])
+       K := make([]byte, intLength(kInt))
+       marshalInt(K, kInt)
+       h.Write(K)
+
+       return &kexResult{
+               H:         h.Sum(nil),
+               K:         K,
+               HostKey:   reply.HostKey,
+               Signature: reply.Signature,
+               Hash:      crypto.SHA256,
+       }, nil
+}
+
+func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
+       packet, err := c.readPacket()
+       if err != nil {
+               return
+       }
+       var kexInit kexECDHInitMsg
+       if err = Unmarshal(packet, &kexInit); err != nil {
+               return
+       }
+
+       if len(kexInit.ClientPubKey) != 32 {
+               return nil, errors.New("ssh: peer's curve25519 public value has wrong length")
+       }
+
+       var kp curve25519KeyPair
+       if err := kp.generate(rand); err != nil {
+               return nil, err
+       }
+
+       var clientPub, secret [32]byte
+       copy(clientPub[:], kexInit.ClientPubKey)
+       curve25519.ScalarMult(&secret, &kp.priv, &clientPub)
+       if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 {
+               return nil, errors.New("ssh: peer's curve25519 public value has wrong order")
+       }
+
+       hostKeyBytes := priv.PublicKey().Marshal()
+
+       h := crypto.SHA256.New()
+       magics.write(h)
+       writeString(h, hostKeyBytes)
+       writeString(h, kexInit.ClientPubKey)
+       writeString(h, kp.pub[:])
+
+       kInt := new(big.Int).SetBytes(secret[:])
+       K := make([]byte, intLength(kInt))
+       marshalInt(K, kInt)
+       h.Write(K)
+
+       H := h.Sum(nil)
+
+       sig, err := signAndMarshal(priv, rand, H)
+       if err != nil {
+               return nil, err
+       }
+
+       reply := kexECDHReplyMsg{
+               EphemeralPubKey: kp.pub[:],
+               HostKey:         hostKeyBytes,
+               Signature:       sig,
+       }
+       if err := c.writePacket(Marshal(&reply)); err != nil {
+               return nil, err
+       }
+       return &kexResult{
+               H:         H,
+               K:         K,
+               HostKey:   hostKeyBytes,
+               Signature: sig,
+               Hash:      crypto.SHA256,
+       }, nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go
new file mode 100644 (file)
index 0000000..f38de98
--- /dev/null
@@ -0,0 +1,905 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "bytes"
+       "crypto"
+       "crypto/dsa"
+       "crypto/ecdsa"
+       "crypto/elliptic"
+       "crypto/md5"
+       "crypto/rsa"
+       "crypto/sha256"
+       "crypto/x509"
+       "encoding/asn1"
+       "encoding/base64"
+       "encoding/hex"
+       "encoding/pem"
+       "errors"
+       "fmt"
+       "io"
+       "math/big"
+       "strings"
+
+       "golang.org/x/crypto/ed25519"
+)
+
+// These constants represent the algorithm names for key types supported by this
+// package.
+const (
+       KeyAlgoRSA      = "ssh-rsa"
+       KeyAlgoDSA      = "ssh-dss"
+       KeyAlgoECDSA256 = "ecdsa-sha2-nistp256"
+       KeyAlgoECDSA384 = "ecdsa-sha2-nistp384"
+       KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
+       KeyAlgoED25519  = "ssh-ed25519"
+)
+
+// parsePubKey parses a public key of the given algorithm.
+// Use ParsePublicKey for keys with a prepended algorithm name.
+func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) {
+       switch algo {
+       case KeyAlgoRSA:
+               return parseRSA(in)
+       case KeyAlgoDSA:
+               return parseDSA(in)
+       case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521:
+               return parseECDSA(in)
+       case KeyAlgoED25519:
+               return parseED25519(in)
+       case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01:
+               cert, err := parseCert(in, certToPrivAlgo(algo))
+               if err != nil {
+                       return nil, nil, err
+               }
+               return cert, nil, nil
+       }
+       return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
+}
+
+// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format
+// (see sshd(8) manual page) once the options and key type fields have been
+// removed.
+func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) {
+       in = bytes.TrimSpace(in)
+
+       i := bytes.IndexAny(in, " \t")
+       if i == -1 {
+               i = len(in)
+       }
+       base64Key := in[:i]
+
+       key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key)))
+       n, err := base64.StdEncoding.Decode(key, base64Key)
+       if err != nil {
+               return nil, "", err
+       }
+       key = key[:n]
+       out, err = ParsePublicKey(key)
+       if err != nil {
+               return nil, "", err
+       }
+       comment = string(bytes.TrimSpace(in[i:]))
+       return out, comment, nil
+}
+
+// ParseKnownHosts parses an entry in the format of the known_hosts file.
+//
+// The known_hosts format is documented in the sshd(8) manual page. This
+// function will parse a single entry from in. On successful return, marker
+// will contain the optional marker value (i.e. "cert-authority" or "revoked")
+// or else be empty, hosts will contain the hosts that this entry matches,
+// pubKey will contain the public key and comment will contain any trailing
+// comment at the end of the line. See the sshd(8) manual page for the various
+// forms that a host string can take.
+//
+// The unparsed remainder of the input will be returned in rest. This function
+// can be called repeatedly to parse multiple entries.
+//
+// If no entries were found in the input then err will be io.EOF. Otherwise a
+// non-nil err value indicates a parse error.
+func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) {
+       for len(in) > 0 {
+               end := bytes.IndexByte(in, '\n')
+               if end != -1 {
+                       rest = in[end+1:]
+                       in = in[:end]
+               } else {
+                       rest = nil
+               }
+
+               end = bytes.IndexByte(in, '\r')
+               if end != -1 {
+                       in = in[:end]
+               }
+
+               in = bytes.TrimSpace(in)
+               if len(in) == 0 || in[0] == '#' {
+                       in = rest
+                       continue
+               }
+
+               i := bytes.IndexAny(in, " \t")
+               if i == -1 {
+                       in = rest
+                       continue
+               }
+
+               // Strip out the beginning of the known_hosts entry.
+               // This is either an optional marker or a (set of) hostname(s).
+               keyFields := bytes.Fields(in)
+               if len(keyFields) < 3 || len(keyFields) > 5 {
+                       return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data")
+               }
+
+               // keyFields[0] is either "@cert-authority", "@revoked" or a
+               // comma-separated list of hosts.
+               marker := ""
+               if keyFields[0][0] == '@' {
+                       marker = string(keyFields[0][1:])
+                       keyFields = keyFields[1:]
+               }
+
+               hosts := string(keyFields[0])
+               // keyFields[1] contains the key type (e.g. "ssh-rsa").
+               // However, that information is duplicated inside the
+               // base64-encoded key and so is ignored here.
+
+               key := bytes.Join(keyFields[2:], []byte(" "))
+               if pubKey, comment, err = parseAuthorizedKey(key); err != nil {
+                       return "", nil, nil, "", nil, err
+               }
+
+               return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil
+       }
+
+       return "", nil, nil, "", nil, io.EOF
+}
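+
+// Usage sketch (editor's illustration; knownHosts is a hypothetical byte
+// slice holding the raw contents of a known_hosts file):
+//
+//        rest := knownHosts
+//        for {
+//                marker, hosts, pubKey, comment, next, err := ParseKnownHosts(rest)
+//                if err == io.EOF {
+//                        break
+//                } else if err != nil {
+//                        return err // malformed entry
+//                }
+//                fmt.Println(marker, hosts, pubKey.Type(), comment)
+//                rest = next
+//        }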
+
+// ParseAuthorizedKey parses a public key from an authorized_keys
+// file used in OpenSSH according to the sshd(8) manual page.
+func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
+       for len(in) > 0 {
+               end := bytes.IndexByte(in, '\n')
+               if end != -1 {
+                       rest = in[end+1:]
+                       in = in[:end]
+               } else {
+                       rest = nil
+               }
+
+               end = bytes.IndexByte(in, '\r')
+               if end != -1 {
+                       in = in[:end]
+               }
+
+               in = bytes.TrimSpace(in)
+               if len(in) == 0 || in[0] == '#' {
+                       in = rest
+                       continue
+               }
+
+               i := bytes.IndexAny(in, " \t")
+               if i == -1 {
+                       in = rest
+                       continue
+               }
+
+               if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
+                       return out, comment, options, rest, nil
+               }
+
+               // No key type recognised. Maybe there's an options field at
+               // the beginning.
+               var b byte
+               inQuote := false
+               var candidateOptions []string
+               optionStart := 0
+               for i, b = range in {
+                       isEnd := !inQuote && (b == ' ' || b == '\t')
+                       if (b == ',' && !inQuote) || isEnd {
+                               if i-optionStart > 0 {
+                                       candidateOptions = append(candidateOptions, string(in[optionStart:i]))
+                               }
+                               optionStart = i + 1
+                       }
+                       if isEnd {
+                               break
+                       }
+                       if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) {
+                               inQuote = !inQuote
+                       }
+               }
+               for i < len(in) && (in[i] == ' ' || in[i] == '\t') {
+                       i++
+               }
+               if i == len(in) {
+                       // Invalid line: unmatched quote
+                       in = rest
+                       continue
+               }
+
+               in = in[i:]
+               i = bytes.IndexAny(in, " \t")
+               if i == -1 {
+                       in = rest
+                       continue
+               }
+
+               if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
+                       options = candidateOptions
+                       return out, comment, options, rest, nil
+               }
+
+               in = rest
+               continue
+       }
+
+       return nil, "", nil, nil, errors.New("ssh: no key found")
+}
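+
+// Usage sketch (editor's illustration; authKeys is a hypothetical byte
+// slice holding the raw contents of an authorized_keys file):
+//
+//        rest := authKeys
+//        for len(rest) > 0 {
+//                pubKey, comment, options, next, err := ParseAuthorizedKey(rest)
+//                if err != nil {
+//                        return err // no further keys found
+//                }
+//                fmt.Println(pubKey.Type(), comment, options)
+//                rest = next
+//        }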
+
+// ParsePublicKey parses an SSH public key formatted for use in
+// the SSH wire protocol according to RFC 4253, section 6.6.
+func ParsePublicKey(in []byte) (out PublicKey, err error) {
+       algo, in, ok := parseString(in)
+       if !ok {
+               return nil, errShortRead
+       }
+       var rest []byte
+       out, rest, err = parsePubKey(in, string(algo))
+       if len(rest) > 0 {
+               return nil, errors.New("ssh: trailing junk in public key")
+       }
+
+       return out, err
+}
+
+// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH
+// authorized_keys file. The return value ends with a newline.
+func MarshalAuthorizedKey(key PublicKey) []byte {
+       b := &bytes.Buffer{}
+       b.WriteString(key.Type())
+       b.WriteByte(' ')
+       e := base64.NewEncoder(base64.StdEncoding, b)
+       e.Write(key.Marshal())
+       e.Close()
+       b.WriteByte('\n')
+       return b.Bytes()
+}
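+
+// Round-trip sketch (editor's illustration; line is a hypothetical
+// authorized_keys line): options and the comment are dropped, and the
+// result ends with a newline.
+//
+//        pubKey, _, _, _, err := ParseAuthorizedKey(line)
+//        if err == nil {
+//                fmt.Printf("%s", MarshalAuthorizedKey(pubKey))
+//        }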
+
+// PublicKey is an abstraction of different types of public keys.
+type PublicKey interface {
+       // Type returns the key's type, e.g. "ssh-rsa".
+       Type() string
+
+       // Marshal returns the serialized key data in SSH wire format,
+       // with the name prefix.
+       Marshal() []byte
+
+       // Verify that sig is a signature on the given data using this
+       // key. This function will hash the data appropriately first.
+       Verify(data []byte, sig *Signature) error
+}
+
+// CryptoPublicKey, if implemented by a PublicKey,
+// returns the underlying crypto.PublicKey form of the key.
+type CryptoPublicKey interface {
+       CryptoPublicKey() crypto.PublicKey
+}
+
+// A Signer can create signatures that verify against a public key.
+type Signer interface {
+       // PublicKey returns an associated PublicKey instance.
+       PublicKey() PublicKey
+
+       // Sign returns a raw signature for the given data. This method
+       // will apply the hash specified for the keytype to the data.
+       Sign(rand io.Reader, data []byte) (*Signature, error)
+}
+
+type rsaPublicKey rsa.PublicKey
+
+func (r *rsaPublicKey) Type() string {
+       return "ssh-rsa"
+}
+
+// parseRSA parses an RSA key according to RFC 4253, section 6.6.
+func parseRSA(in []byte) (out PublicKey, rest []byte, err error) {
+       var w struct {
+               E    *big.Int
+               N    *big.Int
+               Rest []byte `ssh:"rest"`
+       }
+       if err := Unmarshal(in, &w); err != nil {
+               return nil, nil, err
+       }
+
+       if w.E.BitLen() > 24 {
+               return nil, nil, errors.New("ssh: exponent too large")
+       }
+       e := w.E.Int64()
+       if e < 3 || e&1 == 0 {
+               return nil, nil, errors.New("ssh: incorrect exponent")
+       }
+
+       var key rsa.PublicKey
+       key.E = int(e)
+       key.N = w.N
+       return (*rsaPublicKey)(&key), w.Rest, nil
+}
+
+func (r *rsaPublicKey) Marshal() []byte {
+       e := new(big.Int).SetInt64(int64(r.E))
+       // RSA publickey struct layout should match the struct used by
+       // parseRSACert in the x/crypto/ssh/agent package.
+       wirekey := struct {
+               Name string
+               E    *big.Int
+               N    *big.Int
+       }{
+               KeyAlgoRSA,
+               e,
+               r.N,
+       }
+       return Marshal(&wirekey)
+}
+
+func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
+       if sig.Format != r.Type() {
+               return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
+       }
+       h := crypto.SHA1.New()
+       h.Write(data)
+       digest := h.Sum(nil)
+       return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob)
+}
+
+func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+       return (*rsa.PublicKey)(r)
+}
+
+type dsaPublicKey dsa.PublicKey
+
+func (r *dsaPublicKey) Type() string {
+       return "ssh-dss"
+}
+
+// parseDSA parses a DSA key according to RFC 4253, section 6.6.
+func parseDSA(in []byte) (out PublicKey, rest []byte, err error) {
+       var w struct {
+               P, Q, G, Y *big.Int
+               Rest       []byte `ssh:"rest"`
+       }
+       if err := Unmarshal(in, &w); err != nil {
+               return nil, nil, err
+       }
+
+       key := &dsaPublicKey{
+               Parameters: dsa.Parameters{
+                       P: w.P,
+                       Q: w.Q,
+                       G: w.G,
+               },
+               Y: w.Y,
+       }
+       return key, w.Rest, nil
+}
+
+func (k *dsaPublicKey) Marshal() []byte {
+       // DSA publickey struct layout should match the struct used by
+       // parseDSACert in the x/crypto/ssh/agent package.
+       w := struct {
+               Name       string
+               P, Q, G, Y *big.Int
+       }{
+               k.Type(),
+               k.P,
+               k.Q,
+               k.G,
+               k.Y,
+       }
+
+       return Marshal(&w)
+}
+
+func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
+       if sig.Format != k.Type() {
+               return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
+       }
+       h := crypto.SHA1.New()
+       h.Write(data)
+       digest := h.Sum(nil)
+
+       // Per RFC 4253, section 6.6,
+       // The value for 'dss_signature_blob' is encoded as a string containing
+       // r, followed by s (which are 160-bit integers, without lengths or
+       // padding, unsigned, and in network byte order).
+       // For DSS purposes, sig.Blob should be exactly 40 bytes in length.
+       if len(sig.Blob) != 40 {
+               return errors.New("ssh: DSA signature parse error")
+       }
+       r := new(big.Int).SetBytes(sig.Blob[:20])
+       s := new(big.Int).SetBytes(sig.Blob[20:])
+       if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) {
+               return nil
+       }
+       return errors.New("ssh: signature did not verify")
+}
+
+func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+       return (*dsa.PublicKey)(k)
+}
+
+type dsaPrivateKey struct {
+       *dsa.PrivateKey
+}
+
+func (k *dsaPrivateKey) PublicKey() PublicKey {
+       return (*dsaPublicKey)(&k.PrivateKey.PublicKey)
+}
+
+func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
+       h := crypto.SHA1.New()
+       h.Write(data)
+       digest := h.Sum(nil)
+       r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
+       if err != nil {
+               return nil, err
+       }
+
+       sig := make([]byte, 40)
+       rb := r.Bytes()
+       sb := s.Bytes()
+
+       copy(sig[20-len(rb):20], rb)
+       copy(sig[40-len(sb):], sb)
+
+       return &Signature{
+               Format: k.PublicKey().Type(),
+               Blob:   sig,
+       }, nil
+}
+
+type ecdsaPublicKey ecdsa.PublicKey
+
+func (key *ecdsaPublicKey) Type() string {
+       return "ecdsa-sha2-" + key.nistID()
+}
+
+func (key *ecdsaPublicKey) nistID() string {
+       switch key.Params().BitSize {
+       case 256:
+               return "nistp256"
+       case 384:
+               return "nistp384"
+       case 521:
+               return "nistp521"
+       }
+       panic("ssh: unsupported ecdsa key size")
+}
+
+type ed25519PublicKey ed25519.PublicKey
+
+func (key ed25519PublicKey) Type() string {
+       return KeyAlgoED25519
+}
+
+func parseED25519(in []byte) (out PublicKey, rest []byte, err error) {
+       var w struct {
+               KeyBytes []byte
+               Rest     []byte `ssh:"rest"`
+       }
+
+       if err := Unmarshal(in, &w); err != nil {
+               return nil, nil, err
+       }
+
+       key := ed25519.PublicKey(w.KeyBytes)
+
+       return (ed25519PublicKey)(key), w.Rest, nil
+}
+
+func (key ed25519PublicKey) Marshal() []byte {
+       w := struct {
+               Name     string
+               KeyBytes []byte
+       }{
+               KeyAlgoED25519,
+               []byte(key),
+       }
+       return Marshal(&w)
+}
+
+func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error {
+       if sig.Format != key.Type() {
+               return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
+       }
+
+       edKey := (ed25519.PublicKey)(key)
+       if ok := ed25519.Verify(edKey, b, sig.Blob); !ok {
+               return errors.New("ssh: signature did not verify")
+       }
+
+       return nil
+}
+
+func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey {
+       return ed25519.PublicKey(k)
+}
+
+func supportedEllipticCurve(curve elliptic.Curve) bool {
+       return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
+}
+
+// ecHash returns the hash to match the given elliptic curve, see RFC
+// 5656, section 6.2.1
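+// (P-256 maps to SHA-256, P-384 to SHA-384 and P-521 to SHA-512).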
+func ecHash(curve elliptic.Curve) crypto.Hash {
+       bitSize := curve.Params().BitSize
+       switch {
+       case bitSize <= 256:
+               return crypto.SHA256
+       case bitSize <= 384:
+               return crypto.SHA384
+       }
+       return crypto.SHA512
+}
+
+// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
+func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
+       var w struct {
+               Curve    string
+               KeyBytes []byte
+               Rest     []byte `ssh:"rest"`
+       }
+
+       if err := Unmarshal(in, &w); err != nil {
+               return nil, nil, err
+       }
+
+       key := new(ecdsa.PublicKey)
+
+       switch w.Curve {
+       case "nistp256":
+               key.Curve = elliptic.P256()
+       case "nistp384":
+               key.Curve = elliptic.P384()
+       case "nistp521":
+               key.Curve = elliptic.P521()
+       default:
+               return nil, nil, errors.New("ssh: unsupported curve")
+       }
+
+       key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes)
+       if key.X == nil || key.Y == nil {
+               return nil, nil, errors.New("ssh: invalid curve point")
+       }
+       return (*ecdsaPublicKey)(key), w.Rest, nil
+}
+
+func (key *ecdsaPublicKey) Marshal() []byte {
+       // See RFC 5656, section 3.1.
+       keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y)
+       // ECDSA publickey struct layout should match the struct used by
+       // parseECDSACert in the x/crypto/ssh/agent package.
+       w := struct {
+               Name string
+               ID   string
+               Key  []byte
+       }{
+               key.Type(),
+               key.nistID(),
+               keyBytes,
+       }
+
+       return Marshal(&w)
+}
+
+func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
+       if sig.Format != key.Type() {
+               return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type())
+       }
+
+       h := ecHash(key.Curve).New()
+       h.Write(data)
+       digest := h.Sum(nil)
+
+       // Per RFC 5656, section 3.1.2,
+       // The ecdsa_signature_blob value has the following specific encoding:
+       //    mpint    r
+       //    mpint    s
+       var ecSig struct {
+               R *big.Int
+               S *big.Int
+       }
+
+       if err := Unmarshal(sig.Blob, &ecSig); err != nil {
+               return err
+       }
+
+       if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) {
+               return nil
+       }
+       return errors.New("ssh: signature did not verify")
+}
+
+func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey {
+       return (*ecdsa.PublicKey)(k)
+}
+
+// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey,
+// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding
+// Signer instance. ECDSA keys must use P-256, P-384 or P-521.
+func NewSignerFromKey(key interface{}) (Signer, error) {
+       switch key := key.(type) {
+       case crypto.Signer:
+               return NewSignerFromSigner(key)
+       case *dsa.PrivateKey:
+               return &dsaPrivateKey{key}, nil
+       default:
+               return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+       }
+}
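+
+// Usage sketch (editor's illustration, assuming crypto/rand is imported
+// as rand; *ecdsa.PrivateKey implements crypto.Signer, so this routes
+// through NewSignerFromSigner):
+//
+//        priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//        if err != nil {
+//                return err
+//        }
+//        signer, err := NewSignerFromKey(priv)
+//        // on success, signer.PublicKey().Type() == "ecdsa-sha2-nistp256"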
+
+type wrappedSigner struct {
+       signer crypto.Signer
+       pubKey PublicKey
+}
+
+// NewSignerFromSigner takes any crypto.Signer implementation and
+// returns a corresponding Signer interface. This can be used, for
+// example, with keys kept in hardware modules.
+func NewSignerFromSigner(signer crypto.Signer) (Signer, error) {
+       pubKey, err := NewPublicKey(signer.Public())
+       if err != nil {
+               return nil, err
+       }
+
+       return &wrappedSigner{signer, pubKey}, nil
+}
+
+func (s *wrappedSigner) PublicKey() PublicKey {
+       return s.pubKey
+}
+
+func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
+       var hashFunc crypto.Hash
+
+       switch key := s.pubKey.(type) {
+       case *rsaPublicKey, *dsaPublicKey:
+               hashFunc = crypto.SHA1
+       case *ecdsaPublicKey:
+               hashFunc = ecHash(key.Curve)
+       case ed25519PublicKey:
+       default:
+               return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+       }
+
+       var digest []byte
+       if hashFunc != 0 {
+               h := hashFunc.New()
+               h.Write(data)
+               digest = h.Sum(nil)
+       } else {
+               digest = data
+       }
+
+       signature, err := s.signer.Sign(rand, digest, hashFunc)
+       if err != nil {
+               return nil, err
+       }
+
+       // crypto.Signer.Sign is expected to return an ASN.1-encoded signature
+       // for ECDSA and DSA, but that's not the encoding expected by SSH, so
+       // re-encode.
+       switch s.pubKey.(type) {
+       case *ecdsaPublicKey, *dsaPublicKey:
+               type asn1Signature struct {
+                       R, S *big.Int
+               }
+               asn1Sig := new(asn1Signature)
+               _, err := asn1.Unmarshal(signature, asn1Sig)
+               if err != nil {
+                       return nil, err
+               }
+
+               switch s.pubKey.(type) {
+               case *ecdsaPublicKey:
+                       signature = Marshal(asn1Sig)
+
+               case *dsaPublicKey:
+                       signature = make([]byte, 40)
+                       r := asn1Sig.R.Bytes()
+                       s := asn1Sig.S.Bytes()
+                       copy(signature[20-len(r):20], r)
+                       copy(signature[40-len(s):40], s)
+               }
+       }
+
+       return &Signature{
+               Format: s.pubKey.Type(),
+               Blob:   signature,
+       }, nil
+}
+
+// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey,
+// or ed25519.PublicKey and returns a corresponding PublicKey instance.
+// ECDSA keys must use P-256, P-384 or P-521.
+func NewPublicKey(key interface{}) (PublicKey, error) {
+       switch key := key.(type) {
+       case *rsa.PublicKey:
+               return (*rsaPublicKey)(key), nil
+       case *ecdsa.PublicKey:
+               if !supportedEllipticCurve(key.Curve) {
+                       return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.")
+               }
+               return (*ecdsaPublicKey)(key), nil
+       case *dsa.PublicKey:
+               return (*dsaPublicKey)(key), nil
+       case ed25519.PublicKey:
+               return (ed25519PublicKey)(key), nil
+       default:
+               return nil, fmt.Errorf("ssh: unsupported key type %T", key)
+       }
+}
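+
+// Usage sketch (editor's illustration, assuming crypto/rand is imported
+// as rand):
+//
+//        pub, _, err := ed25519.GenerateKey(rand.Reader)
+//        if err != nil {
+//                return err
+//        }
+//        sshPub, err := NewPublicKey(pub)
+//        // on success, sshPub.Type() == "ssh-ed25519"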
+
+// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports
+// the same keys as ParseRawPrivateKey.
+func ParsePrivateKey(pemBytes []byte) (Signer, error) {
+       key, err := ParseRawPrivateKey(pemBytes)
+       if err != nil {
+               return nil, err
+       }
+
+       return NewSignerFromKey(key)
+}
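+
+// Usage sketch (editor's illustration; the key path is hypothetical and
+// the PEM block must be unencrypted):
+//
+//        pemBytes, err := ioutil.ReadFile("/home/user/.ssh/id_ed25519")
+//        if err != nil {
+//                return err
+//        }
+//        signer, err := ParsePrivateKey(pemBytes)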
+
+// encryptedBlock tells whether a private key is
+// encrypted by examining its Proc-Type header
+// for a mention of ENCRYPTED
+// according to RFC 1421 Section 4.6.1.1.
+func encryptedBlock(block *pem.Block) bool {
+       return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED")
+}
+
+// ParseRawPrivateKey returns a private key from a PEM encoded private key. It
+// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys.
+func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) {
+       block, _ := pem.Decode(pemBytes)
+       if block == nil {
+               return nil, errors.New("ssh: no key found")
+       }
+
+       if encryptedBlock(block) {
+               return nil, errors.New("ssh: cannot decode encrypted private keys")
+       }
+
+       switch block.Type {
+       case "RSA PRIVATE KEY":
+               return x509.ParsePKCS1PrivateKey(block.Bytes)
+       case "EC PRIVATE KEY":
+               return x509.ParseECPrivateKey(block.Bytes)
+       case "DSA PRIVATE KEY":
+               return ParseDSAPrivateKey(block.Bytes)
+       case "OPENSSH PRIVATE KEY":
+               return parseOpenSSHPrivateKey(block.Bytes)
+       default:
+               return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type)
+       }
+}
+
+// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as
+// specified by the OpenSSL DSA man page.
+func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) {
+       var k struct {
+               Version int
+               P       *big.Int
+               Q       *big.Int
+               G       *big.Int
+               Pub     *big.Int
+               Priv    *big.Int
+       }
+       rest, err := asn1.Unmarshal(der, &k)
+       if err != nil {
+               return nil, errors.New("ssh: failed to parse DSA key: " + err.Error())
+       }
+       if len(rest) > 0 {
+               return nil, errors.New("ssh: garbage after DSA key")
+       }
+
+       return &dsa.PrivateKey{
+               PublicKey: dsa.PublicKey{
+                       Parameters: dsa.Parameters{
+                               P: k.P,
+                               Q: k.Q,
+                               G: k.G,
+                       },
+                       Y: k.Pub,
+               },
+               X: k.Priv,
+       }, nil
+}
+
+// Implemented based on the documentation at
+// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
+func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) {
+       magic := append([]byte("openssh-key-v1"), 0)
+       if len(key) < len(magic) || !bytes.Equal(magic, key[:len(magic)]) {
+               return nil, errors.New("ssh: invalid openssh private key format")
+       }
+       remaining := key[len(magic):]
+
+       var w struct {
+               CipherName   string
+               KdfName      string
+               KdfOpts      string
+               NumKeys      uint32
+               PubKey       []byte
+               PrivKeyBlock []byte
+       }
+
+       if err := Unmarshal(remaining, &w); err != nil {
+               return nil, err
+       }
+
+       pk1 := struct {
+               Check1  uint32
+               Check2  uint32
+               Keytype string
+               Pub     []byte
+               Priv    []byte
+               Comment string
+               Pad     []byte `ssh:"rest"`
+       }{}
+
+       if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil {
+               return nil, err
+       }
+
+       if pk1.Check1 != pk1.Check2 {
+               return nil, errors.New("ssh: checkint mismatch")
+       }
+
+       // we only handle ed25519 keys currently
+       if pk1.Keytype != KeyAlgoED25519 {
+               return nil, errors.New("ssh: unhandled key type")
+       }
+
+       for i, b := range pk1.Pad {
+               if int(b) != i+1 {
+                       return nil, errors.New("ssh: padding not as expected")
+               }
+       }
+
+       if len(pk1.Priv) != ed25519.PrivateKeySize {
+               return nil, errors.New("ssh: private key unexpected length")
+       }
+
+       pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize))
+       copy(pk, pk1.Priv)
+       return &pk, nil
+}
+
+// FingerprintLegacyMD5 returns the user presentation of the key's
+// fingerprint as described by RFC 4716 section 4.
+func FingerprintLegacyMD5(pubKey PublicKey) string {
+       md5sum := md5.Sum(pubKey.Marshal())
+       hexarray := make([]string, len(md5sum))
+       for i, c := range md5sum {
+               hexarray[i] = hex.EncodeToString([]byte{c})
+       }
+       return strings.Join(hexarray, ":")
+}
+
+// FingerprintSHA256 returns the user presentation of the key's
+// fingerprint as an unpadded base64-encoded SHA-256 hash.
+// This format was introduced in OpenSSH 6.8.
+// https://www.openssh.com/txt/release-6.8
+// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding)
+func FingerprintSHA256(pubKey PublicKey) string {
+       sha256sum := sha256.Sum256(pubKey.Marshal())
+       hash := base64.RawStdEncoding.EncodeToString(sha256sum[:])
+       return "SHA256:" + hash
+}
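+
+// Format sketch (editor's illustration): for the same pubKey the two
+// presentations look like
+//
+//        FingerprintLegacyMD5(pubKey) // "a1:b2:...:ff" (16 hex bytes joined by colons)
+//        FingerprintSHA256(pubKey)    // "SHA256:" + 43 unpadded base64 characters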
diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go
new file mode 100644 (file)
index 0000000..c07a062
--- /dev/null
@@ -0,0 +1,61 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+// Message authentication support
+
+import (
+       "crypto/hmac"
+       "crypto/sha1"
+       "crypto/sha256"
+       "hash"
+)
+
+type macMode struct {
+       keySize int
+       etm     bool
+       new     func(key []byte) hash.Hash
+}
+
+// truncatingMAC wraps around a hash.Hash and truncates the output digest to
+// a given size.
+type truncatingMAC struct {
+       length int
+       hmac   hash.Hash
+}
+
+func (t truncatingMAC) Write(data []byte) (int, error) {
+       return t.hmac.Write(data)
+}
+
+func (t truncatingMAC) Sum(in []byte) []byte {
+       out := t.hmac.Sum(in)
+       return out[:len(in)+t.length]
+}
+
+func (t truncatingMAC) Reset() {
+       t.hmac.Reset()
+}
+
+func (t truncatingMAC) Size() int {
+       return t.length
+}
+
+func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
+
+var macModes = map[string]*macMode{
+       "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash {
+               return hmac.New(sha256.New, key)
+       }},
+       "hmac-sha2-256": {32, false, func(key []byte) hash.Hash {
+               return hmac.New(sha256.New, key)
+       }},
+       "hmac-sha1": {20, false, func(key []byte) hash.Hash {
+               return hmac.New(sha1.New, key)
+       }},
+       "hmac-sha1-96": {20, false, func(key []byte) hash.Hash {
+               return truncatingMAC{12, hmac.New(sha1.New, key)}
+       }},
+}
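+
+// Behaviour sketch (editor's illustration; key and packet are hypothetical
+// byte slices): "hmac-sha1-96" computes a full HMAC-SHA1 tag but keeps only
+// its first 12 bytes, via the truncatingMAC wrapper above.
+//
+//        mode := macModes["hmac-sha1-96"]
+//        mac := mode.new(key) // key is mode.keySize (20) bytes
+//        mac.Write(packet)
+//        tag := mac.Sum(nil) // len(tag) == 12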
diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go
new file mode 100644 (file)
index 0000000..e6ecd3a
--- /dev/null
@@ -0,0 +1,758 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "bytes"
+       "encoding/binary"
+       "errors"
+       "fmt"
+       "io"
+       "math/big"
+       "reflect"
+       "strconv"
+       "strings"
+)
+
+// These are SSH message type numbers. They are scattered around several
+// documents but many were taken from [SSH-PARAMETERS].
+const (
+       msgIgnore        = 2
+       msgUnimplemented = 3
+       msgDebug         = 4
+       msgNewKeys       = 21
+
+       // Standard authentication messages
+       msgUserAuthSuccess = 52
+       msgUserAuthBanner  = 53
+)
+
+// SSH messages:
+//
+// These structures mirror the wire format of the corresponding SSH messages.
+// They are marshaled using reflection with the marshal and unmarshal functions
+// in this file. The only wrinkle is that a final member of type []byte with an
+// "ssh" tag of "rest" receives the remainder of a packet when unmarshaling.
+
+// See RFC 4253, section 11.1.
+const msgDisconnect = 1
+
+// disconnectMsg is the message that signals a disconnect. It is also
+// the error type returned from mux.Wait()
+type disconnectMsg struct {
+       Reason   uint32 `sshtype:"1"`
+       Message  string
+       Language string
+}
+
+func (d *disconnectMsg) Error() string {
+       return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message)
+}
+
+// See RFC 4253, section 7.1.
+const msgKexInit = 20
+
+type kexInitMsg struct {
+       Cookie                  [16]byte `sshtype:"20"`
+       KexAlgos                []string
+       ServerHostKeyAlgos      []string
+       CiphersClientServer     []string
+       CiphersServerClient     []string
+       MACsClientServer        []string
+       MACsServerClient        []string
+       CompressionClientServer []string
+       CompressionServerClient []string
+       LanguagesClientServer   []string
+       LanguagesServerClient   []string
+       FirstKexFollows         bool
+       Reserved                uint32
+}
+
+// See RFC 4253, section 8.
+
+// Diffie-Hellman
+const msgKexDHInit = 30
+
+type kexDHInitMsg struct {
+       X *big.Int `sshtype:"30"`
+}
+
+const msgKexECDHInit = 30
+
+type kexECDHInitMsg struct {
+       ClientPubKey []byte `sshtype:"30"`
+}
+
+const msgKexECDHReply = 31
+
+type kexECDHReplyMsg struct {
+       HostKey         []byte `sshtype:"31"`
+       EphemeralPubKey []byte
+       Signature       []byte
+}
+
+const msgKexDHReply = 31
+
+type kexDHReplyMsg struct {
+       HostKey   []byte `sshtype:"31"`
+       Y         *big.Int
+       Signature []byte
+}
+
+// See RFC 4253, section 10.
+const msgServiceRequest = 5
+
+type serviceRequestMsg struct {
+       Service string `sshtype:"5"`
+}
+
+// See RFC 4253, section 10.
+const msgServiceAccept = 6
+
+type serviceAcceptMsg struct {
+       Service string `sshtype:"6"`
+}
+
+// See RFC 4252, section 5.
+const msgUserAuthRequest = 50
+
+type userAuthRequestMsg struct {
+       User    string `sshtype:"50"`
+       Service string
+       Method  string
+       Payload []byte `ssh:"rest"`
+}
+
+// Used for debug printouts of packets.
+type userAuthSuccessMsg struct {
+}
+
+// See RFC 4252, section 5.1
+const msgUserAuthFailure = 51
+
+type userAuthFailureMsg struct {
+       Methods        []string `sshtype:"51"`
+       PartialSuccess bool
+}
+
+// See RFC 4256, section 3.2
+const msgUserAuthInfoRequest = 60
+const msgUserAuthInfoResponse = 61
+
+type userAuthInfoRequestMsg struct {
+       User               string `sshtype:"60"`
+       Instruction        string
+       DeprecatedLanguage string
+       NumPrompts         uint32
+       Prompts            []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.1.
+const msgChannelOpen = 90
+
+type channelOpenMsg struct {
+       ChanType         string `sshtype:"90"`
+       PeersId          uint32
+       PeersWindow      uint32
+       MaxPacketSize    uint32
+       TypeSpecificData []byte `ssh:"rest"`
+}
+
+const msgChannelExtendedData = 95
+const msgChannelData = 94
+
+// Used for debug printouts of packets.
+type channelDataMsg struct {
+       PeersId uint32 `sshtype:"94"`
+       Length  uint32
+       Rest    []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.1.
+const msgChannelOpenConfirm = 91
+
+type channelOpenConfirmMsg struct {
+       PeersId          uint32 `sshtype:"91"`
+       MyId             uint32
+       MyWindow         uint32
+       MaxPacketSize    uint32
+       TypeSpecificData []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.1.
+const msgChannelOpenFailure = 92
+
+type channelOpenFailureMsg struct {
+       PeersId  uint32 `sshtype:"92"`
+       Reason   RejectionReason
+       Message  string
+       Language string
+}
+
+const msgChannelRequest = 98
+
+type channelRequestMsg struct {
+       PeersId             uint32 `sshtype:"98"`
+       Request             string
+       WantReply           bool
+       RequestSpecificData []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 5.4.
+const msgChannelSuccess = 99
+
+type channelRequestSuccessMsg struct {
+       PeersId uint32 `sshtype:"99"`
+}
+
+// See RFC 4254, section 5.4.
+const msgChannelFailure = 100
+
+type channelRequestFailureMsg struct {
+       PeersId uint32 `sshtype:"100"`
+}
+
+// See RFC 4254, section 5.3
+const msgChannelClose = 97
+
+type channelCloseMsg struct {
+       PeersId uint32 `sshtype:"97"`
+}
+
+// See RFC 4254, section 5.3
+const msgChannelEOF = 96
+
+type channelEOFMsg struct {
+       PeersId uint32 `sshtype:"96"`
+}
+
+// See RFC 4254, section 4
+const msgGlobalRequest = 80
+
+type globalRequestMsg struct {
+       Type      string `sshtype:"80"`
+       WantReply bool
+       Data      []byte `ssh:"rest"`
+}
+
+// See RFC 4254, section 4
+const msgRequestSuccess = 81
+
+type globalRequestSuccessMsg struct {
+       Data []byte `ssh:"rest" sshtype:"81"`
+}
+
+// See RFC 4254, section 4
+const msgRequestFailure = 82
+
+type globalRequestFailureMsg struct {
+       Data []byte `ssh:"rest" sshtype:"82"`
+}
+
+// See RFC 4254, section 5.2
+const msgChannelWindowAdjust = 93
+
+type windowAdjustMsg struct {
+       PeersId         uint32 `sshtype:"93"`
+       AdditionalBytes uint32
+}
+
+// See RFC 4252, section 7
+const msgUserAuthPubKeyOk = 60
+
+type userAuthPubKeyOkMsg struct {
+       Algo   string `sshtype:"60"`
+       PubKey []byte
+}
+
+// typeTags returns the possible type bytes for the given reflect.Type, which
+// should be a struct. The possible values are separated by a '|' character.
+func typeTags(structType reflect.Type) (tags []byte) {
+       tagStr := structType.Field(0).Tag.Get("sshtype")
+
+       for _, tag := range strings.Split(tagStr, "|") {
+               i, err := strconv.Atoi(tag)
+               if err == nil {
+                       tags = append(tags, byte(i))
+               }
+       }
+
+       return tags
+}
+
+func fieldError(t reflect.Type, field int, problem string) error {
+       if problem != "" {
+               problem = ": " + problem
+       }
+       return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem)
+}
+
+var errShortRead = errors.New("ssh: short read")
+
+// Unmarshal parses data in SSH wire format into a structure. The out
+// argument should be a pointer to struct. If the first member of the
+// struct has the "sshtype" tag set to a '|'-separated set of numbers
+// in decimal, the packet must start with one of those numbers. In
+// case of error, Unmarshal returns a ParseError or
+// UnexpectedMessageError.
+func Unmarshal(data []byte, out interface{}) error {
+       v := reflect.ValueOf(out).Elem()
+       structType := v.Type()
+       expectedTypes := typeTags(structType)
+
+       var expectedType byte
+       if len(expectedTypes) > 0 {
+               expectedType = expectedTypes[0]
+       }
+
+       if len(data) == 0 {
+               return parseError(expectedType)
+       }
+
+       if len(expectedTypes) > 0 {
+               goodType := false
+               for _, e := range expectedTypes {
+                       if e > 0 && data[0] == e {
+                               goodType = true
+                               break
+                       }
+               }
+               if !goodType {
+                       return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes)
+               }
+               data = data[1:]
+       }
+
+       var ok bool
+       for i := 0; i < v.NumField(); i++ {
+               field := v.Field(i)
+               t := field.Type()
+               switch t.Kind() {
+               case reflect.Bool:
+                       if len(data) < 1 {
+                               return errShortRead
+                       }
+                       field.SetBool(data[0] != 0)
+                       data = data[1:]
+               case reflect.Array:
+                       if t.Elem().Kind() != reflect.Uint8 {
+                               return fieldError(structType, i, "array of unsupported type")
+                       }
+                       if len(data) < t.Len() {
+                               return errShortRead
+                       }
+                       for j, n := 0, t.Len(); j < n; j++ {
+                               field.Index(j).Set(reflect.ValueOf(data[j]))
+                       }
+                       data = data[t.Len():]
+               case reflect.Uint64:
+                       var u64 uint64
+                       if u64, data, ok = parseUint64(data); !ok {
+                               return errShortRead
+                       }
+                       field.SetUint(u64)
+               case reflect.Uint32:
+                       var u32 uint32
+                       if u32, data, ok = parseUint32(data); !ok {
+                               return errShortRead
+                       }
+                       field.SetUint(uint64(u32))
+               case reflect.Uint8:
+                       if len(data) < 1 {
+                               return errShortRead
+                       }
+                       field.SetUint(uint64(data[0]))
+                       data = data[1:]
+               case reflect.String:
+                       var s []byte
+                       if s, data, ok = parseString(data); !ok {
+                               return fieldError(structType, i, "")
+                       }
+                       field.SetString(string(s))
+               case reflect.Slice:
+                       switch t.Elem().Kind() {
+                       case reflect.Uint8:
+                               if structType.Field(i).Tag.Get("ssh") == "rest" {
+                                       field.Set(reflect.ValueOf(data))
+                                       data = nil
+                               } else {
+                                       var s []byte
+                                       if s, data, ok = parseString(data); !ok {
+                                               return errShortRead
+                                       }
+                                       field.Set(reflect.ValueOf(s))
+                               }
+                       case reflect.String:
+                               var nl []string
+                               if nl, data, ok = parseNameList(data); !ok {
+                                       return errShortRead
+                               }
+                               field.Set(reflect.ValueOf(nl))
+                       default:
+                               return fieldError(structType, i, "slice of unsupported type")
+                       }
+               case reflect.Ptr:
+                       if t == bigIntType {
+                               var n *big.Int
+                               if n, data, ok = parseInt(data); !ok {
+                                       return errShortRead
+                               }
+                               field.Set(reflect.ValueOf(n))
+                       } else {
+                               return fieldError(structType, i, "pointer to unsupported type")
+                       }
+               default:
+                       return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t))
+               }
+       }
+
+       if len(data) != 0 {
+               return parseError(expectedType)
+       }
+
+       return nil
+}
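+
+// Unmarshal sketch (editor's illustration; packet is a hypothetical raw
+// SSH packet): a trailing []byte field tagged ssh:"rest" receives whatever
+// bytes follow the fixed fields.
+//
+//        var req userAuthRequestMsg
+//        if err := Unmarshal(packet, &req); err != nil {
+//                return err
+//        }
+//        // req.Payload holds the method-specific remainder of the packet.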
+
+// Marshal serializes the message in msg to SSH wire format.  The msg
+// argument should be a struct or pointer to struct. If the first
+// member has the "sshtype" tag set to a number in decimal, that
+// number is prepended to the result. If the last member has the
+// "ssh" tag set to "rest", its contents are appended to the output.
+func Marshal(msg interface{}) []byte {
+       out := make([]byte, 0, 64)
+       return marshalStruct(out, msg)
+}
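+
+// exampleMarshalRoundTrip is an illustrative sketch (not part of the
+// upstream file): any struct whose fields use the supported kinds
+// round-trips through Marshal and Unmarshal. The struct and field
+// names here are invented for demonstration.
+func exampleMarshalRoundTrip() error {
+       type winChange struct {
+               Columns uint32
+               Rows    uint32
+       }
+       wire := Marshal(&winChange{Columns: 80, Rows: 24})
+       var got winChange
+       // On success, got equals {Columns: 80, Rows: 24}.
+       return Unmarshal(wire, &got)
+}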
+
+func marshalStruct(out []byte, msg interface{}) []byte {
+       v := reflect.Indirect(reflect.ValueOf(msg))
+       msgTypes := typeTags(v.Type())
+       if len(msgTypes) > 0 {
+               out = append(out, msgTypes[0])
+       }
+
+       for i, n := 0, v.NumField(); i < n; i++ {
+               field := v.Field(i)
+               switch t := field.Type(); t.Kind() {
+               case reflect.Bool:
+                       var v uint8
+                       if field.Bool() {
+                               v = 1
+                       }
+                       out = append(out, v)
+               case reflect.Array:
+                       if t.Elem().Kind() != reflect.Uint8 {
+                               panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface()))
+                       }
+                       for j, l := 0, t.Len(); j < l; j++ {
+                               out = append(out, uint8(field.Index(j).Uint()))
+                       }
+               case reflect.Uint32:
+                       out = appendU32(out, uint32(field.Uint()))
+               case reflect.Uint64:
+                       out = appendU64(out, uint64(field.Uint()))
+               case reflect.Uint8:
+                       out = append(out, uint8(field.Uint()))
+               case reflect.String:
+                       s := field.String()
+                       out = appendInt(out, len(s))
+                       out = append(out, s...)
+               case reflect.Slice:
+                       switch t.Elem().Kind() {
+                       case reflect.Uint8:
+                               if v.Type().Field(i).Tag.Get("ssh") != "rest" {
+                                       out = appendInt(out, field.Len())
+                               }
+                               out = append(out, field.Bytes()...)
+                       case reflect.String:
+                               offset := len(out)
+                               out = appendU32(out, 0)
+                               if n := field.Len(); n > 0 {
+                                       for j := 0; j < n; j++ {
+                                               f := field.Index(j)
+                                               if j != 0 {
+                                                       out = append(out, ',')
+                                               }
+                                               out = append(out, f.String()...)
+                                       }
+                                       // overwrite length value
+                                       binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4))
+                               }
+                       default:
+                               panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface()))
+                       }
+               case reflect.Ptr:
+                       if t == bigIntType {
+                               var n *big.Int
+                               nValue := reflect.ValueOf(&n)
+                               nValue.Elem().Set(field)
+                               needed := intLength(n)
+                               oldLength := len(out)
+
+                               if cap(out)-len(out) < needed {
+                                       newOut := make([]byte, len(out), 2*(len(out)+needed))
+                                       copy(newOut, out)
+                                       out = newOut
+                               }
+                               out = out[:oldLength+needed]
+                               marshalInt(out[oldLength:], n)
+                       } else {
+                               panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface()))
+                       }
+               }
+       }
+
+       return out
+}
+
+var bigOne = big.NewInt(1)
+
+func parseString(in []byte) (out, rest []byte, ok bool) {
+       if len(in) < 4 {
+               return
+       }
+       length := binary.BigEndian.Uint32(in)
+       in = in[4:]
+       if uint32(len(in)) < length {
+               return
+       }
+       out = in[:length]
+       rest = in[length:]
+       ok = true
+       return
+}
+
+var (
+       comma         = []byte{','}
+       emptyNameList = []string{}
+)
+
+func parseNameList(in []byte) (out []string, rest []byte, ok bool) {
+       contents, rest, ok := parseString(in)
+       if !ok {
+               return
+       }
+       if len(contents) == 0 {
+               out = emptyNameList
+               return
+       }
+       parts := bytes.Split(contents, comma)
+       out = make([]string, len(parts))
+       for i, part := range parts {
+               out[i] = string(part)
+       }
+       return
+}
+
+func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) {
+       contents, rest, ok := parseString(in)
+       if !ok {
+               return
+       }
+       out = new(big.Int)
+
+       if len(contents) > 0 && contents[0]&0x80 == 0x80 {
+               // This is a negative number
+               notBytes := make([]byte, len(contents))
+               for i := range notBytes {
+                       notBytes[i] = ^contents[i]
+               }
+               out.SetBytes(notBytes)
+               out.Add(out, bigOne)
+               out.Neg(out)
+       } else {
+               // Positive number
+               out.SetBytes(contents)
+       }
+       ok = true
+       return
+}
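+
+// exampleParseInt is an illustrative sketch (not part of the upstream
+// file): per RFC 4251, an mpint is a signed, big-endian,
+// two's-complement value behind a four-byte length prefix.
+func exampleParseInt() {
+       neg, _, _ := parseInt([]byte{0, 0, 0, 1, 0xff})       // -1
+       pos, _, _ := parseInt([]byte{0, 0, 0, 2, 0x00, 0x80}) // 128
+       _, _ = neg, pos
+}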
+
+func parseUint32(in []byte) (uint32, []byte, bool) {
+       if len(in) < 4 {
+               return 0, nil, false
+       }
+       return binary.BigEndian.Uint32(in), in[4:], true
+}
+
+func parseUint64(in []byte) (uint64, []byte, bool) {
+       if len(in) < 8 {
+               return 0, nil, false
+       }
+       return binary.BigEndian.Uint64(in), in[8:], true
+}
+
+func intLength(n *big.Int) int {
+       length := 4 /* length bytes */
+       if n.Sign() < 0 {
+               nMinus1 := new(big.Int).Neg(n)
+               nMinus1.Sub(nMinus1, bigOne)
+               bitLen := nMinus1.BitLen()
+               if bitLen%8 == 0 {
+                       // The number will need 0xff padding
+                       length++
+               }
+               length += (bitLen + 7) / 8
+       } else if n.Sign() == 0 {
+               // Zero is encoded as the zero-length string
+       } else {
+               bitLen := n.BitLen()
+               if bitLen%8 == 0 {
+                       // The number will need 0x00 padding
+                       length++
+               }
+               length += (bitLen + 7) / 8
+       }
+
+       return length
+}
+
+func marshalUint32(to []byte, n uint32) []byte {
+       binary.BigEndian.PutUint32(to, n)
+       return to[4:]
+}
+
+func marshalUint64(to []byte, n uint64) []byte {
+       binary.BigEndian.PutUint64(to, n)
+       return to[8:]
+}
+
+func marshalInt(to []byte, n *big.Int) []byte {
+       lengthBytes := to
+       to = to[4:]
+       length := 0
+
+       if n.Sign() < 0 {
+               // A negative number has to be converted to two's-complement
+               // form. So we'll subtract 1 and invert. If the
+               // most-significant-bit isn't set then we'll need to pad the
+               // beginning with 0xff in order to keep the number negative.
+               nMinus1 := new(big.Int).Neg(n)
+               nMinus1.Sub(nMinus1, bigOne)
+               bytes := nMinus1.Bytes()
+               for i := range bytes {
+                       bytes[i] ^= 0xff
+               }
+               if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+                       to[0] = 0xff
+                       to = to[1:]
+                       length++
+               }
+               nBytes := copy(to, bytes)
+               to = to[nBytes:]
+               length += nBytes
+       } else if n.Sign() == 0 {
+               // Zero is encoded as the zero-length string
+       } else {
+               bytes := n.Bytes()
+               if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+                       // We'll have to pad this with a 0x00 in order to
+                       // stop it looking like a negative number.
+                       to[0] = 0
+                       to = to[1:]
+                       length++
+               }
+               nBytes := copy(to, bytes)
+               to = to[nBytes:]
+               length += nBytes
+       }
+
+       lengthBytes[0] = byte(length >> 24)
+       lengthBytes[1] = byte(length >> 16)
+       lengthBytes[2] = byte(length >> 8)
+       lengthBytes[3] = byte(length)
+       return to
+}
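+
+// exampleMarshalInt is an illustrative sketch (not part of the
+// upstream file): intLength sizes the buffer that marshalInt then
+// fills, the same pairing writeInt uses below. Encoding -1 yields
+// {0, 0, 0, 1, 0xff}, the inverse of the parseInt example above.
+func exampleMarshalInt() []byte {
+       n := big.NewInt(-1)
+       buf := make([]byte, intLength(n))
+       marshalInt(buf, n)
+       return buf
+}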
+
+func writeInt(w io.Writer, n *big.Int) {
+       length := intLength(n)
+       buf := make([]byte, length)
+       marshalInt(buf, n)
+       w.Write(buf)
+}
+
+func writeString(w io.Writer, s []byte) {
+       var lengthBytes [4]byte
+       lengthBytes[0] = byte(len(s) >> 24)
+       lengthBytes[1] = byte(len(s) >> 16)
+       lengthBytes[2] = byte(len(s) >> 8)
+       lengthBytes[3] = byte(len(s))
+       w.Write(lengthBytes[:])
+       w.Write(s)
+}
+
+func stringLength(n int) int {
+       return 4 + n
+}
+
+func marshalString(to []byte, s []byte) []byte {
+       to[0] = byte(len(s) >> 24)
+       to[1] = byte(len(s) >> 16)
+       to[2] = byte(len(s) >> 8)
+       to[3] = byte(len(s))
+       to = to[4:]
+       copy(to, s)
+       return to[len(s):]
+}
+
+var bigIntType = reflect.TypeOf((*big.Int)(nil))
+
+// Decode a packet into its corresponding message.
+func decode(packet []byte) (interface{}, error) {
+       var msg interface{}
+       switch packet[0] {
+       case msgDisconnect:
+               msg = new(disconnectMsg)
+       case msgServiceRequest:
+               msg = new(serviceRequestMsg)
+       case msgServiceAccept:
+               msg = new(serviceAcceptMsg)
+       case msgKexInit:
+               msg = new(kexInitMsg)
+       case msgKexDHInit:
+               msg = new(kexDHInitMsg)
+       case msgKexDHReply:
+               msg = new(kexDHReplyMsg)
+       case msgUserAuthRequest:
+               msg = new(userAuthRequestMsg)
+       case msgUserAuthSuccess:
+               return new(userAuthSuccessMsg), nil
+       case msgUserAuthFailure:
+               msg = new(userAuthFailureMsg)
+       case msgUserAuthPubKeyOk:
+               msg = new(userAuthPubKeyOkMsg)
+       case msgGlobalRequest:
+               msg = new(globalRequestMsg)
+       case msgRequestSuccess:
+               msg = new(globalRequestSuccessMsg)
+       case msgRequestFailure:
+               msg = new(globalRequestFailureMsg)
+       case msgChannelOpen:
+               msg = new(channelOpenMsg)
+       case msgChannelData:
+               msg = new(channelDataMsg)
+       case msgChannelOpenConfirm:
+               msg = new(channelOpenConfirmMsg)
+       case msgChannelOpenFailure:
+               msg = new(channelOpenFailureMsg)
+       case msgChannelWindowAdjust:
+               msg = new(windowAdjustMsg)
+       case msgChannelEOF:
+               msg = new(channelEOFMsg)
+       case msgChannelClose:
+               msg = new(channelCloseMsg)
+       case msgChannelRequest:
+               msg = new(channelRequestMsg)
+       case msgChannelSuccess:
+               msg = new(channelRequestSuccessMsg)
+       case msgChannelFailure:
+               msg = new(channelRequestFailureMsg)
+       default:
+               return nil, unexpectedMessageError(0, packet[0])
+       }
+       if err := Unmarshal(packet, msg); err != nil {
+               return nil, err
+       }
+       return msg, nil
+}
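+
+// exampleDecode is an illustrative sketch (not part of the upstream
+// file): decode picks the message struct from the leading type byte,
+// so a packet built with Marshal round-trips to a typed message.
+func exampleDecode() {
+       packet := Marshal(&channelEOFMsg{PeersId: 7})
+       if msg, err := decode(packet); err == nil {
+               _ = msg.(*channelEOFMsg) // PeersId == 7
+       }
+}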
diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go
new file mode 100644 (file)
index 0000000..27a527c
--- /dev/null
@@ -0,0 +1,330 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "encoding/binary"
+       "fmt"
+       "io"
+       "log"
+       "sync"
+       "sync/atomic"
+)
+
+// debugMux, if set, causes messages in the connection protocol to be
+// logged.
+const debugMux = false
+
+// chanList is a thread-safe channel list.
+type chanList struct {
+       // protects concurrent access to chans
+       sync.Mutex
+
+       // chans are indexed by the local id of the channel, which the
+       // other side should send in the PeersId field.
+       chans []*channel
+
+       // This is a debugging aid: it offsets all IDs by this
+       // amount. This helps distinguish otherwise identical
+       // server/client muxes
+       offset uint32
+}
+
+// Assigns a channel ID to the given channel.
+func (c *chanList) add(ch *channel) uint32 {
+       c.Lock()
+       defer c.Unlock()
+       for i := range c.chans {
+               if c.chans[i] == nil {
+                       c.chans[i] = ch
+                       return uint32(i) + c.offset
+               }
+       }
+       c.chans = append(c.chans, ch)
+       return uint32(len(c.chans)-1) + c.offset
+}
+
+// getChan returns the channel for the given ID.
+func (c *chanList) getChan(id uint32) *channel {
+       id -= c.offset
+
+       c.Lock()
+       defer c.Unlock()
+       if id < uint32(len(c.chans)) {
+               return c.chans[id]
+       }
+       return nil
+}
+
+func (c *chanList) remove(id uint32) {
+       id -= c.offset
+       c.Lock()
+       if id < uint32(len(c.chans)) {
+               c.chans[id] = nil
+       }
+       c.Unlock()
+}
+
+// dropAll forgets all channels it knows, returning them in a slice.
+func (c *chanList) dropAll() []*channel {
+       c.Lock()
+       defer c.Unlock()
+       var r []*channel
+
+       for _, ch := range c.chans {
+               if ch == nil {
+                       continue
+               }
+               r = append(r, ch)
+       }
+       c.chans = nil
+       return r
+}
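+
+// exampleChanList is an illustrative sketch (not part of the upstream
+// file): add hands out the lowest free slot (plus the debug offset),
+// getChan maps the id back, and remove frees the slot for reuse.
+func exampleChanList() {
+       var l chanList
+       id := l.add(&channel{}) // 0 on an empty list
+       _ = l.getChan(id)       // the channel added above
+       l.remove(id)            // slot is free for the next add
+}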
+
+// mux represents the state for the SSH connection protocol, which
+// multiplexes many channels onto a single packet transport.
+type mux struct {
+       conn     packetConn
+       chanList chanList
+
+       incomingChannels chan NewChannel
+
+       globalSentMu     sync.Mutex
+       globalResponses  chan interface{}
+       incomingRequests chan *Request
+
+       errCond *sync.Cond
+       err     error
+}
+
+// When debugging, each new chanList instantiation has a different
+// offset.
+var globalOff uint32
+
+func (m *mux) Wait() error {
+       m.errCond.L.Lock()
+       defer m.errCond.L.Unlock()
+       for m.err == nil {
+               m.errCond.Wait()
+       }
+       return m.err
+}
+
+// newMux returns a mux that runs over the given connection.
+func newMux(p packetConn) *mux {
+       m := &mux{
+               conn:             p,
+               incomingChannels: make(chan NewChannel, chanSize),
+               globalResponses:  make(chan interface{}, 1),
+               incomingRequests: make(chan *Request, chanSize),
+               errCond:          newCond(),
+       }
+       if debugMux {
+               m.chanList.offset = atomic.AddUint32(&globalOff, 1)
+       }
+
+       go m.loop()
+       return m
+}
+
+func (m *mux) sendMessage(msg interface{}) error {
+       p := Marshal(msg)
+       if debugMux {
+               log.Printf("send global(%d): %#v", m.chanList.offset, msg)
+       }
+       return m.conn.writePacket(p)
+}
+
+func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) {
+       if wantReply {
+               m.globalSentMu.Lock()
+               defer m.globalSentMu.Unlock()
+       }
+
+       if err := m.sendMessage(globalRequestMsg{
+               Type:      name,
+               WantReply: wantReply,
+               Data:      payload,
+       }); err != nil {
+               return false, nil, err
+       }
+
+       if !wantReply {
+               return false, nil, nil
+       }
+
+       msg, ok := <-m.globalResponses
+       if !ok {
+               return false, nil, io.EOF
+       }
+       switch msg := msg.(type) {
+       case *globalRequestFailureMsg:
+               return false, msg.Data, nil
+       case *globalRequestSuccessMsg:
+               return true, msg.Data, nil
+       default:
+               return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg)
+       }
+}
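+
+// exampleGlobalKeepalive is an illustrative sketch (not part of the
+// upstream file): with wantReply set, SendRequest blocks until the
+// peer answers; "keepalive@openssh.com" is a conventional no-op
+// request name.
+func exampleGlobalKeepalive(m *mux) error {
+       _, _, err := m.SendRequest("keepalive@openssh.com", true, nil)
+       return err
+}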
+
+// ackRequest must be called after processing a global request that
+// has WantReply set.
+func (m *mux) ackRequest(ok bool, data []byte) error {
+       if ok {
+               return m.sendMessage(globalRequestSuccessMsg{Data: data})
+       }
+       return m.sendMessage(globalRequestFailureMsg{Data: data})
+}
+
+func (m *mux) Close() error {
+       return m.conn.Close()
+}
+
+// loop runs the connection machine. It will process packets until an
+// error is encountered. To synchronize on loop exit, use mux.Wait.
+func (m *mux) loop() {
+       var err error
+       for err == nil {
+               err = m.onePacket()
+       }
+
+       for _, ch := range m.chanList.dropAll() {
+               ch.close()
+       }
+
+       close(m.incomingChannels)
+       close(m.incomingRequests)
+       close(m.globalResponses)
+
+       m.conn.Close()
+
+       m.errCond.L.Lock()
+       m.err = err
+       m.errCond.Broadcast()
+       m.errCond.L.Unlock()
+
+       if debugMux {
+               log.Println("loop exit", err)
+       }
+}
+
+// onePacket reads and processes one packet.
+func (m *mux) onePacket() error {
+       packet, err := m.conn.readPacket()
+       if err != nil {
+               return err
+       }
+
+       if debugMux {
+               if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData {
+                       log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet))
+               } else {
+                       p, _ := decode(packet)
+                       log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet))
+               }
+       }
+
+       switch packet[0] {
+       case msgChannelOpen:
+               return m.handleChannelOpen(packet)
+       case msgGlobalRequest, msgRequestSuccess, msgRequestFailure:
+               return m.handleGlobalPacket(packet)
+       }
+
+       // assume a channel packet.
+       if len(packet) < 5 {
+               return parseError(packet[0])
+       }
+       id := binary.BigEndian.Uint32(packet[1:])
+       ch := m.chanList.getChan(id)
+       if ch == nil {
+               return fmt.Errorf("ssh: invalid channel %d", id)
+       }
+
+       return ch.handlePacket(packet)
+}
+
+func (m *mux) handleGlobalPacket(packet []byte) error {
+       msg, err := decode(packet)
+       if err != nil {
+               return err
+       }
+
+       switch msg := msg.(type) {
+       case *globalRequestMsg:
+               m.incomingRequests <- &Request{
+                       Type:      msg.Type,
+                       WantReply: msg.WantReply,
+                       Payload:   msg.Data,
+                       mux:       m,
+               }
+       case *globalRequestSuccessMsg, *globalRequestFailureMsg:
+               m.globalResponses <- msg
+       default:
+               panic(fmt.Sprintf("not a global message %#v", msg))
+       }
+
+       return nil
+}
+
+// handleChannelOpen schedules a channel to be Accept()ed.
+func (m *mux) handleChannelOpen(packet []byte) error {
+       var msg channelOpenMsg
+       if err := Unmarshal(packet, &msg); err != nil {
+               return err
+       }
+
+       if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 {
+               failMsg := channelOpenFailureMsg{
+                       PeersId:  msg.PeersId,
+                       Reason:   ConnectionFailed,
+                       Message:  "invalid request",
+                       Language: "en_US.UTF-8",
+               }
+               return m.sendMessage(failMsg)
+       }
+
+       c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData)
+       c.remoteId = msg.PeersId
+       c.maxRemotePayload = msg.MaxPacketSize
+       c.remoteWin.add(msg.PeersWindow)
+       m.incomingChannels <- c
+       return nil
+}
+
+func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) {
+       ch, err := m.openChannel(chanType, extra)
+       if err != nil {
+               return nil, nil, err
+       }
+
+       return ch, ch.incomingRequests, nil
+}
+
+func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
+       ch := m.newChannel(chanType, channelOutbound, extra)
+
+       ch.maxIncomingPayload = channelMaxPacket
+
+       open := channelOpenMsg{
+               ChanType:         chanType,
+               PeersWindow:      ch.myWindow,
+               MaxPacketSize:    ch.maxIncomingPayload,
+               TypeSpecificData: extra,
+               PeersId:          ch.localId,
+       }
+       if err := m.sendMessage(open); err != nil {
+               return nil, err
+       }
+
+       switch msg := (<-ch.msg).(type) {
+       case *channelOpenConfirmMsg:
+               return ch, nil
+       case *channelOpenFailureMsg:
+               return nil, &OpenChannelError{msg.Reason, msg.Message}
+       default:
+               return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
+       }
+}
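+
+// exampleOpenSession is an illustrative sketch (not part of the
+// upstream file): "session" is the standard RFC 4254 channel type,
+// and the returned request channel must be serviced or the
+// connection stalls.
+func exampleOpenSession(m *mux) (Channel, error) {
+       ch, reqs, err := m.OpenChannel("session", nil)
+       if err != nil {
+               return nil, err
+       }
+       go DiscardRequests(reqs)
+       return ch, nil
+}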
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
new file mode 100644 (file)
index 0000000..77c84d1
--- /dev/null
@@ -0,0 +1,491 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "bytes"
+       "errors"
+       "fmt"
+       "io"
+       "net"
+       "strings"
+)
+
+// The Permissions type holds fine-grained permissions that are
+// specific to a user or a specific authentication method for a
+// user. Permissions, except for "source-address", must be enforced in
+// the server application layer, after successful authentication. The
+// Permissions are passed on in ServerConn so a server implementation
+// can honor them.
+type Permissions struct {
+       // Critical options restrict default permissions. Common
+       // restrictions are "source-address" and "force-command". If
+       // the server cannot enforce the restriction, or does not
+       // recognize it, the user should not authenticate.
+       CriticalOptions map[string]string
+
+       // Extensions are extra functionality that the server may
+       // offer on authenticated connections. Common extensions are
+       // "permit-agent-forwarding", "permit-X11-forwarding". Lack of
+       // support for an extension does not preclude authenticating a
+       // user.
+       Extensions map[string]string
+}
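+
+// examplePermissions is an illustrative sketch (not part of the
+// upstream file): an auth callback can pin a key to a network by
+// returning a "source-address" critical option, which
+// serverAuthenticate enforces via checkSourceAddress below.
+func examplePermissions() *Permissions {
+       return &Permissions{
+               CriticalOptions: map[string]string{
+                       "source-address": "10.0.0.0/8",
+               },
+       }
+}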
+
+// ServerConfig holds server specific configuration data.
+type ServerConfig struct {
+       // Config contains configuration shared between client and server.
+       Config
+
+       hostKeys []Signer
+
+       // NoClientAuth is true if clients are allowed to connect without
+       // authenticating.
+       NoClientAuth bool
+
+       // PasswordCallback, if non-nil, is called when a user
+       // attempts to authenticate using a password.
+       PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error)
+
+       // PublicKeyCallback, if non-nil, is called when a client attempts public
+       // key authentication. It must return true if the given public key is
+       // valid for the given user. For example, see CertChecker.Authenticate.
+       PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
+
+       // KeyboardInteractiveCallback, if non-nil, is called when
+       // keyboard-interactive authentication is selected (RFC
+       // 4256). The client object's Challenge function should be
+       // used to query the user. The callback may offer multiple
+       // Challenge rounds. To avoid information leaks, the client
+       // should be presented a challenge even if the user is
+       // unknown.
+       KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error)
+
+       // AuthLogCallback, if non-nil, is called to log all authentication
+       // attempts.
+       AuthLogCallback func(conn ConnMetadata, method string, err error)
+
+       // ServerVersion is the version identification string to announce in
+       // the public handshake.
+       // If empty, a reasonable default is used.
+       // Note that RFC 4253 section 4.2 requires that this string start with
+       // "SSH-2.0-".
+       ServerVersion string
+}
+
+// AddHostKey adds a private key as a host key. If a host key with the
+// same algorithm already exists, it is overwritten. Each server
+// config must have at least one host key.
+func (s *ServerConfig) AddHostKey(key Signer) {
+       for i, k := range s.hostKeys {
+               if k.PublicKey().Type() == key.PublicKey().Type() {
+                       s.hostKeys[i] = key
+                       return
+               }
+       }
+
+       s.hostKeys = append(s.hostKeys, key)
+}
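+
+// exampleServerConfig is an illustrative sketch (not part of the
+// upstream file): a minimal configuration needs at least one host
+// key and, unless NoClientAuth is set, at least one authentication
+// callback. The user name and password here are placeholders.
+func exampleServerConfig(hostKey Signer) *ServerConfig {
+       config := &ServerConfig{
+               PasswordCallback: func(conn ConnMetadata, password []byte) (*Permissions, error) {
+                       // Placeholder check; real code should compare
+                       // credentials in constant time.
+                       if conn.User() == "demo" && string(password) == "secret" {
+                               return nil, nil
+                       }
+                       return nil, fmt.Errorf("ssh: password rejected for %q", conn.User())
+               },
+       }
+       config.AddHostKey(hostKey)
+       return config
+}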
+
+// cachedPubKey contains the results of querying whether a public key is
+// acceptable for a user.
+type cachedPubKey struct {
+       user       string
+       pubKeyData []byte
+       result     error
+       perms      *Permissions
+}
+
+const maxCachedPubKeys = 16
+
+// pubKeyCache caches tests for public keys.  Since SSH clients
+// will query whether a public key is acceptable before attempting to
+// authenticate with it, we end up with duplicate queries for public
+// key validity.  The cache only applies to a single ServerConn.
+type pubKeyCache struct {
+       keys []cachedPubKey
+}
+
+// get returns the result for a given user/algo/key tuple.
+func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) {
+       for _, k := range c.keys {
+               if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) {
+                       return k, true
+               }
+       }
+       return cachedPubKey{}, false
+}
+
+// add adds the given tuple to the cache.
+func (c *pubKeyCache) add(candidate cachedPubKey) {
+       if len(c.keys) < maxCachedPubKeys {
+               c.keys = append(c.keys, candidate)
+       }
+}
+
+// ServerConn is an authenticated SSH connection, as seen from the
+// server.
+type ServerConn struct {
+       Conn
+
+       // If the successful authentication callback returned a
+       // non-nil Permissions pointer, it is stored here.
+       Permissions *Permissions
+}
+
+// NewServerConn starts a new SSH server with c as the underlying
+// transport.  It starts with a handshake and, if the handshake is
+// unsuccessful, it closes the connection and returns an error.  The
+// Request and NewChannel channels must be serviced, or the connection
+// will hang.
+func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) {
+       fullConf := *config
+       fullConf.SetDefaults()
+       s := &connection{
+               sshConn: sshConn{conn: c},
+       }
+       perms, err := s.serverHandshake(&fullConf)
+       if err != nil {
+               c.Close()
+               return nil, nil, nil, err
+       }
+       return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil
+}
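+
+// exampleAccept is an illustrative sketch (not part of the upstream
+// file): both channels returned by NewServerConn must be serviced or
+// the connection hangs. A real server would accept "session"
+// channels instead of rejecting everything.
+func exampleAccept(c net.Conn, config *ServerConfig) error {
+       _, chans, reqs, err := NewServerConn(c, config)
+       if err != nil {
+               return err
+       }
+       go DiscardRequests(reqs)
+       for newCh := range chans {
+               newCh.Reject(UnknownChannelType, "not implemented")
+       }
+       return nil
+}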
+
+// signAndMarshal signs the data with the appropriate algorithm,
+// and serializes the result in SSH wire format.
+func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
+       sig, err := k.Sign(rand, data)
+       if err != nil {
+               return nil, err
+       }
+
+       return Marshal(sig), nil
+}
+
+// serverHandshake performs key exchange and user authentication.
+func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) {
+       if len(config.hostKeys) == 0 {
+               return nil, errors.New("ssh: server has no host keys")
+       }
+
+       if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil {
+               return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
+       }
+
+       if config.ServerVersion != "" {
+               s.serverVersion = []byte(config.ServerVersion)
+       } else {
+               s.serverVersion = []byte(packageVersion)
+       }
+       var err error
+       s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion)
+       if err != nil {
+               return nil, err
+       }
+
+       tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */)
+       s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config)
+
+       if err := s.transport.waitSession(); err != nil {
+               return nil, err
+       }
+
+       // We just did the key exchange, so the session ID is established.
+       s.sessionID = s.transport.getSessionID()
+
+       var packet []byte
+       if packet, err = s.transport.readPacket(); err != nil {
+               return nil, err
+       }
+
+       var serviceRequest serviceRequestMsg
+       if err = Unmarshal(packet, &serviceRequest); err != nil {
+               return nil, err
+       }
+       if serviceRequest.Service != serviceUserAuth {
+               return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating")
+       }
+       serviceAccept := serviceAcceptMsg{
+               Service: serviceUserAuth,
+       }
+       if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil {
+               return nil, err
+       }
+
+       perms, err := s.serverAuthenticate(config)
+       if err != nil {
+               return nil, err
+       }
+       s.mux = newMux(s.transport)
+       return perms, err
+}
+
+func isAcceptableAlgo(algo string) bool {
+       switch algo {
+       case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519,
+               CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01:
+               return true
+       }
+       return false
+}
+
+func checkSourceAddress(addr net.Addr, sourceAddrs string) error {
+       if addr == nil {
+               return errors.New("ssh: no address known for client, but source-address match required")
+       }
+
+       tcpAddr, ok := addr.(*net.TCPAddr)
+       if !ok {
+               return fmt.Errorf("ssh: remote address %v is not a TCP address when checking source-address match", addr)
+       }
+
+       for _, sourceAddr := range strings.Split(sourceAddrs, ",") {
+               if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil {
+                       if allowedIP.Equal(tcpAddr.IP) {
+                               return nil
+                       }
+               } else {
+                       _, ipNet, err := net.ParseCIDR(sourceAddr)
+                       if err != nil {
+                               return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err)
+                       }
+
+                       if ipNet.Contains(tcpAddr.IP) {
+                               return nil
+                       }
+               }
+       }
+
+       return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr)
+}
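+
+// exampleCheckSourceAddress is an illustrative sketch (not part of
+// the upstream file): the source-address restriction is a
+// comma-separated mix of literal IPs and CIDR blocks.
+func exampleCheckSourceAddress() error {
+       client := &net.TCPAddr{IP: net.IPv4(10, 0, 0, 7), Port: 2222}
+       // Returns nil: 10.0.0.7 falls inside 10.0.0.0/8.
+       return checkSourceAddress(client, "192.0.2.1,10.0.0.0/8")
+}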
+
+func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
+       sessionID := s.transport.getSessionID()
+       var cache pubKeyCache
+       var perms *Permissions
+
+userAuthLoop:
+       for {
+               var userAuthReq userAuthRequestMsg
+               if packet, err := s.transport.readPacket(); err != nil {
+                       return nil, err
+               } else if err = Unmarshal(packet, &userAuthReq); err != nil {
+                       return nil, err
+               }
+
+               if userAuthReq.Service != serviceSSH {
+                       return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service)
+               }
+
+               s.user = userAuthReq.User
+               perms = nil
+               authErr := errors.New("no auth passed yet")
+
+               switch userAuthReq.Method {
+               case "none":
+                       if config.NoClientAuth {
+                               authErr = nil
+                       }
+               case "password":
+                       if config.PasswordCallback == nil {
+                               authErr = errors.New("ssh: password auth not configured")
+                               break
+                       }
+                       payload := userAuthReq.Payload
+                       if len(payload) < 1 || payload[0] != 0 {
+                               return nil, parseError(msgUserAuthRequest)
+                       }
+                       payload = payload[1:]
+                       password, payload, ok := parseString(payload)
+                       if !ok || len(payload) > 0 {
+                               return nil, parseError(msgUserAuthRequest)
+                       }
+
+                       perms, authErr = config.PasswordCallback(s, password)
+               case "keyboard-interactive":
+                       if config.KeyboardInteractiveCallback == nil {
+                               authErr = errors.New("ssh: keyboard-interactive auth not configured")
+                               break
+                       }
+
+                       prompter := &sshClientKeyboardInteractive{s}
+                       perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge)
+               case "publickey":
+                       if config.PublicKeyCallback == nil {
+                               authErr = errors.New("ssh: publickey auth not configured")
+                               break
+                       }
+                       payload := userAuthReq.Payload
+                       if len(payload) < 1 {
+                               return nil, parseError(msgUserAuthRequest)
+                       }
+                       isQuery := payload[0] == 0
+                       payload = payload[1:]
+                       algoBytes, payload, ok := parseString(payload)
+                       if !ok {
+                               return nil, parseError(msgUserAuthRequest)
+                       }
+                       algo := string(algoBytes)
+                       if !isAcceptableAlgo(algo) {
+                               authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
+                               break
+                       }
+
+                       pubKeyData, payload, ok := parseString(payload)
+                       if !ok {
+                               return nil, parseError(msgUserAuthRequest)
+                       }
+
+                       pubKey, err := ParsePublicKey(pubKeyData)
+                       if err != nil {
+                               return nil, err
+                       }
+
+                       candidate, ok := cache.get(s.user, pubKeyData)
+                       if !ok {
+                               candidate.user = s.user
+                               candidate.pubKeyData = pubKeyData
+                               candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey)
+                               if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" {
+                                       candidate.result = checkSourceAddress(
+                                               s.RemoteAddr(),
+                                               candidate.perms.CriticalOptions[sourceAddressCriticalOption])
+                               }
+                               cache.add(candidate)
+                       }
+
+                       if isQuery {
+                               // The client can query whether the given public key
+                               // would be okay.
+                               if len(payload) > 0 {
+                                       return nil, parseError(msgUserAuthRequest)
+                               }
+
+                               if candidate.result == nil {
+                                       okMsg := userAuthPubKeyOkMsg{
+                                               Algo:   algo,
+                                               PubKey: pubKeyData,
+                                       }
+                                       if err = s.transport.writePacket(Marshal(&okMsg)); err != nil {
+                                               return nil, err
+                                       }
+                                       continue userAuthLoop
+                               }
+                               authErr = candidate.result
+                       } else {
+                               sig, payload, ok := parseSignature(payload)
+                               if !ok || len(payload) > 0 {
+                                       return nil, parseError(msgUserAuthRequest)
+                               }
+                               // Ensure the public key algo and signature algo
+                               // are supported.  Compare the private key
+                               // algorithm name that corresponds to algo with
+                               // sig.Format.  This is usually the same, but
+                               // for certs, the names differ.
+                               if !isAcceptableAlgo(sig.Format) {
+                                       break
+                               }
+                               signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)
+
+                               if err := pubKey.Verify(signedData, sig); err != nil {
+                                       return nil, err
+                               }
+
+                               authErr = candidate.result
+                               perms = candidate.perms
+                       }
+               default:
+                       authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method)
+               }
+
+               if config.AuthLogCallback != nil {
+                       config.AuthLogCallback(s, userAuthReq.Method, authErr)
+               }
+
+               if authErr == nil {
+                       break userAuthLoop
+               }
+
+               var failureMsg userAuthFailureMsg
+               if config.PasswordCallback != nil {
+                       failureMsg.Methods = append(failureMsg.Methods, "password")
+               }
+               if config.PublicKeyCallback != nil {
+                       failureMsg.Methods = append(failureMsg.Methods, "publickey")
+               }
+               if config.KeyboardInteractiveCallback != nil {
+                       failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive")
+               }
+
+               if len(failureMsg.Methods) == 0 {
+                       return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false")
+               }
+
+               if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil {
+                       return nil, err
+               }
+       }
+
+       if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil {
+               return nil, err
+       }
+       return perms, nil
+}
+
+// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by
+// asking the client on the other side of a ServerConn.
+type sshClientKeyboardInteractive struct {
+       *connection
+}
+
+func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
+       if len(questions) != len(echos) {
+               return nil, errors.New("ssh: echos and questions must have equal length")
+       }
+
+       var prompts []byte
+       for i := range questions {
+               prompts = appendString(prompts, questions[i])
+               prompts = appendBool(prompts, echos[i])
+       }
+
+       if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
+               Instruction: instruction,
+               NumPrompts:  uint32(len(questions)),
+               Prompts:     prompts,
+       })); err != nil {
+               return nil, err
+       }
+
+       packet, err := c.transport.readPacket()
+       if err != nil {
+               return nil, err
+       }
+       if packet[0] != msgUserAuthInfoResponse {
+               return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0])
+       }
+       packet = packet[1:]
+
+       n, packet, ok := parseUint32(packet)
+       if !ok || int(n) != len(questions) {
+               return nil, parseError(msgUserAuthInfoResponse)
+       }
+
+       for i := uint32(0); i < n; i++ {
+               ans, rest, ok := parseString(packet)
+               if !ok {
+                       return nil, parseError(msgUserAuthInfoResponse)
+               }
+
+               answers = append(answers, string(ans))
+               packet = rest
+       }
+       if len(packet) != 0 {
+               return nil, errors.New("ssh: junk at end of message")
+       }
+
+       return answers, nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go
new file mode 100644 (file)
index 0000000..17e2aa8
--- /dev/null
@@ -0,0 +1,627 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+// Session implements an interactive session described in
+// "RFC 4254, section 6".
+
+import (
+       "bytes"
+       "encoding/binary"
+       "errors"
+       "fmt"
+       "io"
+       "io/ioutil"
+       "sync"
+)
+
+type Signal string
+
+// POSIX signals as listed in RFC 4254 Section 6.10.
+const (
+       SIGABRT Signal = "ABRT"
+       SIGALRM Signal = "ALRM"
+       SIGFPE  Signal = "FPE"
+       SIGHUP  Signal = "HUP"
+       SIGILL  Signal = "ILL"
+       SIGINT  Signal = "INT"
+       SIGKILL Signal = "KILL"
+       SIGPIPE Signal = "PIPE"
+       SIGQUIT Signal = "QUIT"
+       SIGSEGV Signal = "SEGV"
+       SIGTERM Signal = "TERM"
+       SIGUSR1 Signal = "USR1"
+       SIGUSR2 Signal = "USR2"
+)
+
+var signals = map[Signal]int{
+       SIGABRT: 6,
+       SIGALRM: 14,
+       SIGFPE:  8,
+       SIGHUP:  1,
+       SIGILL:  4,
+       SIGINT:  2,
+       SIGKILL: 9,
+       SIGPIPE: 13,
+       SIGQUIT: 3,
+       SIGSEGV: 11,
+       SIGTERM: 15,
+}
+
+type TerminalModes map[uint8]uint32
+
+// POSIX terminal mode flags as listed in RFC 4254 Section 8.
+const (
+       tty_OP_END    = 0
+       VINTR         = 1
+       VQUIT         = 2
+       VERASE        = 3
+       VKILL         = 4
+       VEOF          = 5
+       VEOL          = 6
+       VEOL2         = 7
+       VSTART        = 8
+       VSTOP         = 9
+       VSUSP         = 10
+       VDSUSP        = 11
+       VREPRINT      = 12
+       VWERASE       = 13
+       VLNEXT        = 14
+       VFLUSH        = 15
+       VSWTCH        = 16
+       VSTATUS       = 17
+       VDISCARD      = 18
+       IGNPAR        = 30
+       PARMRK        = 31
+       INPCK         = 32
+       ISTRIP        = 33
+       INLCR         = 34
+       IGNCR         = 35
+       ICRNL         = 36
+       IUCLC         = 37
+       IXON          = 38
+       IXANY         = 39
+       IXOFF         = 40
+       IMAXBEL       = 41
+       ISIG          = 50
+       ICANON        = 51
+       XCASE         = 52
+       ECHO          = 53
+       ECHOE         = 54
+       ECHOK         = 55
+       ECHONL        = 56
+       NOFLSH        = 57
+       TOSTOP        = 58
+       IEXTEN        = 59
+       ECHOCTL       = 60
+       ECHOKE        = 61
+       PENDIN        = 62
+       OPOST         = 70
+       OLCUC         = 71
+       ONLCR         = 72
+       OCRNL         = 73
+       ONOCR         = 74
+       ONLRET        = 75
+       CS7           = 90
+       CS8           = 91
+       PARENB        = 92
+       PARODD        = 93
+       TTY_OP_ISPEED = 128
+       TTY_OP_OSPEED = 129
+)
+
+// A Session represents a connection to a remote command or shell.
+type Session struct {
+       // Stdin specifies the remote process's standard input.
+       // If Stdin is nil, the remote process reads from an empty
+       // bytes.Buffer.
+       Stdin io.Reader
+
+       // Stdout and Stderr specify the remote process's standard
+       // output and error.
+       //
+       // If either is nil, Run connects the corresponding file
+       // descriptor to an instance of ioutil.Discard. There is a
+       // fixed amount of buffering that is shared for the two streams.
+       // If either blocks it may eventually cause the remote
+       // command to block.
+       Stdout io.Writer
+       Stderr io.Writer
+
+       ch        Channel // the channel backing this session
+       started   bool    // true once Start, Run or Shell is invoked.
+       copyFuncs []func() error
+       errors    chan error // one send per copyFunc
+
+       // true if pipe method is active
+       stdinpipe, stdoutpipe, stderrpipe bool
+
+       // stdinPipeWriter is non-nil if StdinPipe has not been called
+       // and Stdin was specified by the user; it is the write end of
+       // a pipe connecting Session.Stdin to the stdin channel.
+       stdinPipeWriter io.WriteCloser
+
+       exitStatus chan error
+}
+
+// SendRequest sends an out-of-band channel request on the SSH channel
+// underlying the session.
+func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) {
+       return s.ch.SendRequest(name, wantReply, payload)
+}
+
+func (s *Session) Close() error {
+       return s.ch.Close()
+}
+
+// RFC 4254 Section 6.4.
+type setenvRequest struct {
+       Name  string
+       Value string
+}
+
+// Setenv sets an environment variable that will be applied to any
+// command executed by Shell or Run.
+func (s *Session) Setenv(name, value string) error {
+       msg := setenvRequest{
+               Name:  name,
+               Value: value,
+       }
+       ok, err := s.ch.SendRequest("env", true, Marshal(&msg))
+       if err == nil && !ok {
+               err = errors.New("ssh: setenv failed")
+       }
+       return err
+}
+
+// RFC 4254 Section 6.2.
+type ptyRequestMsg struct {
+       Term     string
+       Columns  uint32
+       Rows     uint32
+       Width    uint32
+       Height   uint32
+       Modelist string
+}
+
+// RequestPty requests the association of a pty with the session on the remote host.
+func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error {
+       var tm []byte
+       for k, v := range termmodes {
+               kv := struct {
+                       Key byte
+                       Val uint32
+               }{k, v}
+
+               tm = append(tm, Marshal(&kv)...)
+       }
+       tm = append(tm, tty_OP_END)
+       req := ptyRequestMsg{
+               Term:     term,
+               Columns:  uint32(w),
+               Rows:     uint32(h),
+               Width:    uint32(w * 8),
+               Height:   uint32(h * 8),
+               Modelist: string(tm),
+       }
+       ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req))
+       if err == nil && !ok {
+               err = errors.New("ssh: pty-req failed")
+       }
+       return err
+}
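+
+// exampleRequestPty is an illustrative sketch (not part of the
+// upstream file): a typical pty-req carries a terminal name, the
+// geometry, and a few encoded terminal modes.
+func exampleRequestPty(s *Session) error {
+       modes := TerminalModes{
+               ECHO:          1,     // enable echoing
+               TTY_OP_ISPEED: 14400, // input speed = 14.4 kbaud
+               TTY_OP_OSPEED: 14400, // output speed = 14.4 kbaud
+       }
+       return s.RequestPty("xterm", 24, 80, modes)
+}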
+
+// RFC 4254 Section 6.5.
+type subsystemRequestMsg struct {
+       Subsystem string
+}
+
+// RequestSubsystem requests the association of a subsystem with the session on the remote host.
+// A subsystem is a predefined command that runs in the background when the SSH session is initiated.
+func (s *Session) RequestSubsystem(subsystem string) error {
+       msg := subsystemRequestMsg{
+               Subsystem: subsystem,
+       }
+       ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg))
+       if err == nil && !ok {
+               err = errors.New("ssh: subsystem request failed")
+       }
+       return err
+}
+
+// RFC 4254 Section 6.9.
+type signalMsg struct {
+       Signal string
+}
+
+// Signal sends the given signal to the remote process.
+// sig is one of the SIG* constants.
+func (s *Session) Signal(sig Signal) error {
+       msg := signalMsg{
+               Signal: string(sig),
+       }
+
+       _, err := s.ch.SendRequest("signal", false, Marshal(&msg))
+       return err
+}
+
+// RFC 4254 Section 6.5.
+type execMsg struct {
+       Command string
+}
+
+// Start runs cmd on the remote host. Typically, the remote
+// server passes cmd to the shell for interpretation.
+// A Session only accepts one call to Run, Start, Shell, Output,
+// or CombinedOutput.
+func (s *Session) Start(cmd string) error {
+       if s.started {
+               return errors.New("ssh: session already started")
+       }
+       req := execMsg{
+               Command: cmd,
+       }
+
+       ok, err := s.ch.SendRequest("exec", true, Marshal(&req))
+       if err == nil && !ok {
+               err = fmt.Errorf("ssh: command %v failed", cmd)
+       }
+       if err != nil {
+               return err
+       }
+       return s.start()
+}
+
+// Run runs cmd on the remote host. Typically, the remote
+// server passes cmd to the shell for interpretation.
+// A Session only accepts one call to Run, Start, Shell, Output,
+// or CombinedOutput.
+//
+// The returned error is nil if the command runs, has no problems
+// copying stdin, stdout, and stderr, and exits with a zero exit
+// status.
+//
+// If the remote server does not send an exit status, an error of type
+// *ExitMissingError is returned. If the command completes
+// unsuccessfully or is interrupted by a signal, the error is of type
+// *ExitError. Other error types may be returned for I/O problems.
+func (s *Session) Run(cmd string) error {
+       err := s.Start(cmd)
+       if err != nil {
+               return err
+       }
+       return s.Wait()
+}
+
+// Output runs cmd on the remote host and returns its standard output.
+func (s *Session) Output(cmd string) ([]byte, error) {
+       if s.Stdout != nil {
+               return nil, errors.New("ssh: Stdout already set")
+       }
+       var b bytes.Buffer
+       s.Stdout = &b
+       err := s.Run(cmd)
+       return b.Bytes(), err
+}
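+
+// exampleOutput is an illustrative sketch (not part of the upstream
+// file): Output is the usual one-shot entry point; each Session runs
+// at most one command.
+func exampleOutput(s *Session) (string, error) {
+       out, err := s.Output("/usr/bin/whoami")
+       return string(out), err
+}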
+
+type singleWriter struct {
+       b  bytes.Buffer
+       mu sync.Mutex
+}
+
+func (w *singleWriter) Write(p []byte) (int, error) {
+       w.mu.Lock()
+       defer w.mu.Unlock()
+       return w.b.Write(p)
+}
+
+// CombinedOutput runs cmd on the remote host and returns its combined
+// standard output and standard error.
+func (s *Session) CombinedOutput(cmd string) ([]byte, error) {
+       if s.Stdout != nil {
+               return nil, errors.New("ssh: Stdout already set")
+       }
+       if s.Stderr != nil {
+               return nil, errors.New("ssh: Stderr already set")
+       }
+       var b singleWriter
+       s.Stdout = &b
+       s.Stderr = &b
+       err := s.Run(cmd)
+       return b.b.Bytes(), err
+}
+
+// Shell starts a login shell on the remote host. A Session only
+// accepts one call to Run, Start, Shell, Output, or CombinedOutput.
+func (s *Session) Shell() error {
+       if s.started {
+               return errors.New("ssh: session already started")
+       }
+
+       ok, err := s.ch.SendRequest("shell", true, nil)
+       if err == nil && !ok {
+               return errors.New("ssh: could not start shell")
+       }
+       if err != nil {
+               return err
+       }
+       return s.start()
+}
+
+func (s *Session) start() error {
+       s.started = true
+
+       type F func(*Session)
+       for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} {
+               setupFd(s)
+       }
+
+       s.errors = make(chan error, len(s.copyFuncs))
+       for _, fn := range s.copyFuncs {
+               go func(fn func() error) {
+                       s.errors <- fn()
+               }(fn)
+       }
+       return nil
+}
+
+// Wait waits for the remote command to exit.
+//
+// The returned error is nil if the command runs, has no problems
+// copying stdin, stdout, and stderr, and exits with a zero exit
+// status.
+//
+// If the remote server does not send an exit status, an error of type
+// *ExitMissingError is returned. If the command completes
+// unsuccessfully or is interrupted by a signal, the error is of type
+// *ExitError. Other error types may be returned for I/O problems.
+func (s *Session) Wait() error {
+       if !s.started {
+               return errors.New("ssh: session not started")
+       }
+       waitErr := <-s.exitStatus
+
+       if s.stdinPipeWriter != nil {
+               s.stdinPipeWriter.Close()
+       }
+       var copyError error
+       for range s.copyFuncs {
+               if err := <-s.errors; err != nil && copyError == nil {
+                       copyError = err
+               }
+       }
+       if waitErr != nil {
+               return waitErr
+       }
+       return copyError
+}
+
+func (s *Session) wait(reqs <-chan *Request) error {
+       wm := Waitmsg{status: -1}
+       // Wait for msg channel to be closed before returning.
+       for msg := range reqs {
+               switch msg.Type {
+               case "exit-status":
+                       wm.status = int(binary.BigEndian.Uint32(msg.Payload))
+               case "exit-signal":
+                       var sigval struct {
+                               Signal     string
+                               CoreDumped bool
+                               Error      string
+                               Lang       string
+                       }
+                       if err := Unmarshal(msg.Payload, &sigval); err != nil {
+                               return err
+                       }
+
+                       // Must sanitize strings?
+                       wm.signal = sigval.Signal
+                       wm.msg = sigval.Error
+                       wm.lang = sigval.Lang
+               default:
+                       // This handles keepalives and matches
+                       // OpenSSH's behaviour.
+                       if msg.WantReply {
+                               msg.Reply(false, nil)
+                       }
+               }
+       }
+       if wm.status == 0 {
+               return nil
+       }
+       if wm.status == -1 {
+               // exit-status was never sent from server
+               if wm.signal == "" {
+                       // signal was not sent either.  RFC 4254
+                       // section 6.10 recommends against this
+                       // behavior, but it is allowed, so we let
+                       // clients handle it.
+                       return &ExitMissingError{}
+               }
+               wm.status = 128
+               if sig, ok := signals[Signal(wm.signal)]; ok {
+                       wm.status += sig
+               }
+       }
+
+       return &ExitError{wm}
+}
+
+// ExitMissingError is returned if a session is torn down cleanly, but
+// the server sends no confirmation of the exit status.
+type ExitMissingError struct{}
+
+func (e *ExitMissingError) Error() string {
+       return "wait: remote command exited without exit status or exit signal"
+}
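+
+// exampleWaitErr is an illustrative sketch (not part of the upstream
+// file): callers distinguish the three Wait outcomes by error type.
+func exampleWaitErr(err error) string {
+       switch e := err.(type) {
+       case nil:
+               return "exit 0"
+       case *ExitError:
+               return fmt.Sprintf("exit %d (signal %q)", e.ExitStatus(), e.Signal())
+       case *ExitMissingError:
+               return "no exit status sent"
+       default:
+               return e.Error()
+       }
+}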
+
+func (s *Session) stdin() {
+       if s.stdinpipe {
+               return
+       }
+       var stdin io.Reader
+       if s.Stdin == nil {
+               stdin = new(bytes.Buffer)
+       } else {
+               r, w := io.Pipe()
+               go func() {
+                       _, err := io.Copy(w, s.Stdin)
+                       w.CloseWithError(err)
+               }()
+               stdin, s.stdinPipeWriter = r, w
+       }
+       s.copyFuncs = append(s.copyFuncs, func() error {
+               _, err := io.Copy(s.ch, stdin)
+               if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF {
+                       err = err1
+               }
+               return err
+       })
+}
+
+func (s *Session) stdout() {
+       if s.stdoutpipe {
+               return
+       }
+       if s.Stdout == nil {
+               s.Stdout = ioutil.Discard
+       }
+       s.copyFuncs = append(s.copyFuncs, func() error {
+               _, err := io.Copy(s.Stdout, s.ch)
+               return err
+       })
+}
+
+func (s *Session) stderr() {
+       if s.stderrpipe {
+               return
+       }
+       if s.Stderr == nil {
+               s.Stderr = ioutil.Discard
+       }
+       s.copyFuncs = append(s.copyFuncs, func() error {
+               _, err := io.Copy(s.Stderr, s.ch.Stderr())
+               return err
+       })
+}
+
+// sessionStdin reroutes Close to CloseWrite.
+type sessionStdin struct {
+       io.Writer
+       ch Channel
+}
+
+func (s *sessionStdin) Close() error {
+       return s.ch.CloseWrite()
+}
+
+// StdinPipe returns a pipe that will be connected to the
+// remote command's standard input when the command starts.
+func (s *Session) StdinPipe() (io.WriteCloser, error) {
+       if s.Stdin != nil {
+               return nil, errors.New("ssh: Stdin already set")
+       }
+       if s.started {
+               return nil, errors.New("ssh: StdinPipe after process started")
+       }
+       s.stdinpipe = true
+       return &sessionStdin{s.ch, s.ch}, nil
+}
+
+// StdoutPipe returns a pipe that will be connected to the
+// remote command's standard output when the command starts.
+// There is a fixed amount of buffering that is shared between
+// stdout and stderr streams. If the StdoutPipe reader is
+// not serviced fast enough it may eventually cause the
+// remote command to block.
+func (s *Session) StdoutPipe() (io.Reader, error) {
+       if s.Stdout != nil {
+               return nil, errors.New("ssh: Stdout already set")
+       }
+       if s.started {
+               return nil, errors.New("ssh: StdoutPipe after process started")
+       }
+       s.stdoutpipe = true
+       return s.ch, nil
+}
+
+// StderrPipe returns a pipe that will be connected to the
+// remote command's standard error when the command starts.
+// There is a fixed amount of buffering that is shared between
+// stdout and stderr streams. If the StderrPipe reader is
+// not serviced fast enough it may eventually cause the
+// remote command to block.
+func (s *Session) StderrPipe() (io.Reader, error) {
+       if s.Stderr != nil {
+               return nil, errors.New("ssh: Stderr already set")
+       }
+       if s.started {
+               return nil, errors.New("ssh: StderrPipe after process started")
+       }
+       s.stderrpipe = true
+       return s.ch.Stderr(), nil
+}
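+
+// examplePipes is an illustrative sketch (editor's addition, not upstream
+// code): StdoutPipe must be wired before Start, and the reader must be
+// drained promptly, because stdout and stderr share a fixed buffer and a
+// stalled reader can block the remote command. The command string is a
+// placeholder.
+func examplePipes(s *Session) error {
+       stdout, err := s.StdoutPipe()
+       if err != nil {
+               return err
+       }
+       if err := s.Start("ls -l"); err != nil {
+               return err
+       }
+       // Drain stdout before Wait so the remote side never stalls on the
+       // shared stdout/stderr window.
+       if _, err := io.Copy(ioutil.Discard, stdout); err != nil {
+               return err
+       }
+       return s.Wait()
+}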
+
+// newSession returns a new interactive session on the remote host.
+func newSession(ch Channel, reqs <-chan *Request) (*Session, error) {
+       s := &Session{
+               ch: ch,
+       }
+       s.exitStatus = make(chan error, 1)
+       go func() {
+               s.exitStatus <- s.wait(reqs)
+       }()
+
+       return s, nil
+}
+
+// An ExitError reports unsuccessful completion of a remote command.
+type ExitError struct {
+       Waitmsg
+}
+
+func (e *ExitError) Error() string {
+       return e.Waitmsg.String()
+}
+
+// Waitmsg stores the information about an exited remote command
+// as reported by Wait.
+type Waitmsg struct {
+       status int
+       signal string
+       msg    string
+       lang   string
+}
+
+// ExitStatus returns the exit status of the remote command.
+func (w Waitmsg) ExitStatus() int {
+       return w.status
+}
+
+// Signal returns the exit signal of the remote command if
+// it was terminated violently.
+func (w Waitmsg) Signal() string {
+       return w.signal
+}
+
+// Msg returns the exit message given by the remote command.
+func (w Waitmsg) Msg() string {
+       return w.msg
+}
+
+// Lang returns the language tag. See RFC 3066.
+func (w Waitmsg) Lang() string {
+       return w.lang
+}
+
+func (w Waitmsg) String() string {
+       str := fmt.Sprintf("Process exited with status %v", w.status)
+       if w.signal != "" {
+               str += fmt.Sprintf(" from signal %v", w.signal)
+       }
+       if w.msg != "" {
+               str += fmt.Sprintf(". Reason was: %v", w.msg)
+       }
+       return str
+}
diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go
new file mode 100644 (file)
index 0000000..6151241
--- /dev/null
@@ -0,0 +1,407 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "errors"
+       "fmt"
+       "io"
+       "math/rand"
+       "net"
+       "strconv"
+       "strings"
+       "sync"
+       "time"
+)
+
+// Listen requests the remote peer open a listening socket on
+// addr. Incoming connections will be available by calling Accept on
+// the returned net.Listener. The listener must be serviced, or the
+// SSH connection may hang.
+func (c *Client) Listen(n, addr string) (net.Listener, error) {
+       laddr, err := net.ResolveTCPAddr(n, addr)
+       if err != nil {
+               return nil, err
+       }
+       return c.ListenTCP(laddr)
+}
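+
+// exampleRemoteForward is an illustrative sketch (editor's addition, not
+// upstream code): it asks the remote peer to listen on port 8080 and
+// handles each forwarded connection with a trivial echo. The address and
+// handler are placeholders; client is assumed to come from ssh.Dial.
+func exampleRemoteForward(client *Client) error {
+       ln, err := client.Listen("tcp", "0.0.0.0:8080")
+       if err != nil {
+               return err
+       }
+       defer ln.Close()
+       for {
+               conn, err := ln.Accept() // blocks until the peer forwards a connection
+               if err != nil {
+                       return err // io.EOF once the listener is closed
+               }
+               go func(c net.Conn) {
+                       defer c.Close()
+                       io.Copy(c, c) // placeholder handler: echo bytes back
+               }(conn)
+       }
+}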
+
+// Automatic port allocation is broken with OpenSSH before 6.0. See
+// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017.  In
+// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0,
+// rather than the actual port number. This means you can never open
+// two different listeners with auto-allocated ports. We work around
+// this by trying explicit ports until we succeed.
+
+const openSSHPrefix = "OpenSSH_"
+
+var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// isBrokenOpenSSHVersion returns true if the given version string
+// specifies a version of OpenSSH that is known to have a bug in port
+// forwarding.
+func isBrokenOpenSSHVersion(versionStr string) bool {
+       i := strings.Index(versionStr, openSSHPrefix)
+       if i < 0 {
+               return false
+       }
+       i += len(openSSHPrefix)
+       j := i
+       for ; j < len(versionStr); j++ {
+               if versionStr[j] < '0' || versionStr[j] > '9' {
+                       break
+               }
+       }
+       version, _ := strconv.Atoi(versionStr[i:j])
+       return version < 6
+}
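+
+// Illustrative examples (editor's addition, not upstream code): the check
+// keys off the software-version field of the identification string:
+//
+//     isBrokenOpenSSHVersion("SSH-2.0-OpenSSH_5.9p1") // true  (5 < 6)
+//     isBrokenOpenSSHVersion("SSH-2.0-OpenSSH_6.6.1") // false (6 >= 6)
+//     isBrokenOpenSSHVersion("SSH-2.0-Go")            // false (not OpenSSH)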
+
+// autoPortListenWorkaround simulates automatic port allocation by
+// trying random ports repeatedly.
+func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) {
+       var sshListener net.Listener
+       var err error
+       const tries = 10
+       for i := 0; i < tries; i++ {
+               addr := *laddr
+               addr.Port = 1024 + portRandomizer.Intn(60000)
+               sshListener, err = c.ListenTCP(&addr)
+               if err == nil {
+                       laddr.Port = addr.Port
+                       return sshListener, err
+               }
+       }
+       return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err)
+}
+
+// RFC 4254 7.1
+type channelForwardMsg struct {
+       addr  string
+       rport uint32
+}
+
+// ListenTCP requests the remote peer open a listening socket
+// on laddr. Incoming connections will be available by calling
+// Accept on the returned net.Listener.
+func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) {
+       if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) {
+               return c.autoPortListenWorkaround(laddr)
+       }
+
+       m := channelForwardMsg{
+               laddr.IP.String(),
+               uint32(laddr.Port),
+       }
+       // send the global tcpip-forward request (RFC 4254, section 7.1)
+       ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m))
+       if err != nil {
+               return nil, err
+       }
+       if !ok {
+               return nil, errors.New("ssh: tcpip-forward request denied by peer")
+       }
+
+       // If the original port was 0, then the remote side will
+       // supply a real port number in the response.
+       if laddr.Port == 0 {
+               var p struct {
+                       Port uint32
+               }
+               if err := Unmarshal(resp, &p); err != nil {
+                       return nil, err
+               }
+               laddr.Port = int(p.Port)
+       }
+
+       // Register this forward, using the port number we obtained.
+       ch := c.forwards.add(*laddr)
+
+       return &tcpListener{laddr, c, ch}, nil
+}
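+
+// Illustrative note (editor's addition): a zero port asks the server to
+// pick one, and the assigned port is written back into laddr:
+//
+//     laddr, _ := net.ResolveTCPAddr("tcp", "0.0.0.0:0")
+//     ln, err := client.ListenTCP(laddr)
+//     // on success, laddr.Port holds the server-assigned port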
+
+// forwardList stores a mapping between remote
+// forward requests and the tcpListeners.
+type forwardList struct {
+       sync.Mutex
+       entries []forwardEntry
+}
+
+// forwardEntry represents an established mapping of a laddr on a
+// remote ssh server to a channel connected to a tcpListener.
+type forwardEntry struct {
+       laddr net.TCPAddr
+       c     chan forward
+}
+
+// forward represents an incoming forwarded tcpip connection. The
+// arguments to add/remove/lookup should be the address as specified in
+// the original forward request.
+type forward struct {
+       newCh NewChannel   // the ssh client channel underlying this forward
+       raddr *net.TCPAddr // the raddr of the incoming connection
+}
+
+func (l *forwardList) add(addr net.TCPAddr) chan forward {
+       l.Lock()
+       defer l.Unlock()
+       f := forwardEntry{
+               addr,
+               make(chan forward, 1),
+       }
+       l.entries = append(l.entries, f)
+       return f.c
+}
+
+// See RFC 4254, section 7.2
+type forwardedTCPPayload struct {
+       Addr       string
+       Port       uint32
+       OriginAddr string
+       OriginPort uint32
+}
+
+// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr.
+func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) {
+       if port == 0 || port > 65535 {
+               return nil, fmt.Errorf("ssh: port number out of range: %d", port)
+       }
+       ip := net.ParseIP(addr)
+       if ip == nil {
+               return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr)
+       }
+       return &net.TCPAddr{IP: ip, Port: int(port)}, nil
+}
+
+func (l *forwardList) handleChannels(in <-chan NewChannel) {
+       for ch := range in {
+               var payload forwardedTCPPayload
+               if err := Unmarshal(ch.ExtraData(), &payload); err != nil {
+                       ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error())
+                       continue
+               }
+
+               // RFC 4254 section 7.2 specifies that incoming
+               // addresses should list the address, in string
+               // format. It is implied that this should be an IP
+               // address, as it would be impossible to connect to it
+               // otherwise.
+               laddr, err := parseTCPAddr(payload.Addr, payload.Port)
+               if err != nil {
+                       ch.Reject(ConnectionFailed, err.Error())
+                       continue
+               }
+               raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort)
+               if err != nil {
+                       ch.Reject(ConnectionFailed, err.Error())
+                       continue
+               }
+
+               if ok := l.forward(*laddr, *raddr, ch); !ok {
+                       // Section 7.2, implementations MUST reject spurious incoming
+                       // connections.
+                       ch.Reject(Prohibited, "no forward for address")
+                       continue
+               }
+       }
+}
+
+// remove removes the forward entry, and the channel feeding its
+// listener.
+func (l *forwardList) remove(addr net.TCPAddr) {
+       l.Lock()
+       defer l.Unlock()
+       for i, f := range l.entries {
+               if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port {
+                       l.entries = append(l.entries[:i], l.entries[i+1:]...)
+                       close(f.c)
+                       return
+               }
+       }
+}
+
+// closeAll closes and clears all forwards.
+func (l *forwardList) closeAll() {
+       l.Lock()
+       defer l.Unlock()
+       for _, f := range l.entries {
+               close(f.c)
+       }
+       l.entries = nil
+}
+
+func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool {
+       l.Lock()
+       defer l.Unlock()
+       for _, f := range l.entries {
+               if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port {
+                       f.c <- forward{ch, &raddr}
+                       return true
+               }
+       }
+       return false
+}
+
+type tcpListener struct {
+       laddr *net.TCPAddr
+
+       conn *Client
+       in   <-chan forward
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (l *tcpListener) Accept() (net.Conn, error) {
+       s, ok := <-l.in
+       if !ok {
+               return nil, io.EOF
+       }
+       ch, incoming, err := s.newCh.Accept()
+       if err != nil {
+               return nil, err
+       }
+       go DiscardRequests(incoming)
+
+       return &tcpChanConn{
+               Channel: ch,
+               laddr:   l.laddr,
+               raddr:   s.raddr,
+       }, nil
+}
+
+// Close closes the listener.
+func (l *tcpListener) Close() error {
+       m := channelForwardMsg{
+               l.laddr.IP.String(),
+               uint32(l.laddr.Port),
+       }
+
+       // Removing the forward also closes the channel feeding the listener.
+       l.conn.forwards.remove(*l.laddr)
+       ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m))
+       if err == nil && !ok {
+               err = errors.New("ssh: cancel-tcpip-forward failed")
+       }
+       return err
+}
+
+// Addr returns the listener's network address.
+func (l *tcpListener) Addr() net.Addr {
+       return l.laddr
+}
+
+// Dial initiates a connection to the addr from the remote host.
+// The resulting connection has a zero LocalAddr() and RemoteAddr().
+func (c *Client) Dial(n, addr string) (net.Conn, error) {
+       // Parse the address into host and numeric port.
+       host, portString, err := net.SplitHostPort(addr)
+       if err != nil {
+               return nil, err
+       }
+       port, err := strconv.ParseUint(portString, 10, 16)
+       if err != nil {
+               return nil, err
+       }
+       // Use a zero address for local and remote address.
+       zeroAddr := &net.TCPAddr{
+               IP:   net.IPv4zero,
+               Port: 0,
+       }
+       ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port))
+       if err != nil {
+               return nil, err
+       }
+       return &tcpChanConn{
+               Channel: ch,
+               laddr:   zeroAddr,
+               raddr:   zeroAddr,
+       }, nil
+}
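+
+// exampleTunnel is an illustrative sketch (editor's addition, not upstream
+// code): Dial opens a direct-tcpip channel to an address reachable from
+// the remote side, yielding an ordinary net.Conn. The host and request
+// are placeholders.
+func exampleTunnel(client *Client) error {
+       conn, err := client.Dial("tcp", "internal.example.com:80")
+       if err != nil {
+               return err
+       }
+       defer conn.Close()
+       // Speak plain HTTP/1.0 through the tunnel.
+       _, err = io.WriteString(conn, "GET / HTTP/1.0\r\nHost: internal.example.com\r\n\r\n")
+       return err
+}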
+
+// DialTCP connects to the remote address raddr on the network net,
+// which must be "tcp", "tcp4", or "tcp6".  If laddr is not nil, it is used
+// as the local address for the connection.
+func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) {
+       if laddr == nil {
+               laddr = &net.TCPAddr{
+                       IP:   net.IPv4zero,
+                       Port: 0,
+               }
+       }
+       ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port)
+       if err != nil {
+               return nil, err
+       }
+       return &tcpChanConn{
+               Channel: ch,
+               laddr:   laddr,
+               raddr:   raddr,
+       }, nil
+}
+
+// RFC 4254 7.2
+type channelOpenDirectMsg struct {
+       raddr string
+       rport uint32
+       laddr string
+       lport uint32
+}
+
+func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) {
+       msg := channelOpenDirectMsg{
+               raddr: raddr,
+               rport: uint32(rport),
+               laddr: laddr,
+               lport: uint32(lport),
+       }
+       ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg))
+       if err != nil {
+               return nil, err
+       }
+       go DiscardRequests(in)
+       return ch, err
+}
+
+type tcpChan struct {
+       Channel // the backing channel
+}
+
+// tcpChanConn fulfills the net.Conn interface without
+// the tcpChan having to hold laddr or raddr directly.
+type tcpChanConn struct {
+       Channel
+       laddr, raddr net.Addr
+}
+
+// LocalAddr returns the local network address.
+func (t *tcpChanConn) LocalAddr() net.Addr {
+       return t.laddr
+}
+
+// RemoteAddr returns the remote network address.
+func (t *tcpChanConn) RemoteAddr() net.Addr {
+       return t.raddr
+}
+
+// SetDeadline sets the read and write deadlines associated
+// with the connection.
+func (t *tcpChanConn) SetDeadline(deadline time.Time) error {
+       if err := t.SetReadDeadline(deadline); err != nil {
+               return err
+       }
+       return t.SetWriteDeadline(deadline)
+}
+
+// SetReadDeadline exists to satisfy the net.Conn interface
+// but is not implemented by this type.  It always returns an error.
+func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error {
+       return errors.New("ssh: tcpChan: deadline not supported")
+}
+
+// SetWriteDeadline exists to satisfy the net.Conn interface
+// but is not implemented by this type.  It always returns an error.
+func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error {
+       return errors.New("ssh: tcpChan: deadline not supported")
+}
diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go
new file mode 100644 (file)
index 0000000..f9780e0
--- /dev/null
@@ -0,0 +1,375 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssh
+
+import (
+       "bufio"
+       "errors"
+       "io"
+       "log"
+)
+
+// debugTransport if set, will print packet types as they go over the
+// wire. No message decoding is done, to minimize the impact on timing.
+const debugTransport = false
+
+const (
+       gcmCipherID    = "aes128-gcm@openssh.com"
+       aes128cbcID    = "aes128-cbc"
+       tripledescbcID = "3des-cbc"
+)
+
+// packetConn represents a transport that implements packet-based
+// operations.
+type packetConn interface {
+       // Encrypt and send a packet of data to the remote peer.
+       writePacket(packet []byte) error
+
+       // Read a packet from the connection. The read is blocking,
+       // i.e. if error is nil, then the returned byte slice is
+       // always non-empty.
+       readPacket() ([]byte, error)
+
+       // Close closes the write-side of the connection.
+       Close() error
+}
+
+// transport is the keyingTransport that implements the SSH packet
+// protocol.
+type transport struct {
+       reader connectionState
+       writer connectionState
+
+       bufReader *bufio.Reader
+       bufWriter *bufio.Writer
+       rand      io.Reader
+       isClient  bool
+       io.Closer
+}
+
+// packetCipher represents a combination of SSH encryption/MAC
+// protocol.  A single instance should be used for one direction only.
+type packetCipher interface {
+       // writePacket encrypts the packet and writes it to w. The
+       // contents of the packet are generally scrambled.
+       writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error
+
+       // readPacket reads and decrypts a packet of data. The
+       // returned packet may be overwritten by future calls of
+       // readPacket.
+       readPacket(seqnum uint32, r io.Reader) ([]byte, error)
+}
+
+// connectionState represents one side (read or write) of the
+// connection. This is necessary because each direction has its own
+// keys, and can even have its own algorithms.
+type connectionState struct {
+       packetCipher
+       seqNum           uint32
+       dir              direction
+       pendingKeyChange chan packetCipher
+}
+
+// prepareKeyChange sets up key material for a keychange. The key changes in
+// both directions are triggered by reading and writing a msgNewKey packet
+// respectively.
+func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error {
+       if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil {
+               return err
+       } else {
+               t.reader.pendingKeyChange <- ciph
+       }
+
+       if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil {
+               return err
+       } else {
+               t.writer.pendingKeyChange <- ciph
+       }
+
+       return nil
+}
+
+func (t *transport) printPacket(p []byte, write bool) {
+       if len(p) == 0 {
+               return
+       }
+       who := "server"
+       if t.isClient {
+               who = "client"
+       }
+       what := "read"
+       if write {
+               what = "write"
+       }
+
+       log.Println(what, who, p[0])
+}
+
+// readPacket reads and decrypts the next packet.
+func (t *transport) readPacket() (p []byte, err error) {
+       for {
+               p, err = t.reader.readPacket(t.bufReader)
+               if err != nil {
+                       break
+               }
+               if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) {
+                       break
+               }
+       }
+       if debugTransport {
+               t.printPacket(p, false)
+       }
+
+       return p, err
+}
+
+func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
+       packet, err := s.packetCipher.readPacket(s.seqNum, r)
+       s.seqNum++
+       if err == nil && len(packet) == 0 {
+               err = errors.New("ssh: zero length packet")
+       }
+
+       if len(packet) > 0 {
+               switch packet[0] {
+               case msgNewKeys:
+                       select {
+                       case cipher := <-s.pendingKeyChange:
+                               s.packetCipher = cipher
+                       default:
+                               return nil, errors.New("ssh: got bogus newkeys message")
+                       }
+
+               case msgDisconnect:
+                       // Transform a disconnect message into an
+                       // error. Since this is the lowest level at which
+                       // we interpret message types, doing it here
+                       // ensures that we don't have to handle it
+                       // elsewhere.
+                       var msg disconnectMsg
+                       if err := Unmarshal(packet, &msg); err != nil {
+                               return nil, err
+                       }
+                       return nil, &msg
+               }
+       }
+
+       // The packet may point to an internal buffer, so copy the
+       // packet out here.
+       fresh := make([]byte, len(packet))
+       copy(fresh, packet)
+
+       return fresh, err
+}
+
+func (t *transport) writePacket(packet []byte) error {
+       if debugTransport {
+               t.printPacket(packet, true)
+       }
+       return t.writer.writePacket(t.bufWriter, t.rand, packet)
+}
+
+func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error {
+       changeKeys := len(packet) > 0 && packet[0] == msgNewKeys
+
+       err := s.packetCipher.writePacket(s.seqNum, w, rand, packet)
+       if err != nil {
+               return err
+       }
+       if err = w.Flush(); err != nil {
+               return err
+       }
+       s.seqNum++
+       if changeKeys {
+               select {
+               case cipher := <-s.pendingKeyChange:
+                       s.packetCipher = cipher
+               default:
+                       panic("ssh: no key material for msgNewKeys")
+               }
+       }
+       return err
+}
+
+func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport {
+       t := &transport{
+               bufReader: bufio.NewReader(rwc),
+               bufWriter: bufio.NewWriter(rwc),
+               rand:      rand,
+               reader: connectionState{
+                       packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
+                       pendingKeyChange: make(chan packetCipher, 1),
+               },
+               writer: connectionState{
+                       packetCipher:     &streamPacketCipher{cipher: noneCipher{}},
+                       pendingKeyChange: make(chan packetCipher, 1),
+               },
+               Closer: rwc,
+       }
+       t.isClient = isClient
+
+       if isClient {
+               t.reader.dir = serverKeys
+               t.writer.dir = clientKeys
+       } else {
+               t.reader.dir = clientKeys
+               t.writer.dir = serverKeys
+       }
+
+       return t
+}
+
+type direction struct {
+       ivTag     []byte
+       keyTag    []byte
+       macKeyTag []byte
+}
+
+var (
+       serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}}
+       clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}}
+)
+
+// generateKeys generates key material for IV, MAC and encryption.
+func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) {
+       cipherMode := cipherModes[algs.Cipher]
+       macMode := macModes[algs.MAC]
+
+       iv = make([]byte, cipherMode.ivSize)
+       key = make([]byte, cipherMode.keySize)
+       macKey = make([]byte, macMode.keySize)
+
+       generateKeyMaterial(iv, d.ivTag, kex)
+       generateKeyMaterial(key, d.keyTag, kex)
+       generateKeyMaterial(macKey, d.macKeyTag, kex)
+       return
+}
+
+// newPacketCipher creates a packetCipher using the cipher and MAC keys
+// derived from kex.K, kex.H and sessionId, as described in RFC 4253,
+// section 6.4. direction should either be serverKeys (to setup
+// server->client keys) or clientKeys (for client->server keys).
+func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
+       iv, key, macKey := generateKeys(d, algs, kex)
+
+       if algs.Cipher == gcmCipherID {
+               return newGCMCipher(iv, key, macKey)
+       }
+
+       if algs.Cipher == aes128cbcID {
+               return newAESCBCCipher(iv, key, macKey, algs)
+       }
+
+       if algs.Cipher == tripledescbcID {
+               return newTripleDESCBCCipher(iv, key, macKey, algs)
+       }
+
+       c := &streamPacketCipher{
+               mac: macModes[algs.MAC].new(macKey),
+               etm: macModes[algs.MAC].etm,
+       }
+       c.macResult = make([]byte, c.mac.Size())
+
+       var err error
+       c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv)
+       if err != nil {
+               return nil, err
+       }
+
+       return c, nil
+}
+
+// generateKeyMaterial fills out with key material generated from tag, K, H
+// and sessionId, as specified in RFC 4253, section 7.2.
+func generateKeyMaterial(out, tag []byte, r *kexResult) {
+       var digestsSoFar []byte
+
+       h := r.Hash.New()
+       for len(out) > 0 {
+               h.Reset()
+               h.Write(r.K)
+               h.Write(r.H)
+
+               if len(digestsSoFar) == 0 {
+                       h.Write(tag)
+                       h.Write(r.SessionID)
+               } else {
+                       h.Write(digestsSoFar)
+               }
+
+               digest := h.Sum(nil)
+               n := copy(out, digest)
+               out = out[n:]
+               if len(out) > 0 {
+                       digestsSoFar = append(digestsSoFar, digest...)
+               }
+       }
+}
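+
+// Illustrative note (editor's addition): RFC 4253 section 7.2 defines the
+// expansion implemented above as
+//
+//     K1 = HASH(K || H || tag || session_id)
+//     Kn = HASH(K || H || K1 || ... || K(n-1))
+//
+// with out filled by K1 || K2 || ... and truncated to len(out). With a
+// 20-byte SHA-1 digest, a 32-byte key therefore needs two rounds.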
+
+const packageVersion = "SSH-2.0-Go"
+
+// exchangeVersions sends and receives a version line. The versionLine
+// string should be US ASCII, start with "SSH-2.0-", and should not
+// include a newline. It returns the other side's version line.
+func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) {
+       // Contrary to the RFC, we do not ignore lines that don't
+       // start with "SSH-2.0-" to make the library usable with
+       // nonconforming servers.
+       for _, c := range versionLine {
+               // The spec disallows non US-ASCII chars, and
+               // specifically forbids null chars.
+               if c < 32 {
+                       return nil, errors.New("ssh: junk character in version line")
+               }
+       }
+       if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil {
+               return
+       }
+
+       them, err = readVersion(rw)
+       return them, err
+}
+
+// maxVersionStringBytes is the maximum number of bytes that we'll
+// accept as a version string. RFC 4253 section 4.2 limits this to
+// 255 characters.
+const maxVersionStringBytes = 255
+
+// Read version string as specified by RFC 4253, section 4.2.
+func readVersion(r io.Reader) ([]byte, error) {
+       versionString := make([]byte, 0, 64)
+       var ok bool
+       var buf [1]byte
+
+       for len(versionString) < maxVersionStringBytes {
+               _, err := io.ReadFull(r, buf[:])
+               if err != nil {
+                       return nil, err
+               }
+               // The RFC says that the version should be terminated with \r\n
+               // but several SSH servers actually only send a \n.
+               if buf[0] == '\n' {
+                       ok = true
+                       break
+               }
+
+               // Non-ASCII chars are disallowed by the spec, but we are
+               // lenient: since Go strings are not null-terminated, stray
+               // bytes here are harmless.
+
+               // The RFC allows a comment after a space, however,
+               // all of it (version and comments) goes into the
+               // session hash.
+               versionString = append(versionString, buf[0])
+       }
+
+       if !ok {
+               return nil, errors.New("ssh: overflow reading version string")
+       }
+
+       // There might be a '\r' on the end which we should remove.
+       if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' {
+               versionString = versionString[:len(versionString)-1]
+       }
+       return versionString, nil
+}
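+
+// Illustrative examples (editor's addition, not upstream code): readVersion
+// accepts both CRLF- and bare-LF-terminated identification lines and strips
+// the trailing '\r' (strings.NewReader is assumed for the sketch):
+//
+//     readVersion(strings.NewReader("SSH-2.0-OpenSSH_7.4\r\n")) // "SSH-2.0-OpenSSH_7.4"
+//     readVersion(strings.NewReader("SSH-2.0-Go\n"))            // "SSH-2.0-Go"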
index ca37577cf29d5f5935709c4e498ae85a4d3d4b70..8377e9426b199e0672ac15ad21e11e5ca2248249 100644 (file)
                        "revision": "453249f01cfeb54c3d549ddb75ff152ca243f9d8",
                        "revisionTime": "2017-02-08T20:51:15Z"
                }
-       ]
+       ],
+       "rootPath": "github.com/terraform-providers/terraform-provider-statuscake"
 }